1 /* 2 * QLogic qlcnic NIC Driver 3 * Copyright (c) 2009-2013 QLogic Corporation 4 * 5 * See LICENSE.qlcnic for copyright and licensing details. 6 */ 7 8 #include "qlcnic.h" 9 10 static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = { 11 {QLCNIC_CMD_CREATE_RX_CTX, 4, 1}, 12 {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1}, 13 {QLCNIC_CMD_CREATE_TX_CTX, 4, 1}, 14 {QLCNIC_CMD_DESTROY_TX_CTX, 2, 1}, 15 {QLCNIC_CMD_INTRPT_TEST, 4, 1}, 16 {QLCNIC_CMD_SET_MTU, 4, 1}, 17 {QLCNIC_CMD_READ_PHY, 4, 2}, 18 {QLCNIC_CMD_WRITE_PHY, 5, 1}, 19 {QLCNIC_CMD_READ_HW_REG, 4, 1}, 20 {QLCNIC_CMD_GET_FLOW_CTL, 4, 2}, 21 {QLCNIC_CMD_SET_FLOW_CTL, 4, 1}, 22 {QLCNIC_CMD_READ_MAX_MTU, 4, 2}, 23 {QLCNIC_CMD_READ_MAX_LRO, 4, 2}, 24 {QLCNIC_CMD_MAC_ADDRESS, 4, 3}, 25 {QLCNIC_CMD_GET_PCI_INFO, 4, 1}, 26 {QLCNIC_CMD_GET_NIC_INFO, 4, 1}, 27 {QLCNIC_CMD_SET_NIC_INFO, 4, 1}, 28 {QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3}, 29 {QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1}, 30 {QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3}, 31 {QLCNIC_CMD_SET_PORTMIRRORING, 4, 1}, 32 {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1}, 33 {QLCNIC_CMD_GET_MAC_STATS, 4, 1}, 34 {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3}, 35 {QLCNIC_CMD_GET_ESWITCH_STATS, 5, 1}, 36 {QLCNIC_CMD_CONFIG_PORT, 4, 1}, 37 {QLCNIC_CMD_TEMP_SIZE, 4, 4}, 38 {QLCNIC_CMD_GET_TEMP_HDR, 4, 1}, 39 {QLCNIC_CMD_SET_DRV_VER, 4, 1}, 40 }; 41 42 static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw) 43 { 44 return (ahw->pci_func & 0xff) | ((ahw->fw_hal_version & 0xff) << 8) | 45 (0xcafe << 16); 46 } 47 48 /* Allocate mailbox registers */ 49 int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx, 50 struct qlcnic_adapter *adapter, u32 type) 51 { 52 int i, size; 53 const struct qlcnic_mailbox_metadata *mbx_tbl; 54 55 mbx_tbl = qlcnic_mbx_tbl; 56 size = ARRAY_SIZE(qlcnic_mbx_tbl); 57 for (i = 0; i < size; i++) { 58 if (type == mbx_tbl[i].cmd) { 59 mbx->req.num = mbx_tbl[i].in_args; 60 mbx->rsp.num = mbx_tbl[i].out_args; 61 mbx->req.arg = kcalloc(mbx->req.num, 62 sizeof(u32), 
GFP_ATOMIC); 63 if (!mbx->req.arg) 64 return -ENOMEM; 65 mbx->rsp.arg = kcalloc(mbx->rsp.num, 66 sizeof(u32), GFP_ATOMIC); 67 if (!mbx->rsp.arg) { 68 kfree(mbx->req.arg); 69 mbx->req.arg = NULL; 70 return -ENOMEM; 71 } 72 memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num); 73 memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num); 74 mbx->req.arg[0] = type; 75 break; 76 } 77 } 78 return 0; 79 } 80 81 /* Free up mailbox registers */ 82 void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd) 83 { 84 kfree(cmd->req.arg); 85 cmd->req.arg = NULL; 86 kfree(cmd->rsp.arg); 87 cmd->rsp.arg = NULL; 88 } 89 90 static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func) 91 { 92 int i; 93 94 for (i = 0; i < adapter->ahw->act_pci_func; i++) { 95 if (adapter->npars[i].pci_func == pci_func) 96 return i; 97 } 98 99 return -1; 100 } 101 102 static u32 103 qlcnic_poll_rsp(struct qlcnic_adapter *adapter) 104 { 105 u32 rsp; 106 int timeout = 0; 107 108 do { 109 /* give atleast 1ms for firmware to respond */ 110 mdelay(1); 111 112 if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT) 113 return QLCNIC_CDRP_RSP_TIMEOUT; 114 115 rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET); 116 } while (!QLCNIC_CDRP_IS_RSP(rsp)); 117 118 return rsp; 119 } 120 121 int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter, 122 struct qlcnic_cmd_args *cmd) 123 { 124 int i; 125 u32 rsp; 126 u32 signature; 127 struct pci_dev *pdev = adapter->pdev; 128 struct qlcnic_hardware_context *ahw = adapter->ahw; 129 const char *fmt; 130 131 signature = qlcnic_get_cmd_signature(ahw); 132 133 /* Acquire semaphore before accessing CRB */ 134 if (qlcnic_api_lock(adapter)) { 135 cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT; 136 return cmd->rsp.arg[0]; 137 } 138 139 QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature); 140 for (i = 1; i < QLCNIC_CDRP_MAX_ARGS; i++) 141 QLCWR32(adapter, QLCNIC_CDRP_ARG(i), cmd->req.arg[i]); 142 QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, 143 QLCNIC_CDRP_FORM_CMD(cmd->req.arg[0])); 144 rsp = 
qlcnic_poll_rsp(adapter); 145 146 if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) { 147 dev_err(&pdev->dev, "card response timeout.\n"); 148 cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT; 149 } else if (rsp == QLCNIC_CDRP_RSP_FAIL) { 150 cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1)); 151 switch (cmd->rsp.arg[0]) { 152 case QLCNIC_RCODE_INVALID_ARGS: 153 fmt = "CDRP invalid args: [%d]\n"; 154 break; 155 case QLCNIC_RCODE_NOT_SUPPORTED: 156 case QLCNIC_RCODE_NOT_IMPL: 157 fmt = "CDRP command not supported: [%d]\n"; 158 break; 159 case QLCNIC_RCODE_NOT_PERMITTED: 160 fmt = "CDRP requested action not permitted: [%d]\n"; 161 break; 162 case QLCNIC_RCODE_INVALID: 163 fmt = "CDRP invalid or unknown cmd received: [%d]\n"; 164 break; 165 case QLCNIC_RCODE_TIMEOUT: 166 fmt = "CDRP command timeout: [%d]\n"; 167 break; 168 default: 169 fmt = "CDRP command failed: [%d]\n"; 170 break; 171 } 172 dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]); 173 } else if (rsp == QLCNIC_CDRP_RSP_OK) 174 cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS; 175 176 for (i = 1; i < cmd->rsp.num; i++) 177 cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i)); 178 179 /* Release semaphore */ 180 qlcnic_api_unlock(adapter); 181 return cmd->rsp.arg[0]; 182 } 183 184 int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter) 185 { 186 struct qlcnic_cmd_args cmd; 187 u32 arg1, arg2, arg3; 188 char drv_string[12]; 189 int err = 0; 190 191 memset(drv_string, 0, sizeof(drv_string)); 192 snprintf(drv_string, sizeof(drv_string), "%d"".""%d"".""%d", 193 _QLCNIC_LINUX_MAJOR, _QLCNIC_LINUX_MINOR, 194 _QLCNIC_LINUX_SUBVERSION); 195 196 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_DRV_VER); 197 memcpy(&arg1, drv_string, sizeof(u32)); 198 memcpy(&arg2, drv_string + 4, sizeof(u32)); 199 memcpy(&arg3, drv_string + 8, sizeof(u32)); 200 201 cmd.req.arg[1] = arg1; 202 cmd.req.arg[2] = arg2; 203 cmd.req.arg[3] = arg3; 204 205 err = qlcnic_issue_cmd(adapter, &cmd); 206 if (err) { 207 dev_info(&adapter->pdev->dev, 208 "Failed to set 
driver version in firmware\n"); 209 return -EIO; 210 } 211 212 return 0; 213 } 214 215 int 216 qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu) 217 { 218 int err = 0; 219 struct qlcnic_cmd_args cmd; 220 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; 221 222 if (recv_ctx->state != QLCNIC_HOST_CTX_STATE_ACTIVE) 223 return err; 224 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU); 225 cmd.req.arg[1] = recv_ctx->context_id; 226 cmd.req.arg[2] = mtu; 227 228 err = qlcnic_issue_cmd(adapter, &cmd); 229 if (err) { 230 dev_err(&adapter->pdev->dev, "Failed to set mtu\n"); 231 err = -EIO; 232 } 233 qlcnic_free_mbx_args(&cmd); 234 return err; 235 } 236 237 int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter) 238 { 239 void *addr; 240 struct qlcnic_hostrq_rx_ctx *prq; 241 struct qlcnic_cardrsp_rx_ctx *prsp; 242 struct qlcnic_hostrq_rds_ring *prq_rds; 243 struct qlcnic_hostrq_sds_ring *prq_sds; 244 struct qlcnic_cardrsp_rds_ring *prsp_rds; 245 struct qlcnic_cardrsp_sds_ring *prsp_sds; 246 struct qlcnic_host_rds_ring *rds_ring; 247 struct qlcnic_host_sds_ring *sds_ring; 248 struct qlcnic_cmd_args cmd; 249 250 dma_addr_t hostrq_phys_addr, cardrsp_phys_addr; 251 u64 phys_addr; 252 253 u8 i, nrds_rings, nsds_rings; 254 u16 temp_u16; 255 size_t rq_size, rsp_size; 256 u32 cap, reg, val, reg2; 257 int err; 258 259 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; 260 261 nrds_rings = adapter->max_rds_rings; 262 nsds_rings = adapter->max_sds_rings; 263 264 rq_size = 265 SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings, 266 nsds_rings); 267 rsp_size = 268 SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings, 269 nsds_rings); 270 271 addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, 272 &hostrq_phys_addr, GFP_KERNEL); 273 if (addr == NULL) 274 return -ENOMEM; 275 prq = addr; 276 277 addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, 278 &cardrsp_phys_addr, GFP_KERNEL); 279 if (addr == NULL) { 280 err = 
-ENOMEM;
		goto out_free_rq;
	}
	prsp = addr;

	/* Tell firmware where to DMA its response descriptor */
	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);

	cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN
	       | QLCNIC_CAP0_VALIDOFF);
	cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);

	/* VALIDOFF capability: fields up to msix_handler are valid */
	temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
	prq->valid_field_offset = cpu_to_le16(temp_u16);
	prq->txrx_sds_binding = nsds_rings - 1;

	prq->capabilities[0] = cpu_to_le32(cap);
	prq->host_int_crb_mode =
		cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
	prq->host_rds_crb_mode =
		cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);

	prq->num_rds_rings = cpu_to_le16(nrds_rings);
	prq->num_sds_rings = cpu_to_le16(nsds_rings);
	prq->rds_ring_offset = 0;

	/* SDS ring descriptors follow the RDS descriptors in prq->data */
	val = le32_to_cpu(prq->rds_ring_offset) +
	      (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
	prq->sds_ring_offset = cpu_to_le32(val);

	prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
						    le32_to_cpu(prq->rds_ring_offset));

	/* Describe each host RDS (receive descriptor) ring to firmware */
	for (i = 0; i < nrds_rings; i++) {

		rds_ring = &recv_ctx->rds_rings[i];
		rds_ring->producer = 0;

		prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
		prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
		prq_rds[i].ring_kind = cpu_to_le32(i);
		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
	}

	prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
						    le32_to_cpu(prq->sds_ring_offset));

	/* Describe each host SDS (status descriptor) ring; status rings are
	 * reset to a clean state before handing them to firmware.
	 */
	for (i = 0; i < nsds_rings; i++) {

		sds_ring = &recv_ctx->sds_rings[i];
		sds_ring->consumer = 0;
		memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));

		prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
		prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
		prq_sds[i].msi_index = cpu_to_le16(i);
	}

	phys_addr = hostrq_phys_addr;
	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX);
	cmd.req.arg[1] = MSD(phys_addr);
	cmd.req.arg[2] = LSD(phys_addr);
	cmd.req.arg[3] = rq_size;
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Failed to create rx ctx in firmware%d\n", err);
		goto out_free_rsp;
	}

	/* Firmware response: per-ring CRB offsets for the RDS producers */
	prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
		    &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
		rds_ring = &recv_ctx->rds_rings[i];

		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
		rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg;
	}

	/* Per-ring CRB offsets for SDS consumers and interrupt masks */
	prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
		    &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
		sds_ring = &recv_ctx->sds_rings[i];

		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
		reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);

		sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg;
		sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2;
	}

	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
	recv_ctx->context_id = le16_to_cpu(prsp->context_id);
	recv_ctx->virt_port = prsp->virt_port;

out_free_rsp:
	dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
			  cardrsp_phys_addr);
	qlcnic_free_mbx_args(&cmd);
out_free_rq:
	dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
	return err;
}

/* Ask firmware to tear down the receive context; the host-side state is
 * marked freed even if the firmware command fails.
 */
static void
qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
{
	int err;
	struct qlcnic_cmd_args cmd;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX);
	cmd.req.arg[1] = recv_ctx->context_id;
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err)
		dev_err(&adapter->pdev->dev,
			"Failed to destroy rx ctx in firmware\n");

	recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
qlcnic_free_mbx_args(&cmd); 401 } 402 403 int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter, 404 struct qlcnic_host_tx_ring *tx_ring, 405 int ring) 406 { 407 struct qlcnic_hostrq_tx_ctx *prq; 408 struct qlcnic_hostrq_cds_ring *prq_cds; 409 struct qlcnic_cardrsp_tx_ctx *prsp; 410 void *rq_addr, *rsp_addr; 411 size_t rq_size, rsp_size; 412 u32 temp; 413 struct qlcnic_cmd_args cmd; 414 int err; 415 u64 phys_addr; 416 dma_addr_t rq_phys_addr, rsp_phys_addr; 417 418 /* reset host resources */ 419 tx_ring->producer = 0; 420 tx_ring->sw_consumer = 0; 421 *(tx_ring->hw_consumer) = 0; 422 423 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); 424 rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, 425 &rq_phys_addr, GFP_KERNEL); 426 if (!rq_addr) 427 return -ENOMEM; 428 429 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); 430 rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, 431 &rsp_phys_addr, GFP_KERNEL); 432 if (!rsp_addr) { 433 err = -ENOMEM; 434 goto out_free_rq; 435 } 436 437 memset(rq_addr, 0, rq_size); 438 prq = rq_addr; 439 440 memset(rsp_addr, 0, rsp_size); 441 prsp = rsp_addr; 442 443 prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr); 444 445 temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN | 446 QLCNIC_CAP0_LSO); 447 prq->capabilities[0] = cpu_to_le32(temp); 448 449 prq->host_int_crb_mode = 450 cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED); 451 prq->msi_index = 0; 452 453 prq->interrupt_ctl = 0; 454 prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr); 455 456 prq_cds = &prq->cds_ring; 457 458 prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr); 459 prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc); 460 461 phys_addr = rq_phys_addr; 462 463 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX); 464 cmd.req.arg[1] = MSD(phys_addr); 465 cmd.req.arg[2] = LSD(phys_addr); 466 cmd.req.arg[3] = rq_size; 467 err = qlcnic_issue_cmd(adapter, &cmd); 468 469 if (err == 
QLCNIC_RCODE_SUCCESS) { 470 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); 471 tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp; 472 tx_ring->ctx_id = le16_to_cpu(prsp->context_id); 473 } else { 474 dev_err(&adapter->pdev->dev, 475 "Failed to create tx ctx in firmware%d\n", err); 476 err = -EIO; 477 } 478 479 dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr, 480 rsp_phys_addr); 481 482 out_free_rq: 483 dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr); 484 qlcnic_free_mbx_args(&cmd); 485 486 return err; 487 } 488 489 static void 490 qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter, 491 struct qlcnic_host_tx_ring *tx_ring) 492 { 493 struct qlcnic_cmd_args cmd; 494 495 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX); 496 cmd.req.arg[1] = tx_ring->ctx_id; 497 if (qlcnic_issue_cmd(adapter, &cmd)) 498 dev_err(&adapter->pdev->dev, 499 "Failed to destroy tx ctx in firmware\n"); 500 qlcnic_free_mbx_args(&cmd); 501 } 502 503 int 504 qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config) 505 { 506 int err; 507 struct qlcnic_cmd_args cmd; 508 509 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT); 510 cmd.req.arg[1] = config; 511 err = qlcnic_issue_cmd(adapter, &cmd); 512 qlcnic_free_mbx_args(&cmd); 513 return err; 514 } 515 516 int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter) 517 { 518 void *addr; 519 int err, ring; 520 struct qlcnic_recv_context *recv_ctx; 521 struct qlcnic_host_rds_ring *rds_ring; 522 struct qlcnic_host_sds_ring *sds_ring; 523 struct qlcnic_host_tx_ring *tx_ring; 524 __le32 *ptr; 525 526 struct pci_dev *pdev = adapter->pdev; 527 528 recv_ctx = adapter->recv_ctx; 529 530 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { 531 tx_ring = &adapter->tx_ring[ring]; 532 ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32), 533 &tx_ring->hw_cons_phys_addr, 534 GFP_KERNEL); 535 536 if (ptr == NULL) { 537 dev_err(&pdev->dev, "failed to allocate 
tx consumer\n"); 538 return -ENOMEM; 539 } 540 tx_ring->hw_consumer = ptr; 541 /* cmd desc ring */ 542 addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring), 543 &tx_ring->phys_addr, 544 GFP_KERNEL); 545 546 if (addr == NULL) { 547 dev_err(&pdev->dev, 548 "failed to allocate tx desc ring\n"); 549 err = -ENOMEM; 550 goto err_out_free; 551 } 552 553 tx_ring->desc_head = addr; 554 } 555 556 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 557 rds_ring = &recv_ctx->rds_rings[ring]; 558 addr = dma_alloc_coherent(&adapter->pdev->dev, 559 RCV_DESC_RINGSIZE(rds_ring), 560 &rds_ring->phys_addr, GFP_KERNEL); 561 if (addr == NULL) { 562 dev_err(&pdev->dev, 563 "failed to allocate rds ring [%d]\n", ring); 564 err = -ENOMEM; 565 goto err_out_free; 566 } 567 rds_ring->desc_head = addr; 568 569 } 570 571 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 572 sds_ring = &recv_ctx->sds_rings[ring]; 573 574 addr = dma_alloc_coherent(&adapter->pdev->dev, 575 STATUS_DESC_RINGSIZE(sds_ring), 576 &sds_ring->phys_addr, GFP_KERNEL); 577 if (addr == NULL) { 578 dev_err(&pdev->dev, 579 "failed to allocate sds ring [%d]\n", ring); 580 err = -ENOMEM; 581 goto err_out_free; 582 } 583 sds_ring->desc_head = addr; 584 } 585 586 return 0; 587 588 err_out_free: 589 qlcnic_free_hw_resources(adapter); 590 return err; 591 } 592 593 int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev) 594 { 595 int i, err, ring; 596 597 if (dev->flags & QLCNIC_NEED_FLR) { 598 pci_reset_function(dev->pdev); 599 dev->flags &= ~QLCNIC_NEED_FLR; 600 } 601 602 if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) { 603 if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) { 604 err = qlcnic_83xx_config_intrpt(dev, 1); 605 if (err) 606 return err; 607 } 608 } 609 610 err = qlcnic_fw_cmd_create_rx_ctx(dev); 611 if (err) 612 goto err_out; 613 614 for (ring = 0; ring < dev->max_drv_tx_rings; ring++) { 615 err = qlcnic_fw_cmd_create_tx_ctx(dev, 616 &dev->tx_ring[ring], 617 ring); 618 if (err) { 619 
qlcnic_fw_cmd_destroy_rx_ctx(dev); 620 if (ring == 0) 621 goto err_out; 622 623 for (i = 0; i < ring; i++) 624 qlcnic_fw_cmd_destroy_tx_ctx(dev, 625 &dev->tx_ring[i]); 626 627 goto err_out; 628 } 629 } 630 631 set_bit(__QLCNIC_FW_ATTACHED, &dev->state); 632 return 0; 633 634 err_out: 635 if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) { 636 if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) 637 qlcnic_83xx_config_intrpt(dev, 0); 638 } 639 return err; 640 } 641 642 void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter) 643 { 644 int ring; 645 646 if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) { 647 qlcnic_fw_cmd_destroy_rx_ctx(adapter); 648 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) 649 qlcnic_fw_cmd_destroy_tx_ctx(adapter, 650 &adapter->tx_ring[ring]); 651 652 if (qlcnic_83xx_check(adapter) && 653 (adapter->flags & QLCNIC_MSIX_ENABLED)) { 654 if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) 655 qlcnic_83xx_config_intrpt(adapter, 0); 656 } 657 /* Allow dma queues to drain after context reset */ 658 mdelay(20); 659 } 660 } 661 662 void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter) 663 { 664 struct qlcnic_recv_context *recv_ctx; 665 struct qlcnic_host_rds_ring *rds_ring; 666 struct qlcnic_host_sds_ring *sds_ring; 667 struct qlcnic_host_tx_ring *tx_ring; 668 int ring; 669 670 recv_ctx = adapter->recv_ctx; 671 672 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { 673 tx_ring = &adapter->tx_ring[ring]; 674 if (tx_ring->hw_consumer != NULL) { 675 dma_free_coherent(&adapter->pdev->dev, sizeof(u32), 676 tx_ring->hw_consumer, 677 tx_ring->hw_cons_phys_addr); 678 679 tx_ring->hw_consumer = NULL; 680 } 681 682 if (tx_ring->desc_head != NULL) { 683 dma_free_coherent(&adapter->pdev->dev, 684 TX_DESC_RINGSIZE(tx_ring), 685 tx_ring->desc_head, 686 tx_ring->phys_addr); 687 tx_ring->desc_head = NULL; 688 } 689 } 690 691 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 692 rds_ring = 
&recv_ctx->rds_rings[ring];

		if (rds_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
					  RCV_DESC_RINGSIZE(rds_ring),
					  rds_ring->desc_head,
					  rds_ring->phys_addr);
			rds_ring->desc_head = NULL;
		}
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (sds_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
					  STATUS_DESC_RINGSIZE(sds_ring),
					  sds_ring->desc_head,
					  sds_ring->phys_addr);
			sds_ring->desc_head = NULL;
		}
	}
}


/* Query the firmware for this function's MAC address.
 * The address comes back big-endian-packed in two response words:
 * bytes 0-1 from rsp.arg[2], bytes 2-5 from rsp.arg[1].
 * Returns QLCNIC_RCODE_SUCCESS (0) on success, -EIO on failure.
 */
int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
{
	int err, i;
	struct qlcnic_cmd_args cmd;
	u32 mac_low, mac_high;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
	/* BIT_8 flags the request as "by pci function" */
	cmd.req.arg[1] = adapter->ahw->pci_func | BIT_8;
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err == QLCNIC_RCODE_SUCCESS) {
		mac_low = cmd.rsp.arg[1];
		mac_high = cmd.rsp.arg[2];

		for (i = 0; i < 2; i++)
			mac[i] = (u8) (mac_high >> ((1 - i) * 8));
		for (i = 2; i < 6; i++)
			mac[i] = (u8) (mac_low >> ((5 - i) * 8));
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get mac address%d\n", err);
		err = -EIO;
	}
	qlcnic_free_mbx_args(&cmd);
	return err;
}

/* Get info of a NIC partition.
 * Firmware DMA-writes a qlcnic_info_le record into a coherent buffer;
 * fields are converted from little-endian into the caller's npar_info.
 * Returns 0 on success, -ENOMEM or -EIO on failure.
 */
int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
			     struct qlcnic_info *npar_info, u8 func_id)
{
	int err;
	dma_addr_t nic_dma_t;
	const struct qlcnic_info_le *nic_info;
	void *nic_info_addr;
	struct qlcnic_cmd_args cmd;
	size_t nic_size = sizeof(struct qlcnic_info_le);

	nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
					   &nic_dma_t, GFP_KERNEL);
	if (!nic_info_addr)
		return -ENOMEM;
	memset(nic_info_addr, 0, nic_size);

	nic_info = nic_info_addr;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
	cmd.req.arg[1] = MSD(nic_dma_t);
	cmd.req.arg[2] = LSD(nic_dma_t);
	/* arg3: target function in the high half, buffer size in the low */
	cmd.req.arg[3] = (func_id << 16 | nic_size);
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to get nic info%d\n", err);
		err = -EIO;
	} else {
		npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
		npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
		npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
		npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
		npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
		npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
		npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
		npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
		npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
		npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
	}

	dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
			  nic_dma_t);
	qlcnic_free_mbx_args(&cmd);

	return err;
}

/* Configure a NIC partition.
 * Management-function only; builds a little-endian qlcnic_info_le record
 * in a coherent buffer and points firmware at it.
 */
int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
			     struct qlcnic_info *nic)
{
	int err = -EIO;
	dma_addr_t nic_dma_t;
	void *nic_info_addr;
	struct qlcnic_cmd_args cmd;
	struct qlcnic_info_le *nic_info;
	size_t nic_size = sizeof(struct qlcnic_info_le);

	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
		return err;

	nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
					   &nic_dma_t, GFP_KERNEL);
	if (!nic_info_addr)
		return -ENOMEM;

	memset(nic_info_addr, 0, nic_size);
	nic_info = nic_info_addr;

	nic_info->pci_func = cpu_to_le16(nic->pci_func);
	nic_info->op_mode = cpu_to_le16(nic->op_mode);
	nic_info->phys_port = cpu_to_le16(nic->phys_port);
	nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
	nic_info->capabilities = cpu_to_le32(nic->capabilities);
	/* NOTE(review): assigned without cpu_to_le16 unlike the neighbouring
	 * fields — presumably a single-byte field; confirm against the
	 * qlcnic_info_le definition.
	 */
	nic_info->max_mac_filters =
nic->max_mac_filters;
	nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
	nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
	nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
	nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
	cmd.req.arg[1] = MSD(nic_dma_t);
	cmd.req.arg[2] = LSD(nic_dma_t);
	/* arg3: target function in the high half, buffer size in the low */
	cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size);
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to set nic info%d\n", err);
		err = -EIO;
	}

	dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
			  nic_dma_t);
	qlcnic_free_mbx_args(&cmd);

	return err;
}

/* Get PCI Info of a partition.
 * Firmware DMA-writes one qlcnic_pci_info_le record per possible PCI
 * function; each is unpacked into the caller's pci_info array.  As a side
 * effect, ahw->act_pci_func is recomputed as the number of NIC-type
 * functions found.
 */
int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
			     struct qlcnic_pci_info *pci_info)
{
	int err = 0, i;
	struct qlcnic_cmd_args cmd;
	dma_addr_t pci_info_dma_t;
	struct qlcnic_pci_info_le *npar;
	void *pci_info_addr;
	size_t npar_size = sizeof(struct qlcnic_pci_info_le);
	size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;

	pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
					   &pci_info_dma_t, GFP_KERNEL);
	if (!pci_info_addr)
		return -ENOMEM;
	memset(pci_info_addr, 0, pci_size);

	npar = pci_info_addr;
	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
	cmd.req.arg[1] = MSD(pci_info_dma_t);
	cmd.req.arg[2] = LSD(pci_info_dma_t);
	cmd.req.arg[3] = pci_size;
	err = qlcnic_issue_cmd(adapter, &cmd);

	adapter->ahw->act_pci_func = 0;
	if (err == QLCNIC_RCODE_SUCCESS) {
		for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
			pci_info->id = le16_to_cpu(npar->id);
			pci_info->active = le16_to_cpu(npar->active);
			pci_info->type = le16_to_cpu(npar->type);
			if (pci_info->type == QLCNIC_TYPE_NIC)
				adapter->ahw->act_pci_func++;
			pci_info->default_port =
				le16_to_cpu(npar->default_port);
			pci_info->tx_min_bw =
				le16_to_cpu(npar->tx_min_bw);
			pci_info->tx_max_bw =
				le16_to_cpu(npar->tx_max_bw);
			memcpy(pci_info->mac, npar->mac, ETH_ALEN);
		}
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get PCI Info%d\n", err);
		err = -EIO;
	}

	dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
			  pci_info_dma_t);
	qlcnic_free_mbx_args(&cmd);

	return err;
}

/* Configure eSwitch for port mirroring.
 * Management-function only, and only if eswitch @id is enabled.
 * Request word: eswitch id | BIT_4 (enable flag) | pci_func << 8.
 */
int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
				 u8 enable_mirroring, u8 pci_func)
{
	int err = -EIO;
	u32 arg1;
	struct qlcnic_cmd_args cmd;

	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
	    !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
		return err;

	arg1 = id | (enable_mirroring ? BIT_4 : 0);
	arg1 |= pci_func << 8;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORTMIRRORING);
	cmd.req.arg[1] = arg1;
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err != QLCNIC_RCODE_SUCCESS)
		dev_err(&adapter->pdev->dev,
			"Failed to configure port mirroring%d on eswitch:%d\n",
			pci_func, id);
	else
		dev_info(&adapter->pdev->dev,
			 "Configured eSwitch %d for port mirroring:%d\n",
			 id, pci_func);
	qlcnic_free_mbx_args(&cmd);

	return err;
}

/* Fetch per-port eSwitch statistics from firmware into esw_stats.
 * Non-management functions may only query their own function's stats.
 */
int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
			  const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {

	size_t stats_size = sizeof(struct qlcnic_esw_stats_le);
	struct qlcnic_esw_stats_le *stats;
	dma_addr_t stats_dma_t;
	void *stats_addr;
	u32 arg1;
	struct qlcnic_cmd_args cmd;
	int err;

	if (esw_stats == NULL)
		return -ENOMEM;

	if ((adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) &&
	    (func != adapter->ahw->pci_func)) {
		dev_err(&adapter->pdev->dev,
			"Not privilege to query stats for 
func=%d", func);
		return -EIO;
	}

	stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
					&stats_dma_t, GFP_KERNEL);
	if (!stats_addr) {
		dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
		return -ENOMEM;
	}
	memset(stats_addr, 0, stats_size);

	/* Request word: func | version<<8 | port-scope<<12 | rx/tx<<15 |
	 * buffer size<<16.
	 */
	arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
	arg1 |= rx_tx << 15 | stats_size << 16;

	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS);
	cmd.req.arg[1] = arg1;
	cmd.req.arg[2] = MSD(stats_dma_t);
	cmd.req.arg[3] = LSD(stats_dma_t);
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (!err) {
		/* Firmware DMA-wrote the little-endian record; unpack it */
		stats = stats_addr;
		esw_stats->context_id = le16_to_cpu(stats->context_id);
		esw_stats->version = le16_to_cpu(stats->version);
		esw_stats->size = le16_to_cpu(stats->size);
		esw_stats->multicast_frames =
			le64_to_cpu(stats->multicast_frames);
		esw_stats->broadcast_frames =
			le64_to_cpu(stats->broadcast_frames);
		esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
		esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
		esw_stats->local_frames = le64_to_cpu(stats->local_frames);
		esw_stats->errors = le64_to_cpu(stats->errors);
		esw_stats->numbytes = le64_to_cpu(stats->numbytes);
	}

	dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
			  stats_dma_t);
	qlcnic_free_mbx_args(&cmd);

	return err;
}

/* This routine will retrieve the MAC statistics from firmware */
int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
			 struct qlcnic_mac_statistics *mac_stats)
{
	struct qlcnic_mac_statistics_le *stats;
	struct qlcnic_cmd_args cmd;
	size_t stats_size = sizeof(struct qlcnic_mac_statistics_le);
	dma_addr_t stats_dma_t;
	void *stats_addr;
	int err;

	if (mac_stats == NULL)
		return -ENOMEM;

	stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
					&stats_dma_t, GFP_KERNEL);
	if (!stats_addr) {
		dev_err(&adapter->pdev->dev,
			"%s: Unable to allocate memory.\n", __func__);
		return -ENOMEM;
	}
	memset(stats_addr, 0, stats_size);
	qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
	/* arg1 high half carries the DMA buffer size */
	cmd.req.arg[1] = stats_size << 16;
	cmd.req.arg[2] = MSD(stats_dma_t);
	cmd.req.arg[3] = LSD(stats_dma_t);
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (!err) {
		/* Convert each little-endian counter to host order */
		stats = stats_addr;
		mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames);
		mac_stats->mac_tx_bytes = le64_to_cpu(stats->mac_tx_bytes);
		mac_stats->mac_tx_mcast_pkts =
			le64_to_cpu(stats->mac_tx_mcast_pkts);
		mac_stats->mac_tx_bcast_pkts =
			le64_to_cpu(stats->mac_tx_bcast_pkts);
		mac_stats->mac_rx_frames = le64_to_cpu(stats->mac_rx_frames);
		mac_stats->mac_rx_bytes = le64_to_cpu(stats->mac_rx_bytes);
		mac_stats->mac_rx_mcast_pkts =
			le64_to_cpu(stats->mac_rx_mcast_pkts);
		mac_stats->mac_rx_length_error =
			le64_to_cpu(stats->mac_rx_length_error);
		mac_stats->mac_rx_length_small =
			le64_to_cpu(stats->mac_rx_length_small);
		mac_stats->mac_rx_length_large =
			le64_to_cpu(stats->mac_rx_length_large);
		mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
		mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
		mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error);
	} else {
		dev_err(&adapter->pdev->dev,
			"%s: Get mac stats failed, err=%d.\n", __func__, err);
	}

	dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
			  stats_dma_t);

	qlcnic_free_mbx_args(&cmd);

	return err;
}

/* Aggregate statistics across every NIC partition attached to @eswitch.
 * Returns 0 if at least one partition's stats were collected, -EIO
 * otherwise.
 */
int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
			     const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {

	struct __qlcnic_esw_statistics port_stats;
	u8 i;
	int ret = -EIO;

	if (esw_stats == NULL)
		return -ENOMEM;
1061 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) 1062 return -EIO; 1063 if (adapter->npars == NULL) 1064 return -EIO; 1065 1066 memset(esw_stats, 0, sizeof(u64)); 1067 esw_stats->unicast_frames = QLCNIC_STATS_NOT_AVAIL; 1068 esw_stats->multicast_frames = QLCNIC_STATS_NOT_AVAIL; 1069 esw_stats->broadcast_frames = QLCNIC_STATS_NOT_AVAIL; 1070 esw_stats->dropped_frames = QLCNIC_STATS_NOT_AVAIL; 1071 esw_stats->errors = QLCNIC_STATS_NOT_AVAIL; 1072 esw_stats->local_frames = QLCNIC_STATS_NOT_AVAIL; 1073 esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL; 1074 esw_stats->context_id = eswitch; 1075 1076 for (i = 0; i < adapter->ahw->act_pci_func; i++) { 1077 if (adapter->npars[i].phy_port != eswitch) 1078 continue; 1079 1080 memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics)); 1081 if (qlcnic_get_port_stats(adapter, adapter->npars[i].pci_func, 1082 rx_tx, &port_stats)) 1083 continue; 1084 1085 esw_stats->size = port_stats.size; 1086 esw_stats->version = port_stats.version; 1087 QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames, 1088 port_stats.unicast_frames); 1089 QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames, 1090 port_stats.multicast_frames); 1091 QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames, 1092 port_stats.broadcast_frames); 1093 QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames, 1094 port_stats.dropped_frames); 1095 QLCNIC_ADD_ESW_STATS(esw_stats->errors, 1096 port_stats.errors); 1097 QLCNIC_ADD_ESW_STATS(esw_stats->local_frames, 1098 port_stats.local_frames); 1099 QLCNIC_ADD_ESW_STATS(esw_stats->numbytes, 1100 port_stats.numbytes); 1101 ret = 0; 1102 } 1103 return ret; 1104 } 1105 1106 int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw, 1107 const u8 port, const u8 rx_tx) 1108 { 1109 int err; 1110 u32 arg1; 1111 struct qlcnic_cmd_args cmd; 1112 1113 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) 1114 return -EIO; 1115 1116 if (func_esw == QLCNIC_STATS_PORT) { 1117 if (port >= QLCNIC_MAX_PCI_FUNC) 1118 goto err_ret; 1119 } else if 
(func_esw == QLCNIC_STATS_ESWITCH) { 1120 if (port >= QLCNIC_NIU_MAX_XG_PORTS) 1121 goto err_ret; 1122 } else { 1123 goto err_ret; 1124 } 1125 1126 if (rx_tx > QLCNIC_QUERY_TX_COUNTER) 1127 goto err_ret; 1128 1129 arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12; 1130 arg1 |= BIT_14 | rx_tx << 15; 1131 1132 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS); 1133 cmd.req.arg[1] = arg1; 1134 err = qlcnic_issue_cmd(adapter, &cmd); 1135 qlcnic_free_mbx_args(&cmd); 1136 return err; 1137 1138 err_ret: 1139 dev_err(&adapter->pdev->dev, 1140 "Invalid args func_esw %d port %d rx_ctx %d\n", 1141 func_esw, port, rx_tx); 1142 return -EIO; 1143 } 1144 1145 static int 1146 __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, 1147 u32 *arg1, u32 *arg2) 1148 { 1149 int err = -EIO; 1150 struct qlcnic_cmd_args cmd; 1151 u8 pci_func; 1152 pci_func = (*arg1 >> 8); 1153 1154 qlcnic_alloc_mbx_args(&cmd, adapter, 1155 QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG); 1156 cmd.req.arg[1] = *arg1; 1157 err = qlcnic_issue_cmd(adapter, &cmd); 1158 *arg1 = cmd.rsp.arg[1]; 1159 *arg2 = cmd.rsp.arg[2]; 1160 qlcnic_free_mbx_args(&cmd); 1161 1162 if (err == QLCNIC_RCODE_SUCCESS) 1163 dev_info(&adapter->pdev->dev, 1164 "eSwitch port config for pci func %d\n", pci_func); 1165 else 1166 dev_err(&adapter->pdev->dev, 1167 "Failed to get eswitch port config for pci func %d\n", 1168 pci_func); 1169 return err; 1170 } 1171 /* Configure eSwitch port 1172 op_mode = 0 for setting default port behavior 1173 op_mode = 1 for setting vlan id 1174 op_mode = 2 for deleting vlan id 1175 op_type = 0 for vlan_id 1176 op_type = 1 for port vlan_id 1177 */ 1178 int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, 1179 struct qlcnic_esw_func_cfg *esw_cfg) 1180 { 1181 int err = -EIO, index; 1182 u32 arg1, arg2 = 0; 1183 struct qlcnic_cmd_args cmd; 1184 u8 pci_func; 1185 1186 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) 1187 return err; 1188 pci_func = esw_cfg->pci_func; 1189 index = 
qlcnic_is_valid_nic_func(adapter, pci_func); 1190 if (index < 0) 1191 return err; 1192 arg1 = (adapter->npars[index].phy_port & BIT_0); 1193 arg1 |= (pci_func << 8); 1194 1195 if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2)) 1196 return err; 1197 arg1 &= ~(0x0ff << 8); 1198 arg1 |= (pci_func << 8); 1199 arg1 &= ~(BIT_2 | BIT_3); 1200 switch (esw_cfg->op_mode) { 1201 case QLCNIC_PORT_DEFAULTS: 1202 arg1 |= (BIT_4 | BIT_6 | BIT_7); 1203 arg2 |= (BIT_0 | BIT_1); 1204 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) 1205 arg2 |= (BIT_2 | BIT_3); 1206 if (!(esw_cfg->discard_tagged)) 1207 arg1 &= ~BIT_4; 1208 if (!(esw_cfg->promisc_mode)) 1209 arg1 &= ~BIT_6; 1210 if (!(esw_cfg->mac_override)) 1211 arg1 &= ~BIT_7; 1212 if (!(esw_cfg->mac_anti_spoof)) 1213 arg2 &= ~BIT_0; 1214 if (!(esw_cfg->offload_flags & BIT_0)) 1215 arg2 &= ~(BIT_1 | BIT_2 | BIT_3); 1216 if (!(esw_cfg->offload_flags & BIT_1)) 1217 arg2 &= ~BIT_2; 1218 if (!(esw_cfg->offload_flags & BIT_2)) 1219 arg2 &= ~BIT_3; 1220 break; 1221 case QLCNIC_ADD_VLAN: 1222 arg1 |= (BIT_2 | BIT_5); 1223 arg1 |= (esw_cfg->vlan_id << 16); 1224 break; 1225 case QLCNIC_DEL_VLAN: 1226 arg1 |= (BIT_3 | BIT_5); 1227 arg1 &= ~(0x0ffff << 16); 1228 break; 1229 default: 1230 return err; 1231 } 1232 1233 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_ESWITCH); 1234 cmd.req.arg[1] = arg1; 1235 cmd.req.arg[2] = arg2; 1236 err = qlcnic_issue_cmd(adapter, &cmd); 1237 qlcnic_free_mbx_args(&cmd); 1238 1239 if (err != QLCNIC_RCODE_SUCCESS) 1240 dev_err(&adapter->pdev->dev, 1241 "Failed to configure eswitch pci func %d\n", pci_func); 1242 else 1243 dev_info(&adapter->pdev->dev, 1244 "Configured eSwitch for pci func %d\n", pci_func); 1245 1246 return err; 1247 } 1248 1249 int 1250 qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, 1251 struct qlcnic_esw_func_cfg *esw_cfg) 1252 { 1253 u32 arg1, arg2; 1254 int index; 1255 u8 phy_port; 1256 1257 if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) { 1258 
index = qlcnic_is_valid_nic_func(adapter, esw_cfg->pci_func); 1259 if (index < 0) 1260 return -EIO; 1261 phy_port = adapter->npars[index].phy_port; 1262 } else { 1263 phy_port = adapter->ahw->physical_port; 1264 } 1265 arg1 = phy_port; 1266 arg1 |= (esw_cfg->pci_func << 8); 1267 if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2)) 1268 return -EIO; 1269 1270 esw_cfg->discard_tagged = !!(arg1 & BIT_4); 1271 esw_cfg->host_vlan_tag = !!(arg1 & BIT_5); 1272 esw_cfg->promisc_mode = !!(arg1 & BIT_6); 1273 esw_cfg->mac_override = !!(arg1 & BIT_7); 1274 esw_cfg->vlan_id = LSW(arg1 >> 16); 1275 esw_cfg->mac_anti_spoof = (arg2 & 0x1); 1276 esw_cfg->offload_flags = ((arg2 >> 1) & 0x7); 1277 1278 return 0; 1279 } 1280