/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include "qlcnic.h"

static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = {
	{QLCNIC_CMD_CREATE_RX_CTX, 4, 1},
	{QLCNIC_CMD_DESTROY_RX_CTX, 2, 1},
	{QLCNIC_CMD_CREATE_TX_CTX, 4, 1},
	{QLCNIC_CMD_DESTROY_TX_CTX, 2, 1},
	{QLCNIC_CMD_INTRPT_TEST, 4, 1},
	{QLCNIC_CMD_SET_MTU, 4, 1},
	{QLCNIC_CMD_READ_PHY, 4, 2},
	{QLCNIC_CMD_WRITE_PHY, 5, 1},
	{QLCNIC_CMD_READ_HW_REG, 4, 1},
	{QLCNIC_CMD_GET_FLOW_CTL, 4, 2},
	{QLCNIC_CMD_SET_FLOW_CTL, 4, 1},
	{QLCNIC_CMD_READ_MAX_MTU, 4, 2},
	{QLCNIC_CMD_READ_MAX_LRO, 4, 2},
	{QLCNIC_CMD_MAC_ADDRESS, 4, 3},
	{QLCNIC_CMD_GET_PCI_INFO, 4, 1},
	{QLCNIC_CMD_GET_NIC_INFO, 4, 1},
	{QLCNIC_CMD_SET_NIC_INFO, 4, 1},
	{QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
	{QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1},
	{QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3},
	{QLCNIC_CMD_SET_PORTMIRRORING, 4, 1},
	{QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1},
	{QLCNIC_CMD_GET_MAC_STATS, 4, 1},
	{QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3},
	{QLCNIC_CMD_GET_ESWITCH_STATS, 5, 1},
	{QLCNIC_CMD_CONFIG_PORT, 4, 1},
	{QLCNIC_CMD_TEMP_SIZE, 4, 4},
	{QLCNIC_CMD_GET_TEMP_HDR, 4, 1},
	{QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1},
	{QLCNIC_CMD_GET_LED_STATUS, 4, 2},
	{QLCNIC_CMD_MQ_TX_CONFIG_INTR, 2, 3},
	{QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
	{QLCNIC_CMD_DCB_QUERY_PARAM, 4, 1},
};

static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw)
{
	return (ahw->pci_func & 0xff) | ((ahw->fw_hal_version & 0xff) << 8) |
	       (0xcafe << 16);
}

/* Allocate mailbox registers */
int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
			       struct qlcnic_adapter *adapter, u32 type)
{
	int i, size;
	const struct qlcnic_mailbox_metadata *mbx_tbl;

	mbx_tbl = qlcnic_mbx_tbl;
	size = ARRAY_SIZE(qlcnic_mbx_tbl);
	for (i = 0; i < size; i++) {
		if (type == mbx_tbl[i].cmd) {
			mbx->req.num = mbx_tbl[i].in_args;
			mbx->rsp.num = mbx_tbl[i].out_args;
			mbx->req.arg = kcalloc(mbx->req.num,
					       sizeof(u32), GFP_ATOMIC);
			if (!mbx->req.arg)
				return -ENOMEM;
			mbx->rsp.arg = kcalloc(mbx->rsp.num,
					       sizeof(u32), GFP_ATOMIC);
			if (!mbx->rsp.arg) {
				kfree(mbx->req.arg);
				mbx->req.arg = NULL;
				return -ENOMEM;
			}
			memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
			memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
			mbx->req.arg[0] = type;
			break;
		}
	}
	return 0;
}

/* Free up mailbox registers */
void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd)
{
	kfree(cmd->req.arg);
	cmd->req.arg = NULL;
	kfree(cmd->rsp.arg);
	cmd->rsp.arg = NULL;
}

static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
{
	int i;

	for (i = 0; i < adapter->ahw->act_pci_func; i++) {
		if (adapter->npars[i].pci_func == pci_func)
			return i;
	}

	return -1;
}

static u32
qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
{
	u32 rsp;
	int timeout = 0, err = 0;

	do {
		/* give at least 1ms for firmware to respond */
		mdelay(1);

		if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
			return QLCNIC_CDRP_RSP_TIMEOUT;

		rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET, &err);
	} while (!QLCNIC_CDRP_IS_RSP(rsp));

	return rsp;
}
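
/*
 * Issue a CDRP mailbox command to the firmware on 82xx adapters. Under the
 * API lock, the command signature (PCI function and FW HAL version) and the
 * request arguments are written to the CRB registers, the command code is
 * posted, and the response is polled for. The firmware return code is left
 * in cmd->rsp.arg[0] and the remaining response arguments are read back
 * into cmd->rsp.arg[].
 */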
int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
			  struct qlcnic_cmd_args *cmd)
{
	int i, err = 0;
	u32 rsp;
	u32 signature;
	struct pci_dev *pdev = adapter->pdev;
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	const char *fmt;

	signature = qlcnic_get_cmd_signature(ahw);

	/* Acquire semaphore before accessing CRB */
	if (qlcnic_api_lock(adapter)) {
		cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
		return cmd->rsp.arg[0];
	}

	QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
	for (i = 1; i < QLCNIC_CDRP_MAX_ARGS; i++)
		QLCWR32(adapter, QLCNIC_CDRP_ARG(i), cmd->req.arg[i]);
	QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET,
		QLCNIC_CDRP_FORM_CMD(cmd->req.arg[0]));
	rsp = qlcnic_poll_rsp(adapter);

	if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
		dev_err(&pdev->dev, "card response timeout.\n");
		cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
	} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
		cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1), &err);
		switch (cmd->rsp.arg[0]) {
		case QLCNIC_RCODE_INVALID_ARGS:
			fmt = "CDRP invalid args: [%d]\n";
			break;
		case QLCNIC_RCODE_NOT_SUPPORTED:
		case QLCNIC_RCODE_NOT_IMPL:
			fmt = "CDRP command not supported: [%d]\n";
			break;
		case QLCNIC_RCODE_NOT_PERMITTED:
			fmt = "CDRP requested action not permitted: [%d]\n";
			break;
		case QLCNIC_RCODE_INVALID:
			fmt = "CDRP invalid or unknown cmd received: [%d]\n";
			break;
		case QLCNIC_RCODE_TIMEOUT:
			fmt = "CDRP command timeout: [%d]\n";
			break;
		default:
			fmt = "CDRP command failed: [%d]\n";
			break;
		}
		dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]);
		qlcnic_dump_mbx(adapter, cmd);
	} else if (rsp == QLCNIC_CDRP_RSP_OK)
		cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;

	for (i = 1; i < cmd->rsp.num; i++)
		cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i), &err);

	/* Release semaphore */
	qlcnic_api_unlock(adapter);
	return cmd->rsp.arg[0];
}

int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter, u32 fw_cmd)
{
	struct qlcnic_cmd_args cmd;
	u32 arg1, arg2, arg3;
	char drv_string[12];
	int err = 0;

	memset(drv_string, 0, sizeof(drv_string));
	snprintf(drv_string, sizeof(drv_string), "%d.%d.%d",
		 _QLCNIC_LINUX_MAJOR, _QLCNIC_LINUX_MINOR,
		 _QLCNIC_LINUX_SUBVERSION);

	err = qlcnic_alloc_mbx_args(&cmd, adapter, fw_cmd);
	if (err)
		return err;

	memcpy(&arg1, drv_string, sizeof(u32));
	memcpy(&arg2, drv_string + 4, sizeof(u32));
	memcpy(&arg3, drv_string + 8, sizeof(u32));

	cmd.req.arg[1] = arg1;
	cmd.req.arg[2] = arg2;
	cmd.req.arg[3] = arg3;

	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_info(&adapter->pdev->dev,
			 "Failed to set driver version in firmware\n");
		err = -EIO;
	}
	qlcnic_free_mbx_args(&cmd);
	return err;
}

int
qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
{
	int err = 0;
	struct qlcnic_cmd_args cmd;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (recv_ctx->state != QLCNIC_HOST_CTX_STATE_ACTIVE)
		return err;
	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU);
	if (err)
		return err;

	cmd.req.arg[1] = recv_ctx->context_id;
	cmd.req.arg[2] = mtu;

	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
		err = -EIO;
	}
	qlcnic_free_mbx_args(&cmd);
	return err;
}
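
/*
 * Create the receive context in firmware. A host request block describing
 * every RDS (receive) and SDS (status) descriptor ring is built in coherent
 * DMA memory and passed by address through QLCNIC_CMD_CREATE_RX_CTX. The
 * card's response supplies the context id and state plus the CRB offsets
 * used for the RDS producer, SDS consumer and interrupt mask registers.
 */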
int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
	struct net_device *netdev = adapter->netdev;
	u32 temp_intr_crb_mode, temp_rds_crb_mode;
	struct qlcnic_cardrsp_rds_ring *prsp_rds;
	struct qlcnic_cardrsp_sds_ring *prsp_sds;
	struct qlcnic_hostrq_rds_ring *prq_rds;
	struct qlcnic_hostrq_sds_ring *prq_sds;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_cardrsp_rx_ctx *prsp;
	struct qlcnic_hostrq_rx_ctx *prq;
	u8 i, nrds_rings, nsds_rings;
	struct qlcnic_cmd_args cmd;
	size_t rq_size, rsp_size;
	u32 cap, reg, val, reg2;
	u64 phys_addr;
	u16 temp_u16;
	void *addr;
	int err;

	nrds_rings = adapter->max_rds_rings;
	nsds_rings = adapter->drv_sds_rings;

	rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
				   nsds_rings);
	rsp_size = SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
				     nsds_rings);

	addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
				  &hostrq_phys_addr, GFP_KERNEL);
	if (addr == NULL)
		return -ENOMEM;
	prq = addr;

	addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
				  &cardrsp_phys_addr, GFP_KERNEL);
	if (addr == NULL) {
		err = -ENOMEM;
		goto out_free_rq;
	}
	prsp = addr;

	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);

	cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN
	       | QLCNIC_CAP0_VALIDOFF);
	cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);

	if (qlcnic_check_multi_tx(adapter) &&
	    !adapter->ahw->diag_test) {
		cap |= QLCNIC_CAP0_TX_MULTI;
	} else {
		temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
		prq->valid_field_offset = cpu_to_le16(temp_u16);
		prq->txrx_sds_binding = nsds_rings - 1;
		temp_intr_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
		prq->host_int_crb_mode = cpu_to_le32(temp_intr_crb_mode);
		temp_rds_crb_mode = QLCNIC_HOST_RDS_CRB_MODE_UNIQUE;
		prq->host_rds_crb_mode = cpu_to_le32(temp_rds_crb_mode);
	}

	prq->capabilities[0] = cpu_to_le32(cap);

	prq->num_rds_rings = cpu_to_le16(nrds_rings);
	prq->num_sds_rings = cpu_to_le16(nsds_rings);
	prq->rds_ring_offset = 0;

	val = le32_to_cpu(prq->rds_ring_offset) +
	      (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
	prq->sds_ring_offset = cpu_to_le32(val);

	prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
			le32_to_cpu(prq->rds_ring_offset));

	for (i = 0; i < nrds_rings; i++) {
		rds_ring = &recv_ctx->rds_rings[i];
		rds_ring->producer = 0;
		prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
		prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
		prq_rds[i].ring_kind = cpu_to_le32(i);
		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
	}

	prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
			le32_to_cpu(prq->sds_ring_offset));

	for (i = 0; i < nsds_rings; i++) {
		sds_ring = &recv_ctx->sds_rings[i];
		sds_ring->consumer = 0;
		memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));
		prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
		prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
		if (qlcnic_check_multi_tx(adapter) &&
		    !adapter->ahw->diag_test)
			prq_sds[i].msi_index = cpu_to_le16(ahw->intr_tbl[i].id);
		else
			prq_sds[i].msi_index = cpu_to_le16(i);
	}

	phys_addr = hostrq_phys_addr;
	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX);
	if (err)
		goto out_free_rsp;

	cmd.req.arg[1] = MSD(phys_addr);
	cmd.req.arg[2] = LSD(phys_addr);
	cmd.req.arg[3] = rq_size;
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Failed to create rx ctx in firmware %d\n", err);
		goto out_free_rsp;
	}

	prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
		    &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
		rds_ring = &recv_ctx->rds_rings[i];
		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
		rds_ring->crb_rcv_producer = ahw->pci_base0 + reg;
	}

	prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
		    &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
		sds_ring = &recv_ctx->sds_rings[i];
		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
		if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
			reg2 = ahw->intr_tbl[i].src;
		else
			reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);

		sds_ring->crb_intr_mask = ahw->pci_base0 + reg2;
		sds_ring->crb_sts_consumer = ahw->pci_base0 + reg;
	}

	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
	recv_ctx->context_id = le16_to_cpu(prsp->context_id);
	recv_ctx->virt_port = prsp->virt_port;

	netdev_info(netdev, "Rx Context[%d] Created, state 0x%x\n",
		    recv_ctx->context_id, recv_ctx->state);
	qlcnic_free_mbx_args(&cmd);

out_free_rsp:
	dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
			  cardrsp_phys_addr);
out_free_rq:
	dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);

	return err;
}

void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter)
{
	int err;
	struct qlcnic_cmd_args cmd;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX);
	if (err)
		return;

	cmd.req.arg[1] = recv_ctx->context_id;
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err)
		dev_err(&adapter->pdev->dev,
			"Failed to destroy rx ctx in firmware\n");

	recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
	qlcnic_free_mbx_args(&cmd);
}
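
/*
 * Create one transmit context in firmware for the given Tx ring. The host
 * request carries the descriptor ring address, the consumer-index DMA
 * address and, in multi-Tx-queue mode, the MSI-X vector assigned to the
 * ring. On success the response provides the context id, state and the CRB
 * offset of the command producer register.
 */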
int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
				     struct qlcnic_host_tx_ring *tx_ring,
				     int ring)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_hostrq_tx_ctx *prq;
	struct qlcnic_hostrq_cds_ring *prq_cds;
	struct qlcnic_cardrsp_tx_ctx *prsp;
	struct qlcnic_cmd_args cmd;
	u32 temp, intr_mask, temp_int_crb_mode;
	dma_addr_t rq_phys_addr, rsp_phys_addr;
	int temp_nsds_rings, index, err;
	void *rq_addr, *rsp_addr;
	size_t rq_size, rsp_size;
	u64 phys_addr;
	u16 msix_id;

	/* reset host resources */
	tx_ring->producer = 0;
	tx_ring->sw_consumer = 0;
	*(tx_ring->hw_consumer) = 0;

	rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
	rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size,
				      &rq_phys_addr, GFP_KERNEL);
	if (!rq_addr)
		return -ENOMEM;

	rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
	rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size,
				       &rsp_phys_addr, GFP_KERNEL);
	if (!rsp_addr) {
		err = -ENOMEM;
		goto out_free_rq;
	}

	prq = rq_addr;
	prsp = rsp_addr;

	prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);

	temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
		QLCNIC_CAP0_LSO);
	if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
		temp |= QLCNIC_CAP0_TX_MULTI;

	prq->capabilities[0] = cpu_to_le32(temp);

	if (qlcnic_check_multi_tx(adapter) &&
	    !adapter->ahw->diag_test) {
		temp_nsds_rings = adapter->drv_sds_rings;
		index = temp_nsds_rings + ring;
		msix_id = ahw->intr_tbl[index].id;
		prq->msi_index = cpu_to_le16(msix_id);
	} else {
		temp_int_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
		prq->host_int_crb_mode = cpu_to_le32(temp_int_crb_mode);
		prq->msi_index = 0;
	}

	prq->interrupt_ctl = 0;
	prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);

	prq_cds = &prq->cds_ring;

	prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
	prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);

	phys_addr = rq_phys_addr;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
	if (err)
		goto out_free_rsp;

	cmd.req.arg[1] = MSD(phys_addr);
	cmd.req.arg[2] = LSD(phys_addr);
	cmd.req.arg[3] = rq_size;
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err == QLCNIC_RCODE_SUCCESS) {
		tx_ring->state = le32_to_cpu(prsp->host_ctx_state);
		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
		tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
		tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
		if (qlcnic_check_multi_tx(adapter) &&
		    !adapter->ahw->diag_test &&
		    (adapter->flags & QLCNIC_MSIX_ENABLED)) {
			index = adapter->drv_sds_rings + ring;
			intr_mask = ahw->intr_tbl[index].src;
			tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask;
		}

		netdev_info(netdev, "Tx Context[0x%x] Created, state 0x%x\n",
			    tx_ring->ctx_id, tx_ring->state);
	} else {
		netdev_err(netdev, "Failed to create tx ctx in firmware %d\n",
			   err);
		err = -EIO;
	}
	qlcnic_free_mbx_args(&cmd);

out_free_rsp:
	dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
			  rsp_phys_addr);
out_free_rq:
	dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);

	return err;
}

void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter,
				   struct qlcnic_host_tx_ring *tx_ring)
{
	struct qlcnic_cmd_args cmd;
	int ret;

	ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX);
	if (ret)
		return;

	cmd.req.arg[1] = tx_ring->ctx_id;
	if (qlcnic_issue_cmd(adapter, &cmd))
		dev_err(&adapter->pdev->dev,
			"Failed to destroy tx ctx in firmware\n");
	qlcnic_free_mbx_args(&cmd);
}

int
qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
{
	int err;
	struct qlcnic_cmd_args cmd;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT);
	if (err)
		return err;

	cmd.req.arg[1] = config;
	err = qlcnic_issue_cmd(adapter, &cmd);
	qlcnic_free_mbx_args(&cmd);
	return err;
}
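
/*
 * Allocate the coherent DMA memory backing the host descriptor rings: a
 * hardware consumer index and a command descriptor ring for each Tx ring,
 * one receive descriptor ring per RDS ring and one status descriptor ring
 * per SDS ring.
 */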
int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
{
	void *addr;
	int err, ring;
	struct qlcnic_recv_context *recv_ctx;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	__le32 *ptr;

	struct pci_dev *pdev = adapter->pdev;

	recv_ctx = adapter->recv_ctx;

	for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
		tx_ring = &adapter->tx_ring[ring];
		ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
						   &tx_ring->hw_cons_phys_addr,
						   GFP_KERNEL);
		if (ptr == NULL)
			return -ENOMEM;

		tx_ring->hw_consumer = ptr;
		/* cmd desc ring */
		addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
					  &tx_ring->phys_addr,
					  GFP_KERNEL);
		if (addr == NULL) {
			err = -ENOMEM;
			goto err_out_free;
		}

		tx_ring->desc_head = addr;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		addr = dma_alloc_coherent(&adapter->pdev->dev,
					  RCV_DESC_RINGSIZE(rds_ring),
					  &rds_ring->phys_addr, GFP_KERNEL);
		if (addr == NULL) {
			err = -ENOMEM;
			goto err_out_free;
		}
		rds_ring->desc_head = addr;
	}

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		addr = dma_alloc_coherent(&adapter->pdev->dev,
					  STATUS_DESC_RINGSIZE(sds_ring),
					  &sds_ring->phys_addr, GFP_KERNEL);
		if (addr == NULL) {
			err = -ENOMEM;
			goto err_out_free;
		}
		sds_ring->desc_head = addr;
	}

	return 0;

err_out_free:
	qlcnic_free_hw_resources(adapter);
	return err;
}
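
/*
 * Bring up the firmware contexts: perform a pending FLR if one is flagged,
 * configure MSI-X interrupts where applicable (83xx, or 82xx in
 * multi-Tx-queue mode), then create the Rx context and one Tx context per
 * Tx ring. On failure, the contexts and interrupt mappings created so far
 * are torn down again.
 */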
int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
{
	int i, err, ring;

	if (dev->flags & QLCNIC_NEED_FLR) {
		pci_reset_function(dev->pdev);
		dev->flags &= ~QLCNIC_NEED_FLR;
	}

	if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
		if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
			err = qlcnic_83xx_config_intrpt(dev, 1);
			if (err)
				return err;
		}
	}

	if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
	    qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test) {
		err = qlcnic_82xx_mq_intrpt(dev, 1);
		if (err)
			return err;
	}

	err = qlcnic_fw_cmd_create_rx_ctx(dev);
	if (err)
		goto err_out;

	for (ring = 0; ring < dev->drv_tx_rings; ring++) {
		err = qlcnic_fw_cmd_create_tx_ctx(dev,
						  &dev->tx_ring[ring],
						  ring);
		if (err) {
			qlcnic_fw_cmd_del_rx_ctx(dev);
			if (ring == 0)
				goto err_out;

			for (i = 0; i < ring; i++)
				qlcnic_fw_cmd_del_tx_ctx(dev, &dev->tx_ring[i]);

			goto err_out;
		}
	}

	set_bit(__QLCNIC_FW_ATTACHED, &dev->state);

	return 0;

err_out:
	if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
	    qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test)
		qlcnic_82xx_config_intrpt(dev, 0);

	if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
		if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
			qlcnic_83xx_config_intrpt(dev, 0);
	}

	return err;
}

void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
{
	int ring;

	if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
		qlcnic_fw_cmd_del_rx_ctx(adapter);
		for (ring = 0; ring < adapter->drv_tx_rings; ring++)
			qlcnic_fw_cmd_del_tx_ctx(adapter,
						 &adapter->tx_ring[ring]);

		if (qlcnic_82xx_check(adapter) &&
		    (adapter->flags & QLCNIC_MSIX_ENABLED) &&
		    qlcnic_check_multi_tx(adapter) &&
		    !adapter->ahw->diag_test)
			qlcnic_82xx_config_intrpt(adapter, 0);

		if (qlcnic_83xx_check(adapter) &&
		    (adapter->flags & QLCNIC_MSIX_ENABLED)) {
			if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
				qlcnic_83xx_config_intrpt(adapter, 0);
		}
		/* Allow dma queues to drain after context reset */
		mdelay(20);
	}
}
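
/*
 * Release the coherent DMA memory allocated by qlcnic_alloc_hw_resources()
 * for the Tx, RDS and SDS descriptor rings.
 */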
void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
{
	struct qlcnic_recv_context *recv_ctx;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	int ring;

	recv_ctx = adapter->recv_ctx;

	for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
		tx_ring = &adapter->tx_ring[ring];
		if (tx_ring->hw_consumer != NULL) {
			dma_free_coherent(&adapter->pdev->dev, sizeof(u32),
					  tx_ring->hw_consumer,
					  tx_ring->hw_cons_phys_addr);

			tx_ring->hw_consumer = NULL;
		}

		if (tx_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
					  TX_DESC_RINGSIZE(tx_ring),
					  tx_ring->desc_head,
					  tx_ring->phys_addr);
			tx_ring->desc_head = NULL;
		}
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];

		if (rds_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
					  RCV_DESC_RINGSIZE(rds_ring),
					  rds_ring->desc_head,
					  rds_ring->phys_addr);
			rds_ring->desc_head = NULL;
		}
	}

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (sds_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
					  STATUS_DESC_RINGSIZE(sds_ring),
					  sds_ring->desc_head,
					  sds_ring->phys_addr);
			sds_ring->desc_head = NULL;
		}
	}
}
int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *adapter, u8 op_type)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_cmd_args cmd;
	u32 type, val;
	int i, err = 0;

	for (i = 0; i < ahw->num_msix; i++) {
		err = qlcnic_alloc_mbx_args(&cmd, adapter,
					    QLCNIC_CMD_MQ_TX_CONFIG_INTR);
		if (err)
			return err;
		type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
		val = type | (ahw->intr_tbl[i].type << 4);
		if (ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX)
			val |= (ahw->intr_tbl[i].id << 16);
		cmd.req.arg[1] = val;
		err = qlcnic_issue_cmd(adapter, &cmd);
		if (err) {
			netdev_err(netdev, "Failed to %s interrupts %d\n",
				   op_type == QLCNIC_INTRPT_ADD ? "Add" :
				   "Delete", err);
			qlcnic_free_mbx_args(&cmd);
			return err;
		}
		val = cmd.rsp.arg[1];
		if (LSB(val)) {
			netdev_info(netdev,
				    "failed to configure interrupt for %d\n",
				    ahw->intr_tbl[i].id);
			qlcnic_free_mbx_args(&cmd);
			continue;
		}
		if (op_type) {
			ahw->intr_tbl[i].id = MSW(val);
			ahw->intr_tbl[i].enabled = 1;
			ahw->intr_tbl[i].src = cmd.rsp.arg[2];
		} else {
			ahw->intr_tbl[i].id = i;
			ahw->intr_tbl[i].enabled = 0;
			ahw->intr_tbl[i].src = 0;
		}
		qlcnic_free_mbx_args(&cmd);
	}

	return err;
}

int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
				u8 function)
{
	int err, i;
	struct qlcnic_cmd_args cmd;
	u32 mac_low, mac_high;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
	if (err)
		return err;

	cmd.req.arg[1] = function | BIT_8;
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err == QLCNIC_RCODE_SUCCESS) {
		mac_low = cmd.rsp.arg[1];
		mac_high = cmd.rsp.arg[2];

		for (i = 0; i < 2; i++)
			mac[i] = (u8) (mac_high >> ((1 - i) * 8));
		for (i = 2; i < 6; i++)
			mac[i] = (u8) (mac_low >> ((5 - i) * 8));
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get mac address %d\n", err);
		err = -EIO;
	}
	qlcnic_free_mbx_args(&cmd);
	return err;
}

/* Get info of a NIC partition */
int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
			     struct qlcnic_info *npar_info, u8 func_id)
{
	int err;
	dma_addr_t nic_dma_t;
	const struct qlcnic_info_le *nic_info;
	void *nic_info_addr;
	struct qlcnic_cmd_args cmd;
	size_t nic_size = sizeof(struct qlcnic_info_le);

	nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
					    &nic_dma_t, GFP_KERNEL);
	if (!nic_info_addr)
		return -ENOMEM;

	nic_info = nic_info_addr;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
	if (err)
		goto out_free_dma;

	cmd.req.arg[1] = MSD(nic_dma_t);
	cmd.req.arg[2] = LSD(nic_dma_t);
	cmd.req.arg[3] = (func_id << 16 | nic_size);
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to get nic info %d\n", err);
		err = -EIO;
	} else {
		npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
		npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
		npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
		npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
		npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
		npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
		npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
		npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
		npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
		npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
		adapter->max_tx_rings = npar_info->max_tx_ques;
		adapter->max_sds_rings = npar_info->max_rx_ques;
	}

	qlcnic_free_mbx_args(&cmd);
out_free_dma:
	dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
			  nic_dma_t);

	return err;
}

/* Configure a NIC partition */
int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
			     struct qlcnic_info *nic)
{
	int err = -EIO;
	dma_addr_t nic_dma_t;
	void *nic_info_addr;
	struct qlcnic_cmd_args cmd;
	struct qlcnic_info_le *nic_info;
	size_t nic_size = sizeof(struct qlcnic_info_le);

	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
		return err;

	nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
					    &nic_dma_t, GFP_KERNEL);
	if (!nic_info_addr)
		return -ENOMEM;

	nic_info = nic_info_addr;

	nic_info->pci_func = cpu_to_le16(nic->pci_func);
	nic_info->op_mode = cpu_to_le16(nic->op_mode);
	nic_info->phys_port = cpu_to_le16(nic->phys_port);
	nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
	nic_info->capabilities = cpu_to_le32(nic->capabilities);
	nic_info->max_mac_filters = nic->max_mac_filters;
	nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
	nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
	nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
	nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
	if (err)
		goto out_free_dma;

	cmd.req.arg[1] = MSD(nic_dma_t);
	cmd.req.arg[2] = LSD(nic_dma_t);
	cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size);
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to set nic info %d\n", err);
		err = -EIO;
	}

	qlcnic_free_mbx_args(&cmd);
out_free_dma:
	dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
			  nic_dma_t);

	return err;
}

/* Get PCI Info of a partition */
int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
			     struct qlcnic_pci_info *pci_info)
{
	int err = 0, i;
	struct qlcnic_cmd_args cmd;
	dma_addr_t pci_info_dma_t;
	struct qlcnic_pci_info_le *npar;
	void *pci_info_addr;
	size_t npar_size = sizeof(struct qlcnic_pci_info_le);
	size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;

	pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size,
					    &pci_info_dma_t, GFP_KERNEL);
	if (!pci_info_addr)
		return -ENOMEM;

	npar = pci_info_addr;
	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
	if (err)
		goto out_free_dma;

	cmd.req.arg[1] = MSD(pci_info_dma_t);
	cmd.req.arg[2] = LSD(pci_info_dma_t);
	cmd.req.arg[3] = pci_size;
	err = qlcnic_issue_cmd(adapter, &cmd);

	adapter->ahw->act_pci_func = 0;
	if (err == QLCNIC_RCODE_SUCCESS) {
		for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
			pci_info->id = le16_to_cpu(npar->id);
			pci_info->active = le16_to_cpu(npar->active);
			pci_info->type = le16_to_cpu(npar->type);
			if (pci_info->type == QLCNIC_TYPE_NIC)
				adapter->ahw->act_pci_func++;
			pci_info->default_port =
				le16_to_cpu(npar->default_port);
			pci_info->tx_min_bw =
				le16_to_cpu(npar->tx_min_bw);
			pci_info->tx_max_bw =
				le16_to_cpu(npar->tx_max_bw);
			memcpy(pci_info->mac, npar->mac, ETH_ALEN);
		}
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get PCI Info %d\n", err);
		err = -EIO;
	}

	qlcnic_free_mbx_args(&cmd);
out_free_dma:
	dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
			  pci_info_dma_t);

	return err;
}

/* Configure eSwitch for port mirroring */
int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
				 u8 enable_mirroring, u8 pci_func)
{
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_cmd_args cmd;
	int err = -EIO;
	u32 arg1;

	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
	    !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
		return err;

	arg1 = id | (enable_mirroring ? BIT_4 : 0);
	arg1 |= pci_func << 8;

	err = qlcnic_alloc_mbx_args(&cmd, adapter,
				    QLCNIC_CMD_SET_PORTMIRRORING);
	if (err)
		return err;

	cmd.req.arg[1] = arg1;
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err != QLCNIC_RCODE_SUCCESS)
		dev_err(dev, "Failed to configure port mirroring for vNIC function %d on eSwitch %d\n",
			pci_func, id);
	else
		dev_info(dev, "Configured port mirroring for vNIC function %d on eSwitch %d\n",
			 pci_func, id);
	qlcnic_free_mbx_args(&cmd);

	return err;
}
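
/*
 * Retrieve the per-port eSwitch statistics for a PCI function. Only the
 * management function may query other functions; the firmware DMAs the
 * statistics block into a coherent buffer which is then converted to host
 * byte order into *esw_stats.
 */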
int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
			  const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats)
{
	size_t stats_size = sizeof(struct qlcnic_esw_stats_le);
	struct qlcnic_esw_stats_le *stats;
	dma_addr_t stats_dma_t;
	void *stats_addr;
	u32 arg1;
	struct qlcnic_cmd_args cmd;
	int err;

	if (esw_stats == NULL)
		return -ENOMEM;

	if ((adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) &&
	    (func != adapter->ahw->pci_func)) {
		dev_err(&adapter->pdev->dev,
			"Not privileged to query stats for func=%d\n", func);
		return -EIO;
	}

	stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
					 &stats_dma_t, GFP_KERNEL);
	if (!stats_addr)
		return -ENOMEM;

	arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
	arg1 |= rx_tx << 15 | stats_size << 16;

	err = qlcnic_alloc_mbx_args(&cmd, adapter,
				    QLCNIC_CMD_GET_ESWITCH_STATS);
	if (err)
		goto out_free_dma;

	cmd.req.arg[1] = arg1;
	cmd.req.arg[2] = MSD(stats_dma_t);
	cmd.req.arg[3] = LSD(stats_dma_t);
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (!err) {
		stats = stats_addr;
		esw_stats->context_id = le16_to_cpu(stats->context_id);
		esw_stats->version = le16_to_cpu(stats->version);
		esw_stats->size = le16_to_cpu(stats->size);
		esw_stats->multicast_frames =
			le64_to_cpu(stats->multicast_frames);
		esw_stats->broadcast_frames =
			le64_to_cpu(stats->broadcast_frames);
		esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
		esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
		esw_stats->local_frames = le64_to_cpu(stats->local_frames);
		esw_stats->errors = le64_to_cpu(stats->errors);
		esw_stats->numbytes = le64_to_cpu(stats->numbytes);
	}

	qlcnic_free_mbx_args(&cmd);
out_free_dma:
	dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
			  stats_dma_t);

	return err;
}

/* This routine will retrieve the MAC statistics from firmware */
int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
			 struct qlcnic_mac_statistics *mac_stats)
{
	struct qlcnic_mac_statistics_le *stats;
	struct qlcnic_cmd_args cmd;
	size_t stats_size = sizeof(struct qlcnic_mac_statistics_le);
	dma_addr_t stats_dma_t;
	void *stats_addr;
	int err;

	if (mac_stats == NULL)
		return -ENOMEM;

	stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
					 &stats_dma_t, GFP_KERNEL);
	if (!stats_addr)
		return -ENOMEM;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
	if (err)
		goto out_free_dma;

	cmd.req.arg[1] = stats_size << 16;
	cmd.req.arg[2] = MSD(stats_dma_t);
	cmd.req.arg[3] = LSD(stats_dma_t);
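
/*
 * Aggregate eSwitch statistics over every NIC partition attached to the
 * given eSwitch. Counters start as QLCNIC_STATS_NOT_AVAIL and are summed
 * with QLCNIC_ADD_ESW_STATS() only for functions whose port query succeeds;
 * -EIO is returned if no function could be queried.
 */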
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (!err) {
		stats = stats_addr;
		mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames);
		mac_stats->mac_tx_bytes = le64_to_cpu(stats->mac_tx_bytes);
		mac_stats->mac_tx_mcast_pkts =
			le64_to_cpu(stats->mac_tx_mcast_pkts);
		mac_stats->mac_tx_bcast_pkts =
			le64_to_cpu(stats->mac_tx_bcast_pkts);
		mac_stats->mac_rx_frames = le64_to_cpu(stats->mac_rx_frames);
		mac_stats->mac_rx_bytes = le64_to_cpu(stats->mac_rx_bytes);
		mac_stats->mac_rx_mcast_pkts =
			le64_to_cpu(stats->mac_rx_mcast_pkts);
		mac_stats->mac_rx_length_error =
			le64_to_cpu(stats->mac_rx_length_error);
		mac_stats->mac_rx_length_small =
			le64_to_cpu(stats->mac_rx_length_small);
		mac_stats->mac_rx_length_large =
			le64_to_cpu(stats->mac_rx_length_large);
		mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
		mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
		mac_stats->mac_rx_crc_error =
			le64_to_cpu(stats->mac_rx_crc_error);
	} else {
		dev_err(&adapter->pdev->dev,
			"%s: Get mac stats failed, err=%d.\n", __func__, err);
	}

	qlcnic_free_mbx_args(&cmd);

out_free_dma:
	dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
			  stats_dma_t);

	return err;
}

int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
			     const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats)
{
	struct __qlcnic_esw_statistics port_stats;
	u8 i;
	int ret = -EIO;

	if (esw_stats == NULL)
		return -ENOMEM;
	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
		return -EIO;
	if (adapter->npars == NULL)
		return -EIO;

	memset(esw_stats, 0, sizeof(u64));
	esw_stats->unicast_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->multicast_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->broadcast_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->dropped_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->errors = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->local_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->context_id = eswitch;

	for (i = 0; i < adapter->ahw->act_pci_func; i++) {
		if (adapter->npars[i].phy_port != eswitch)
			continue;

		memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
		if (qlcnic_get_port_stats(adapter, adapter->npars[i].pci_func,
					  rx_tx, &port_stats))
			continue;

		esw_stats->size = port_stats.size;
		esw_stats->version = port_stats.version;
		QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames,
				     port_stats.unicast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames,
				     port_stats.multicast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames,
				     port_stats.broadcast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames,
				     port_stats.dropped_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->errors,
				     port_stats.errors);
		QLCNIC_ADD_ESW_STATS(esw_stats->local_frames,
				     port_stats.local_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->numbytes,
				     port_stats.numbytes);
		ret = 0;
	}
	return ret;
}
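
/*
 * Clear eSwitch statistics for a single port or for a whole eSwitch
 * (management function only). The request reuses QLCNIC_CMD_GET_ESWITCH_STATS
 * but sets BIT_14 in arg1, which the driver uses to request a clear rather
 * than a query.
 */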
int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
			   const u8 port, const u8 rx_tx)
{
	int err;
	u32 arg1;
	struct qlcnic_cmd_args cmd;

	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
		return -EIO;

	if (func_esw == QLCNIC_STATS_PORT) {
		if (port >= QLCNIC_MAX_PCI_FUNC)
			goto err_ret;
	} else if (func_esw == QLCNIC_STATS_ESWITCH) {
		if (port >= QLCNIC_NIU_MAX_XG_PORTS)
			goto err_ret;
	} else {
		goto err_ret;
	}

	if (rx_tx > QLCNIC_QUERY_TX_COUNTER)
		goto err_ret;

	arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
	arg1 |= BIT_14 | rx_tx << 15;

	err = qlcnic_alloc_mbx_args(&cmd, adapter,
				    QLCNIC_CMD_GET_ESWITCH_STATS);
	if (err)
		return err;

	cmd.req.arg[1] = arg1;
	err = qlcnic_issue_cmd(adapter, &cmd);
	qlcnic_free_mbx_args(&cmd);
	return err;

err_ret:
	dev_err(&adapter->pdev->dev,
		"Invalid args func_esw %d port %d rx_tx %d\n",
		func_esw, port, rx_tx);
	return -EIO;
}
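
/*
 * Read the current eSwitch port configuration for a PCI function. On entry
 * *arg1 carries the physical port and function number; on return *arg1
 * holds the configuration word and *arg2 the additional flags reported by
 * firmware.
 */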
static int __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
					    u32 *arg1, u32 *arg2)
{
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_cmd_args cmd;
	u8 pci_func = *arg1 >> 8;
	int err;

	err = qlcnic_alloc_mbx_args(&cmd, adapter,
				    QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG);
	if (err)
		return err;

	cmd.req.arg[1] = *arg1;
	err = qlcnic_issue_cmd(adapter, &cmd);
	*arg1 = cmd.rsp.arg[1];
	*arg2 = cmd.rsp.arg[2];
	qlcnic_free_mbx_args(&cmd);

	if (err == QLCNIC_RCODE_SUCCESS)
		dev_info(dev, "Get eSwitch port config for vNIC function %d\n",
			 pci_func);
	else
		dev_err(dev, "Failed to get eswitch port config for vNIC function %d\n",
			pci_func);
	return err;
}

/* Configure eSwitch port
 * op_mode = 0 for setting default port behavior
 * op_mode = 1 for setting vlan id
 * op_mode = 2 for deleting vlan id
 * op_type = 0 for vlan_id
 * op_type = 1 for port vlan_id
 */
int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
			      struct qlcnic_esw_func_cfg *esw_cfg)
{
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_cmd_args cmd;
	int err = -EIO, index;
	u32 arg1, arg2 = 0;
	u8 pci_func;

	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
		return err;
	pci_func = esw_cfg->pci_func;
	index = qlcnic_is_valid_nic_func(adapter, pci_func);
	if (index < 0)
		return err;
	arg1 = (adapter->npars[index].phy_port & BIT_0);
	arg1 |= (pci_func << 8);

	if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
		return err;
	arg1 &= ~(0x0ff << 8);
	arg1 |= (pci_func << 8);
	arg1 &= ~(BIT_2 | BIT_3);
	switch (esw_cfg->op_mode) {
	case QLCNIC_PORT_DEFAULTS:
		arg1 |= (BIT_4 | BIT_6 | BIT_7);
		arg2 |= (BIT_0 | BIT_1);
		if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
			arg2 |= (BIT_2 | BIT_3);
		if (!(esw_cfg->discard_tagged))
			arg1 &= ~BIT_4;
		if (!(esw_cfg->promisc_mode))
			arg1 &= ~BIT_6;
		if (!(esw_cfg->mac_override))
			arg1 &= ~BIT_7;
		if (!(esw_cfg->mac_anti_spoof))
			arg2 &= ~BIT_0;
		if (!(esw_cfg->offload_flags & BIT_0))
			arg2 &= ~(BIT_1 | BIT_2 | BIT_3);
		if (!(esw_cfg->offload_flags & BIT_1))
			arg2 &= ~BIT_2;
		if (!(esw_cfg->offload_flags & BIT_2))
			arg2 &= ~BIT_3;
		break;
	case QLCNIC_ADD_VLAN:
		arg1 |= (BIT_2 | BIT_5);
		arg1 |= (esw_cfg->vlan_id << 16);
		break;
	case QLCNIC_DEL_VLAN:
		arg1 |= (BIT_3 | BIT_5);
		arg1 &= ~(0x0ffff << 16);
		break;
	default:
		return err;
	}

	err = qlcnic_alloc_mbx_args(&cmd, adapter,
				    QLCNIC_CMD_CONFIGURE_ESWITCH);
	if (err)
		return err;

	cmd.req.arg[1] = arg1;
	cmd.req.arg[2] = arg2;
	err = qlcnic_issue_cmd(adapter, &cmd);
	qlcnic_free_mbx_args(&cmd);

	if (err != QLCNIC_RCODE_SUCCESS)
		dev_err(dev, "Failed to configure eswitch for vNIC function %d\n",
			pci_func);
	else
		dev_info(dev, "Configured eSwitch for vNIC function %d\n",
			 pci_func);

	return err;
}

int
qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
			       struct qlcnic_esw_func_cfg *esw_cfg)
{
	u32 arg1, arg2;
	int index;
	u8 phy_port;

	if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
		index = qlcnic_is_valid_nic_func(adapter, esw_cfg->pci_func);
		if (index < 0)
			return -EIO;
		phy_port = adapter->npars[index].phy_port;
	} else {
		phy_port = adapter->ahw->physical_port;
	}
	arg1 = phy_port;
	arg1 |= (esw_cfg->pci_func << 8);
	if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
		return -EIO;

	esw_cfg->discard_tagged = !!(arg1 & BIT_4);
	esw_cfg->host_vlan_tag = !!(arg1 & BIT_5);
	esw_cfg->promisc_mode = !!(arg1 & BIT_6);
	esw_cfg->mac_override = !!(arg1 & BIT_7);
	esw_cfg->vlan_id = LSW(arg1 >> 16);
	esw_cfg->mac_anti_spoof = (arg2 & 0x1);
	esw_cfg->offload_flags = ((arg2 >> 1) & 0x7);

	return 0;
}