1 /* 2 * QLogic qlcnic NIC Driver 3 * Copyright (c) 2009-2013 QLogic Corporation 4 * 5 * See LICENSE.qlcnic for copyright and licensing details. 6 */ 7 8 #include "qlcnic.h" 9 10 static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = { 11 {QLCNIC_CMD_CREATE_RX_CTX, 4, 1}, 12 {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1}, 13 {QLCNIC_CMD_CREATE_TX_CTX, 4, 1}, 14 {QLCNIC_CMD_DESTROY_TX_CTX, 2, 1}, 15 {QLCNIC_CMD_INTRPT_TEST, 4, 1}, 16 {QLCNIC_CMD_SET_MTU, 4, 1}, 17 {QLCNIC_CMD_READ_PHY, 4, 2}, 18 {QLCNIC_CMD_WRITE_PHY, 5, 1}, 19 {QLCNIC_CMD_READ_HW_REG, 4, 1}, 20 {QLCNIC_CMD_GET_FLOW_CTL, 4, 2}, 21 {QLCNIC_CMD_SET_FLOW_CTL, 4, 1}, 22 {QLCNIC_CMD_READ_MAX_MTU, 4, 2}, 23 {QLCNIC_CMD_READ_MAX_LRO, 4, 2}, 24 {QLCNIC_CMD_MAC_ADDRESS, 4, 3}, 25 {QLCNIC_CMD_GET_PCI_INFO, 4, 1}, 26 {QLCNIC_CMD_GET_NIC_INFO, 4, 1}, 27 {QLCNIC_CMD_SET_NIC_INFO, 4, 1}, 28 {QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3}, 29 {QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1}, 30 {QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3}, 31 {QLCNIC_CMD_SET_PORTMIRRORING, 4, 1}, 32 {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1}, 33 {QLCNIC_CMD_GET_MAC_STATS, 4, 1}, 34 {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3}, 35 {QLCNIC_CMD_GET_ESWITCH_STATS, 5, 1}, 36 {QLCNIC_CMD_CONFIG_PORT, 4, 1}, 37 {QLCNIC_CMD_TEMP_SIZE, 4, 4}, 38 {QLCNIC_CMD_GET_TEMP_HDR, 4, 1}, 39 {QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1}, 40 {QLCNIC_CMD_GET_LED_STATUS, 4, 2}, 41 {QLCNIC_CMD_MQ_TX_CONFIG_INTR, 2, 3}, 42 {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2}, 43 {QLCNIC_CMD_DCB_QUERY_PARAM, 4, 1}, 44 }; 45 46 static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw) 47 { 48 return (ahw->pci_func & 0xff) | ((ahw->fw_hal_version & 0xff) << 8) | 49 (0xcafe << 16); 50 } 51 52 /* Allocate mailbox registers */ 53 int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx, 54 struct qlcnic_adapter *adapter, u32 type) 55 { 56 int i, size; 57 const struct qlcnic_mailbox_metadata *mbx_tbl; 58 59 mbx_tbl = qlcnic_mbx_tbl; 60 size = ARRAY_SIZE(qlcnic_mbx_tbl); 61 for (i = 0; i < size; i++) { 62 if 
(type == mbx_tbl[i].cmd) { 63 mbx->req.num = mbx_tbl[i].in_args; 64 mbx->rsp.num = mbx_tbl[i].out_args; 65 mbx->req.arg = kcalloc(mbx->req.num, 66 sizeof(u32), GFP_ATOMIC); 67 if (!mbx->req.arg) 68 return -ENOMEM; 69 mbx->rsp.arg = kcalloc(mbx->rsp.num, 70 sizeof(u32), GFP_ATOMIC); 71 if (!mbx->rsp.arg) { 72 kfree(mbx->req.arg); 73 mbx->req.arg = NULL; 74 return -ENOMEM; 75 } 76 memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num); 77 memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num); 78 mbx->req.arg[0] = type; 79 break; 80 } 81 } 82 return 0; 83 } 84 85 /* Free up mailbox registers */ 86 void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd) 87 { 88 kfree(cmd->req.arg); 89 cmd->req.arg = NULL; 90 kfree(cmd->rsp.arg); 91 cmd->rsp.arg = NULL; 92 } 93 94 static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func) 95 { 96 int i; 97 98 for (i = 0; i < adapter->ahw->act_pci_func; i++) { 99 if (adapter->npars[i].pci_func == pci_func) 100 return i; 101 } 102 103 return -1; 104 } 105 106 static u32 107 qlcnic_poll_rsp(struct qlcnic_adapter *adapter) 108 { 109 u32 rsp; 110 int timeout = 0, err = 0; 111 112 do { 113 /* give atleast 1ms for firmware to respond */ 114 mdelay(1); 115 116 if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT) 117 return QLCNIC_CDRP_RSP_TIMEOUT; 118 119 rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET, &err); 120 } while (!QLCNIC_CDRP_IS_RSP(rsp)); 121 122 return rsp; 123 } 124 125 int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter, 126 struct qlcnic_cmd_args *cmd) 127 { 128 int i, err = 0; 129 u32 rsp; 130 u32 signature; 131 struct pci_dev *pdev = adapter->pdev; 132 struct qlcnic_hardware_context *ahw = adapter->ahw; 133 const char *fmt; 134 135 signature = qlcnic_get_cmd_signature(ahw); 136 137 /* Acquire semaphore before accessing CRB */ 138 if (qlcnic_api_lock(adapter)) { 139 cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT; 140 return cmd->rsp.arg[0]; 141 } 142 143 QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature); 144 for (i = 1; i < 
QLCNIC_CDRP_MAX_ARGS; i++) 145 QLCWR32(adapter, QLCNIC_CDRP_ARG(i), cmd->req.arg[i]); 146 QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, 147 QLCNIC_CDRP_FORM_CMD(cmd->req.arg[0])); 148 rsp = qlcnic_poll_rsp(adapter); 149 150 if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) { 151 dev_err(&pdev->dev, "card response timeout.\n"); 152 cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT; 153 } else if (rsp == QLCNIC_CDRP_RSP_FAIL) { 154 cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1), &err); 155 switch (cmd->rsp.arg[0]) { 156 case QLCNIC_RCODE_INVALID_ARGS: 157 fmt = "CDRP invalid args: [%d]\n"; 158 break; 159 case QLCNIC_RCODE_NOT_SUPPORTED: 160 case QLCNIC_RCODE_NOT_IMPL: 161 fmt = "CDRP command not supported: [%d]\n"; 162 break; 163 case QLCNIC_RCODE_NOT_PERMITTED: 164 fmt = "CDRP requested action not permitted: [%d]\n"; 165 break; 166 case QLCNIC_RCODE_INVALID: 167 fmt = "CDRP invalid or unknown cmd received: [%d]\n"; 168 break; 169 case QLCNIC_RCODE_TIMEOUT: 170 fmt = "CDRP command timeout: [%d]\n"; 171 break; 172 default: 173 fmt = "CDRP command failed: [%d]\n"; 174 break; 175 } 176 dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]); 177 qlcnic_dump_mbx(adapter, cmd); 178 } else if (rsp == QLCNIC_CDRP_RSP_OK) 179 cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS; 180 181 for (i = 1; i < cmd->rsp.num; i++) 182 cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i), &err); 183 184 /* Release semaphore */ 185 qlcnic_api_unlock(adapter); 186 return cmd->rsp.arg[0]; 187 } 188 189 int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter, u32 fw_cmd) 190 { 191 struct qlcnic_cmd_args cmd; 192 u32 arg1, arg2, arg3; 193 char drv_string[12]; 194 int err = 0; 195 196 memset(drv_string, 0, sizeof(drv_string)); 197 snprintf(drv_string, sizeof(drv_string), "%d"".""%d"".""%d", 198 _QLCNIC_LINUX_MAJOR, _QLCNIC_LINUX_MINOR, 199 _QLCNIC_LINUX_SUBVERSION); 200 201 err = qlcnic_alloc_mbx_args(&cmd, adapter, fw_cmd); 202 if (err) 203 return err; 204 205 memcpy(&arg1, drv_string, sizeof(u32)); 206 memcpy(&arg2, drv_string + 
/* Notify firmware of a new MTU for the active receive context.
 * Silently returns 0 when no context is active yet.
 */
int
qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
{
	int err = 0;
	struct qlcnic_cmd_args cmd;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (recv_ctx->state != QLCNIC_HOST_CTX_STATE_ACTIVE)
		return err;
	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU);
	if (err)
		return err;

	cmd.req.arg[1] = recv_ctx->context_id;
	cmd.req.arg[2] = mtu;

	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
		err = -EIO;
	}
	qlcnic_free_mbx_args(&cmd);
	return err;
}

/* Create the firmware receive context for this adapter.
 *
 * Builds a host-request descriptor (prq) in DMA-coherent memory that
 * describes every RDS (receive descriptor) and SDS (status descriptor)
 * ring, hands its physical address to firmware via CREATE_RX_CTX, then
 * reads the card response (prsp) to learn the CRB register offsets the
 * host must use for each ring's producer/consumer and interrupt mask.
 *
 * Returns 0 on success or a negative errno.
 */
int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
	struct net_device *netdev = adapter->netdev;
	u32 temp_intr_crb_mode, temp_rds_crb_mode;
	struct qlcnic_cardrsp_rds_ring *prsp_rds;
	struct qlcnic_cardrsp_sds_ring *prsp_sds;
	struct qlcnic_hostrq_rds_ring *prq_rds;
	struct qlcnic_hostrq_sds_ring *prq_sds;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_cardrsp_rx_ctx *prsp;
	struct qlcnic_hostrq_rx_ctx *prq;
	u8 i, nrds_rings, nsds_rings;
	struct qlcnic_cmd_args cmd;
	size_t rq_size, rsp_size;
	u32 cap, reg, val, reg2;
	u64 phys_addr;
	u16 temp_u16;
	void *addr;
	int err;

	nrds_rings = adapter->max_rds_rings;
	nsds_rings = adapter->max_sds_rings;

	/* Request/response sizes include one per-ring entry each */
	rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
				   nsds_rings);
	rsp_size = SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
				     nsds_rings);

	addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
				  &hostrq_phys_addr, GFP_KERNEL);
	if (addr == NULL)
		return -ENOMEM;
	prq = addr;

	addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
				  &cardrsp_phys_addr, GFP_KERNEL);
	if (addr == NULL) {
		err = -ENOMEM;
		goto out_free_rq;
	}
	prsp = addr;

	/* Tell firmware where to DMA its response */
	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);

	cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN
	       | QLCNIC_CAP0_VALIDOFF);
	cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);

	if (qlcnic_check_multi_tx(adapter) &&
	    !adapter->ahw->diag_test) {
		cap |= QLCNIC_CAP0_TX_MULTI;
	} else {
		/* Legacy single-Tx mode: shared interrupt CRB, one CRB
		 * per RDS ring.
		 */
		temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
		prq->valid_field_offset = cpu_to_le16(temp_u16);
		prq->txrx_sds_binding = nsds_rings - 1;
		temp_intr_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
		prq->host_int_crb_mode = cpu_to_le32(temp_intr_crb_mode);
		temp_rds_crb_mode = QLCNIC_HOST_RDS_CRB_MODE_UNIQUE;
		prq->host_rds_crb_mode = cpu_to_le32(temp_rds_crb_mode);
	}

	prq->capabilities[0] = cpu_to_le32(cap);

	prq->num_rds_rings = cpu_to_le16(nrds_rings);
	prq->num_sds_rings = cpu_to_le16(nsds_rings);
	prq->rds_ring_offset = 0;

	/* SDS ring descriptors follow the RDS ring descriptors in data[] */
	val = le32_to_cpu(prq->rds_ring_offset) +
	      (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
	prq->sds_ring_offset = cpu_to_le32(val);

	prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
						    le32_to_cpu(prq->rds_ring_offset));

	/* Describe each receive-descriptor ring to firmware */
	for (i = 0; i < nrds_rings; i++) {
		rds_ring = &recv_ctx->rds_rings[i];
		rds_ring->producer = 0;
		prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
		prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
		prq_rds[i].ring_kind = cpu_to_le32(i);
		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
	}

	prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
						    le32_to_cpu(prq->sds_ring_offset));

	/* Describe each status-descriptor ring, clearing its host copy */
	for (i = 0; i < nsds_rings; i++) {
		sds_ring = &recv_ctx->sds_rings[i];
		sds_ring->consumer = 0;
		memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));
		prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
		prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
		if (qlcnic_check_multi_tx(adapter) &&
		    !adapter->ahw->diag_test)
			prq_sds[i].msi_index = cpu_to_le16(ahw->intr_tbl[i].id);
		else
			prq_sds[i].msi_index = cpu_to_le16(i);
	}

	phys_addr = hostrq_phys_addr;
	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX);
	if (err)
		goto out_free_rsp;

	cmd.req.arg[1] = MSD(phys_addr);
	cmd.req.arg[2] = LSD(phys_addr);
	cmd.req.arg[3] = rq_size;
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Failed to create rx ctx in firmware%d\n", err);
		goto out_free_rsp;
	}

	/* Map each ring's producer CRB offset returned by firmware */
	prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
		    &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
		rds_ring = &recv_ctx->rds_rings[i];
		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
		rds_ring->crb_rcv_producer = ahw->pci_base0 + reg;
	}

	prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
		    &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
		sds_ring = &recv_ctx->sds_rings[i];
		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
		/* Multi-Tx uses the source from the interrupt table,
		 * otherwise the per-ring interrupt CRB from firmware.
		 */
		if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
			reg2 = ahw->intr_tbl[i].src;
		else
			reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);

		sds_ring->crb_intr_mask = ahw->pci_base0 + reg2;
		sds_ring->crb_sts_consumer = ahw->pci_base0 + reg;
	}

	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
	recv_ctx->context_id = le16_to_cpu(prsp->context_id);
	recv_ctx->virt_port = prsp->virt_port;

	netdev_info(netdev, "Rx Context[%d] Created, state 0x%x\n",
		    recv_ctx->context_id, recv_ctx->state);
	qlcnic_free_mbx_args(&cmd);

out_free_rsp:
	dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
			  cardrsp_phys_addr);
out_free_rq:
	dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);

	return err;
}
/* Tear down the firmware receive context; best-effort (errors are only
 * logged).  Marks the host-side context state as freed regardless.
 */
void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter)
{
	int err;
	struct qlcnic_cmd_args cmd;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX);
	if (err)
		return;

	cmd.req.arg[1] = recv_ctx->context_id;
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err)
		dev_err(&adapter->pdev->dev,
			"Failed to destroy rx ctx in firmware\n");

	recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
	qlcnic_free_mbx_args(&cmd);
}

/* Create one firmware transmit context for @tx_ring (ring index @ring).
 *
 * Builds the host-request descriptor in DMA-coherent memory, issues
 * CREATE_TX_CTX, and on success records the context id, state and the
 * producer (and, for multi-Tx MSI-X, interrupt-mask) CRB addresses that
 * firmware returned.  Returns 0 on success or a negative errno.
 */
int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
				     struct qlcnic_host_tx_ring *tx_ring,
				     int ring)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_hostrq_tx_ctx *prq;
	struct qlcnic_hostrq_cds_ring *prq_cds;
	struct qlcnic_cardrsp_tx_ctx *prsp;
	struct qlcnic_cmd_args cmd;
	u32 temp, intr_mask, temp_int_crb_mode;
	dma_addr_t rq_phys_addr, rsp_phys_addr;
	int temp_nsds_rings, index, err;
	void *rq_addr, *rsp_addr;
	size_t rq_size, rsp_size;
	u64 phys_addr;
	u16 msix_id;

	/* reset host resources */
	tx_ring->producer = 0;
	tx_ring->sw_consumer = 0;
	*(tx_ring->hw_consumer) = 0;

	rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
	rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size,
				      &rq_phys_addr, GFP_KERNEL);
	if (!rq_addr)
		return -ENOMEM;

	rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
	rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size,
				       &rsp_phys_addr, GFP_KERNEL);
	if (!rsp_addr) {
		err = -ENOMEM;
		goto out_free_rq;
	}

	prq = rq_addr;
	prsp = rsp_addr;

	/* Tell firmware where to DMA its response */
	prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);

	temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
		QLCNIC_CAP0_LSO);
	if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
		temp |= QLCNIC_CAP0_TX_MULTI;

	prq->capabilities[0] = cpu_to_le32(temp);

	if (qlcnic_check_multi_tx(adapter) &&
	    !adapter->ahw->diag_test) {
		/* Tx MSI-X vectors live after the SDS vectors in intr_tbl */
		temp_nsds_rings = adapter->max_sds_rings;
		index = temp_nsds_rings + ring;
		msix_id = ahw->intr_tbl[index].id;
		prq->msi_index = cpu_to_le16(msix_id);
	} else {
		temp_int_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
		prq->host_int_crb_mode = cpu_to_le32(temp_int_crb_mode);
		prq->msi_index = 0;
	}

	prq->interrupt_ctl = 0;
	prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);

	prq_cds = &prq->cds_ring;

	prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
	prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);

	phys_addr = rq_phys_addr;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
	if (err)
		goto out_free_rsp;

	cmd.req.arg[1] = MSD(phys_addr);
	cmd.req.arg[2] = LSD(phys_addr);
	cmd.req.arg[3] = rq_size;
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err == QLCNIC_RCODE_SUCCESS) {
		/* Record the CRB offsets firmware returned */
		tx_ring->state = le32_to_cpu(prsp->host_ctx_state);
		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
		tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
		tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
		if (qlcnic_check_multi_tx(adapter) &&
		    !adapter->ahw->diag_test &&
		    (adapter->flags & QLCNIC_MSIX_ENABLED)) {
			index = adapter->max_sds_rings + ring;
			intr_mask = ahw->intr_tbl[index].src;
			tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask;
		}

		netdev_info(netdev, "Tx Context[0x%x] Created, state 0x%x\n",
			    tx_ring->ctx_id, tx_ring->state);
	} else {
		netdev_err(netdev, "Failed to create tx ctx in firmware%d\n",
			   err);
		err = -EIO;
	}
	qlcnic_free_mbx_args(&cmd);

out_free_rsp:
	dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
			  rsp_phys_addr);
out_free_rq:
	dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);

	return err;
}
516 intr_mask = ahw->intr_tbl[index].src; 517 tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask; 518 } 519 520 netdev_info(netdev, "Tx Context[0x%x] Created, state 0x%x\n", 521 tx_ring->ctx_id, tx_ring->state); 522 } else { 523 netdev_err(netdev, "Failed to create tx ctx in firmware%d\n", 524 err); 525 err = -EIO; 526 } 527 qlcnic_free_mbx_args(&cmd); 528 529 out_free_rsp: 530 dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr, 531 rsp_phys_addr); 532 out_free_rq: 533 dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr); 534 535 return err; 536 } 537 538 void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter, 539 struct qlcnic_host_tx_ring *tx_ring) 540 { 541 struct qlcnic_cmd_args cmd; 542 int ret; 543 544 ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX); 545 if (ret) 546 return; 547 548 cmd.req.arg[1] = tx_ring->ctx_id; 549 if (qlcnic_issue_cmd(adapter, &cmd)) 550 dev_err(&adapter->pdev->dev, 551 "Failed to destroy tx ctx in firmware\n"); 552 qlcnic_free_mbx_args(&cmd); 553 } 554 555 int 556 qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config) 557 { 558 int err; 559 struct qlcnic_cmd_args cmd; 560 561 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT); 562 if (err) 563 return err; 564 565 cmd.req.arg[1] = config; 566 err = qlcnic_issue_cmd(adapter, &cmd); 567 qlcnic_free_mbx_args(&cmd); 568 return err; 569 } 570 571 int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter) 572 { 573 void *addr; 574 int err, ring; 575 struct qlcnic_recv_context *recv_ctx; 576 struct qlcnic_host_rds_ring *rds_ring; 577 struct qlcnic_host_sds_ring *sds_ring; 578 struct qlcnic_host_tx_ring *tx_ring; 579 __le32 *ptr; 580 581 struct pci_dev *pdev = adapter->pdev; 582 583 recv_ctx = adapter->recv_ctx; 584 585 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { 586 tx_ring = &adapter->tx_ring[ring]; 587 ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32), 588 
&tx_ring->hw_cons_phys_addr, 589 GFP_KERNEL); 590 if (ptr == NULL) 591 return -ENOMEM; 592 593 tx_ring->hw_consumer = ptr; 594 /* cmd desc ring */ 595 addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring), 596 &tx_ring->phys_addr, 597 GFP_KERNEL); 598 if (addr == NULL) { 599 err = -ENOMEM; 600 goto err_out_free; 601 } 602 603 tx_ring->desc_head = addr; 604 } 605 606 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 607 rds_ring = &recv_ctx->rds_rings[ring]; 608 addr = dma_alloc_coherent(&adapter->pdev->dev, 609 RCV_DESC_RINGSIZE(rds_ring), 610 &rds_ring->phys_addr, GFP_KERNEL); 611 if (addr == NULL) { 612 err = -ENOMEM; 613 goto err_out_free; 614 } 615 rds_ring->desc_head = addr; 616 617 } 618 619 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 620 sds_ring = &recv_ctx->sds_rings[ring]; 621 622 addr = dma_alloc_coherent(&adapter->pdev->dev, 623 STATUS_DESC_RINGSIZE(sds_ring), 624 &sds_ring->phys_addr, GFP_KERNEL); 625 if (addr == NULL) { 626 err = -ENOMEM; 627 goto err_out_free; 628 } 629 sds_ring->desc_head = addr; 630 } 631 632 return 0; 633 634 err_out_free: 635 qlcnic_free_hw_resources(adapter); 636 return err; 637 } 638 639 int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev) 640 { 641 int i, err, ring; 642 643 if (dev->flags & QLCNIC_NEED_FLR) { 644 pci_reset_function(dev->pdev); 645 dev->flags &= ~QLCNIC_NEED_FLR; 646 } 647 648 if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) { 649 if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) { 650 err = qlcnic_83xx_config_intrpt(dev, 1); 651 if (err) 652 return err; 653 } 654 } 655 656 if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) && 657 qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test) { 658 err = qlcnic_82xx_mq_intrpt(dev, 1); 659 if (err) 660 return err; 661 } 662 663 err = qlcnic_fw_cmd_create_rx_ctx(dev); 664 if (err) 665 goto err_out; 666 667 for (ring = 0; ring < dev->max_drv_tx_rings; ring++) { 668 err = qlcnic_fw_cmd_create_tx_ctx(dev, 669 
&dev->tx_ring[ring], 670 ring); 671 if (err) { 672 qlcnic_fw_cmd_del_rx_ctx(dev); 673 if (ring == 0) 674 goto err_out; 675 676 for (i = 0; i < ring; i++) 677 qlcnic_fw_cmd_del_tx_ctx(dev, &dev->tx_ring[i]); 678 679 goto err_out; 680 } 681 } 682 683 set_bit(__QLCNIC_FW_ATTACHED, &dev->state); 684 685 return 0; 686 687 err_out: 688 if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) && 689 qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test) 690 qlcnic_82xx_config_intrpt(dev, 0); 691 692 if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) { 693 if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) 694 qlcnic_83xx_config_intrpt(dev, 0); 695 } 696 697 return err; 698 } 699 700 void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter) 701 { 702 int ring; 703 704 if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) { 705 qlcnic_fw_cmd_del_rx_ctx(adapter); 706 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) 707 qlcnic_fw_cmd_del_tx_ctx(adapter, 708 &adapter->tx_ring[ring]); 709 710 if (qlcnic_82xx_check(adapter) && 711 (adapter->flags & QLCNIC_MSIX_ENABLED) && 712 qlcnic_check_multi_tx(adapter) && 713 !adapter->ahw->diag_test) 714 qlcnic_82xx_config_intrpt(adapter, 0); 715 716 if (qlcnic_83xx_check(adapter) && 717 (adapter->flags & QLCNIC_MSIX_ENABLED)) { 718 if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) 719 qlcnic_83xx_config_intrpt(adapter, 0); 720 } 721 /* Allow dma queues to drain after context reset */ 722 mdelay(20); 723 } 724 } 725 726 void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter) 727 { 728 struct qlcnic_recv_context *recv_ctx; 729 struct qlcnic_host_rds_ring *rds_ring; 730 struct qlcnic_host_sds_ring *sds_ring; 731 struct qlcnic_host_tx_ring *tx_ring; 732 int ring; 733 734 recv_ctx = adapter->recv_ctx; 735 736 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { 737 tx_ring = &adapter->tx_ring[ring]; 738 if (tx_ring->hw_consumer != NULL) { 739 dma_free_coherent(&adapter->pdev->dev, sizeof(u32), 
740 tx_ring->hw_consumer, 741 tx_ring->hw_cons_phys_addr); 742 743 tx_ring->hw_consumer = NULL; 744 } 745 746 if (tx_ring->desc_head != NULL) { 747 dma_free_coherent(&adapter->pdev->dev, 748 TX_DESC_RINGSIZE(tx_ring), 749 tx_ring->desc_head, 750 tx_ring->phys_addr); 751 tx_ring->desc_head = NULL; 752 } 753 } 754 755 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 756 rds_ring = &recv_ctx->rds_rings[ring]; 757 758 if (rds_ring->desc_head != NULL) { 759 dma_free_coherent(&adapter->pdev->dev, 760 RCV_DESC_RINGSIZE(rds_ring), 761 rds_ring->desc_head, 762 rds_ring->phys_addr); 763 rds_ring->desc_head = NULL; 764 } 765 } 766 767 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 768 sds_ring = &recv_ctx->sds_rings[ring]; 769 770 if (sds_ring->desc_head != NULL) { 771 dma_free_coherent(&adapter->pdev->dev, 772 STATUS_DESC_RINGSIZE(sds_ring), 773 sds_ring->desc_head, 774 sds_ring->phys_addr); 775 sds_ring->desc_head = NULL; 776 } 777 } 778 } 779 780 int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *adapter, u8 op_type) 781 { 782 struct qlcnic_hardware_context *ahw = adapter->ahw; 783 struct net_device *netdev = adapter->netdev; 784 struct qlcnic_cmd_args cmd; 785 u32 type, val; 786 int i, err = 0; 787 788 for (i = 0; i < ahw->num_msix; i++) { 789 qlcnic_alloc_mbx_args(&cmd, adapter, 790 QLCNIC_CMD_MQ_TX_CONFIG_INTR); 791 type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL; 792 val = type | (ahw->intr_tbl[i].type << 4); 793 if (ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX) 794 val |= (ahw->intr_tbl[i].id << 16); 795 cmd.req.arg[1] = val; 796 err = qlcnic_issue_cmd(adapter, &cmd); 797 if (err) { 798 netdev_err(netdev, "Failed to %s interrupts %d\n", 799 op_type == QLCNIC_INTRPT_ADD ? 
"Add" : 800 "Delete", err); 801 qlcnic_free_mbx_args(&cmd); 802 return err; 803 } 804 val = cmd.rsp.arg[1]; 805 if (LSB(val)) { 806 netdev_info(netdev, 807 "failed to configure interrupt for %d\n", 808 ahw->intr_tbl[i].id); 809 continue; 810 } 811 if (op_type) { 812 ahw->intr_tbl[i].id = MSW(val); 813 ahw->intr_tbl[i].enabled = 1; 814 ahw->intr_tbl[i].src = cmd.rsp.arg[2]; 815 } else { 816 ahw->intr_tbl[i].id = i; 817 ahw->intr_tbl[i].enabled = 0; 818 ahw->intr_tbl[i].src = 0; 819 } 820 qlcnic_free_mbx_args(&cmd); 821 } 822 823 return err; 824 } 825 826 int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac, 827 u8 function) 828 { 829 int err, i; 830 struct qlcnic_cmd_args cmd; 831 u32 mac_low, mac_high; 832 833 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS); 834 if (err) 835 return err; 836 837 cmd.req.arg[1] = function | BIT_8; 838 err = qlcnic_issue_cmd(adapter, &cmd); 839 840 if (err == QLCNIC_RCODE_SUCCESS) { 841 mac_low = cmd.rsp.arg[1]; 842 mac_high = cmd.rsp.arg[2]; 843 844 for (i = 0; i < 2; i++) 845 mac[i] = (u8) (mac_high >> ((1 - i) * 8)); 846 for (i = 2; i < 6; i++) 847 mac[i] = (u8) (mac_low >> ((5 - i) * 8)); 848 } else { 849 dev_err(&adapter->pdev->dev, 850 "Failed to get mac address%d\n", err); 851 err = -EIO; 852 } 853 qlcnic_free_mbx_args(&cmd); 854 return err; 855 } 856 857 /* Get info of a NIC partition */ 858 int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter, 859 struct qlcnic_info *npar_info, u8 func_id) 860 { 861 int err; 862 dma_addr_t nic_dma_t; 863 const struct qlcnic_info_le *nic_info; 864 void *nic_info_addr; 865 struct qlcnic_cmd_args cmd; 866 size_t nic_size = sizeof(struct qlcnic_info_le); 867 868 nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size, 869 &nic_dma_t, GFP_KERNEL); 870 if (!nic_info_addr) 871 return -ENOMEM; 872 873 nic_info = nic_info_addr; 874 875 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO); 876 if (err) 877 goto out_free_dma; 878 879 
/* Configure a NIC partition.
 * Only the management function may do this; the partition parameters in
 * @nic are serialized little-endian into a DMA buffer that firmware reads.
 * Returns 0, -EIO on permission/command failure, or -ENOMEM.
 */
int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
			     struct qlcnic_info *nic)
{
	int err = -EIO;
	dma_addr_t nic_dma_t;
	void *nic_info_addr;
	struct qlcnic_cmd_args cmd;
	struct qlcnic_info_le *nic_info;
	size_t nic_size = sizeof(struct qlcnic_info_le);

	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
		return err;

	nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
					    &nic_dma_t, GFP_KERNEL);
	if (!nic_info_addr)
		return -ENOMEM;

	nic_info = nic_info_addr;

	nic_info->pci_func = cpu_to_le16(nic->pci_func);
	nic_info->op_mode = cpu_to_le16(nic->op_mode);
	nic_info->phys_port = cpu_to_le16(nic->phys_port);
	nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
	nic_info->capabilities = cpu_to_le32(nic->capabilities);
	/* no byte-swap: presumably a single-byte field — verify against
	 * struct qlcnic_info_le before adding cpu_to_le16() here
	 */
	nic_info->max_mac_filters = nic->max_mac_filters;
	nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
	nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
	nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
	nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
	if (err)
		goto out_free_dma;

	cmd.req.arg[1] = MSD(nic_dma_t);
	cmd.req.arg[2] = LSD(nic_dma_t);
	/* arg[3]: target function in the high half, buffer size in the low */
	cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size);
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to set nic info%d\n", err);
		err = -EIO;
	}

	qlcnic_free_mbx_args(&cmd);
out_free_dma:
	dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
			  nic_dma_t);

	return err;
}
/* Get PCI Info of a partition.
 * Firmware DMAs one qlcnic_pci_info_le record per possible PCI function
 * into a coherent buffer; each record is converted to host order into the
 * caller's @pci_info array.  Also recounts ahw->act_pci_func (the number
 * of NIC-type functions).  Returns 0, -EIO, or -ENOMEM.
 */
int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
			     struct qlcnic_pci_info *pci_info)
{
	int err = 0, i;
	struct qlcnic_cmd_args cmd;
	dma_addr_t pci_info_dma_t;
	struct qlcnic_pci_info_le *npar;
	void *pci_info_addr;
	size_t npar_size = sizeof(struct qlcnic_pci_info_le);
	size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;

	pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size,
					    &pci_info_dma_t, GFP_KERNEL);
	if (!pci_info_addr)
		return -ENOMEM;

	npar = pci_info_addr;
	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
	if (err)
		goto out_free_dma;

	cmd.req.arg[1] = MSD(pci_info_dma_t);
	cmd.req.arg[2] = LSD(pci_info_dma_t);
	cmd.req.arg[3] = pci_size;
	err = qlcnic_issue_cmd(adapter, &cmd);

	/* Recount active NIC functions from scratch */
	adapter->ahw->act_pci_func = 0;
	if (err == QLCNIC_RCODE_SUCCESS) {
		for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
			pci_info->id = le16_to_cpu(npar->id);
			pci_info->active = le16_to_cpu(npar->active);
			pci_info->type = le16_to_cpu(npar->type);
			if (pci_info->type == QLCNIC_TYPE_NIC)
				adapter->ahw->act_pci_func++;
			pci_info->default_port =
				le16_to_cpu(npar->default_port);
			pci_info->tx_min_bw =
				le16_to_cpu(npar->tx_min_bw);
			pci_info->tx_max_bw =
				le16_to_cpu(npar->tx_max_bw);
			memcpy(pci_info->mac, npar->mac, ETH_ALEN);
		}
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get PCI Info%d\n", err);
		err = -EIO;
	}

	qlcnic_free_mbx_args(&cmd);
out_free_dma:
	dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
			  pci_info_dma_t);

	return err;
}
(pci_info->type == QLCNIC_TYPE_NIC) 997 adapter->ahw->act_pci_func++; 998 pci_info->default_port = 999 le16_to_cpu(npar->default_port); 1000 pci_info->tx_min_bw = 1001 le16_to_cpu(npar->tx_min_bw); 1002 pci_info->tx_max_bw = 1003 le16_to_cpu(npar->tx_max_bw); 1004 memcpy(pci_info->mac, npar->mac, ETH_ALEN); 1005 } 1006 } else { 1007 dev_err(&adapter->pdev->dev, 1008 "Failed to get PCI Info%d\n", err); 1009 err = -EIO; 1010 } 1011 1012 qlcnic_free_mbx_args(&cmd); 1013 out_free_dma: 1014 dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr, 1015 pci_info_dma_t); 1016 1017 return err; 1018 } 1019 1020 /* Configure eSwitch for port mirroring */ 1021 int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id, 1022 u8 enable_mirroring, u8 pci_func) 1023 { 1024 struct device *dev = &adapter->pdev->dev; 1025 struct qlcnic_cmd_args cmd; 1026 int err = -EIO; 1027 u32 arg1; 1028 1029 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC || 1030 !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) 1031 return err; 1032 1033 arg1 = id | (enable_mirroring ? 
/* Fetch eSwitch statistics for one port (@func, @rx_tx direction) from
 * firmware into @esw_stats.  Non-management functions may only query
 * their own function.  Returns 0, -EIO, or -ENOMEM.
 */
int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
			  const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {

	size_t stats_size = sizeof(struct qlcnic_esw_stats_le);
	struct qlcnic_esw_stats_le *stats;
	dma_addr_t stats_dma_t;
	void *stats_addr;
	u32 arg1;
	struct qlcnic_cmd_args cmd;
	int err;

	if (esw_stats == NULL)
		return -ENOMEM;

	if ((adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) &&
	    (func != adapter->ahw->pci_func)) {
		dev_err(&adapter->pdev->dev,
			"Not privilege to query stats for func=%d", func);
		return -EIO;
	}

	/* Firmware DMAs the statistics block into this buffer */
	stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
					 &stats_dma_t, GFP_KERNEL);
	if (!stats_addr)
		return -ENOMEM;

	/* arg1: func | stats version | PORT query type | direction | size */
	arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
	arg1 |= rx_tx << 15 | stats_size << 16;

	err = qlcnic_alloc_mbx_args(&cmd, adapter,
				    QLCNIC_CMD_GET_ESWITCH_STATS);
	if (err)
		goto out_free_dma;

	cmd.req.arg[1] = arg1;
	cmd.req.arg[2] = MSD(stats_dma_t);
	cmd.req.arg[3] = LSD(stats_dma_t);
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (!err) {
		/* Convert the little-endian counters to host order */
		stats = stats_addr;
		esw_stats->context_id = le16_to_cpu(stats->context_id);
		esw_stats->version = le16_to_cpu(stats->version);
		esw_stats->size = le16_to_cpu(stats->size);
		esw_stats->multicast_frames =
			le64_to_cpu(stats->multicast_frames);
		esw_stats->broadcast_frames =
			le64_to_cpu(stats->broadcast_frames);
		esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
		esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
		esw_stats->local_frames = le64_to_cpu(stats->local_frames);
		esw_stats->errors = le64_to_cpu(stats->errors);
		esw_stats->numbytes = le64_to_cpu(stats->numbytes);
	}

	qlcnic_free_mbx_args(&cmd);
out_free_dma:
	dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
			  stats_dma_t);

	return err;
}
esw_stats->multicast_frames = 1100 le64_to_cpu(stats->multicast_frames); 1101 esw_stats->broadcast_frames = 1102 le64_to_cpu(stats->broadcast_frames); 1103 esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames); 1104 esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames); 1105 esw_stats->local_frames = le64_to_cpu(stats->local_frames); 1106 esw_stats->errors = le64_to_cpu(stats->errors); 1107 esw_stats->numbytes = le64_to_cpu(stats->numbytes); 1108 } 1109 1110 qlcnic_free_mbx_args(&cmd); 1111 out_free_dma: 1112 dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr, 1113 stats_dma_t); 1114 1115 return err; 1116 } 1117 1118 /* This routine will retrieve the MAC statistics from firmware */ 1119 int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter, 1120 struct qlcnic_mac_statistics *mac_stats) 1121 { 1122 struct qlcnic_mac_statistics_le *stats; 1123 struct qlcnic_cmd_args cmd; 1124 size_t stats_size = sizeof(struct qlcnic_mac_statistics_le); 1125 dma_addr_t stats_dma_t; 1126 void *stats_addr; 1127 int err; 1128 1129 if (mac_stats == NULL) 1130 return -ENOMEM; 1131 1132 stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size, 1133 &stats_dma_t, GFP_KERNEL); 1134 if (!stats_addr) 1135 return -ENOMEM; 1136 1137 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS); 1138 if (err) 1139 goto out_free_dma; 1140 1141 cmd.req.arg[1] = stats_size << 16; 1142 cmd.req.arg[2] = MSD(stats_dma_t); 1143 cmd.req.arg[3] = LSD(stats_dma_t); 1144 err = qlcnic_issue_cmd(adapter, &cmd); 1145 if (!err) { 1146 stats = stats_addr; 1147 mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames); 1148 mac_stats->mac_tx_bytes = le64_to_cpu(stats->mac_tx_bytes); 1149 mac_stats->mac_tx_mcast_pkts = 1150 le64_to_cpu(stats->mac_tx_mcast_pkts); 1151 mac_stats->mac_tx_bcast_pkts = 1152 le64_to_cpu(stats->mac_tx_bcast_pkts); 1153 mac_stats->mac_rx_frames = le64_to_cpu(stats->mac_rx_frames); 1154 mac_stats->mac_rx_bytes = 
le64_to_cpu(stats->mac_rx_bytes); 1155 mac_stats->mac_rx_mcast_pkts = 1156 le64_to_cpu(stats->mac_rx_mcast_pkts); 1157 mac_stats->mac_rx_length_error = 1158 le64_to_cpu(stats->mac_rx_length_error); 1159 mac_stats->mac_rx_length_small = 1160 le64_to_cpu(stats->mac_rx_length_small); 1161 mac_stats->mac_rx_length_large = 1162 le64_to_cpu(stats->mac_rx_length_large); 1163 mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber); 1164 mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped); 1165 mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error); 1166 } else { 1167 dev_err(&adapter->pdev->dev, 1168 "%s: Get mac stats failed, err=%d.\n", __func__, err); 1169 } 1170 1171 qlcnic_free_mbx_args(&cmd); 1172 1173 out_free_dma: 1174 dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr, 1175 stats_dma_t); 1176 1177 return err; 1178 } 1179 1180 int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch, 1181 const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) { 1182 1183 struct __qlcnic_esw_statistics port_stats; 1184 u8 i; 1185 int ret = -EIO; 1186 1187 if (esw_stats == NULL) 1188 return -ENOMEM; 1189 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) 1190 return -EIO; 1191 if (adapter->npars == NULL) 1192 return -EIO; 1193 1194 memset(esw_stats, 0, sizeof(u64)); 1195 esw_stats->unicast_frames = QLCNIC_STATS_NOT_AVAIL; 1196 esw_stats->multicast_frames = QLCNIC_STATS_NOT_AVAIL; 1197 esw_stats->broadcast_frames = QLCNIC_STATS_NOT_AVAIL; 1198 esw_stats->dropped_frames = QLCNIC_STATS_NOT_AVAIL; 1199 esw_stats->errors = QLCNIC_STATS_NOT_AVAIL; 1200 esw_stats->local_frames = QLCNIC_STATS_NOT_AVAIL; 1201 esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL; 1202 esw_stats->context_id = eswitch; 1203 1204 for (i = 0; i < adapter->ahw->act_pci_func; i++) { 1205 if (adapter->npars[i].phy_port != eswitch) 1206 continue; 1207 1208 memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics)); 1209 if (qlcnic_get_port_stats(adapter, 
adapter->npars[i].pci_func, 1210 rx_tx, &port_stats)) 1211 continue; 1212 1213 esw_stats->size = port_stats.size; 1214 esw_stats->version = port_stats.version; 1215 QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames, 1216 port_stats.unicast_frames); 1217 QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames, 1218 port_stats.multicast_frames); 1219 QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames, 1220 port_stats.broadcast_frames); 1221 QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames, 1222 port_stats.dropped_frames); 1223 QLCNIC_ADD_ESW_STATS(esw_stats->errors, 1224 port_stats.errors); 1225 QLCNIC_ADD_ESW_STATS(esw_stats->local_frames, 1226 port_stats.local_frames); 1227 QLCNIC_ADD_ESW_STATS(esw_stats->numbytes, 1228 port_stats.numbytes); 1229 ret = 0; 1230 } 1231 return ret; 1232 } 1233 1234 int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw, 1235 const u8 port, const u8 rx_tx) 1236 { 1237 int err; 1238 u32 arg1; 1239 struct qlcnic_cmd_args cmd; 1240 1241 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) 1242 return -EIO; 1243 1244 if (func_esw == QLCNIC_STATS_PORT) { 1245 if (port >= QLCNIC_MAX_PCI_FUNC) 1246 goto err_ret; 1247 } else if (func_esw == QLCNIC_STATS_ESWITCH) { 1248 if (port >= QLCNIC_NIU_MAX_XG_PORTS) 1249 goto err_ret; 1250 } else { 1251 goto err_ret; 1252 } 1253 1254 if (rx_tx > QLCNIC_QUERY_TX_COUNTER) 1255 goto err_ret; 1256 1257 arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12; 1258 arg1 |= BIT_14 | rx_tx << 15; 1259 1260 err = qlcnic_alloc_mbx_args(&cmd, adapter, 1261 QLCNIC_CMD_GET_ESWITCH_STATS); 1262 if (err) 1263 return err; 1264 1265 cmd.req.arg[1] = arg1; 1266 err = qlcnic_issue_cmd(adapter, &cmd); 1267 qlcnic_free_mbx_args(&cmd); 1268 return err; 1269 1270 err_ret: 1271 dev_err(&adapter->pdev->dev, 1272 "Invalid args func_esw %d port %d rx_ctx %d\n", 1273 func_esw, port, rx_tx); 1274 return -EIO; 1275 } 1276 1277 static int __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, 1278 u32 *arg1, u32 *arg2) 
1279 { 1280 struct device *dev = &adapter->pdev->dev; 1281 struct qlcnic_cmd_args cmd; 1282 u8 pci_func = *arg1 >> 8; 1283 int err; 1284 1285 err = qlcnic_alloc_mbx_args(&cmd, adapter, 1286 QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG); 1287 if (err) 1288 return err; 1289 1290 cmd.req.arg[1] = *arg1; 1291 err = qlcnic_issue_cmd(adapter, &cmd); 1292 *arg1 = cmd.rsp.arg[1]; 1293 *arg2 = cmd.rsp.arg[2]; 1294 qlcnic_free_mbx_args(&cmd); 1295 1296 if (err == QLCNIC_RCODE_SUCCESS) 1297 dev_info(dev, "Get eSwitch port config for vNIC function %d\n", 1298 pci_func); 1299 else 1300 dev_err(dev, "Failed to get eswitch port config for vNIC function %d\n", 1301 pci_func); 1302 return err; 1303 } 1304 /* Configure eSwitch port 1305 op_mode = 0 for setting default port behavior 1306 op_mode = 1 for setting vlan id 1307 op_mode = 2 for deleting vlan id 1308 op_type = 0 for vlan_id 1309 op_type = 1 for port vlan_id 1310 */ 1311 int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, 1312 struct qlcnic_esw_func_cfg *esw_cfg) 1313 { 1314 struct device *dev = &adapter->pdev->dev; 1315 struct qlcnic_cmd_args cmd; 1316 int err = -EIO, index; 1317 u32 arg1, arg2 = 0; 1318 u8 pci_func; 1319 1320 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) 1321 return err; 1322 pci_func = esw_cfg->pci_func; 1323 index = qlcnic_is_valid_nic_func(adapter, pci_func); 1324 if (index < 0) 1325 return err; 1326 arg1 = (adapter->npars[index].phy_port & BIT_0); 1327 arg1 |= (pci_func << 8); 1328 1329 if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2)) 1330 return err; 1331 arg1 &= ~(0x0ff << 8); 1332 arg1 |= (pci_func << 8); 1333 arg1 &= ~(BIT_2 | BIT_3); 1334 switch (esw_cfg->op_mode) { 1335 case QLCNIC_PORT_DEFAULTS: 1336 arg1 |= (BIT_4 | BIT_6 | BIT_7); 1337 arg2 |= (BIT_0 | BIT_1); 1338 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) 1339 arg2 |= (BIT_2 | BIT_3); 1340 if (!(esw_cfg->discard_tagged)) 1341 arg1 &= ~BIT_4; 1342 if (!(esw_cfg->promisc_mode)) 1343 arg1 &= ~BIT_6; 1344 if 
(!(esw_cfg->mac_override)) 1345 arg1 &= ~BIT_7; 1346 if (!(esw_cfg->mac_anti_spoof)) 1347 arg2 &= ~BIT_0; 1348 if (!(esw_cfg->offload_flags & BIT_0)) 1349 arg2 &= ~(BIT_1 | BIT_2 | BIT_3); 1350 if (!(esw_cfg->offload_flags & BIT_1)) 1351 arg2 &= ~BIT_2; 1352 if (!(esw_cfg->offload_flags & BIT_2)) 1353 arg2 &= ~BIT_3; 1354 break; 1355 case QLCNIC_ADD_VLAN: 1356 arg1 |= (BIT_2 | BIT_5); 1357 arg1 |= (esw_cfg->vlan_id << 16); 1358 break; 1359 case QLCNIC_DEL_VLAN: 1360 arg1 |= (BIT_3 | BIT_5); 1361 arg1 &= ~(0x0ffff << 16); 1362 break; 1363 default: 1364 return err; 1365 } 1366 1367 err = qlcnic_alloc_mbx_args(&cmd, adapter, 1368 QLCNIC_CMD_CONFIGURE_ESWITCH); 1369 if (err) 1370 return err; 1371 1372 cmd.req.arg[1] = arg1; 1373 cmd.req.arg[2] = arg2; 1374 err = qlcnic_issue_cmd(adapter, &cmd); 1375 qlcnic_free_mbx_args(&cmd); 1376 1377 if (err != QLCNIC_RCODE_SUCCESS) 1378 dev_err(dev, "Failed to configure eswitch for vNIC function %d\n", 1379 pci_func); 1380 else 1381 dev_info(dev, "Configured eSwitch for vNIC function %d\n", 1382 pci_func); 1383 1384 return err; 1385 } 1386 1387 int 1388 qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter, 1389 struct qlcnic_esw_func_cfg *esw_cfg) 1390 { 1391 u32 arg1, arg2; 1392 int index; 1393 u8 phy_port; 1394 1395 if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) { 1396 index = qlcnic_is_valid_nic_func(adapter, esw_cfg->pci_func); 1397 if (index < 0) 1398 return -EIO; 1399 phy_port = adapter->npars[index].phy_port; 1400 } else { 1401 phy_port = adapter->ahw->physical_port; 1402 } 1403 arg1 = phy_port; 1404 arg1 |= (esw_cfg->pci_func << 8); 1405 if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2)) 1406 return -EIO; 1407 1408 esw_cfg->discard_tagged = !!(arg1 & BIT_4); 1409 esw_cfg->host_vlan_tag = !!(arg1 & BIT_5); 1410 esw_cfg->promisc_mode = !!(arg1 & BIT_6); 1411 esw_cfg->mac_override = !!(arg1 & BIT_7); 1412 esw_cfg->vlan_id = LSW(arg1 >> 16); 1413 esw_cfg->mac_anti_spoof = (arg2 & 0x1); 1414 
esw_cfg->offload_flags = ((arg2 >> 1) & 0x7); 1415 1416 return 0; 1417 } 1418