/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include "qlcnic_sriov.h"
#include "qlcnic.h"
#include "qlcnic_83xx_hw.h"
#include <linux/types.h>

#define QLC_BC_COMMAND	0
#define QLC_BC_RESPONSE	1

#define QLC_MBOX_RESP_TIMEOUT		(10 * HZ)
#define QLC_MBOX_CH_FREE_TIMEOUT	(10 * HZ)

#define QLC_BC_MSG	0
#define QLC_BC_CFREE	1
#define QLC_BC_FLR	2
#define QLC_BC_HDR_SZ	16
#define QLC_BC_PAYLOAD_SZ	(1024 - QLC_BC_HDR_SZ)

#define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF		2048
#define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF	512

#define QLC_83XX_VF_RESET_FAIL_THRESH	8
#define QLC_BC_CMD_MAX_RETRY_CNT	5

static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *,
				  struct qlcnic_cmd_args *);
static void qlcnic_sriov_process_bc_cmd(struct work_struct *);

static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
	.read_crb			= qlcnic_83xx_read_crb,
	.write_crb			= qlcnic_83xx_write_crb,
	.read_reg			= qlcnic_83xx_rd_reg_indirect,
	.write_reg			= qlcnic_83xx_wrt_reg_indirect,
	.get_mac_address		= qlcnic_83xx_get_mac_address,
	.setup_intr			= qlcnic_83xx_setup_intr,
	.alloc_mbx_args			= qlcnic_83xx_alloc_mbx_args,
	.mbx_cmd			= qlcnic_sriov_vf_mbx_op,
	.get_func_no			= qlcnic_83xx_get_func_no,
	.api_lock			= qlcnic_83xx_cam_lock,
	.api_unlock			= qlcnic_83xx_cam_unlock,
	.process_lb_rcv_ring_diag	= qlcnic_83xx_process_rcv_ring_diag,
	.create_rx_ctx			= qlcnic_83xx_create_rx_ctx,
	.create_tx_ctx			= qlcnic_83xx_create_tx_ctx,
	.del_rx_ctx			= qlcnic_83xx_del_rx_ctx,
	.del_tx_ctx			= qlcnic_83xx_del_tx_ctx,
	.setup_link_event		= qlcnic_83xx_setup_link_event,
	.get_nic_info			= qlcnic_83xx_get_nic_info,
	.get_pci_info			= qlcnic_83xx_get_pci_info,
	.set_nic_info			= qlcnic_83xx_set_nic_info,
	.change_macvlan			= qlcnic_83xx_sre_macaddr_change,
	.napi_enable			= qlcnic_83xx_napi_enable,
	.napi_disable			= qlcnic_83xx_napi_disable,
	.config_intr_coal		= qlcnic_83xx_config_intr_coal,
	.config_rss			= qlcnic_83xx_config_rss,
	.config_hw_lro			= qlcnic_83xx_config_hw_lro,
	.config_promisc_mode		= qlcnic_83xx_nic_set_promisc,
	.change_l2_filter		= qlcnic_83xx_change_l2_filter,
	.get_board_info			= qlcnic_83xx_get_port_info,
	.free_mac_list			= qlcnic_sriov_vf_free_mac_list,
};

static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
	.config_bridged_mode	= qlcnic_config_bridged_mode,
	.config_led		= qlcnic_config_led,
	.cancel_idc_work	= qlcnic_sriov_vf_cancel_fw_work,
	.napi_add		= qlcnic_83xx_napi_add,
	.napi_del		= qlcnic_83xx_napi_del,
	.shutdown		= qlcnic_sriov_vf_shutdown,
	.resume			= qlcnic_sriov_vf_resume,
	.config_ipaddr		= qlcnic_83xx_config_ipaddr,
	.clear_legacy_intr	= qlcnic_83xx_clear_legacy_intr,
};

static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = {
	{QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2},
	{QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2},
	{QLCNIC_BC_CMD_GET_ACL, 3, 14},
	{QLCNIC_BC_CMD_CFG_GUEST_VLAN, 2, 2},
};
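/* A back-channel event word, as read from the 83xx event mailbox, packs
 * the notification type in its low bits and the PCI function the event
 * concerns in bits 4-11:
 *
 *   bit 0     - QLC_BC_MSG:   back-channel message pending
 *   bit 1     - QLC_BC_CFREE: channel-free notification
 *   bit 2     - QLC_BC_FLR:   function level reset notification
 *   bits 4-11 - source/target PCI function
 *
 * The helpers below decode these fields.
 */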
static inline bool qlcnic_sriov_bc_msg_check(u32 val)
{
	return (val & (1 << QLC_BC_MSG)) ? true : false;
}

static inline bool qlcnic_sriov_channel_free_check(u32 val)
{
	return (val & (1 << QLC_BC_CFREE)) ? true : false;
}

static inline bool qlcnic_sriov_flr_check(u32 val)
{
	return (val & (1 << QLC_BC_FLR)) ? true : false;
}

static inline u8 qlcnic_sriov_target_func_id(u32 val)
{
	return (val >> 4) & 0xff;
}
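/* Map a VF index to its PCI routing ID using the PF's SR-IOV capability
 * (per the PCIe SR-IOV spec: routing ID = PF devfn + First VF Offset +
 * VF Stride * vf_id). As a purely illustrative example, with devfn 0,
 * offset 3 and stride 1, VF 2 would map to routing ID 5. A VF has no
 * SR-IOV capability of its own to read, so the function returns 0 there.
 */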
static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
{
	struct pci_dev *dev = adapter->pdev;
	int pos;
	u16 stride, offset;

	if (qlcnic_sriov_vf_check(adapter))
		return 0;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	return (dev->devfn + offset + stride * vf_id) & 0xff;
}

int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
{
	struct qlcnic_sriov *sriov;
	struct qlcnic_back_channel *bc;
	struct workqueue_struct *wq;
	struct qlcnic_vport *vp;
	struct qlcnic_vf_info *vf;
	int err, i;

	if (!qlcnic_sriov_enable_check(adapter))
		return -EIO;

	sriov = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL);
	if (!sriov)
		return -ENOMEM;

	adapter->ahw->sriov = sriov;
	sriov->num_vfs = num_vfs;
	bc = &sriov->bc;
	sriov->vf_info = kcalloc(num_vfs, sizeof(struct qlcnic_vf_info),
				 GFP_KERNEL);
	if (!sriov->vf_info) {
		err = -ENOMEM;
		goto qlcnic_free_sriov;
	}

	wq = create_singlethread_workqueue("bc-trans");
	if (wq == NULL) {
		err = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Cannot create bc-trans workqueue\n");
		goto qlcnic_free_vf_info;
	}

	bc->bc_trans_wq = wq;

	wq = create_singlethread_workqueue("async");
	if (wq == NULL) {
		err = -ENOMEM;
		dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n");
		goto qlcnic_destroy_trans_wq;
	}

	bc->bc_async_wq = wq;
	INIT_LIST_HEAD(&bc->async_list);

	for (i = 0; i < num_vfs; i++) {
		vf = &sriov->vf_info[i];
		vf->adapter = adapter;
		vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
		mutex_init(&vf->send_cmd_lock);
		INIT_LIST_HEAD(&vf->rcv_act.wait_list);
		INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
		spin_lock_init(&vf->rcv_act.lock);
		spin_lock_init(&vf->rcv_pend.lock);
		init_completion(&vf->ch_free_cmpl);

		INIT_WORK(&vf->trans_work, qlcnic_sriov_process_bc_cmd);

		if (qlcnic_sriov_pf_check(adapter)) {
			vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL);
			if (!vp) {
				err = -ENOMEM;
				goto qlcnic_destroy_async_wq;
			}
			sriov->vf_info[i].vp = vp;
			vp->max_tx_bw = MAX_BW;
			vp->spoofchk = true;
			random_ether_addr(vp->mac);
			dev_info(&adapter->pdev->dev,
				 "MAC Address %pM is configured for VF %d\n",
				 vp->mac, i);
		}
	}

	return 0;

qlcnic_destroy_async_wq:
	destroy_workqueue(bc->bc_async_wq);

qlcnic_destroy_trans_wq:
	destroy_workqueue(bc->bc_trans_wq);

qlcnic_free_vf_info:
	kfree(sriov->vf_info);

qlcnic_free_sriov:
	kfree(adapter->ahw->sriov);
	return err;
}

void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *t_list)
{
	struct qlcnic_bc_trans *trans;
	struct qlcnic_cmd_args cmd;
	unsigned long flags;

	spin_lock_irqsave(&t_list->lock, flags);

	while (!list_empty(&t_list->wait_list)) {
		trans = list_first_entry(&t_list->wait_list,
					 struct qlcnic_bc_trans, list);
		list_del(&trans->list);
		t_list->count--;
		cmd.req.arg = (u32 *)trans->req_pay;
		cmd.rsp.arg = (u32 *)trans->rsp_pay;
		qlcnic_free_mbx_args(&cmd);
		qlcnic_sriov_cleanup_transaction(trans);
	}

	spin_unlock_irqrestore(&t_list->lock, flags);
}

void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_back_channel *bc = &sriov->bc;
	struct qlcnic_vf_info *vf;
	int i;

	if (!qlcnic_sriov_enable_check(adapter))
		return;

	qlcnic_sriov_cleanup_async_list(bc);
	destroy_workqueue(bc->bc_async_wq);

	for (i = 0; i < sriov->num_vfs; i++) {
		vf = &sriov->vf_info[i];
		qlcnic_sriov_cleanup_list(&vf->rcv_pend);
		cancel_work_sync(&vf->trans_work);
		qlcnic_sriov_cleanup_list(&vf->rcv_act);
	}

	destroy_workqueue(bc->bc_trans_wq);

	for (i = 0; i < sriov->num_vfs; i++)
		kfree(sriov->vf_info[i].vp);

	kfree(sriov->vf_info);
	kfree(adapter->ahw->sriov);
}

static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
{
	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
	qlcnic_sriov_cfg_bc_intr(adapter, 0);
	__qlcnic_sriov_cleanup(adapter);
}

void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
	if (qlcnic_sriov_pf_check(adapter))
		qlcnic_sriov_pf_cleanup(adapter);

	if (qlcnic_sriov_vf_check(adapter))
		qlcnic_sriov_vf_cleanup(adapter);
}
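/* Post one fragment of a back-channel message through the host mailbox
 * window. As used below, register 0 carries the mailbox command word
 * (opcode 0x31, total dword count, FW HAL version), register 1 the
 * destination word (on the PF, the target VF's pci_func is folded in
 * from bit 5), registers 2..5 the qlcnic_bc_hdr of this fragment, and
 * the payload dwords follow from register 6.
 */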
static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
				    u32 *pay, u8 pci_func, u8 size)
{
	u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val, wait_time = 0;
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	unsigned long flags;
	u16 opcode;
	u8 mbx_err_code;
	int i, j;

	opcode = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;

	if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
		dev_info(&adapter->pdev->dev,
			 "Mailbox detached, cmd 0x%x not sent\n", opcode);
		return 0;
	}

	spin_lock_irqsave(&ahw->mbx_lock, flags);

	mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
	if (mbx_val) {
		QLCDB(adapter, DRV, "Mailbox cmd attempted, 0x%x\n", opcode);
		spin_unlock_irqrestore(&ahw->mbx_lock, flags);
		return QLCNIC_RCODE_TIMEOUT;
	}
	/* Fill in mailbox registers */
	val = size + (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
	mbx_cmd = 0x31 | (val << 16) | (adapter->ahw->fw_hal_version << 29);

	writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
	mbx_cmd = 0x1 | (1 << 4);

	if (qlcnic_sriov_pf_check(adapter))
		mbx_cmd |= (pci_func << 5);

	writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
	for (i = 2, j = 0; j < (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
	     i++, j++) {
		writel(*(hdr++), QLCNIC_MBX_HOST(ahw, i));
	}
	for (j = 0; j < size; j++, i++)
		writel(*(pay++), QLCNIC_MBX_HOST(ahw, i));

	/* Signal FW about the impending command */
	QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);

	/* Wait for the mailbox command to complete. AEN events may arrive
	 * while we wait; if more than 5 seconds expire, assume something
	 * is wrong.
	 */
poll:
	rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
	if (rsp != QLCNIC_RCODE_TIMEOUT) {
		/* Get the FW response data */
		fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
		if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
			__qlcnic_83xx_process_aen(adapter);
			goto poll;
		}
		mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
		rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
		opcode = QLCNIC_MBX_RSP(fw_data);

		switch (mbx_err_code) {
		case QLCNIC_MBX_RSP_OK:
		case QLCNIC_MBX_PORT_RSP_OK:
			rsp = QLCNIC_RCODE_SUCCESS;
			break;
		default:
			if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
				rsp = qlcnic_83xx_mac_rcode(adapter);
				if (!rsp)
					goto out;
			}
			dev_err(&adapter->pdev->dev,
				"MBX command 0x%x failed with err:0x%x\n",
				opcode, mbx_err_code);
			rsp = mbx_err_code;
			break;
		}
		goto out;
	}

	dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
		QLCNIC_MBX_RSP(mbx_cmd));
	rsp = QLCNIC_RCODE_TIMEOUT;
out:
	/* Clear the FW mailbox control register */
	QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
	spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
	return rsp;
}

static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
{
	adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF;
	adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
	adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF;
	adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
	adapter->num_txd = MAX_CMD_DESCRIPTORS;
	adapter->max_rds_rings = MAX_RDS_RINGS;
}
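/* Query the resource limits of a vport. The firmware response carries a
 * validity bitmap in the low word of rsp.arg[2]; a limit field is only
 * meaningful when its corresponding status bit is set, which is why
 * each assignment below is gated on a BIT_x test.
 */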
int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
				   struct qlcnic_info *npar_info, u16 vport_id)
{
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_cmd_args cmd;
	int err;
	u32 status;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
	if (err)
		return err;

	cmd.req.arg[1] = vport_id << 16 | 0x1;
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Failed to get vport info, err=%d\n", err);
		qlcnic_free_mbx_args(&cmd);
		return err;
	}

	status = cmd.rsp.arg[2] & 0xffff;
	if (status & BIT_0)
		npar_info->min_tx_bw = MSW(cmd.rsp.arg[2]);
	if (status & BIT_1)
		npar_info->max_tx_bw = LSW(cmd.rsp.arg[3]);
	if (status & BIT_2)
		npar_info->max_tx_ques = MSW(cmd.rsp.arg[3]);
	if (status & BIT_3)
		npar_info->max_tx_mac_filters = LSW(cmd.rsp.arg[4]);
	if (status & BIT_4)
		npar_info->max_rx_mcast_mac_filters = MSW(cmd.rsp.arg[4]);
	if (status & BIT_5)
		npar_info->max_rx_ucast_mac_filters = LSW(cmd.rsp.arg[5]);
	if (status & BIT_6)
		npar_info->max_rx_ip_addr = MSW(cmd.rsp.arg[5]);
	if (status & BIT_7)
		npar_info->max_rx_lro_flow = LSW(cmd.rsp.arg[6]);
	if (status & BIT_8)
		npar_info->max_rx_status_rings = MSW(cmd.rsp.arg[6]);
	if (status & BIT_9)
		npar_info->max_rx_buf_rings = LSW(cmd.rsp.arg[7]);

	npar_info->max_rx_ques = MSW(cmd.rsp.arg[7]);
	npar_info->max_tx_vlan_keys = LSW(cmd.rsp.arg[8]);
	npar_info->max_local_ipv6_addrs = MSW(cmd.rsp.arg[8]);
	npar_info->max_remote_ipv6_addrs = LSW(cmd.rsp.arg[9]);

	dev_info(dev, "\n\tmin_tx_bw: %d, max_tx_bw: %d, max_tx_ques: %d,\n"
		 "\tmax_tx_mac_filters: %d, max_rx_mcast_mac_filters: %d,\n"
		 "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
		 "\tmax_rx_lro_flow: %d, max_rx_status_rings: %d,\n"
		 "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys: %d\n"
		 "\tmax_local_ipv6_addrs: %d, max_remote_ipv6_addrs: %d\n",
		 npar_info->min_tx_bw, npar_info->max_tx_bw,
		 npar_info->max_tx_ques, npar_info->max_tx_mac_filters,
		 npar_info->max_rx_mcast_mac_filters,
		 npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
		 npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
		 npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
		 npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
		 npar_info->max_remote_ipv6_addrs);

	qlcnic_free_mbx_args(&cmd);
	return err;
}

static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
				      struct qlcnic_cmd_args *cmd)
{
	adapter->rx_pvid = (cmd->rsp.arg[1] >> 16) & 0xffff;
	adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
	return 0;
}

static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
					    struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	int i, num_vlans;
	u16 *vlans;

	if (sriov->allowed_vlans)
		return 0;

	sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
	if (!sriov->any_vlan)
		return 0;

	sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
	num_vlans = sriov->num_allowed_vlans;
	sriov->allowed_vlans = kcalloc(num_vlans, sizeof(u16), GFP_KERNEL);
	if (!sriov->allowed_vlans)
		return -ENOMEM;

	vlans = (u16 *)&cmd->rsp.arg[3];
	for (i = 0; i < num_vlans; i++)
		sriov->allowed_vlans[i] = vlans[i];

	return 0;
}

static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_cmd_args cmd;
	int ret;

	ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
	if (ret)
		return ret;

	ret = qlcnic_issue_cmd(adapter, &cmd);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Failed to get ACL, err=%d\n",
			ret);
	} else {
		sriov->vlan_mode = cmd.rsp.arg[1] & 0x3;
		switch (sriov->vlan_mode) {
		case QLC_GUEST_VLAN_MODE:
			ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd);
			break;
		case QLC_PVID_MODE:
			ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd);
			break;
		}
	}

	qlcnic_free_mbx_args(&cmd);
	return ret;
}

static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
{
	struct qlcnic_info nic_info;
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	int err;

	err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
	if (err)
		return err;

	err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
	if (err)
		return -EIO;

	err = qlcnic_sriov_get_vf_acl(adapter);
	if (err)
		return err;

	if (qlcnic_83xx_get_port_info(adapter))
		return -EIO;

	qlcnic_sriov_vf_cfg_buff_desc(adapter);
	adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
	dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
		 adapter->ahw->fw_hal_version);

	ahw->physical_port = (u8) nic_info.phys_port;
	ahw->switch_mode = nic_info.switch_mode;
	ahw->max_mtu = nic_info.max_mtu;
	ahw->op_mode = nic_info.op_mode;
	ahw->capabilities = nic_info.capabilities;
	return 0;
}
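/* Bring up a VF instance. The ordering below matters: interrupts and
 * the mailbox must be working before the back-channel interrupt can be
 * configured, and a CHANNEL_INIT command must reach the PF before any
 * other back-channel command (such as the GET_ACL issued from
 * qlcnic_sriov_vf_init_driver) can succeed. The error labels unwind
 * these steps in reverse order.
 */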
static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
				 int pci_using_dac)
{
	int err;

	INIT_LIST_HEAD(&adapter->vf_mc_list);
	if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
		dev_warn(&adapter->pdev->dev,
			 "83xx adapters do not support MSI interrupts\n");

	err = qlcnic_setup_intr(adapter, 1);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
		goto err_out_disable_msi;
	}

	err = qlcnic_83xx_setup_mbx_intr(adapter);
	if (err)
		goto err_out_disable_msi;

	err = qlcnic_sriov_init(adapter, 1);
	if (err)
		goto err_out_disable_mbx_intr;

	err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
	if (err)
		goto err_out_cleanup_sriov;

	err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
	if (err)
		goto err_out_disable_bc_intr;

	err = qlcnic_sriov_vf_init_driver(adapter);
	if (err)
		goto err_out_send_channel_term;

	err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
	if (err)
		goto err_out_send_channel_term;

	pci_set_drvdata(adapter->pdev, adapter);
	dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
		 adapter->netdev->name);
	qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
			     adapter->ahw->idc.delay);
	return 0;

err_out_send_channel_term:
	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);

err_out_disable_bc_intr:
	qlcnic_sriov_cfg_bc_intr(adapter, 0);

err_out_cleanup_sriov:
	__qlcnic_sriov_cleanup(adapter);

err_out_disable_mbx_intr:
	qlcnic_83xx_free_mbx_intr(adapter);

err_out_disable_msi:
	qlcnic_teardown_intr(adapter);
	return err;
}

static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter)
{
	u32 state;

	do {
		msleep(20);
		if (++adapter->fw_fail_cnt > QLC_BC_CMD_MAX_RETRY_CNT)
			return -EIO;
		state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
	} while (state != QLC_83XX_IDC_DEV_READY);

	return 0;
}

int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	int err;

	spin_lock_init(&ahw->mbx_lock);
	set_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
	set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
	ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
	ahw->reset_context = 0;
	adapter->fw_fail_cnt = 0;
	ahw->msix_supported = 1;
	adapter->need_fw_reset = 0;
	adapter->flags |= QLCNIC_TX_INTR_SHARED;

	err = qlcnic_sriov_check_dev_ready(adapter);
	if (err)
		return err;

	err = qlcnic_sriov_setup_vf(adapter, pci_using_dac);
	if (err)
		return err;

	if (qlcnic_read_mac_addr(adapter))
		dev_warn(&adapter->pdev->dev, "failed to read mac addr\n");

	INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return 0;
}

void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;

	ahw->op_mode = QLCNIC_SRIOV_VF_FUNC;
	dev_info(&adapter->pdev->dev,
		 "HAL Version: %d Non Privileged SRIOV function\n",
		 ahw->fw_hal_version);
	adapter->nic_ops = &qlcnic_sriov_vf_ops;
	set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
}

void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *ahw)
{
	ahw->hw_ops = &qlcnic_sriov_vf_hw_ops;
	ahw->reg_tbl = (u32 *)qlcnic_83xx_reg_tbl;
	ahw->ext_reg_tbl = (u32 *)qlcnic_83xx_ext_reg_tbl;
}
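/* Return the payload size of fragment 'curr_frag' of a back-channel
 * message: every fragment carries QLC_BC_PAYLOAD_SZ (1008) bytes except
 * the last, which carries the remainder. For example, a 2500-byte
 * payload is split into fragments of 1008, 1008 and 484 (2500 % 1008)
 * bytes.
 */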
static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag)
{
	u32 pay_size;

	pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ);

	if (pay_size)
		pay_size = QLC_BC_PAYLOAD_SZ;
	else
		pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ;

	return pay_size;
}

int qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func)
{
	struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info;
	u8 i;

	if (qlcnic_sriov_vf_check(adapter))
		return 0;

	for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) {
		if (vf_info[i].pci_func == pci_func)
			return i;
	}

	return -EINVAL;
}

static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans)
{
	*trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC);
	if (!*trans)
		return -ENOMEM;

	init_completion(&(*trans)->resp_cmpl);
	return 0;
}

static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr,
					    u32 size)
{
	*hdr = kcalloc(size, sizeof(struct qlcnic_bc_hdr), GFP_ATOMIC);
	if (!*hdr)
		return -ENOMEM;

	return 0;
}

static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
{
	const struct qlcnic_mailbox_metadata *mbx_tbl;
	int i, size;

	mbx_tbl = qlcnic_sriov_bc_mbx_tbl;
	size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl);

	for (i = 0; i < size; i++) {
		if (type == mbx_tbl[i].cmd) {
			mbx->op_type = QLC_BC_CMD;
			mbx->req.num = mbx_tbl[i].in_args;
			mbx->rsp.num = mbx_tbl[i].out_args;
			/* kcalloc() returns zeroed memory, so no memset
			 * of the argument arrays is needed here.
			 */
			mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
					       GFP_ATOMIC);
			if (!mbx->req.arg)
				return -ENOMEM;
			mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
					       GFP_ATOMIC);
			if (!mbx->rsp.arg) {
				kfree(mbx->req.arg);
				mbx->req.arg = NULL;
				return -ENOMEM;
			}
			mbx->req.arg[0] = (type | (mbx->req.num << 16) |
					   (3 << 29));
			return 0;
		}
	}
	return -EINVAL;
}
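/* Set up the fragment headers for one back-channel transaction. The
 * request and response payloads are split into QLC_BC_PAYLOAD_SZ
 * chunks, and every fragment gets its own qlcnic_bc_hdr, all sharing
 * the same sequence id and command opcode; frag_num is 1-based. For a
 * command we allocate both header arrays here, while for a response
 * the payload buffers of the received request are reused.
 */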
static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
				       struct qlcnic_cmd_args *cmd,
				       u16 seq, u8 msg_type)
{
	struct qlcnic_bc_hdr *hdr;
	int i;
	u32 num_regs, bc_pay_sz;
	u16 remainder;
	u8 cmd_op, num_frags, t_num_frags;

	bc_pay_sz = QLC_BC_PAYLOAD_SZ;
	if (msg_type == QLC_BC_COMMAND) {
		trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg;
		trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg;
		num_regs = cmd->req.num;
		trans->req_pay_size = (num_regs * 4);
		num_regs = cmd->rsp.num;
		trans->rsp_pay_size = (num_regs * 4);
		cmd_op = cmd->req.arg[0] & 0xff;
		remainder = (trans->req_pay_size) % (bc_pay_sz);
		num_frags = (trans->req_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		t_num_frags = num_frags;
		if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags))
			return -ENOMEM;
		remainder = (trans->rsp_pay_size) % (bc_pay_sz);
		num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags))
			return -ENOMEM;
		num_frags = t_num_frags;
		hdr = trans->req_hdr;
	} else {
		cmd->req.arg = (u32 *)trans->req_pay;
		cmd->rsp.arg = (u32 *)trans->rsp_pay;
		cmd_op = cmd->req.arg[0] & 0xff;
		remainder = (trans->rsp_pay_size) % (bc_pay_sz);
		num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		cmd->req.num = trans->req_pay_size / 4;
		cmd->rsp.num = trans->rsp_pay_size / 4;
		hdr = trans->rsp_hdr;
	}

	trans->trans_id = seq;
	trans->cmd_id = cmd_op;
	for (i = 0; i < num_frags; i++) {
		hdr[i].version = 2;
		hdr[i].msg_type = msg_type;
		hdr[i].op_type = cmd->op_type;
		hdr[i].num_cmds = 1;
		hdr[i].num_frags = num_frags;
		hdr[i].frag_num = i + 1;
		hdr[i].cmd_op = cmd_op;
		hdr[i].seq_id = seq;
	}
	return 0;
}

static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans)
{
	if (!trans)
		return;
	kfree(trans->req_hdr);
	kfree(trans->rsp_hdr);
	kfree(trans);
}

static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf,
				    struct qlcnic_bc_trans *trans, u8 type)
{
	struct qlcnic_trans_list *t_list;
	unsigned long flags;
	int ret = 0;

	if (type == QLC_BC_RESPONSE) {
		t_list = &vf->rcv_act;
		spin_lock_irqsave(&t_list->lock, flags);
		t_list->count--;
		list_del(&trans->list);
		if (t_list->count > 0)
			ret = 1;
		spin_unlock_irqrestore(&t_list->lock, flags);
	}
	if (type == QLC_BC_COMMAND) {
		while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
			msleep(100);
		vf->send_cmd = NULL;
		clear_bit(QLC_BC_VF_SEND, &vf->state);
	}
	return ret;
}

static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,
					 struct qlcnic_vf_info *vf,
					 work_func_t func)
{
	if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
	    vf->adapter->need_fw_reset)
		return;

	queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
}

static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans)
{
	struct completion *cmpl = &trans->resp_cmpl;

	if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT))
		trans->trans_state = QLC_END;
	else
		trans->trans_state = QLC_ABORT;
}

static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans,
					    u8 type)
{
	if (type == QLC_BC_RESPONSE) {
		trans->curr_rsp_frag++;
		if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
			trans->trans_state = QLC_INIT;
		else
			trans->trans_state = QLC_END;
	} else {
		trans->curr_req_frag++;
		if (trans->curr_req_frag < trans->req_hdr->num_frags)
			trans->trans_state = QLC_INIT;
		else
			trans->trans_state = QLC_WAIT_FOR_RESP;
	}
}

static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans,
					       u8 type)
{
	struct qlcnic_vf_info *vf = trans->vf;
	struct completion *cmpl = &vf->ch_free_cmpl;

	if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) {
		trans->trans_state = QLC_ABORT;
		return;
	}

	clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
	qlcnic_sriov_handle_multi_frags(trans, type);
}

static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter,
				     u32 *hdr, u32 *pay, u32 size)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	u32 fw_mbx;
	u8 i, max, hdr_size, j;

	hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
	max = (size / sizeof(u32)) + hdr_size;

	fw_mbx = readl(QLCNIC_MBX_FW(ahw, 0));
	for (i = 2, j = 0; j < hdr_size; i++, j++)
		*(hdr++) = readl(QLCNIC_MBX_FW(ahw, i));
	for (; j < max; i++, j++)
		*(pay++) = readl(QLCNIC_MBX_FW(ahw, i));
}
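/* Acquire ownership of the back channel before posting: spin on the
 * QLC_BC_VF_CHANNEL bit for up to roughly 10 seconds (10000 iterations
 * of mdelay(1)). The bit is released either by a channel-free event
 * from the peer (see qlcnic_sriov_wait_for_channel_free()) or on abort.
 */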
static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf)
{
	int ret = -EBUSY;
	u32 timeout = 10000;

	do {
		if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) {
			ret = 0;
			break;
		}
		mdelay(1);
	} while (--timeout);

	return ret;
}

static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type)
{
	struct qlcnic_vf_info *vf = trans->vf;
	u32 pay_size, hdr_size;
	u32 *hdr, *pay;
	int ret;
	u8 pci_func = trans->func_id;

	if (__qlcnic_sriov_issue_bc_post(vf))
		return -EBUSY;

	if (type == QLC_BC_COMMAND) {
		hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag);
		pay = (u32 *)(trans->req_pay + trans->curr_req_frag);
		hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
		pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
						       trans->curr_req_frag);
		pay_size = (pay_size / sizeof(u32));
	} else {
		hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag);
		pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag);
		hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
		pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
						       trans->curr_rsp_frag);
		pay_size = (pay_size / sizeof(u32));
	}

	ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay,
				       pci_func, pay_size);
	return ret;
}

static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans,
				      struct qlcnic_vf_info *vf, u8 type)
{
	bool flag = true;
	int err = -EIO;

	while (flag) {
		if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
		    vf->adapter->need_fw_reset)
			trans->trans_state = QLC_ABORT;

		switch (trans->trans_state) {
		case QLC_INIT:
			trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE;
			if (qlcnic_sriov_issue_bc_post(trans, type))
				trans->trans_state = QLC_ABORT;
			break;
		case QLC_WAIT_FOR_CHANNEL_FREE:
			qlcnic_sriov_wait_for_channel_free(trans, type);
			break;
		case QLC_WAIT_FOR_RESP:
			qlcnic_sriov_wait_for_resp(trans);
			break;
		case QLC_END:
			err = 0;
			flag = false;
			break;
		case QLC_ABORT:
			err = -EIO;
			flag = false;
			clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
			break;
		default:
			err = -EIO;
			flag = false;
		}
	}
	return err;
}

static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter,
				    struct qlcnic_bc_trans *trans, int pci_func)
{
	struct qlcnic_vf_info *vf;
	int err, index = qlcnic_sriov_func_to_index(adapter, pci_func);

	if (index < 0)
		return -EIO;

	vf = &adapter->ahw->sriov->vf_info[index];
	trans->vf = vf;
	trans->func_id = pci_func;

	if (!test_bit(QLC_BC_VF_STATE, &vf->state)) {
		if (qlcnic_sriov_pf_check(adapter))
			return -EIO;
		if (qlcnic_sriov_vf_check(adapter) &&
		    trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT)
			return -EIO;
	}

	mutex_lock(&vf->send_cmd_lock);
	vf->send_cmd = trans;
	err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND);
	qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND);
	mutex_unlock(&vf->send_cmd_lock);
	return err;
}

static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter,
					  struct qlcnic_bc_trans *trans,
					  struct qlcnic_cmd_args *cmd)
{
#ifdef CONFIG_QLCNIC_SRIOV
	if (qlcnic_sriov_pf_check(adapter)) {
		qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd);
		return;
	}
#endif
	cmd->rsp.arg[0] |= (0x9 << 25);
}
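/* Worker for vf->trans_work: take the transaction at the head of the
 * active list, build and send the response (or, on the PF, process the
 * command first), then free the transaction. If more transactions are
 * waiting on the list, the work is rescheduled.
 */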
static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
{
	struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info,
						 trans_work);
	struct qlcnic_bc_trans *trans = NULL;
	struct qlcnic_adapter *adapter = vf->adapter;
	struct qlcnic_cmd_args cmd;
	u8 req;

	if (adapter->need_fw_reset)
		return;

	if (test_bit(QLC_BC_VF_FLR, &vf->state))
		return;

	trans = list_first_entry(&vf->rcv_act.wait_list,
				 struct qlcnic_bc_trans, list);

	if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id,
					QLC_BC_RESPONSE))
		goto cleanup_trans;

	__qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd);
	trans->trans_state = QLC_INIT;
	__qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE);

cleanup_trans:
	qlcnic_free_mbx_args(&cmd);
	req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE);
	qlcnic_sriov_cleanup_transaction(trans);
	if (req)
		qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf,
					     qlcnic_sriov_process_bc_cmd);
}

static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr,
					struct qlcnic_vf_info *vf)
{
	struct qlcnic_bc_trans *trans;
	u32 pay_size;

	if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
		return;

	trans = vf->send_cmd;

	if (trans == NULL)
		goto clear_send;

	if (trans->trans_id != hdr->seq_id)
		goto clear_send;

	pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
					       trans->curr_rsp_frag);
	qlcnic_sriov_pull_bc_msg(vf->adapter,
				 (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag),
				 (u32 *)(trans->rsp_pay + trans->curr_rsp_frag),
				 pay_size);
	if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
		goto clear_send;

	complete(&trans->resp_cmpl);

clear_send:
	clear_bit(QLC_BC_VF_SEND, &vf->state);
}

int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
				struct qlcnic_vf_info *vf,
				struct qlcnic_bc_trans *trans)
{
	struct qlcnic_trans_list *t_list = &vf->rcv_act;

	t_list->count++;
	list_add_tail(&trans->list, &t_list->wait_list);
	if (t_list->count == 1)
		qlcnic_sriov_schedule_bc_cmd(sriov, vf,
					     qlcnic_sriov_process_bc_cmd);
	return 0;
}

static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
				     struct qlcnic_vf_info *vf,
				     struct qlcnic_bc_trans *trans)
{
	struct qlcnic_trans_list *t_list = &vf->rcv_act;

	spin_lock(&t_list->lock);

	__qlcnic_sriov_add_act_list(sriov, vf, trans);

	spin_unlock(&t_list->lock);
	return 0;
}
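/* A multi-fragment request sits on the rcv_pend list until all of its
 * fragments have arrived. Each new fragment is matched to its
 * transaction by seq_id and pulled from the mailbox; once the last
 * fragment is in, the transaction moves to the active list for
 * processing.
 */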
static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov,
					      struct qlcnic_vf_info *vf,
					      struct qlcnic_bc_hdr *hdr)
{
	struct qlcnic_bc_trans *trans = NULL;
	struct list_head *node;
	u32 pay_size, curr_frag;
	u8 found = 0, active = 0;

	spin_lock(&vf->rcv_pend.lock);
	if (vf->rcv_pend.count > 0) {
		list_for_each(node, &vf->rcv_pend.wait_list) {
			trans = list_entry(node, struct qlcnic_bc_trans, list);
			if (trans->trans_id == hdr->seq_id) {
				found = 1;
				break;
			}
		}
	}

	if (found) {
		curr_frag = trans->curr_req_frag;
		pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
						       curr_frag);
		qlcnic_sriov_pull_bc_msg(vf->adapter,
					 (u32 *)(trans->req_hdr + curr_frag),
					 (u32 *)(trans->req_pay + curr_frag),
					 pay_size);
		trans->curr_req_frag++;
		if (trans->curr_req_frag >= hdr->num_frags) {
			vf->rcv_pend.count--;
			list_del(&trans->list);
			active = 1;
		}
	}
	spin_unlock(&vf->rcv_pend.lock);

	if (active)
		if (qlcnic_sriov_add_act_list(sriov, vf, trans))
			qlcnic_sriov_cleanup_transaction(trans);
}

static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
				       struct qlcnic_bc_hdr *hdr,
				       struct qlcnic_vf_info *vf)
{
	struct qlcnic_bc_trans *trans;
	struct qlcnic_adapter *adapter = vf->adapter;
	struct qlcnic_cmd_args cmd;
	u32 pay_size;
	int err;
	u8 cmd_op;

	if (adapter->need_fw_reset)
		return;

	if (!test_bit(QLC_BC_VF_STATE, &vf->state) &&
	    hdr->op_type != QLC_BC_CMD &&
	    hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT)
		return;

	if (hdr->frag_num > 1) {
		qlcnic_sriov_handle_pending_trans(sriov, vf, hdr);
		return;
	}

	cmd_op = hdr->cmd_op;
	if (qlcnic_sriov_alloc_bc_trans(&trans))
		return;

	if (hdr->op_type == QLC_BC_CMD)
		err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op);
	else
		err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op);

	if (err) {
		qlcnic_sriov_cleanup_transaction(trans);
		return;
	}

	cmd.op_type = hdr->op_type;
	if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id,
					QLC_BC_COMMAND)) {
		qlcnic_free_mbx_args(&cmd);
		qlcnic_sriov_cleanup_transaction(trans);
		return;
	}

	pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
					       trans->curr_req_frag);
	qlcnic_sriov_pull_bc_msg(vf->adapter,
				 (u32 *)(trans->req_hdr + trans->curr_req_frag),
				 (u32 *)(trans->req_pay + trans->curr_req_frag),
				 pay_size);
	trans->func_id = vf->pci_func;
	trans->vf = vf;
	trans->trans_id = hdr->seq_id;
	trans->curr_req_frag++;

	if (qlcnic_sriov_soft_flr_check(adapter, trans, vf))
		return;

	if (trans->curr_req_frag == trans->req_hdr->num_frags) {
		if (qlcnic_sriov_add_act_list(sriov, vf, trans)) {
			qlcnic_free_mbx_args(&cmd);
			qlcnic_sriov_cleanup_transaction(trans);
		}
	} else {
		spin_lock(&vf->rcv_pend.lock);
		list_add_tail(&trans->list, &vf->rcv_pend.wait_list);
		vf->rcv_pend.count++;
		spin_unlock(&vf->rcv_pend.lock);
	}
}

static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov,
					  struct qlcnic_vf_info *vf)
{
	struct qlcnic_bc_hdr hdr;
	u32 *ptr = (u32 *)&hdr;
	u8 msg_type, i;

	for (i = 2; i < 6; i++)
		ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i));
	msg_type = hdr.msg_type;

	switch (msg_type) {
	case QLC_BC_COMMAND:
		qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf);
		break;
	case QLC_BC_RESPONSE:
		qlcnic_sriov_handle_bc_resp(&hdr, vf);
		break;
	}
}

static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov,
					  struct qlcnic_vf_info *vf)
{
	struct qlcnic_adapter *adapter = vf->adapter;

	if (qlcnic_sriov_pf_check(adapter))
		qlcnic_sriov_pf_handle_flr(sriov, vf);
	else
		dev_err(&adapter->pdev->dev,
			"Invalid event to VF. VF should not get FLR event\n");
}
void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event)
{
	struct qlcnic_vf_info *vf;
	struct qlcnic_sriov *sriov;
	int index;
	u8 pci_func;

	sriov = adapter->ahw->sriov;
	pci_func = qlcnic_sriov_target_func_id(event);
	index = qlcnic_sriov_func_to_index(adapter, pci_func);

	if (index < 0)
		return;

	vf = &sriov->vf_info[index];
	vf->pci_func = pci_func;

	if (qlcnic_sriov_channel_free_check(event))
		complete(&vf->ch_free_cmpl);

	if (qlcnic_sriov_flr_check(event)) {
		qlcnic_sriov_handle_flr_event(sriov, vf);
		return;
	}

	if (qlcnic_sriov_bc_msg_check(event))
		qlcnic_sriov_handle_msg_event(sriov, vf);
}

int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
{
	struct qlcnic_cmd_args cmd;
	int err;

	if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
		return 0;

	if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP))
		return -ENOMEM;

	if (enable)
		cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);

	err = qlcnic_83xx_mbx_op(adapter, &cmd);

	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to %s bc events, err=%d\n",
			(enable ? "enable" : "disable"), err);
	}

	qlcnic_free_mbx_args(&cmd);
	return err;
}

static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
				     struct qlcnic_bc_trans *trans)
{
	u8 max = QLC_BC_CMD_MAX_RETRY_CNT;
	u32 state;

	state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
	if (state == QLC_83XX_IDC_DEV_READY) {
		msleep(20);
		clear_bit(QLC_BC_VF_CHANNEL, &trans->vf->state);
		trans->trans_state = QLC_INIT;
		if (++adapter->fw_fail_cnt > max)
			return -EIO;
		else
			return 0;
	}

	return -EIO;
}

static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter,
				  struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_bc_trans *trans;
	int err;
	u32 rsp_data, opcode, mbx_err_code, rsp;
	u16 seq = ++adapter->ahw->sriov->bc.trans_counter;
	u8 func = ahw->pci_func;

	rsp = qlcnic_sriov_alloc_bc_trans(&trans);
	if (rsp)
		return rsp;

	rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
	if (rsp)
		goto cleanup_transaction;

retry:
	if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
		rsp = -EIO;
		QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
		      QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
		goto err_out;
	}

	err = qlcnic_sriov_send_bc_cmd(adapter, trans, func);
	if (err) {
		dev_err(dev, "MBX command 0x%x timed out for VF %d\n",
			(cmd->req.arg[0] & 0xffff), func);
		rsp = QLCNIC_RCODE_TIMEOUT;

		/* After an adapter reset, the PF driver may take some time
		 * to respond to the VF's request; retry until the maximum
		 * retry count is reached.
		 */
		if ((trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
		    !qlcnic_sriov_retry_bc_cmd(adapter, trans))
			goto retry;

		goto err_out;
	}

	rsp_data = cmd->rsp.arg[0];
	mbx_err_code = QLCNIC_MBX_STATUS(rsp_data);
	opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]);

	if ((mbx_err_code == QLCNIC_MBX_RSP_OK) ||
	    (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
		rsp = QLCNIC_RCODE_SUCCESS;
	} else {
		rsp = mbx_err_code;
		if (!rsp)
			rsp = 1;
		dev_err(dev,
			"MBX command 0x%x failed with err:0x%x for VF %d\n",
			opcode, mbx_err_code, func);
	}

err_out:
	if (rsp == QLCNIC_RCODE_TIMEOUT) {
		ahw->reset_context = 1;
		adapter->need_fw_reset = 1;
		clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
	}

cleanup_transaction:
	qlcnic_sriov_cleanup_transaction(trans);
	return rsp;
}

int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
{
	struct qlcnic_cmd_args cmd;
	struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
	int ret;

	if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
		return -ENOMEM;

	ret = qlcnic_issue_cmd(adapter, &cmd);
	if (ret) {
		dev_err(&adapter->pdev->dev,
			"Failed bc channel %s %d\n", cmd_op ? "term" : "init",
			ret);
		goto out;
	}

	cmd_op = (cmd.rsp.arg[0] & 0xff);
	/* Jump to out instead of returning directly, so the mailbox
	 * arguments are not leaked.
	 */
	if (cmd.rsp.arg[0] >> 25 == 2) {
		ret = 2;
		goto out;
	}
	if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
		set_bit(QLC_BC_VF_STATE, &vf->state);
	else
		clear_bit(QLC_BC_VF_STATE, &vf->state);

out:
	qlcnic_free_mbx_args(&cmd);
	return ret;
}

void qlcnic_vf_add_mc_list(struct net_device *netdev, u16 vlan)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_mac_list_s *cur;
	struct list_head *head, tmp_list;

	INIT_LIST_HEAD(&tmp_list);
	head = &adapter->vf_mc_list;
	netif_addr_lock_bh(netdev);

	while (!list_empty(head)) {
		cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
		list_move(&cur->list, &tmp_list);
	}

	netif_addr_unlock_bh(netdev);

	while (!list_empty(&tmp_list)) {
		cur = list_entry((&tmp_list)->next,
				 struct qlcnic_mac_list_s, list);
		qlcnic_nic_add_mac(adapter, cur->mac_addr, vlan);
		list_del(&cur->list);
		kfree(cur);
	}
}

void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
{
	struct list_head *head = &bc->async_list;
	struct qlcnic_async_work_list *entry;

	while (!list_empty(head)) {
		entry = list_entry(head->next, struct qlcnic_async_work_list,
				   list);
		cancel_work_sync(&entry->work);
		list_del(&entry->list);
		kfree(entry);
	}
}

static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	u16 vlan;

	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
		return;

	vlan = adapter->ahw->sriov->vlan;
	__qlcnic_set_multi(netdev, vlan);
}

static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
{
	struct qlcnic_async_work_list *entry;
	struct net_device *netdev;

	entry = container_of(work, struct qlcnic_async_work_list, work);
	netdev = (struct net_device *)entry->ptr;

	qlcnic_sriov_vf_set_multi(netdev);
}
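/* Find a reusable entry on the async work list, i.e. one whose work
 * item is no longer pending; allocate a fresh entry (GFP_ATOMIC, since
 * callers may not be able to sleep) only when none is idle.
 */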
static struct qlcnic_async_work_list *
qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
{
	struct list_head *node;
	struct qlcnic_async_work_list *entry = NULL;
	u8 empty = 0;

	list_for_each(node, &bc->async_list) {
		entry = list_entry(node, struct qlcnic_async_work_list, list);
		if (!work_pending(&entry->work)) {
			empty = 1;
			break;
		}
	}

	if (!empty) {
		entry = kzalloc(sizeof(struct qlcnic_async_work_list),
				GFP_ATOMIC);
		if (entry == NULL)
			return NULL;
		list_add_tail(&entry->list, &bc->async_list);
	}

	return entry;
}

static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
						work_func_t func, void *data)
{
	struct qlcnic_async_work_list *entry = NULL;

	entry = qlcnic_sriov_get_free_node_async_work(bc);
	if (!entry)
		return;

	entry->ptr = data;
	INIT_WORK(&entry->work, func);
	queue_work(bc->bc_async_wq, &entry->work);
}

void qlcnic_sriov_vf_schedule_multi(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;

	if (adapter->need_fw_reset)
		return;

	qlcnic_sriov_schedule_bc_async_work(bc, qlcnic_sriov_handle_async_multi,
					    netdev);
}

static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
{
	int err;

	set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
	qlcnic_83xx_enable_mbx_intrpt(adapter);

	err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
	if (err)
		return err;

	err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
	if (err)
		goto err_out_cleanup_bc_intr;

	err = qlcnic_sriov_vf_init_driver(adapter);
	if (err)
		goto err_out_term_channel;

	return 0;

err_out_term_channel:
	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);

err_out_cleanup_bc_intr:
	qlcnic_sriov_cfg_bc_intr(adapter, 0);
	return err;
}

static void qlcnic_sriov_vf_attach(struct qlcnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (netif_running(netdev)) {
		if (!qlcnic_up(adapter, netdev))
			qlcnic_restore_indev_addr(netdev, NETDEV_UP);
	}

	netif_device_attach(netdev);
}

static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_intrpt_config *intr_tbl = ahw->intr_tbl;
	struct net_device *netdev = adapter->netdev;
	u8 i, max_ints = ahw->num_msix - 1;

	qlcnic_83xx_disable_mbx_intr(adapter);
	netif_device_detach(netdev);
	if (netif_running(netdev))
		qlcnic_down(adapter, netdev);

	for (i = 0; i < max_ints; i++) {
		intr_tbl[i].id = i;
		intr_tbl[i].enabled = 0;
		intr_tbl[i].src = 0;
	}
	ahw->reset_context = 0;
}
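/* The IDC device state has transitioned back to READY after a reset or
 * initial load: re-enable the mailbox, re-register the back channel and
 * re-attach the interface. On failure the device stays detached and
 * only the current IDC state is logged.
 */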
static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct device *dev = &adapter->pdev->dev;
	struct qlc_83xx_idc *idc = &ahw->idc;
	u8 func = ahw->pci_func;
	u32 state;

	if ((idc->prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
	    (idc->prev_state == QLC_83XX_IDC_DEV_INIT)) {
		if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
			qlcnic_sriov_vf_attach(adapter);
			adapter->fw_fail_cnt = 0;
			dev_info(dev,
				 "%s: Reinitialization of VF 0x%x done after FW reset\n",
				 __func__, func);
		} else {
			dev_err(dev,
				"%s: Reinitialization of VF 0x%x failed after FW reset\n",
				__func__, func);
			state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
			dev_info(dev, "Current state 0x%x after FW reset\n",
				 state);
		}
	}

	return 0;
}

static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct device *dev = &adapter->pdev->dev;
	struct qlc_83xx_idc *idc = &ahw->idc;
	u8 func = ahw->pci_func;
	u32 state;

	adapter->reset_ctx_cnt++;

	/* Skip the context reset and check if FW is hung */
	if (adapter->reset_ctx_cnt < 3) {
		adapter->need_fw_reset = 1;
		clear_bit(QLC_83XX_MBX_READY, &idc->status);
		dev_info(dev,
			 "Skipping context reset; waiting to check if FW is in failed state\n");
		return 0;
	}

	/* Check if the number of resets exceeds the threshold.
	 * If it does, just fail the VF.
	 */
	if (adapter->reset_ctx_cnt > QLC_83XX_VF_RESET_FAIL_THRESH) {
		clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
		adapter->tx_timeo_cnt = 0;
		adapter->fw_fail_cnt = 0;
		adapter->reset_ctx_cnt = 0;
		qlcnic_sriov_vf_detach(adapter);
		dev_err(dev,
			"Device context resets have exceeded the threshold, device interface will be shutdown\n");
		return -EIO;
	}

	dev_info(dev, "Resetting context of VF 0x%x\n", func);
	dev_info(dev, "%s: Context reset count %d for VF 0x%x\n",
		 __func__, adapter->reset_ctx_cnt, func);
	set_bit(__QLCNIC_RESETTING, &adapter->state);
	adapter->need_fw_reset = 1;
	clear_bit(QLC_83XX_MBX_READY, &idc->status);
	qlcnic_sriov_vf_detach(adapter);
	adapter->need_fw_reset = 0;

	if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
		qlcnic_sriov_vf_attach(adapter);
		adapter->tx_timeo_cnt = 0;
		adapter->reset_ctx_cnt = 0;
		adapter->fw_fail_cnt = 0;
		dev_info(dev, "Done resetting context for VF 0x%x\n", func);
	} else {
		dev_err(dev, "%s: Reinitialization of VF 0x%x failed\n",
			__func__, func);
		state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
		dev_info(dev, "%s: Current state 0x%x\n", __func__, state);
	}

	return 0;
}

static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	int ret = 0;

	if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY)
		ret = qlcnic_sriov_vf_handle_dev_ready(adapter);
	else if (ahw->reset_context)
		ret = qlcnic_sriov_vf_handle_context_reset(adapter);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return ret;
}

static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
{
	struct qlc_83xx_idc *idc = &adapter->ahw->idc;

	dev_err(&adapter->pdev->dev, "Device is in failed state\n");
	if (idc->prev_state == QLC_83XX_IDC_DEV_READY)
		qlcnic_sriov_vf_detach(adapter);

	clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return -EIO;
}
static int
qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
{
	struct qlc_83xx_idc *idc = &adapter->ahw->idc;

	dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
	if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
		set_bit(__QLCNIC_RESETTING, &adapter->state);
		adapter->tx_timeo_cnt = 0;
		adapter->reset_ctx_cnt = 0;
		clear_bit(QLC_83XX_MBX_READY, &idc->status);
		qlcnic_sriov_vf_detach(adapter);
	}

	return 0;
}

static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
{
	struct qlc_83xx_idc *idc = &adapter->ahw->idc;
	u8 func = adapter->ahw->pci_func;

	if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
		dev_err(&adapter->pdev->dev,
			"Firmware hang detected by VF 0x%x\n", func);
		set_bit(__QLCNIC_RESETTING, &adapter->state);
		adapter->tx_timeo_cnt = 0;
		adapter->reset_ctx_cnt = 0;
		clear_bit(QLC_83XX_MBX_READY, &idc->status);
		qlcnic_sriov_vf_detach(adapter);
	}
	return 0;
}

static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
{
	dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n", __func__);
	return 0;
}
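/* Delayed work that drives the VF side of the IDC state machine: read
 * the current device state, dispatch the READY / NEED_RESET / INIT /
 * NEED_QUISCENT / FAILED handlers above, and re-arm itself as long as
 * the module stays loaded and no handler reported a fatal error.
 */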
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
{
	struct qlcnic_adapter *adapter;
	struct qlc_83xx_idc *idc;
	int ret = 0;

	adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
	idc = &adapter->ahw->idc;
	idc->curr_state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);

	switch (idc->curr_state) {
	case QLC_83XX_IDC_DEV_READY:
		ret = qlcnic_sriov_vf_idc_ready_state(adapter);
		break;
	case QLC_83XX_IDC_DEV_NEED_RESET:
	case QLC_83XX_IDC_DEV_INIT:
		ret = qlcnic_sriov_vf_idc_init_reset_state(adapter);
		break;
	case QLC_83XX_IDC_DEV_NEED_QUISCENT:
		ret = qlcnic_sriov_vf_idc_need_quiescent_state(adapter);
		break;
	case QLC_83XX_IDC_DEV_FAILED:
		ret = qlcnic_sriov_vf_idc_failed_state(adapter);
		break;
	case QLC_83XX_IDC_DEV_QUISCENT:
		break;
	default:
		ret = qlcnic_sriov_vf_idc_unknown_state(adapter);
	}

	idc->prev_state = idc->curr_state;
	if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
		qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
				     idc->delay);
}

static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
{
	while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		msleep(20);

	clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	cancel_delayed_work_sync(&adapter->fw_work);
}

static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_sriov *sriov,
					  u16 vid, u8 enable)
{
	u16 vlan = sriov->vlan;
	u8 allowed = 0;
	int i;

	if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
		return -EINVAL;

	if (enable) {
		if (vlan)
			return -EINVAL;

		if (sriov->any_vlan) {
			for (i = 0; i < sriov->num_allowed_vlans; i++) {
				if (sriov->allowed_vlans[i] == vid)
					allowed = 1;
			}

			if (!allowed)
				return -EINVAL;
		}
	} else {
		if (!vlan || vlan != vid)
			return -EINVAL;
	}

	return 0;
}

int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
				   u16 vid, u8 enable)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_cmd_args cmd;
	int ret;

	if (vid == 0)
		return 0;

	ret = qlcnic_sriov_validate_vlan_cfg(sriov, vid, enable);
	if (ret)
		return ret;

	ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd,
					     QLCNIC_BC_CMD_CFG_GUEST_VLAN);
	if (ret)
		return ret;

	cmd.req.arg[1] = (enable & 1) | vid << 16;

	qlcnic_sriov_cleanup_async_list(&sriov->bc);
	ret = qlcnic_issue_cmd(adapter, &cmd);
	if (ret) {
		dev_err(&adapter->pdev->dev,
			"Failed to configure guest VLAN, err=%d\n", ret);
	} else {
		qlcnic_free_mac_list(adapter);

		if (enable)
			sriov->vlan = vid;
		else
			sriov->vlan = 0;

		qlcnic_sriov_vf_set_multi(adapter->netdev);
	}

	qlcnic_free_mbx_args(&cmd);
	return ret;
}

static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
{
	struct list_head *head = &adapter->mac_list;
	struct qlcnic_mac_list_s *cur;
	u16 vlan;

	vlan = adapter->ahw->sriov->vlan;

	while (!list_empty(head)) {
		cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
		qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
					  vlan, QLCNIC_MAC_DEL);
		list_del(&cur->list);
		kfree(cur);
	}
}

int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
{
	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	int retval;

	netif_device_detach(netdev);
	qlcnic_cancel_idc_work(adapter);

	if (netif_running(netdev))
		qlcnic_down(adapter, netdev);

	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
	qlcnic_sriov_cfg_bc_intr(adapter, 0);
	qlcnic_83xx_disable_mbx_intr(adapter);
	cancel_delayed_work_sync(&adapter->idc_aen_work);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	return 0;
}

int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
{
	struct qlc_83xx_idc *idc = &adapter->ahw->idc;
	struct net_device *netdev = adapter->netdev;
	int err;

	set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
	qlcnic_83xx_enable_mbx_intrpt(adapter);
	err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
	if (err)
		return err;

	err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
	if (!err) {
		if (netif_running(netdev)) {
			err = qlcnic_up(adapter, netdev);
			if (!err)
				qlcnic_restore_indev_addr(netdev, NETDEV_UP);
		}
	}

	netif_device_attach(netdev);
	qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
			     idc->delay);
	return err;
}