1 /* 2 * QLogic qlcnic NIC Driver 3 * Copyright (c) 2009-2013 QLogic Corporation 4 * 5 * See LICENSE.qlcnic for copyright and licensing details. 6 */ 7 8 #include "qlcnic_sriov.h" 9 #include "qlcnic.h" 10 #include "qlcnic_83xx_hw.h" 11 #include <linux/types.h> 12 13 #define QLC_BC_COMMAND 0 14 #define QLC_BC_RESPONSE 1 15 16 #define QLC_MBOX_RESP_TIMEOUT (10 * HZ) 17 #define QLC_MBOX_CH_FREE_TIMEOUT (10 * HZ) 18 19 #define QLC_BC_MSG 0 20 #define QLC_BC_CFREE 1 21 #define QLC_BC_FLR 2 22 #define QLC_BC_HDR_SZ 16 23 #define QLC_BC_PAYLOAD_SZ (1024 - QLC_BC_HDR_SZ) 24 25 #define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF 2048 26 #define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF 512 27 28 #define QLC_83XX_VF_RESET_FAIL_THRESH 8 29 #define QLC_BC_CMD_MAX_RETRY_CNT 5 30 31 static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *); 32 static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32); 33 static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *); 34 static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *); 35 static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *); 36 static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *, 37 struct qlcnic_cmd_args *); 38 static void qlcnic_sriov_process_bc_cmd(struct work_struct *); 39 40 static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = { 41 .read_crb = qlcnic_83xx_read_crb, 42 .write_crb = qlcnic_83xx_write_crb, 43 .read_reg = qlcnic_83xx_rd_reg_indirect, 44 .write_reg = qlcnic_83xx_wrt_reg_indirect, 45 .get_mac_address = qlcnic_83xx_get_mac_address, 46 .setup_intr = qlcnic_83xx_setup_intr, 47 .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args, 48 .mbx_cmd = qlcnic_sriov_issue_cmd, 49 .get_func_no = qlcnic_83xx_get_func_no, 50 .api_lock = qlcnic_83xx_cam_lock, 51 .api_unlock = qlcnic_83xx_cam_unlock, 52 .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag, 53 .create_rx_ctx = qlcnic_83xx_create_rx_ctx, 54 .create_tx_ctx = qlcnic_83xx_create_tx_ctx, 55 .del_rx_ctx = qlcnic_83xx_del_rx_ctx, 56 .del_tx_ctx = qlcnic_83xx_del_tx_ctx, 57 .setup_link_event = qlcnic_83xx_setup_link_event, 58 .get_nic_info = qlcnic_83xx_get_nic_info, 59 .get_pci_info = qlcnic_83xx_get_pci_info, 60 .set_nic_info = qlcnic_83xx_set_nic_info, 61 .change_macvlan = qlcnic_83xx_sre_macaddr_change, 62 .napi_enable = qlcnic_83xx_napi_enable, 63 .napi_disable = qlcnic_83xx_napi_disable, 64 .config_intr_coal = qlcnic_83xx_config_intr_coal, 65 .config_rss = qlcnic_83xx_config_rss, 66 .config_hw_lro = qlcnic_83xx_config_hw_lro, 67 .config_promisc_mode = qlcnic_83xx_nic_set_promisc, 68 .change_l2_filter = qlcnic_83xx_change_l2_filter, 69 .get_board_info = qlcnic_83xx_get_port_info, 70 .free_mac_list = qlcnic_sriov_vf_free_mac_list, 71 }; 72 73 static struct qlcnic_nic_template qlcnic_sriov_vf_ops = { 74 .config_bridged_mode = qlcnic_config_bridged_mode, 75 .config_led = qlcnic_config_led, 76 .cancel_idc_work = qlcnic_sriov_vf_cancel_fw_work, 77 .napi_add = qlcnic_83xx_napi_add, 78 .napi_del = qlcnic_83xx_napi_del, 79 .shutdown = qlcnic_sriov_vf_shutdown, 80 .resume = qlcnic_sriov_vf_resume, 81 .config_ipaddr = qlcnic_83xx_config_ipaddr, 82 .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr, 83 }; 84 85 static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = { 86 {QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2}, 87 {QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2}, 88 {QLCNIC_BC_CMD_GET_ACL, 3, 14}, 89 {QLCNIC_BC_CMD_CFG_GUEST_VLAN, 2, 2}, 90 }; 91 92 static inline bool qlcnic_sriov_bc_msg_check(u32 val) 93 { 94 return (val & (1 << QLC_BC_MSG)) ? true : false; 95 } 96 97 static inline bool qlcnic_sriov_channel_free_check(u32 val) 98 { 99 return (val & (1 << QLC_BC_CFREE)) ? true : false; 100 } 101 102 static inline bool qlcnic_sriov_flr_check(u32 val) 103 { 104 return (val & (1 << QLC_BC_FLR)) ? true : false; 105 } 106 107 static inline u8 qlcnic_sriov_target_func_id(u32 val) 108 { 109 return (val >> 4) & 0xff; 110 } 111 112 static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id) 113 { 114 struct pci_dev *dev = adapter->pdev; 115 int pos; 116 u16 stride, offset; 117 118 if (qlcnic_sriov_vf_check(adapter)) 119 return 0; 120 121 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); 122 pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset); 123 pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride); 124 125 return (dev->devfn + offset + stride * vf_id) & 0xff; 126 } 127 128 int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) 129 { 130 struct qlcnic_sriov *sriov; 131 struct qlcnic_back_channel *bc; 132 struct workqueue_struct *wq; 133 struct qlcnic_vport *vp; 134 struct qlcnic_vf_info *vf; 135 int err, i; 136 137 if (!qlcnic_sriov_enable_check(adapter)) 138 return -EIO; 139 140 sriov = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL); 141 if (!sriov) 142 return -ENOMEM; 143 144 adapter->ahw->sriov = sriov; 145 sriov->num_vfs = num_vfs; 146 bc = &sriov->bc; 147 sriov->vf_info = kzalloc(sizeof(struct qlcnic_vf_info) * 148 num_vfs, GFP_KERNEL); 149 if (!sriov->vf_info) { 150 err = -ENOMEM; 151 goto qlcnic_free_sriov; 152 } 153 154 wq = create_singlethread_workqueue("bc-trans"); 155 if (wq == NULL) { 156 err = -ENOMEM; 157 dev_err(&adapter->pdev->dev, 158 "Cannot create bc-trans workqueue\n"); 159 goto qlcnic_free_vf_info; 160 } 161 162 bc->bc_trans_wq = wq; 163 164 wq = create_singlethread_workqueue("async"); 165 if (wq == NULL) { 166 err = -ENOMEM; 167 dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n"); 168 goto qlcnic_destroy_trans_wq; 169 } 170 171 bc->bc_async_wq = wq; 172 INIT_LIST_HEAD(&bc->async_list); 173 174 for (i = 0; i < num_vfs; i++) { 175 vf = &sriov->vf_info[i]; 176 vf->adapter = adapter; 177 vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i); 178 mutex_init(&vf->send_cmd_lock); 179 INIT_LIST_HEAD(&vf->rcv_act.wait_list); 180 INIT_LIST_HEAD(&vf->rcv_pend.wait_list); 181 spin_lock_init(&vf->rcv_act.lock); 182 spin_lock_init(&vf->rcv_pend.lock); 183 init_completion(&vf->ch_free_cmpl); 184 185 INIT_WORK(&vf->trans_work, qlcnic_sriov_process_bc_cmd); 186 187 if (qlcnic_sriov_pf_check(adapter)) { 188 vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL); 189 if (!vp) { 190 err = -ENOMEM; 191 goto qlcnic_destroy_async_wq; 192 } 193 sriov->vf_info[i].vp = vp; 194 vp->max_tx_bw = MAX_BW; 195 vp->spoofchk = true; 196 random_ether_addr(vp->mac); 197 dev_info(&adapter->pdev->dev, 198 "MAC Address %pM is configured for VF %d\n", 199 vp->mac, i); 200 } 201 } 202 203 return 0; 204 205 qlcnic_destroy_async_wq: 206 destroy_workqueue(bc->bc_async_wq); 207 208 qlcnic_destroy_trans_wq: 209 destroy_workqueue(bc->bc_trans_wq); 210 211 qlcnic_free_vf_info: 212 kfree(sriov->vf_info); 213 214 qlcnic_free_sriov: 215 kfree(adapter->ahw->sriov); 216 return err; 217 } 218 219 void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *t_list) 220 { 221 struct qlcnic_bc_trans *trans; 222 struct qlcnic_cmd_args cmd; 223 unsigned long flags; 224 225 spin_lock_irqsave(&t_list->lock, flags); 226 227 while (!list_empty(&t_list->wait_list)) { 228 trans = list_first_entry(&t_list->wait_list, 229 struct qlcnic_bc_trans, list); 230 list_del(&trans->list); 231 t_list->count--; 232 cmd.req.arg = (u32 *)trans->req_pay; 233 cmd.rsp.arg = (u32 *)trans->rsp_pay; 234 qlcnic_free_mbx_args(&cmd); 235 qlcnic_sriov_cleanup_transaction(trans); 236 } 237 238 spin_unlock_irqrestore(&t_list->lock, flags); 239 } 240 241 void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter) 242 { 243 struct qlcnic_sriov *sriov = adapter->ahw->sriov; 244 struct qlcnic_back_channel *bc = &sriov->bc; 245 struct qlcnic_vf_info *vf; 246 int i; 247 248 if (!qlcnic_sriov_enable_check(adapter)) 249 return; 250 251 qlcnic_sriov_cleanup_async_list(bc); 252 destroy_workqueue(bc->bc_async_wq); 253 254 for (i = 0; i < sriov->num_vfs; i++) { 255 vf = &sriov->vf_info[i]; 256 qlcnic_sriov_cleanup_list(&vf->rcv_pend); 257 cancel_work_sync(&vf->trans_work); 258 qlcnic_sriov_cleanup_list(&vf->rcv_act); 259 } 260 261 destroy_workqueue(bc->bc_trans_wq); 262 263 for (i = 0; i < sriov->num_vfs; i++) 264 kfree(sriov->vf_info[i].vp); 265 266 kfree(sriov->vf_info); 267 kfree(adapter->ahw->sriov); 268 } 269 270 static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter) 271 { 272 qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM); 273 qlcnic_sriov_cfg_bc_intr(adapter, 0); 274 __qlcnic_sriov_cleanup(adapter); 275 } 276 277 void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter) 278 { 279 if (qlcnic_sriov_pf_check(adapter)) 280 qlcnic_sriov_pf_cleanup(adapter); 281 282 if (qlcnic_sriov_vf_check(adapter)) 283 qlcnic_sriov_vf_cleanup(adapter); 284 } 285 286 static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr, 287 u32 *pay, u8 pci_func, u8 size) 288 { 289 struct qlcnic_hardware_context *ahw = adapter->ahw; 290 struct qlcnic_mailbox *mbx = ahw->mailbox; 291 struct qlcnic_cmd_args cmd; 292 unsigned long timeout; 293 int err; 294 295 memset(&cmd, 0, sizeof(struct qlcnic_cmd_args)); 296 cmd.hdr = hdr; 297 cmd.pay = pay; 298 cmd.pay_size = size; 299 cmd.func_num = pci_func; 300 cmd.op_type = QLC_83XX_MBX_POST_BC_OP; 301 cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op; 302 303 err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout); 304 if (err) { 305 dev_err(&adapter->pdev->dev, 306 "%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n", 307 __func__, cmd.cmd_op, cmd.type, ahw->pci_func, 308 ahw->op_mode); 309 return err; 310 } 311 312 if (!wait_for_completion_timeout(&cmd.completion, timeout)) { 313 dev_err(&adapter->pdev->dev, 314 "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n", 315 __func__, cmd.cmd_op, cmd.type, ahw->pci_func, 316 ahw->op_mode); 317 flush_workqueue(mbx->work_q); 318 } 319 320 return cmd.rsp_opcode; 321 } 322 323 static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter) 324 { 325 adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF; 326 adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G; 327 adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF; 328 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; 329 adapter->num_txd = MAX_CMD_DESCRIPTORS; 330 adapter->max_rds_rings = MAX_RDS_RINGS; 331 } 332 333 int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter, 334 struct qlcnic_info *npar_info, u16 vport_id) 335 { 336 struct device *dev = &adapter->pdev->dev; 337 struct qlcnic_cmd_args cmd; 338 int err; 339 u32 status; 340 341 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO); 342 if (err) 343 return err; 344 345 cmd.req.arg[1] = vport_id << 16 | 0x1; 346 err = qlcnic_issue_cmd(adapter, &cmd); 347 if (err) { 348 dev_err(&adapter->pdev->dev, 349 "Failed to get vport info, err=%d\n", err); 350 qlcnic_free_mbx_args(&cmd); 351 return err; 352 } 353 354 status = cmd.rsp.arg[2] & 0xffff; 355 if (status & BIT_0) 356 npar_info->min_tx_bw = MSW(cmd.rsp.arg[2]); 357 if (status & BIT_1) 358 npar_info->max_tx_bw = LSW(cmd.rsp.arg[3]); 359 if (status & BIT_2) 360 npar_info->max_tx_ques = MSW(cmd.rsp.arg[3]); 361 if (status & BIT_3) 362 npar_info->max_tx_mac_filters = LSW(cmd.rsp.arg[4]); 363 if (status & BIT_4) 364 npar_info->max_rx_mcast_mac_filters = MSW(cmd.rsp.arg[4]); 365 if (status & BIT_5) 366 npar_info->max_rx_ucast_mac_filters = LSW(cmd.rsp.arg[5]); 367 if (status & BIT_6) 368 npar_info->max_rx_ip_addr = MSW(cmd.rsp.arg[5]); 369 if (status & BIT_7) 370 npar_info->max_rx_lro_flow = LSW(cmd.rsp.arg[6]); 371 if (status & BIT_8) 372 npar_info->max_rx_status_rings = MSW(cmd.rsp.arg[6]); 373 if (status & BIT_9) 374 npar_info->max_rx_buf_rings = LSW(cmd.rsp.arg[7]); 375 376 npar_info->max_rx_ques = MSW(cmd.rsp.arg[7]); 377 npar_info->max_tx_vlan_keys = LSW(cmd.rsp.arg[8]); 378 npar_info->max_local_ipv6_addrs = MSW(cmd.rsp.arg[8]); 379 npar_info->max_remote_ipv6_addrs = LSW(cmd.rsp.arg[9]); 380 381 dev_info(dev, "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n" 382 "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n" 383 "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n" 384 "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n" 385 "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n" 386 "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n", 387 npar_info->min_tx_bw, npar_info->max_tx_bw, 388 npar_info->max_tx_ques, npar_info->max_tx_mac_filters, 389 npar_info->max_rx_mcast_mac_filters, 390 npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr, 391 npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings, 392 npar_info->max_rx_buf_rings, npar_info->max_rx_ques, 393 npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs, 394 npar_info->max_remote_ipv6_addrs); 395 396 qlcnic_free_mbx_args(&cmd); 397 return err; 398 } 399 400 static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter, 401 struct qlcnic_cmd_args *cmd) 402 { 403 adapter->rx_pvid = MSW(cmd->rsp.arg[1]) & 0xffff; 404 adapter->flags &= ~QLCNIC_TAGGING_ENABLED; 405 return 0; 406 } 407 408 static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter, 409 struct qlcnic_cmd_args *cmd) 410 { 411 struct qlcnic_sriov *sriov = adapter->ahw->sriov; 412 int i, num_vlans; 413 u16 *vlans; 414 415 if (sriov->allowed_vlans) 416 return 0; 417 418 sriov->any_vlan = cmd->rsp.arg[2] & 0xf; 419 if (!sriov->any_vlan) 420 return 0; 421 422 sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16; 423 num_vlans = sriov->num_allowed_vlans; 424 sriov->allowed_vlans = kzalloc(sizeof(u16) * num_vlans, GFP_KERNEL); 425 if (!sriov->allowed_vlans) 426 return -ENOMEM; 427 428 vlans = (u16 *)&cmd->rsp.arg[3]; 429 for (i = 0; i < num_vlans; i++) 430 sriov->allowed_vlans[i] = vlans[i]; 431 432 return 0; 433 } 434 435 static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter, 436 struct qlcnic_info *info) 437 { 438 struct qlcnic_sriov *sriov = adapter->ahw->sriov; 439 struct qlcnic_cmd_args cmd; 440 int ret = 0; 441 442 ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL); 443 if (ret) 444 return ret; 445 446 ret = qlcnic_issue_cmd(adapter, &cmd); 447 if (ret) { 448 dev_err(&adapter->pdev->dev, "Failed to get ACL, err=%d\n", 449 ret); 450 } else { 451 sriov->vlan_mode = cmd.rsp.arg[1] & 0x3; 452 switch (sriov->vlan_mode) { 453 case QLC_GUEST_VLAN_MODE: 454 ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd); 455 break; 456 case QLC_PVID_MODE: 457 ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd); 458 break; 459 } 460 } 461 462 qlcnic_free_mbx_args(&cmd); 463 return ret; 464 } 465 466 static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter) 467 { 468 struct qlcnic_hardware_context *ahw = adapter->ahw; 469 struct qlcnic_info nic_info; 470 int err; 471 472 err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0); 473 if (err) 474 return err; 475 476 err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func); 477 if (err) 478 return -EIO; 479 480 err = qlcnic_sriov_get_vf_acl(adapter, &nic_info); 481 if (err) 482 return err; 483 484 if (qlcnic_83xx_get_port_info(adapter)) 485 return -EIO; 486 487 qlcnic_sriov_vf_cfg_buff_desc(adapter); 488 adapter->flags |= QLCNIC_ADAPTER_INITIALIZED; 489 dev_info(&adapter->pdev->dev, "HAL Version: %d\n", 490 adapter->ahw->fw_hal_version); 491 492 ahw->physical_port = (u8) nic_info.phys_port; 493 ahw->switch_mode = nic_info.switch_mode; 494 ahw->max_mtu = nic_info.max_mtu; 495 ahw->op_mode = nic_info.op_mode; 496 ahw->capabilities = nic_info.capabilities; 497 return 0; 498 } 499 500 static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter, 501 int pci_using_dac) 502 { 503 int err; 504 505 INIT_LIST_HEAD(&adapter->vf_mc_list); 506 if (!qlcnic_use_msi_x && !!qlcnic_use_msi) 507 dev_warn(&adapter->pdev->dev, 508 "Device does not support MSI interrupts\n"); 509 510 err = qlcnic_setup_intr(adapter, 1, 0); 511 if (err) { 512 dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n"); 513 goto err_out_disable_msi; 514 } 515 516 err = qlcnic_83xx_setup_mbx_intr(adapter); 517 if (err) 518 goto err_out_disable_msi; 519 520 err = qlcnic_sriov_init(adapter, 1); 521 if (err) 522 goto err_out_disable_mbx_intr; 523 524 err = qlcnic_sriov_cfg_bc_intr(adapter, 1); 525 if (err) 526 goto err_out_cleanup_sriov; 527 528 err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT); 529 if (err) 530 goto err_out_disable_bc_intr; 531 532 err = qlcnic_sriov_vf_init_driver(adapter); 533 if (err) 534 goto err_out_send_channel_term; 535 536 if (adapter->dcb && qlcnic_dcb_attach(adapter)) 537 qlcnic_clear_dcb_ops(adapter); 538 539 err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac); 540 if (err) 541 goto err_out_send_channel_term; 542 543 pci_set_drvdata(adapter->pdev, adapter); 544 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n", 545 adapter->netdev->name); 546 547 qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state, 548 adapter->ahw->idc.delay); 549 return 0; 550 551 err_out_send_channel_term: 552 qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM); 553 554 err_out_disable_bc_intr: 555 qlcnic_sriov_cfg_bc_intr(adapter, 0); 556 557 err_out_cleanup_sriov: 558 __qlcnic_sriov_cleanup(adapter); 559 560 err_out_disable_mbx_intr: 561 qlcnic_83xx_free_mbx_intr(adapter); 562 563 err_out_disable_msi: 564 qlcnic_teardown_intr(adapter); 565 return err; 566 } 567 568 static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter) 569 { 570 u32 state; 571 572 do { 573 msleep(20); 574 if (++adapter->fw_fail_cnt > QLC_BC_CMD_MAX_RETRY_CNT) 575 return -EIO; 576 state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE); 577 } while (state != QLC_83XX_IDC_DEV_READY); 578 579 return 0; 580 } 581 582 int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac) 583 { 584 struct qlcnic_hardware_context *ahw = adapter->ahw; 585 int err; 586 587 set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status); 588 ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; 589 ahw->reset_context = 0; 590 adapter->fw_fail_cnt = 0; 591 ahw->msix_supported = 1; 592 adapter->need_fw_reset = 0; 593 adapter->flags |= QLCNIC_TX_INTR_SHARED; 594 595 err = qlcnic_sriov_check_dev_ready(adapter); 596 if (err) 597 return err; 598 599 err = qlcnic_sriov_setup_vf(adapter, pci_using_dac); 600 if (err) 601 return err; 602 603 if (qlcnic_read_mac_addr(adapter)) 604 dev_warn(&adapter->pdev->dev, "failed to read mac addr\n"); 605 606 INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work); 607 608 clear_bit(__QLCNIC_RESETTING, &adapter->state); 609 return 0; 610 } 611 612 void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter) 613 { 614 struct qlcnic_hardware_context *ahw = adapter->ahw; 615 616 ahw->op_mode = QLCNIC_SRIOV_VF_FUNC; 617 dev_info(&adapter->pdev->dev, 618 "HAL Version: %d Non Privileged SRIOV function\n", 619 ahw->fw_hal_version); 620 adapter->nic_ops = &qlcnic_sriov_vf_ops; 621 set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state); 622 return; 623 } 624 625 void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *ahw) 626 { 627 ahw->hw_ops = &qlcnic_sriov_vf_hw_ops; 628 ahw->reg_tbl = (u32 *)qlcnic_83xx_reg_tbl; 629 ahw->ext_reg_tbl = (u32 *)qlcnic_83xx_ext_reg_tbl; 630 } 631 632 static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag) 633 { 634 u32 pay_size; 635 636 pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ); 637 638 if (pay_size) 639 pay_size = QLC_BC_PAYLOAD_SZ; 640 else 641 pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ; 642 643 return pay_size; 644 } 645 646 int qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func) 647 { 648 struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info; 649 u8 i; 650 651 if (qlcnic_sriov_vf_check(adapter)) 652 return 0; 653 654 for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) { 655 if (vf_info[i].pci_func == pci_func) 656 return i; 657 } 658 659 return -EINVAL; 660 } 661 662 static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans) 663 { 664 *trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC); 665 if (!*trans) 666 return -ENOMEM; 667 668 init_completion(&(*trans)->resp_cmpl); 669 return 0; 670 } 671 672 static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr, 673 u32 size) 674 { 675 *hdr = kzalloc(sizeof(struct qlcnic_bc_hdr) * size, GFP_ATOMIC); 676 if (!*hdr) 677 return -ENOMEM; 678 679 return 0; 680 } 681 682 static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type) 683 { 684 const struct qlcnic_mailbox_metadata *mbx_tbl; 685 int i, size; 686 687 mbx_tbl = qlcnic_sriov_bc_mbx_tbl; 688 size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl); 689 690 for (i = 0; i < size; i++) { 691 if (type == mbx_tbl[i].cmd) { 692 mbx->op_type = QLC_BC_CMD; 693 mbx->req.num = mbx_tbl[i].in_args; 694 mbx->rsp.num = mbx_tbl[i].out_args; 695 mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32), 696 GFP_ATOMIC); 697 if (!mbx->req.arg) 698 return -ENOMEM; 699 mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32), 700 GFP_ATOMIC); 701 if (!mbx->rsp.arg) { 702 kfree(mbx->req.arg); 703 mbx->req.arg = NULL; 704 return -ENOMEM; 705 } 706 memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num); 707 memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num); 708 mbx->req.arg[0] = (type | (mbx->req.num << 16) | 709 (3 << 29)); 710 mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16; 711 return 0; 712 } 713 } 714 return -EINVAL; 715 } 716 717 static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans, 718 struct qlcnic_cmd_args *cmd, 719 u16 seq, u8 msg_type) 720 { 721 struct qlcnic_bc_hdr *hdr; 722 int i; 723 u32 num_regs, bc_pay_sz; 724 u16 remainder; 725 u8 cmd_op, num_frags, t_num_frags; 726 727 bc_pay_sz = QLC_BC_PAYLOAD_SZ; 728 if (msg_type == QLC_BC_COMMAND) { 729 trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg; 730 trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg; 731 num_regs = cmd->req.num; 732 trans->req_pay_size = (num_regs * 4); 733 num_regs = cmd->rsp.num; 734 trans->rsp_pay_size = (num_regs * 4); 735 cmd_op = cmd->req.arg[0] & 0xff; 736 remainder = (trans->req_pay_size) % (bc_pay_sz); 737 num_frags = (trans->req_pay_size) / (bc_pay_sz); 738 if (remainder) 739 num_frags++; 740 t_num_frags = num_frags; 741 if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags)) 742 return -ENOMEM; 743 remainder = (trans->rsp_pay_size) % (bc_pay_sz); 744 num_frags = (trans->rsp_pay_size) / (bc_pay_sz); 745 if (remainder) 746 num_frags++; 747 if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags)) 748 return -ENOMEM; 749 num_frags = t_num_frags; 750 hdr = trans->req_hdr; 751 } else { 752 cmd->req.arg = (u32 *)trans->req_pay; 753 cmd->rsp.arg = (u32 *)trans->rsp_pay; 754 cmd_op = cmd->req.arg[0] & 0xff; 755 remainder = (trans->rsp_pay_size) % (bc_pay_sz); 756 num_frags = (trans->rsp_pay_size) / (bc_pay_sz); 757 if (remainder) 758 num_frags++; 759 cmd->req.num = trans->req_pay_size / 4; 760 cmd->rsp.num = trans->rsp_pay_size / 4; 761 hdr = trans->rsp_hdr; 762 cmd->op_type = trans->req_hdr->op_type; 763 } 764 765 trans->trans_id = seq; 766 trans->cmd_id = cmd_op; 767 for (i = 0; i < num_frags; i++) { 768 hdr[i].version = 2; 769 hdr[i].msg_type = msg_type; 770 hdr[i].op_type = cmd->op_type; 771 hdr[i].num_cmds = 1; 772 hdr[i].num_frags = num_frags; 773 hdr[i].frag_num = i + 1; 774 hdr[i].cmd_op = cmd_op; 775 hdr[i].seq_id = seq; 776 } 777 return 0; 778 } 779 780 static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans) 781 { 782 if (!trans) 783 return; 784 kfree(trans->req_hdr); 785 kfree(trans->rsp_hdr); 786 kfree(trans); 787 } 788 789 static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf, 790 struct qlcnic_bc_trans *trans, u8 type) 791 { 792 struct qlcnic_trans_list *t_list; 793 unsigned long flags; 794 int ret = 0; 795 796 if (type == QLC_BC_RESPONSE) { 797 t_list = &vf->rcv_act; 798 spin_lock_irqsave(&t_list->lock, flags); 799 t_list->count--; 800 list_del(&trans->list); 801 if (t_list->count > 0) 802 ret = 1; 803 spin_unlock_irqrestore(&t_list->lock, flags); 804 } 805 if (type == QLC_BC_COMMAND) { 806 while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state)) 807 msleep(100); 808 vf->send_cmd = NULL; 809 clear_bit(QLC_BC_VF_SEND, &vf->state); 810 } 811 return ret; 812 } 813 814 static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov, 815 struct qlcnic_vf_info *vf, 816 work_func_t func) 817 { 818 if (test_bit(QLC_BC_VF_FLR, &vf->state) || 819 vf->adapter->need_fw_reset) 820 return; 821 822 queue_work(sriov->bc.bc_trans_wq, &vf->trans_work); 823 } 824 825 static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans) 826 { 827 struct completion *cmpl = &trans->resp_cmpl; 828 829 if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT)) 830 trans->trans_state = QLC_END; 831 else 832 trans->trans_state = QLC_ABORT; 833 834 return; 835 } 836 837 static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans, 838 u8 type) 839 { 840 if (type == QLC_BC_RESPONSE) { 841 trans->curr_rsp_frag++; 842 if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags) 843 trans->trans_state = QLC_INIT; 844 else 845 trans->trans_state = QLC_END; 846 } else { 847 trans->curr_req_frag++; 848 if (trans->curr_req_frag < trans->req_hdr->num_frags) 849 trans->trans_state = QLC_INIT; 850 else 851 trans->trans_state = QLC_WAIT_FOR_RESP; 852 } 853 } 854 855 static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans, 856 u8 type) 857 { 858 struct qlcnic_vf_info *vf = trans->vf; 859 struct completion *cmpl = &vf->ch_free_cmpl; 860 861 if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) { 862 trans->trans_state = QLC_ABORT; 863 return; 864 } 865 866 clear_bit(QLC_BC_VF_CHANNEL, &vf->state); 867 qlcnic_sriov_handle_multi_frags(trans, type); 868 } 869 870 static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter, 871 u32 *hdr, u32 *pay, u32 size) 872 { 873 struct qlcnic_hardware_context *ahw = adapter->ahw; 874 u32 fw_mbx; 875 u8 i, max = 2, hdr_size, j; 876 877 hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); 878 max = (size / sizeof(u32)) + hdr_size; 879 880 fw_mbx = readl(QLCNIC_MBX_FW(ahw, 0)); 881 for (i = 2, j = 0; j < hdr_size; i++, j++) 882 *(hdr++) = readl(QLCNIC_MBX_FW(ahw, i)); 883 for (; j < max; i++, j++) 884 *(pay++) = readl(QLCNIC_MBX_FW(ahw, i)); 885 } 886 887 static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf) 888 { 889 int ret = -EBUSY; 890 u32 timeout = 10000; 891 892 do { 893 if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) { 894 ret = 0; 895 break; 896 } 897 mdelay(1); 898 } while (--timeout); 899 900 return ret; 901 } 902 903 static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type) 904 { 905 struct qlcnic_vf_info *vf = trans->vf; 906 u32 pay_size, hdr_size; 907 u32 *hdr, *pay; 908 int ret; 909 u8 pci_func = trans->func_id; 910 911 if (__qlcnic_sriov_issue_bc_post(vf)) 912 return -EBUSY; 913 914 if (type == QLC_BC_COMMAND) { 915 hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag); 916 pay = (u32 *)(trans->req_pay + trans->curr_req_frag); 917 hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); 918 pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size, 919 trans->curr_req_frag); 920 pay_size = (pay_size / sizeof(u32)); 921 } else { 922 hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag); 923 pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag); 924 hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); 925 pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size, 926 trans->curr_rsp_frag); 927 pay_size = (pay_size / sizeof(u32)); 928 } 929 930 ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay, 931 pci_func, pay_size); 932 return ret; 933 } 934 935 static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans, 936 struct qlcnic_vf_info *vf, u8 type) 937 { 938 bool flag = true; 939 int err = -EIO; 940 941 while (flag) { 942 if (test_bit(QLC_BC_VF_FLR, &vf->state) || 943 vf->adapter->need_fw_reset) 944 trans->trans_state = QLC_ABORT; 945 946 switch (trans->trans_state) { 947 case QLC_INIT: 948 trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE; 949 if (qlcnic_sriov_issue_bc_post(trans, type)) 950 trans->trans_state = QLC_ABORT; 951 break; 952 case QLC_WAIT_FOR_CHANNEL_FREE: 953 qlcnic_sriov_wait_for_channel_free(trans, type); 954 break; 955 case QLC_WAIT_FOR_RESP: 956 qlcnic_sriov_wait_for_resp(trans); 957 break; 958 case QLC_END: 959 err = 0; 960 flag = false; 961 break; 962 case QLC_ABORT: 963 err = -EIO; 964 flag = false; 965 clear_bit(QLC_BC_VF_CHANNEL, &vf->state); 966 break; 967 default: 968 err = -EIO; 969 flag = false; 970 } 971 } 972 return err; 973 } 974 975 static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter, 976 struct qlcnic_bc_trans *trans, int pci_func) 977 { 978 struct qlcnic_vf_info *vf; 979 int err, index = qlcnic_sriov_func_to_index(adapter, pci_func); 980 981 if (index < 0) 982 return -EIO; 983 984 vf = &adapter->ahw->sriov->vf_info[index]; 985 trans->vf = vf; 986 trans->func_id = pci_func; 987 988 if (!test_bit(QLC_BC_VF_STATE, &vf->state)) { 989 if (qlcnic_sriov_pf_check(adapter)) 990 return -EIO; 991 if (qlcnic_sriov_vf_check(adapter) && 992 trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT) 993 return -EIO; 994 } 995 996 mutex_lock(&vf->send_cmd_lock); 997 vf->send_cmd = trans; 998 err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND); 999 qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND); 1000 mutex_unlock(&vf->send_cmd_lock); 1001 return err; 1002 } 1003 1004 static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter, 1005 struct qlcnic_bc_trans *trans, 1006 struct qlcnic_cmd_args *cmd) 1007 { 1008 #ifdef CONFIG_QLCNIC_SRIOV 1009 if (qlcnic_sriov_pf_check(adapter)) { 1010 qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd); 1011 return; 1012 } 1013 #endif 1014 cmd->rsp.arg[0] |= (0x9 << 25); 1015 return; 1016 } 1017 1018 static void qlcnic_sriov_process_bc_cmd(struct work_struct *work) 1019 { 1020 struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info, 1021 trans_work); 1022 struct qlcnic_bc_trans *trans = NULL; 1023 struct qlcnic_adapter *adapter = vf->adapter; 1024 struct qlcnic_cmd_args cmd; 1025 u8 req; 1026 1027 if (adapter->need_fw_reset) 1028 return; 1029 1030 if (test_bit(QLC_BC_VF_FLR, &vf->state)) 1031 return; 1032 1033 memset(&cmd, 0, sizeof(struct qlcnic_cmd_args)); 1034 trans = list_first_entry(&vf->rcv_act.wait_list, 1035 struct qlcnic_bc_trans, list); 1036 adapter = vf->adapter; 1037 1038 if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id, 1039 QLC_BC_RESPONSE)) 1040 goto cleanup_trans; 1041 1042 __qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd); 1043 trans->trans_state = QLC_INIT; 1044 __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE); 1045 1046 cleanup_trans: 1047 qlcnic_free_mbx_args(&cmd); 1048 req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE); 1049 qlcnic_sriov_cleanup_transaction(trans); 1050 if (req) 1051 qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf, 1052 qlcnic_sriov_process_bc_cmd); 1053 } 1054 1055 static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr, 1056 struct qlcnic_vf_info *vf) 1057 { 1058 struct qlcnic_bc_trans *trans; 1059 u32 pay_size; 1060 1061 if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state)) 1062 return; 1063 1064 trans = vf->send_cmd; 1065 1066 if (trans == NULL) 1067 goto clear_send; 1068 1069 if (trans->trans_id != hdr->seq_id) 1070 goto clear_send; 1071 1072 pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size, 1073 trans->curr_rsp_frag); 1074 qlcnic_sriov_pull_bc_msg(vf->adapter, 1075 (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag), 1076 (u32 *)(trans->rsp_pay + trans->curr_rsp_frag), 1077 pay_size); 1078 if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags) 1079 goto clear_send; 1080 1081 complete(&trans->resp_cmpl); 1082 1083 clear_send: 1084 clear_bit(QLC_BC_VF_SEND, &vf->state); 1085 } 1086 1087 int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov, 1088 struct qlcnic_vf_info *vf, 1089 struct qlcnic_bc_trans *trans) 1090 { 1091 struct qlcnic_trans_list *t_list = &vf->rcv_act; 1092 1093 t_list->count++; 1094 list_add_tail(&trans->list, &t_list->wait_list); 1095 if (t_list->count == 1) 1096 qlcnic_sriov_schedule_bc_cmd(sriov, vf, 1097 qlcnic_sriov_process_bc_cmd); 1098 return 0; 1099 } 1100 1101 static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov, 1102 struct qlcnic_vf_info *vf, 1103 struct qlcnic_bc_trans *trans) 1104 { 1105 struct qlcnic_trans_list *t_list = &vf->rcv_act; 1106 1107 spin_lock(&t_list->lock); 1108 1109 __qlcnic_sriov_add_act_list(sriov, vf, trans); 1110 1111 spin_unlock(&t_list->lock); 1112 return 0; 1113 } 1114 1115 static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov, 1116 struct qlcnic_vf_info *vf, 1117 struct qlcnic_bc_hdr *hdr) 1118 { 1119 struct qlcnic_bc_trans *trans = NULL; 1120 struct list_head *node; 1121 u32 pay_size, curr_frag; 1122 u8 found = 0, active = 0; 1123 1124 spin_lock(&vf->rcv_pend.lock); 1125 if (vf->rcv_pend.count > 0) { 1126 list_for_each(node, &vf->rcv_pend.wait_list) { 1127 trans = list_entry(node, struct qlcnic_bc_trans, list); 1128 if (trans->trans_id == hdr->seq_id) { 1129 found = 1; 1130 break; 1131 } 1132 } 1133 } 1134 1135 if (found) { 1136 curr_frag = trans->curr_req_frag; 1137 pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size, 1138 curr_frag); 1139 qlcnic_sriov_pull_bc_msg(vf->adapter, 1140 (u32 *)(trans->req_hdr + curr_frag), 1141 (u32 *)(trans->req_pay + curr_frag), 1142 pay_size); 1143 trans->curr_req_frag++; 1144 if (trans->curr_req_frag >= hdr->num_frags) { 1145 vf->rcv_pend.count--; 1146 list_del(&trans->list); 1147 active = 1; 1148 } 1149 } 1150 spin_unlock(&vf->rcv_pend.lock); 1151 1152 if (active) 1153 if (qlcnic_sriov_add_act_list(sriov, vf, trans)) 1154 qlcnic_sriov_cleanup_transaction(trans); 1155 1156 return; 1157 } 1158 1159 static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov, 1160 struct qlcnic_bc_hdr *hdr, 1161 struct qlcnic_vf_info *vf) 1162 { 1163 struct qlcnic_bc_trans *trans; 1164 struct qlcnic_adapter *adapter = vf->adapter; 1165 struct qlcnic_cmd_args cmd; 1166 u32 pay_size; 1167 int err; 1168 u8 cmd_op; 1169 1170 if (adapter->need_fw_reset) 1171 return; 1172 1173 if (!test_bit(QLC_BC_VF_STATE, &vf->state) && 1174 hdr->op_type != QLC_BC_CMD && 1175 hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT) 1176 return; 1177 1178 if (hdr->frag_num > 1) { 1179 qlcnic_sriov_handle_pending_trans(sriov, vf, hdr); 1180 return; 1181 } 1182 1183 memset(&cmd, 0, sizeof(struct qlcnic_cmd_args)); 1184 cmd_op = hdr->cmd_op; 1185 if (qlcnic_sriov_alloc_bc_trans(&trans)) 1186 return; 1187 1188 if (hdr->op_type == QLC_BC_CMD) 1189 err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op); 1190 else 1191 err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op); 1192 1193 if (err) { 1194 qlcnic_sriov_cleanup_transaction(trans); 1195 return; 1196 } 1197 1198 cmd.op_type = hdr->op_type; 1199 if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id, 1200 QLC_BC_COMMAND)) { 1201 qlcnic_free_mbx_args(&cmd); 1202 qlcnic_sriov_cleanup_transaction(trans); 1203 return; 1204 } 1205 1206 pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size, 1207 trans->curr_req_frag); 1208 qlcnic_sriov_pull_bc_msg(vf->adapter, 1209 (u32 *)(trans->req_hdr + trans->curr_req_frag), 1210 (u32 *)(trans->req_pay + trans->curr_req_frag), 1211 pay_size); 1212 trans->func_id = vf->pci_func; 1213 trans->vf = vf; 1214 trans->trans_id = hdr->seq_id; 1215 trans->curr_req_frag++; 1216 1217 if (qlcnic_sriov_soft_flr_check(adapter, trans, vf)) 1218 return; 1219 1220 if (trans->curr_req_frag == trans->req_hdr->num_frags) { 1221 if (qlcnic_sriov_add_act_list(sriov, vf, trans)) { 1222 qlcnic_free_mbx_args(&cmd); 1223 qlcnic_sriov_cleanup_transaction(trans); 1224 } 1225 } else { 1226 spin_lock(&vf->rcv_pend.lock); 1227 list_add_tail(&trans->list, &vf->rcv_pend.wait_list); 1228 vf->rcv_pend.count++; 1229 spin_unlock(&vf->rcv_pend.lock); 1230 } 1231 } 1232 1233 static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov, 1234 struct qlcnic_vf_info *vf) 1235 { 1236 struct qlcnic_bc_hdr hdr; 1237 u32 *ptr = (u32 *)&hdr; 1238 u8 msg_type, i; 1239 1240 for (i = 2; i < 6; i++) 1241 ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i)); 1242 msg_type = hdr.msg_type; 1243 1244 switch (msg_type) { 1245 case QLC_BC_COMMAND: 1246 qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf); 1247 break; 1248 case QLC_BC_RESPONSE: 1249 qlcnic_sriov_handle_bc_resp(&hdr, vf); 1250 break; 1251 } 1252 } 1253 1254 static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov, 1255 struct qlcnic_vf_info *vf) 1256 { 1257 struct qlcnic_adapter *adapter = vf->adapter; 1258 1259 if (qlcnic_sriov_pf_check(adapter)) 1260 qlcnic_sriov_pf_handle_flr(sriov, vf); 1261 else 1262 dev_err(&adapter->pdev->dev, 1263 "Invalid event to VF. VF should not get FLR event\n"); 1264 } 1265 1266 void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event) 1267 { 1268 struct qlcnic_vf_info *vf; 1269 struct qlcnic_sriov *sriov; 1270 int index; 1271 u8 pci_func; 1272 1273 sriov = adapter->ahw->sriov; 1274 pci_func = qlcnic_sriov_target_func_id(event); 1275 index = qlcnic_sriov_func_to_index(adapter, pci_func); 1276 1277 if (index < 0) 1278 return; 1279 1280 vf = &sriov->vf_info[index]; 1281 vf->pci_func = pci_func; 1282 1283 if (qlcnic_sriov_channel_free_check(event)) 1284 complete(&vf->ch_free_cmpl); 1285 1286 if (qlcnic_sriov_flr_check(event)) { 1287 qlcnic_sriov_handle_flr_event(sriov, vf); 1288 return; 1289 } 1290 1291 if (qlcnic_sriov_bc_msg_check(event)) 1292 qlcnic_sriov_handle_msg_event(sriov, vf); 1293 } 1294 1295 int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable) 1296 { 1297 struct qlcnic_cmd_args cmd; 1298 int err; 1299 1300 if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state)) 1301 return 0; 1302 1303 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP)) 1304 return -ENOMEM; 1305 1306 if (enable) 1307 cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7); 1308 1309 err = qlcnic_83xx_issue_cmd(adapter, &cmd); 1310 1311 if (err != QLCNIC_RCODE_SUCCESS) { 1312 dev_err(&adapter->pdev->dev, 1313 "Failed to %s bc events, err=%d\n", 1314 (enable ? "enable" : "disable"), err); 1315 } 1316 1317 qlcnic_free_mbx_args(&cmd); 1318 return err; 1319 } 1320 1321 static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter, 1322 struct qlcnic_bc_trans *trans) 1323 { 1324 u8 max = QLC_BC_CMD_MAX_RETRY_CNT; 1325 u32 state; 1326 1327 state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE); 1328 if (state == QLC_83XX_IDC_DEV_READY) { 1329 msleep(20); 1330 clear_bit(QLC_BC_VF_CHANNEL, &trans->vf->state); 1331 trans->trans_state = QLC_INIT; 1332 if (++adapter->fw_fail_cnt > max) 1333 return -EIO; 1334 else 1335 return 0; 1336 } 1337 1338 return -EIO; 1339 } 1340 1341 static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter, 1342 struct qlcnic_cmd_args *cmd) 1343 { 1344 struct qlcnic_hardware_context *ahw = adapter->ahw; 1345 struct qlcnic_mailbox *mbx = ahw->mailbox; 1346 struct device *dev = &adapter->pdev->dev; 1347 struct qlcnic_bc_trans *trans; 1348 int err; 1349 u32 rsp_data, opcode, mbx_err_code, rsp; 1350 u16 seq = ++adapter->ahw->sriov->bc.trans_counter; 1351 u8 func = ahw->pci_func; 1352 1353 rsp = qlcnic_sriov_alloc_bc_trans(&trans); 1354 if (rsp) 1355 return rsp; 1356 1357 rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND); 1358 if (rsp) 1359 goto cleanup_transaction; 1360 1361 retry: 1362 if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) { 1363 rsp = -EIO; 1364 QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n", 1365 QLCNIC_MBX_RSP(cmd->req.arg[0]), func); 1366 goto err_out; 1367 } 1368 1369 err = qlcnic_sriov_send_bc_cmd(adapter, trans, func); 1370 if (err) { 1371 dev_err(dev, "MBX command 0x%x timed out for VF %d\n", 1372 (cmd->req.arg[0] & 0xffff), func); 1373 rsp = QLCNIC_RCODE_TIMEOUT; 1374 1375 /* After adapter reset PF driver may take some time to 1376 * respond to VF's request. Retry request till maximum retries. 1377 */ 1378 if ((trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) && 1379 !qlcnic_sriov_retry_bc_cmd(adapter, trans)) 1380 goto retry; 1381 1382 goto err_out; 1383 } 1384 1385 rsp_data = cmd->rsp.arg[0]; 1386 mbx_err_code = QLCNIC_MBX_STATUS(rsp_data); 1387 opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]); 1388 1389 if ((mbx_err_code == QLCNIC_MBX_RSP_OK) || 1390 (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) { 1391 rsp = QLCNIC_RCODE_SUCCESS; 1392 } else { 1393 rsp = mbx_err_code; 1394 if (!rsp) 1395 rsp = 1; 1396 dev_err(dev, 1397 "MBX command 0x%x failed with err:0x%x for VF %d\n", 1398 opcode, mbx_err_code, func); 1399 } 1400 1401 err_out: 1402 if (rsp == QLCNIC_RCODE_TIMEOUT) { 1403 ahw->reset_context = 1; 1404 adapter->need_fw_reset = 1; 1405 clear_bit(QLC_83XX_MBX_READY, &mbx->status); 1406 } 1407 1408 cleanup_transaction: 1409 qlcnic_sriov_cleanup_transaction(trans); 1410 return rsp; 1411 } 1412 1413 int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op) 1414 { 1415 struct qlcnic_cmd_args cmd; 1416 struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0]; 1417 int ret; 1418 1419 if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op)) 1420 return -ENOMEM; 1421 1422 ret = qlcnic_issue_cmd(adapter, &cmd); 1423 if (ret) { 1424 dev_err(&adapter->pdev->dev, 1425 "Failed bc channel %s %d\n", cmd_op ? "term" : "init", 1426 ret); 1427 goto out; 1428 } 1429 1430 cmd_op = (cmd.rsp.arg[0] & 0xff); 1431 if (cmd.rsp.arg[0] >> 25 == 2) 1432 return 2; 1433 if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) 1434 set_bit(QLC_BC_VF_STATE, &vf->state); 1435 else 1436 clear_bit(QLC_BC_VF_STATE, &vf->state); 1437 1438 out: 1439 qlcnic_free_mbx_args(&cmd); 1440 return ret; 1441 } 1442 1443 void qlcnic_vf_add_mc_list(struct net_device *netdev, u16 vlan) 1444 { 1445 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1446 struct qlcnic_mac_list_s *cur; 1447 struct list_head *head, tmp_list; 1448 1449 INIT_LIST_HEAD(&tmp_list); 1450 head = &adapter->vf_mc_list; 1451 netif_addr_lock_bh(netdev); 1452 1453 while (!list_empty(head)) { 1454 cur = list_entry(head->next, struct qlcnic_mac_list_s, list); 1455 list_move(&cur->list, &tmp_list); 1456 } 1457 1458 netif_addr_unlock_bh(netdev); 1459 1460 while (!list_empty(&tmp_list)) { 1461 cur = list_entry((&tmp_list)->next, 1462 struct qlcnic_mac_list_s, list); 1463 qlcnic_nic_add_mac(adapter, cur->mac_addr, vlan); 1464 list_del(&cur->list); 1465 kfree(cur); 1466 } 1467 } 1468 1469 void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc) 1470 { 1471 struct list_head *head = &bc->async_list; 1472 struct qlcnic_async_work_list *entry; 1473 1474 while (!list_empty(head)) { 1475 entry = list_entry(head->next, struct qlcnic_async_work_list, 1476 list); 1477 cancel_work_sync(&entry->work); 1478 list_del(&entry->list); 1479 kfree(entry); 1480 } 1481 } 1482 1483 static void qlcnic_sriov_vf_set_multi(struct net_device *netdev) 1484 { 1485 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1486 u16 vlan; 1487 1488 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) 1489 return; 1490 1491 vlan = adapter->ahw->sriov->vlan; 1492 __qlcnic_set_multi(netdev, vlan); 1493 } 1494 1495 static void qlcnic_sriov_handle_async_multi(struct work_struct *work) 1496 { 1497 struct qlcnic_async_work_list *entry; 1498 struct net_device *netdev; 1499 1500 entry = container_of(work, struct qlcnic_async_work_list, work); 1501 netdev = (struct net_device *)entry->ptr; 1502 1503 qlcnic_sriov_vf_set_multi(netdev); 1504 return; 1505 } 1506 1507 static struct qlcnic_async_work_list * 1508 qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc) 1509 { 1510 struct list_head *node; 1511 struct qlcnic_async_work_list *entry = NULL; 1512 u8 empty = 0; 1513 1514 list_for_each(node, &bc->async_list) { 1515 entry = list_entry(node, struct qlcnic_async_work_list, list); 1516 if (!work_pending(&entry->work)) { 1517 empty = 1; 1518 break; 1519 } 1520 } 1521 1522 if (!empty) { 1523 entry = kzalloc(sizeof(struct qlcnic_async_work_list), 1524 GFP_ATOMIC); 1525 if (entry == NULL) 1526 return NULL; 1527 list_add_tail(&entry->list, &bc->async_list); 1528 } 1529 1530 return entry; 1531 } 1532 1533 static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc, 1534 work_func_t func, void *data) 1535 { 1536 struct qlcnic_async_work_list *entry = NULL; 1537 1538 entry = qlcnic_sriov_get_free_node_async_work(bc); 1539 if (!entry) 1540 return; 1541 1542 entry->ptr = data; 1543 INIT_WORK(&entry->work, func); 1544 queue_work(bc->bc_async_wq, &entry->work); 1545 } 1546 1547 void qlcnic_sriov_vf_schedule_multi(struct net_device *netdev) 1548 { 1549 1550 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1551 struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc; 1552 1553 if (adapter->need_fw_reset) 1554 return; 1555 1556 qlcnic_sriov_schedule_bc_async_work(bc, qlcnic_sriov_handle_async_multi, 1557 netdev); 1558 } 1559 1560 static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter) 1561 { 1562 int err; 1563 1564 adapter->need_fw_reset = 0; 1565 qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox); 1566 qlcnic_83xx_enable_mbx_interrupt(adapter); 1567 1568 err = qlcnic_sriov_cfg_bc_intr(adapter, 1); 1569 if (err) 1570 return err; 1571 1572 err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT); 1573 if (err) 1574 goto err_out_cleanup_bc_intr; 1575 1576 err = qlcnic_sriov_vf_init_driver(adapter); 1577 if (err) 1578 goto err_out_term_channel; 1579 1580 qlcnic_dcb_get_info(adapter); 1581 1582 return 0; 1583 1584 err_out_term_channel: 1585 qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM); 1586 1587 err_out_cleanup_bc_intr: 1588 qlcnic_sriov_cfg_bc_intr(adapter, 0); 1589 return err; 1590 } 1591 1592 static void qlcnic_sriov_vf_attach(struct qlcnic_adapter *adapter) 1593 { 1594 struct net_device *netdev = adapter->netdev; 1595 1596 if (netif_running(netdev)) { 1597 if (!qlcnic_up(adapter, netdev)) 1598 qlcnic_restore_indev_addr(netdev, NETDEV_UP); 1599 } 1600 1601 netif_device_attach(netdev); 1602 } 1603 1604 static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter) 1605 { 1606 struct qlcnic_hardware_context *ahw = adapter->ahw; 1607 struct qlcnic_intrpt_config *intr_tbl = ahw->intr_tbl; 1608 struct net_device *netdev = adapter->netdev; 1609 u8 i, max_ints = ahw->num_msix - 1; 1610 1611 netif_device_detach(netdev); 1612 qlcnic_83xx_detach_mailbox_work(adapter); 1613 qlcnic_83xx_disable_mbx_intr(adapter); 1614 1615 if (netif_running(netdev)) 1616 qlcnic_down(adapter, netdev); 1617 1618 for (i = 0; i < max_ints; i++) { 1619 intr_tbl[i].id = i; 1620 intr_tbl[i].enabled = 0; 1621 intr_tbl[i].src = 0; 1622 } 1623 ahw->reset_context = 0; 1624 } 1625 1626 static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter) 1627 { 1628 struct qlcnic_hardware_context *ahw = adapter->ahw; 1629 struct device *dev = &adapter->pdev->dev; 1630 struct qlc_83xx_idc *idc = &ahw->idc; 1631 u8 func = ahw->pci_func; 1632 u32 state; 1633 1634 if ((idc->prev_state == QLC_83XX_IDC_DEV_NEED_RESET) || 1635 (idc->prev_state == QLC_83XX_IDC_DEV_INIT)) { 1636 if (!qlcnic_sriov_vf_reinit_driver(adapter)) { 1637 qlcnic_sriov_vf_attach(adapter); 1638 adapter->fw_fail_cnt = 0; 1639 dev_info(dev, 1640 "%s: Reinitialization of VF 0x%x done after FW reset\n", 1641 __func__, func); 1642 } else { 1643 dev_err(dev, 1644 "%s: Reinitialization of VF 0x%x failed after FW reset\n", 1645 __func__, func); 1646 state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE); 1647 dev_info(dev, "Current state 0x%x after FW reset\n", 1648 state); 1649 } 1650 } 1651 1652 return 0; 1653 } 1654 1655 static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter) 1656 { 1657 struct qlcnic_hardware_context *ahw = adapter->ahw; 1658 struct qlcnic_mailbox *mbx = ahw->mailbox; 1659 struct device *dev = &adapter->pdev->dev; 1660 struct qlc_83xx_idc *idc = &ahw->idc; 1661 u8 func = ahw->pci_func; 1662 u32 state; 1663 1664 adapter->reset_ctx_cnt++; 1665 1666 /* Skip the context reset and check if FW is hung */ 1667 if (adapter->reset_ctx_cnt < 3) { 1668 adapter->need_fw_reset = 1; 1669 clear_bit(QLC_83XX_MBX_READY, &mbx->status); 1670 dev_info(dev, 1671 "Resetting context, wait here to check if FW is in failed state\n"); 1672 return 0; 1673 } 1674 1675 /* Check if number of resets exceed the threshold. 1676 * If it exceeds the threshold just fail the VF. 1677 */ 1678 if (adapter->reset_ctx_cnt > QLC_83XX_VF_RESET_FAIL_THRESH) { 1679 clear_bit(QLC_83XX_MODULE_LOADED, &idc->status); 1680 adapter->tx_timeo_cnt = 0; 1681 adapter->fw_fail_cnt = 0; 1682 adapter->reset_ctx_cnt = 0; 1683 qlcnic_sriov_vf_detach(adapter); 1684 dev_err(dev, 1685 "Device context resets have exceeded the threshold, device interface will be shutdown\n"); 1686 return -EIO; 1687 } 1688 1689 dev_info(dev, "Resetting context of VF 0x%x\n", func); 1690 dev_info(dev, "%s: Context reset count %d for VF 0x%x\n", 1691 __func__, adapter->reset_ctx_cnt, func); 1692 set_bit(__QLCNIC_RESETTING, &adapter->state); 1693 adapter->need_fw_reset = 1; 1694 clear_bit(QLC_83XX_MBX_READY, &mbx->status); 1695 qlcnic_sriov_vf_detach(adapter); 1696 adapter->need_fw_reset = 0; 1697 1698 if (!qlcnic_sriov_vf_reinit_driver(adapter)) { 1699 qlcnic_sriov_vf_attach(adapter); 1700 adapter->tx_timeo_cnt = 0; 1701 adapter->reset_ctx_cnt = 0; 1702 adapter->fw_fail_cnt = 0; 1703 dev_info(dev, "Done resetting context for VF 0x%x\n", func); 1704 } else { 1705 dev_err(dev, "%s: Reinitialization of VF 0x%x failed\n", 1706 __func__, func); 1707 state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE); 1708 dev_info(dev, "%s: Current state 0x%x\n", __func__, state); 1709 } 1710 1711 return 0; 1712 } 1713 1714 static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter *adapter) 1715 { 1716 struct qlcnic_hardware_context *ahw = adapter->ahw; 1717 int ret = 0; 1718 1719 if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY) 1720 ret = qlcnic_sriov_vf_handle_dev_ready(adapter); 1721 else if (ahw->reset_context) 1722 ret = qlcnic_sriov_vf_handle_context_reset(adapter); 1723 1724 clear_bit(__QLCNIC_RESETTING, &adapter->state); 1725 return ret; 1726 } 1727 1728 static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter) 1729 { 1730 struct qlc_83xx_idc *idc = &adapter->ahw->idc; 1731 1732 dev_err(&adapter->pdev->dev, "Device is in failed state\n"); 1733 if (idc->prev_state == QLC_83XX_IDC_DEV_READY) 1734 qlcnic_sriov_vf_detach(adapter); 1735 1736 clear_bit(QLC_83XX_MODULE_LOADED, &idc->status); 1737 clear_bit(__QLCNIC_RESETTING, &adapter->state); 1738 return -EIO; 1739 } 1740 1741 static int 1742 qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter) 1743 { 1744 struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; 1745 struct qlc_83xx_idc *idc = &adapter->ahw->idc; 1746 1747 dev_info(&adapter->pdev->dev, "Device is in quiescent state\n"); 1748 if (idc->prev_state == QLC_83XX_IDC_DEV_READY) { 1749 set_bit(__QLCNIC_RESETTING, &adapter->state); 1750 adapter->tx_timeo_cnt = 0; 1751 adapter->reset_ctx_cnt = 0; 1752 clear_bit(QLC_83XX_MBX_READY, &mbx->status); 1753 qlcnic_sriov_vf_detach(adapter); 1754 } 1755 1756 return 0; 1757 } 1758 1759 static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter) 1760 { 1761 struct qlcnic_mailbox *mbx = adapter->ahw->mailbox; 1762 struct qlc_83xx_idc *idc = &adapter->ahw->idc; 1763 u8 func = adapter->ahw->pci_func; 1764 1765 if (idc->prev_state == QLC_83XX_IDC_DEV_READY) { 1766 dev_err(&adapter->pdev->dev, 1767 "Firmware hang detected by VF 0x%x\n", func); 1768 set_bit(__QLCNIC_RESETTING, &adapter->state); 1769 adapter->tx_timeo_cnt = 0; 1770 adapter->reset_ctx_cnt = 0; 1771 clear_bit(QLC_83XX_MBX_READY, &mbx->status); 1772 qlcnic_sriov_vf_detach(adapter); 1773 } 1774 return 0; 1775 } 1776 1777 static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter) 1778 { 1779 dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n", __func__); 1780 return 0; 1781 } 1782 1783 static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work) 1784 { 1785 struct qlcnic_adapter *adapter; 1786 struct qlc_83xx_idc *idc; 1787 int ret = 0; 1788 1789 adapter = container_of(work, struct qlcnic_adapter, fw_work.work); 1790 idc = &adapter->ahw->idc; 1791 idc->curr_state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE); 1792 1793 switch (idc->curr_state) { 1794 case QLC_83XX_IDC_DEV_READY: 1795 ret = qlcnic_sriov_vf_idc_ready_state(adapter); 1796 break; 1797 case QLC_83XX_IDC_DEV_NEED_RESET: 1798 case QLC_83XX_IDC_DEV_INIT: 1799 ret = qlcnic_sriov_vf_idc_init_reset_state(adapter); 1800 break; 1801 case QLC_83XX_IDC_DEV_NEED_QUISCENT: 1802 ret = qlcnic_sriov_vf_idc_need_quiescent_state(adapter); 1803 break; 1804 case QLC_83XX_IDC_DEV_FAILED: 1805 ret = qlcnic_sriov_vf_idc_failed_state(adapter); 1806 break; 1807 case QLC_83XX_IDC_DEV_QUISCENT: 1808 break; 1809 default: 1810 ret = qlcnic_sriov_vf_idc_unknown_state(adapter); 1811 } 1812 1813 idc->prev_state = idc->curr_state; 1814 if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status)) 1815 qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state, 1816 idc->delay); 1817 } 1818 1819 static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter) 1820 { 1821 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) 1822 msleep(20); 1823 1824 clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); 1825 clear_bit(__QLCNIC_RESETTING, &adapter->state); 1826 cancel_delayed_work_sync(&adapter->fw_work); 1827 } 1828 1829 static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_sriov *sriov, 1830 u16 vid, u8 enable) 1831 { 1832 u16 vlan = sriov->vlan; 1833 u8 allowed = 0; 1834 int i; 1835 1836 if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE) 1837 return -EINVAL; 1838 1839 if (enable) { 1840 if (vlan) 1841 return -EINVAL; 1842 1843 if (sriov->any_vlan) { 1844 for (i = 0; i < sriov->num_allowed_vlans; i++) { 1845 if (sriov->allowed_vlans[i] == vid) 1846 allowed = 1; 1847 } 1848 1849 if (!allowed) 1850 return -EINVAL; 1851 } 1852 } else { 1853 if (!vlan || vlan != vid) 1854 return -EINVAL; 1855 } 1856 1857 return 0; 1858 } 1859 1860 int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter, 1861 u16 vid, u8 enable) 1862 { 1863 struct qlcnic_sriov *sriov = adapter->ahw->sriov; 1864 struct qlcnic_cmd_args cmd; 1865 int ret; 1866 1867 if (vid == 0) 1868 return 0; 1869 1870 ret = qlcnic_sriov_validate_vlan_cfg(sriov, vid, enable); 1871 if (ret) 1872 return ret; 1873 1874 ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, 1875 QLCNIC_BC_CMD_CFG_GUEST_VLAN); 1876 if (ret) 1877 return ret; 1878 1879 cmd.req.arg[1] = (enable & 1) | vid << 16; 1880 1881 qlcnic_sriov_cleanup_async_list(&sriov->bc); 1882 ret = qlcnic_issue_cmd(adapter, &cmd); 1883 if (ret) { 1884 dev_err(&adapter->pdev->dev, 1885 "Failed to configure guest VLAN, err=%d\n", ret); 1886 } else { 1887 qlcnic_free_mac_list(adapter); 1888 1889 if (enable) 1890 sriov->vlan = vid; 1891 else 1892 sriov->vlan = 0; 1893 1894 qlcnic_sriov_vf_set_multi(adapter->netdev); 1895 } 1896 1897 qlcnic_free_mbx_args(&cmd); 1898 return ret; 1899 } 1900 1901 static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter) 1902 { 1903 struct list_head *head = &adapter->mac_list; 1904 struct qlcnic_mac_list_s *cur; 1905 u16 vlan; 1906 1907 vlan = adapter->ahw->sriov->vlan; 1908 1909 while (!list_empty(head)) { 1910 cur = list_entry(head->next, struct qlcnic_mac_list_s, list); 1911 qlcnic_sre_macaddr_change(adapter, cur->mac_addr, 1912 vlan, QLCNIC_MAC_DEL); 1913 list_del(&cur->list); 1914 kfree(cur); 1915 } 1916 } 1917 1918 int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev) 1919 { 1920 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); 1921 struct net_device *netdev = adapter->netdev; 1922 int retval; 1923 1924 netif_device_detach(netdev); 1925 qlcnic_cancel_idc_work(adapter); 1926 1927 if (netif_running(netdev)) 1928 qlcnic_down(adapter, netdev); 1929 1930 qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM); 1931 qlcnic_sriov_cfg_bc_intr(adapter, 0); 1932 qlcnic_83xx_disable_mbx_intr(adapter); 1933 cancel_delayed_work_sync(&adapter->idc_aen_work); 1934 1935 retval = pci_save_state(pdev); 1936 if (retval) 1937 return retval; 1938 1939 return 0; 1940 } 1941 1942 int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter) 1943 { 1944 struct qlc_83xx_idc *idc = &adapter->ahw->idc; 1945 struct net_device *netdev = adapter->netdev; 1946 int err; 1947 1948 set_bit(QLC_83XX_MODULE_LOADED, &idc->status); 1949 qlcnic_83xx_enable_mbx_interrupt(adapter); 1950 err = qlcnic_sriov_cfg_bc_intr(adapter, 1); 1951 if (err) 1952 return err; 1953 1954 err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT); 1955 if (!err) { 1956 if (netif_running(netdev)) { 1957 err = qlcnic_up(adapter, netdev); 1958 if (!err) 1959 qlcnic_restore_indev_addr(netdev, NETDEV_UP); 1960 } 1961 } 1962 1963 netif_device_attach(netdev); 1964 qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state, 1965 idc->delay); 1966 return err; 1967 } 1968