/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include "qlcnic_sriov.h"
#include "qlcnic.h"
#include "qlcnic_83xx_hw.h"
#include <linux/types.h>

#define QLC_BC_COMMAND	0
#define QLC_BC_RESPONSE	1

#define QLC_MBOX_RESP_TIMEOUT		(10 * HZ)
#define QLC_MBOX_CH_FREE_TIMEOUT	(10 * HZ)

#define QLC_BC_MSG		0
#define QLC_BC_CFREE		1
#define QLC_BC_FLR		2
#define QLC_BC_HDR_SZ		16
#define QLC_BC_PAYLOAD_SZ	(1024 - QLC_BC_HDR_SZ)

#define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF		2048
#define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF	512

#define QLC_83XX_VF_RESET_FAIL_THRESH	8
#define QLC_BC_CMD_MAX_RETRY_CNT	5

static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
				  struct qlcnic_cmd_args *);
static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);

static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
	.read_crb = qlcnic_83xx_read_crb,
	.write_crb = qlcnic_83xx_write_crb,
	.read_reg = qlcnic_83xx_rd_reg_indirect,
	.write_reg = qlcnic_83xx_wrt_reg_indirect,
	.get_mac_address = qlcnic_83xx_get_mac_address,
	.setup_intr = qlcnic_83xx_setup_intr,
	.alloc_mbx_args = qlcnic_83xx_alloc_mbx_args,
	.mbx_cmd = qlcnic_sriov_issue_cmd,
	.get_func_no = qlcnic_83xx_get_func_no,
	.api_lock = qlcnic_83xx_cam_lock,
	.api_unlock = qlcnic_83xx_cam_unlock,
	.process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag,
	.create_rx_ctx = qlcnic_83xx_create_rx_ctx,
	.create_tx_ctx = qlcnic_83xx_create_tx_ctx,
	.del_rx_ctx = qlcnic_83xx_del_rx_ctx,
	.del_tx_ctx = qlcnic_83xx_del_tx_ctx,
	.setup_link_event = qlcnic_83xx_setup_link_event,
	.get_nic_info = qlcnic_83xx_get_nic_info,
	.get_pci_info = qlcnic_83xx_get_pci_info,
	.set_nic_info = qlcnic_83xx_set_nic_info,
	.change_macvlan = qlcnic_83xx_sre_macaddr_change,
	.napi_enable = qlcnic_83xx_napi_enable,
	.napi_disable = qlcnic_83xx_napi_disable,
	.config_intr_coal = qlcnic_83xx_config_intr_coal,
	.config_rss = qlcnic_83xx_config_rss,
	.config_hw_lro = qlcnic_83xx_config_hw_lro,
	.config_promisc_mode = qlcnic_83xx_nic_set_promisc,
	.change_l2_filter = qlcnic_83xx_change_l2_filter,
	.get_board_info = qlcnic_83xx_get_port_info,
	.free_mac_list = qlcnic_sriov_vf_free_mac_list,
	.enable_sds_intr = qlcnic_83xx_enable_sds_intr,
	.disable_sds_intr = qlcnic_83xx_disable_sds_intr,
};

static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
	.config_bridged_mode = qlcnic_config_bridged_mode,
	.config_led = qlcnic_config_led,
	.cancel_idc_work = qlcnic_sriov_vf_cancel_fw_work,
	.napi_add = qlcnic_83xx_napi_add,
	.napi_del = qlcnic_83xx_napi_del,
	.shutdown = qlcnic_sriov_vf_shutdown,
	.resume = qlcnic_sriov_vf_resume,
	.config_ipaddr = qlcnic_83xx_config_ipaddr,
	.clear_legacy_intr = qlcnic_83xx_clear_legacy_intr,
};
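
/* Back-channel command descriptors: {opcode, request size, response size},
 * sizes in 32-bit mailbox words, consumed by qlcnic_sriov_alloc_bc_mbx_args().
 */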
static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = {
	{QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2},
	{QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2},
	{QLCNIC_BC_CMD_GET_ACL, 3, 14},
	{QLCNIC_BC_CMD_CFG_GUEST_VLAN, 2, 2},
};

static inline bool qlcnic_sriov_bc_msg_check(u32 val)
{
	return (val & (1 << QLC_BC_MSG)) ? true : false;
}

static inline bool qlcnic_sriov_channel_free_check(u32 val)
{
	return (val & (1 << QLC_BC_CFREE)) ? true : false;
}

static inline bool qlcnic_sriov_flr_check(u32 val)
{
	return (val & (1 << QLC_BC_FLR)) ? true : false;
}

static inline u8 qlcnic_sriov_target_func_id(u32 val)
{
	return (val >> 4) & 0xff;
}

static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
{
	struct pci_dev *dev = adapter->pdev;
	int pos;
	u16 stride, offset;

	if (qlcnic_sriov_vf_check(adapter))
		return 0;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	return (dev->devfn + offset + stride * vf_id) & 0xff;
}

int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
{
	struct qlcnic_sriov *sriov;
	struct qlcnic_back_channel *bc;
	struct workqueue_struct *wq;
	struct qlcnic_vport *vp;
	struct qlcnic_vf_info *vf;
	int err, i;

	if (!qlcnic_sriov_enable_check(adapter))
		return -EIO;

	sriov = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL);
	if (!sriov)
		return -ENOMEM;

	adapter->ahw->sriov = sriov;
	sriov->num_vfs = num_vfs;
	bc = &sriov->bc;
	sriov->vf_info = kcalloc(num_vfs, sizeof(struct qlcnic_vf_info),
				 GFP_KERNEL);
	if (!sriov->vf_info) {
		err = -ENOMEM;
		goto qlcnic_free_sriov;
	}

	wq = create_singlethread_workqueue("bc-trans");
	if (wq == NULL) {
		err = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Cannot create bc-trans workqueue\n");
		goto qlcnic_free_vf_info;
	}

	bc->bc_trans_wq = wq;

	wq = create_singlethread_workqueue("async");
	if (wq == NULL) {
		err = -ENOMEM;
		dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n");
		goto qlcnic_destroy_trans_wq;
	}

	bc->bc_async_wq = wq;
	INIT_LIST_HEAD(&bc->async_list);

	for (i = 0; i < num_vfs; i++) {
		vf = &sriov->vf_info[i];
		vf->adapter = adapter;
		vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
		mutex_init(&vf->send_cmd_lock);
		mutex_init(&vf->vlan_list_lock);
		INIT_LIST_HEAD(&vf->rcv_act.wait_list);
		INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
		spin_lock_init(&vf->rcv_act.lock);
		spin_lock_init(&vf->rcv_pend.lock);
		init_completion(&vf->ch_free_cmpl);

		INIT_WORK(&vf->trans_work, qlcnic_sriov_process_bc_cmd);

		if (qlcnic_sriov_pf_check(adapter)) {
			vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL);
			if (!vp) {
				err = -ENOMEM;
				goto qlcnic_destroy_async_wq;
			}
			sriov->vf_info[i].vp = vp;
			vp->max_tx_bw = MAX_BW;
			vp->spoofchk = true;
			random_ether_addr(vp->mac);
			dev_info(&adapter->pdev->dev,
				 "MAC Address %pM is configured for VF %d\n",
				 vp->mac, i);
		}
	}

	return 0;

qlcnic_destroy_async_wq:
	/* Free any vport structs allocated for earlier VFs before bailing */
	while (i--)
		kfree(sriov->vf_info[i].vp);
	destroy_workqueue(bc->bc_async_wq);

qlcnic_destroy_trans_wq:
	destroy_workqueue(bc->bc_trans_wq);

qlcnic_free_vf_info:
	kfree(sriov->vf_info);

qlcnic_free_sriov:
	kfree(adapter->ahw->sriov);
	return err;
}
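
/* Drain a transaction wait list. The mailbox argument pointers are aimed at
 * the transaction's payload buffers before freeing, so qlcnic_free_mbx_args()
 * releases the payloads while qlcnic_sriov_cleanup_transaction() releases
 * the fragment headers and the transaction itself.
 */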
void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *t_list)
{
	struct qlcnic_bc_trans *trans;
	struct qlcnic_cmd_args cmd;
	unsigned long flags;

	spin_lock_irqsave(&t_list->lock, flags);

	while (!list_empty(&t_list->wait_list)) {
		trans = list_first_entry(&t_list->wait_list,
					 struct qlcnic_bc_trans, list);
		list_del(&trans->list);
		t_list->count--;
		cmd.req.arg = (u32 *)trans->req_pay;
		cmd.rsp.arg = (u32 *)trans->rsp_pay;
		qlcnic_free_mbx_args(&cmd);
		qlcnic_sriov_cleanup_transaction(trans);
	}

	spin_unlock_irqrestore(&t_list->lock, flags);
}

void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_back_channel *bc = &sriov->bc;
	struct qlcnic_vf_info *vf;
	int i;

	if (!qlcnic_sriov_enable_check(adapter))
		return;

	qlcnic_sriov_cleanup_async_list(bc);
	destroy_workqueue(bc->bc_async_wq);

	for (i = 0; i < sriov->num_vfs; i++) {
		vf = &sriov->vf_info[i];
		qlcnic_sriov_cleanup_list(&vf->rcv_pend);
		cancel_work_sync(&vf->trans_work);
		qlcnic_sriov_cleanup_list(&vf->rcv_act);
	}

	destroy_workqueue(bc->bc_trans_wq);

	for (i = 0; i < sriov->num_vfs; i++)
		kfree(sriov->vf_info[i].vp);

	kfree(sriov->vf_info);
	kfree(adapter->ahw->sriov);
}

static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
{
	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
	qlcnic_sriov_cfg_bc_intr(adapter, 0);
	__qlcnic_sriov_cleanup(adapter);
}

void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
	if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
		return;

	qlcnic_sriov_free_vlans(adapter);

	if (qlcnic_sriov_pf_check(adapter))
		qlcnic_sriov_pf_cleanup(adapter);

	if (qlcnic_sriov_vf_check(adapter))
		qlcnic_sriov_vf_cleanup(adapter);
}

static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
				    u32 *pay, u8 pci_func, u8 size)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_mailbox *mbx = ahw->mailbox;
	struct qlcnic_cmd_args cmd;
	unsigned long timeout;
	int err;

	memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
	cmd.hdr = hdr;
	cmd.pay = pay;
	cmd.pay_size = size;
	cmd.func_num = pci_func;
	cmd.op_type = QLC_83XX_MBX_POST_BC_OP;
	cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;

	err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
			__func__, cmd.cmd_op, cmd.type, ahw->pci_func,
			ahw->op_mode);
		return err;
	}

	if (!wait_for_completion_timeout(&cmd.completion, timeout)) {
		dev_err(&adapter->pdev->dev,
			"%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
			__func__, cmd.cmd_op, cmd.type, ahw->pci_func,
			ahw->op_mode);
		flush_workqueue(mbx->work_q);
	}

	return cmd.rsp_opcode;
}

static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
{
	adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF;
	adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
	adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF;
	adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
	adapter->num_txd = MAX_CMD_DESCRIPTORS;
	adapter->max_rds_rings = MAX_RDS_RINGS;
}
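
/* Query vport resource limits from the PF. The low 16 bits of rsp.arg[2]
 * form a validity bitmap: each set bit qualifies one limit below, packed as
 * 16-bit fields (LSW/MSW pairs) in the following response words.
 */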
int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
				   struct qlcnic_info *npar_info, u16 vport_id)
{
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_cmd_args cmd;
	int err;
	u32 status;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
	if (err)
		return err;

	cmd.req.arg[1] = vport_id << 16 | 0x1;
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_err(dev, "Failed to get vport info, err=%d\n", err);
		qlcnic_free_mbx_args(&cmd);
		return err;
	}

	status = cmd.rsp.arg[2] & 0xffff;
	if (status & BIT_0)
		npar_info->min_tx_bw = MSW(cmd.rsp.arg[2]);
	if (status & BIT_1)
		npar_info->max_tx_bw = LSW(cmd.rsp.arg[3]);
	if (status & BIT_2)
		npar_info->max_tx_ques = MSW(cmd.rsp.arg[3]);
	if (status & BIT_3)
		npar_info->max_tx_mac_filters = LSW(cmd.rsp.arg[4]);
	if (status & BIT_4)
		npar_info->max_rx_mcast_mac_filters = MSW(cmd.rsp.arg[4]);
	if (status & BIT_5)
		npar_info->max_rx_ucast_mac_filters = LSW(cmd.rsp.arg[5]);
	if (status & BIT_6)
		npar_info->max_rx_ip_addr = MSW(cmd.rsp.arg[5]);
	if (status & BIT_7)
		npar_info->max_rx_lro_flow = LSW(cmd.rsp.arg[6]);
	if (status & BIT_8)
		npar_info->max_rx_status_rings = MSW(cmd.rsp.arg[6]);
	if (status & BIT_9)
		npar_info->max_rx_buf_rings = LSW(cmd.rsp.arg[7]);

	npar_info->max_rx_ques = MSW(cmd.rsp.arg[7]);
	npar_info->max_tx_vlan_keys = LSW(cmd.rsp.arg[8]);
	npar_info->max_local_ipv6_addrs = MSW(cmd.rsp.arg[8]);
	npar_info->max_remote_ipv6_addrs = LSW(cmd.rsp.arg[9]);

	dev_info(dev, "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n"
		 "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
		 "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
		 "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
		 "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
		 "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n",
		 npar_info->min_tx_bw, npar_info->max_tx_bw,
		 npar_info->max_tx_ques, npar_info->max_tx_mac_filters,
		 npar_info->max_rx_mcast_mac_filters,
		 npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
		 npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
		 npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
		 npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
		 npar_info->max_remote_ipv6_addrs);

	qlcnic_free_mbx_args(&cmd);
	return err;
}

static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
				      struct qlcnic_cmd_args *cmd)
{
	adapter->rx_pvid = MSW(cmd->rsp.arg[1]) & 0xffff;
	adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
	return 0;
}
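
/* Guest VLAN ACL as reported by the PF: rsp.arg[2] carries the any-VLAN
 * flag in its low nibble and the allowed-VLAN count in its upper 16 bits;
 * the allowed VLAN ids follow from rsp.arg[3] on, two 16-bit ids per word.
 */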
static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
					    struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	int i, num_vlans;
	u16 *vlans;

	if (sriov->allowed_vlans)
		return 0;

	sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
	sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
	dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
		 sriov->num_allowed_vlans);

	qlcnic_sriov_alloc_vlans(adapter);

	if (!sriov->any_vlan)
		return 0;

	num_vlans = sriov->num_allowed_vlans;
	sriov->allowed_vlans = kcalloc(num_vlans, sizeof(u16), GFP_KERNEL);
	if (!sriov->allowed_vlans)
		return -ENOMEM;

	vlans = (u16 *)&cmd->rsp.arg[3];
	for (i = 0; i < num_vlans; i++)
		sriov->allowed_vlans[i] = vlans[i];

	return 0;
}

static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_cmd_args cmd;
	int ret = 0;

	ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
	if (ret)
		return ret;

	ret = qlcnic_issue_cmd(adapter, &cmd);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Failed to get ACL, err=%d\n",
			ret);
	} else {
		sriov->vlan_mode = cmd.rsp.arg[1] & 0x3;
		switch (sriov->vlan_mode) {
		case QLC_GUEST_VLAN_MODE:
			ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd);
			break;
		case QLC_PVID_MODE:
			ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd);
			break;
		}
	}

	qlcnic_free_mbx_args(&cmd);
	return ret;
}

static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_info nic_info;
	int err;

	err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
	if (err)
		return err;

	ahw->max_mc_count = nic_info.max_rx_mcast_mac_filters;

	err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
	if (err)
		return -EIO;

	if (qlcnic_83xx_get_port_info(adapter))
		return -EIO;

	qlcnic_sriov_vf_cfg_buff_desc(adapter);
	adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
	dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
		 adapter->ahw->fw_hal_version);

	ahw->physical_port = (u8) nic_info.phys_port;
	ahw->switch_mode = nic_info.switch_mode;
	ahw->max_mtu = nic_info.max_mtu;
	ahw->op_mode = nic_info.op_mode;
	ahw->capabilities = nic_info.capabilities;
	return 0;
}

static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
				 int pci_using_dac)
{
	int err;

	INIT_LIST_HEAD(&adapter->vf_mc_list);
	if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
		dev_warn(&adapter->pdev->dev,
			 "Device does not support MSI interrupts\n");

	/* compute and set default and max tx/sds rings */
	qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
	qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);

	err = qlcnic_setup_intr(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
		goto err_out_disable_msi;
	}

	err = qlcnic_83xx_setup_mbx_intr(adapter);
	if (err)
		goto err_out_disable_msi;

	err = qlcnic_sriov_init(adapter, 1);
	if (err)
		goto err_out_disable_mbx_intr;

	err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
	if (err)
		goto err_out_cleanup_sriov;

	err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
	if (err)
		goto err_out_disable_bc_intr;

	err = qlcnic_sriov_vf_init_driver(adapter);
	if (err)
		goto err_out_send_channel_term;

	err = qlcnic_sriov_get_vf_acl(adapter);
	if (err)
		goto err_out_send_channel_term;

	err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
	if (err)
		goto err_out_send_channel_term;

	pci_set_drvdata(adapter->pdev, adapter);
	dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
		 adapter->netdev->name);

	qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
			     adapter->ahw->idc.delay);
	return 0;

err_out_send_channel_term:
	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);

err_out_disable_bc_intr:
	qlcnic_sriov_cfg_bc_intr(adapter, 0);

err_out_cleanup_sriov:
	__qlcnic_sriov_cleanup(adapter);

err_out_disable_mbx_intr:
	qlcnic_83xx_free_mbx_intr(adapter);

err_out_disable_msi:
	qlcnic_teardown_intr(adapter);
	return err;
}
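
/* Poll the IDC device state every 20 ms until firmware reports READY.
 * With QLC_BC_CMD_MAX_RETRY_CNT of 5 this gives up after roughly 100 ms.
 */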
static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter)
{
	u32 state;

	do {
		msleep(20);
		if (++adapter->fw_fail_cnt > QLC_BC_CMD_MAX_RETRY_CNT)
			return -EIO;
		state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
	} while (state != QLC_83XX_IDC_DEV_READY);

	return 0;
}

int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	int err;

	set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
	ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
	ahw->reset_context = 0;
	adapter->fw_fail_cnt = 0;
	ahw->msix_supported = 1;
	adapter->need_fw_reset = 0;
	adapter->flags |= QLCNIC_TX_INTR_SHARED;

	err = qlcnic_sriov_check_dev_ready(adapter);
	if (err)
		return err;

	err = qlcnic_sriov_setup_vf(adapter, pci_using_dac);
	if (err)
		return err;

	if (qlcnic_read_mac_addr(adapter))
		dev_warn(&adapter->pdev->dev, "failed to read mac addr\n");

	INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return 0;
}

void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;

	ahw->op_mode = QLCNIC_SRIOV_VF_FUNC;
	dev_info(&adapter->pdev->dev,
		 "HAL Version: %d Non Privileged SRIOV function\n",
		 ahw->fw_hal_version);
	adapter->nic_ops = &qlcnic_sriov_vf_ops;
	set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
}

void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *ahw)
{
	ahw->hw_ops = &qlcnic_sriov_vf_hw_ops;
	ahw->reg_tbl = (u32 *)qlcnic_83xx_reg_tbl;
	ahw->ext_reg_tbl = (u32 *)qlcnic_83xx_ext_reg_tbl;
}
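
/* Payload size of one back-channel fragment. With QLC_BC_PAYLOAD_SZ of
 * 1008 bytes (1024 minus the 16-byte header), a 2100-byte payload goes out
 * as fragments of 1008, 1008 and 84 bytes: the division below is non-zero
 * for every fragment except the last, which carries the remainder.
 */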
static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag)
{
	u32 pay_size;

	pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ);

	if (pay_size)
		pay_size = QLC_BC_PAYLOAD_SZ;
	else
		pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ;

	return pay_size;
}

int qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func)
{
	struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info;
	u8 i;

	if (qlcnic_sriov_vf_check(adapter))
		return 0;

	for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) {
		if (vf_info[i].pci_func == pci_func)
			return i;
	}

	return -EINVAL;
}

static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans)
{
	*trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC);
	if (!*trans)
		return -ENOMEM;

	init_completion(&(*trans)->resp_cmpl);
	return 0;
}

static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr,
					    u32 size)
{
	*hdr = kcalloc(size, sizeof(struct qlcnic_bc_hdr), GFP_ATOMIC);
	if (!*hdr)
		return -ENOMEM;

	return 0;
}

static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
{
	const struct qlcnic_mailbox_metadata *mbx_tbl;
	int i, size;

	mbx_tbl = qlcnic_sriov_bc_mbx_tbl;
	size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl);

	for (i = 0; i < size; i++) {
		if (type == mbx_tbl[i].cmd) {
			mbx->op_type = QLC_BC_CMD;
			mbx->req.num = mbx_tbl[i].in_args;
			mbx->rsp.num = mbx_tbl[i].out_args;
			mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
					       GFP_ATOMIC);
			if (!mbx->req.arg)
				return -ENOMEM;
			mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
					       GFP_ATOMIC);
			if (!mbx->rsp.arg) {
				kfree(mbx->req.arg);
				mbx->req.arg = NULL;
				return -ENOMEM;
			}
			mbx->req.arg[0] = (type | (mbx->req.num << 16) |
					   (3 << 29));
			mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
			return 0;
		}
	}

	return -EINVAL;
}
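
/* Build the per-fragment headers for one transaction. A payload occupies
 * four bytes per mailbox word; the fragment count is that size divided by
 * QLC_BC_PAYLOAD_SZ, rounded up. Each fragment carries a full qlcnic_bc_hdr
 * with a 1-based frag_num and the sequence id used to pair responses with
 * requests.
 */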
static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
				       struct qlcnic_cmd_args *cmd,
				       u16 seq, u8 msg_type)
{
	struct qlcnic_bc_hdr *hdr;
	int i;
	u32 num_regs, bc_pay_sz;
	u16 remainder;
	u8 cmd_op, num_frags, t_num_frags;

	bc_pay_sz = QLC_BC_PAYLOAD_SZ;
	if (msg_type == QLC_BC_COMMAND) {
		trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg;
		trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg;
		num_regs = cmd->req.num;
		trans->req_pay_size = (num_regs * 4);
		num_regs = cmd->rsp.num;
		trans->rsp_pay_size = (num_regs * 4);
		cmd_op = cmd->req.arg[0] & 0xff;
		remainder = (trans->req_pay_size) % (bc_pay_sz);
		num_frags = (trans->req_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		t_num_frags = num_frags;
		if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags))
			return -ENOMEM;
		remainder = (trans->rsp_pay_size) % (bc_pay_sz);
		num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags))
			return -ENOMEM;
		num_frags = t_num_frags;
		hdr = trans->req_hdr;
	} else {
		cmd->req.arg = (u32 *)trans->req_pay;
		cmd->rsp.arg = (u32 *)trans->rsp_pay;
		cmd_op = cmd->req.arg[0] & 0xff;
		remainder = (trans->rsp_pay_size) % (bc_pay_sz);
		num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		cmd->req.num = trans->req_pay_size / 4;
		cmd->rsp.num = trans->rsp_pay_size / 4;
		hdr = trans->rsp_hdr;
		cmd->op_type = trans->req_hdr->op_type;
	}

	trans->trans_id = seq;
	trans->cmd_id = cmd_op;
	for (i = 0; i < num_frags; i++) {
		hdr[i].version = 2;
		hdr[i].msg_type = msg_type;
		hdr[i].op_type = cmd->op_type;
		hdr[i].num_cmds = 1;
		hdr[i].num_frags = num_frags;
		hdr[i].frag_num = i + 1;
		hdr[i].cmd_op = cmd_op;
		hdr[i].seq_id = seq;
	}
	return 0;
}

static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans)
{
	if (!trans)
		return;
	kfree(trans->req_hdr);
	kfree(trans->rsp_hdr);
	kfree(trans);
}

static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf,
				    struct qlcnic_bc_trans *trans, u8 type)
{
	struct qlcnic_trans_list *t_list;
	unsigned long flags;
	int ret = 0;

	if (type == QLC_BC_RESPONSE) {
		t_list = &vf->rcv_act;
		spin_lock_irqsave(&t_list->lock, flags);
		t_list->count--;
		list_del(&trans->list);
		if (t_list->count > 0)
			ret = 1;
		spin_unlock_irqrestore(&t_list->lock, flags);
	}
	if (type == QLC_BC_COMMAND) {
		while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
			msleep(100);
		vf->send_cmd = NULL;
		clear_bit(QLC_BC_VF_SEND, &vf->state);
	}
	return ret;
}

static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,
					 struct qlcnic_vf_info *vf,
					 work_func_t func)
{
	if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
	    vf->adapter->need_fw_reset)
		return;

	queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
}

static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans)
{
	struct completion *cmpl = &trans->resp_cmpl;

	if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT))
		trans->trans_state = QLC_END;
	else
		trans->trans_state = QLC_ABORT;
}

static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans,
					    u8 type)
{
	if (type == QLC_BC_RESPONSE) {
		trans->curr_rsp_frag++;
		if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
			trans->trans_state = QLC_INIT;
		else
			trans->trans_state = QLC_END;
	} else {
		trans->curr_req_frag++;
		if (trans->curr_req_frag < trans->req_hdr->num_frags)
			trans->trans_state = QLC_INIT;
		else
			trans->trans_state = QLC_WAIT_FOR_RESP;
	}
}

static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans,
					       u8 type)
{
	struct qlcnic_vf_info *vf = trans->vf;
	struct completion *cmpl = &vf->ch_free_cmpl;

	if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) {
		trans->trans_state = QLC_ABORT;
		return;
	}

	clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
	qlcnic_sriov_handle_multi_frags(trans, type);
}

static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter,
				     u32 *hdr, u32 *pay, u32 size)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	u32 fw_mbx;
	u8 i, max = 2, hdr_size, j;

	hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
	max = (size / sizeof(u32)) + hdr_size;

	fw_mbx = readl(QLCNIC_MBX_FW(ahw, 0));
	for (i = 2, j = 0; j < hdr_size; i++, j++)
		*(hdr++) = readl(QLCNIC_MBX_FW(ahw, i));
	for (; j < max; i++, j++)
		*(pay++) = readl(QLCNIC_MBX_FW(ahw, i));
}
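
/* Posting one fragment is a two-step sequence: win ownership of the back
 * channel by setting the QLC_BC_VF_CHANNEL bit (busy-waiting up to
 * 10000 x 1 ms), then hand the fragment's header and payload words to the
 * mailbox layer through qlcnic_sriov_post_bc_msg().
 */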
static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf)
{
	int ret = -EBUSY;
	u32 timeout = 10000;

	do {
		if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) {
			ret = 0;
			break;
		}
		mdelay(1);
	} while (--timeout);

	return ret;
}

static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type)
{
	struct qlcnic_vf_info *vf = trans->vf;
	u32 pay_size, hdr_size;
	u32 *hdr, *pay;
	int ret;
	u8 pci_func = trans->func_id;

	if (__qlcnic_sriov_issue_bc_post(vf))
		return -EBUSY;

	if (type == QLC_BC_COMMAND) {
		hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag);
		pay = (u32 *)(trans->req_pay + trans->curr_req_frag);
		hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
		pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
						       trans->curr_req_frag);
		pay_size = (pay_size / sizeof(u32));
	} else {
		hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag);
		pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag);
		hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
		pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
						       trans->curr_rsp_frag);
		pay_size = (pay_size / sizeof(u32));
	}

	ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay,
				       pci_func, pay_size);
	return ret;
}

static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans,
				      struct qlcnic_vf_info *vf, u8 type)
{
	bool flag = true;
	int err = -EIO;

	while (flag) {
		if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
		    vf->adapter->need_fw_reset)
			trans->trans_state = QLC_ABORT;

		switch (trans->trans_state) {
		case QLC_INIT:
			trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE;
			if (qlcnic_sriov_issue_bc_post(trans, type))
				trans->trans_state = QLC_ABORT;
			break;
		case QLC_WAIT_FOR_CHANNEL_FREE:
			qlcnic_sriov_wait_for_channel_free(trans, type);
			break;
		case QLC_WAIT_FOR_RESP:
			qlcnic_sriov_wait_for_resp(trans);
			break;
		case QLC_END:
			err = 0;
			flag = false;
			break;
		case QLC_ABORT:
			err = -EIO;
			flag = false;
			clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
			break;
		default:
			err = -EIO;
			flag = false;
		}
	}
	return err;
}

static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter,
				    struct qlcnic_bc_trans *trans, int pci_func)
{
	struct qlcnic_vf_info *vf;
	int err, index = qlcnic_sriov_func_to_index(adapter, pci_func);

	if (index < 0)
		return -EIO;

	vf = &adapter->ahw->sriov->vf_info[index];
	trans->vf = vf;
	trans->func_id = pci_func;

	if (!test_bit(QLC_BC_VF_STATE, &vf->state)) {
		if (qlcnic_sriov_pf_check(adapter))
			return -EIO;
		if (qlcnic_sriov_vf_check(adapter) &&
		    trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT)
			return -EIO;
	}

	mutex_lock(&vf->send_cmd_lock);
	vf->send_cmd = trans;
	err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND);
	qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND);
	mutex_unlock(&vf->send_cmd_lock);
	return err;
}

static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter,
					  struct qlcnic_bc_trans *trans,
					  struct qlcnic_cmd_args *cmd)
{
#ifdef CONFIG_QLCNIC_SRIOV
	if (qlcnic_sriov_pf_check(adapter)) {
		qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd);
		return;
	}
#endif
	cmd->rsp.arg[0] |= (0x9 << 25);
}

static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
{
	struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info,
						 trans_work);
	struct qlcnic_bc_trans *trans = NULL;
	struct qlcnic_adapter *adapter = vf->adapter;
	struct qlcnic_cmd_args cmd;
	u8 req;

	if (adapter->need_fw_reset)
		return;

	if (test_bit(QLC_BC_VF_FLR, &vf->state))
		return;

	memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
	trans = list_first_entry(&vf->rcv_act.wait_list,
				 struct qlcnic_bc_trans, list);

	if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id,
					QLC_BC_RESPONSE))
		goto cleanup_trans;

	__qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd);
	trans->trans_state = QLC_INIT;
	__qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE);

cleanup_trans:
	qlcnic_free_mbx_args(&cmd);
	req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE);
	qlcnic_sriov_cleanup_transaction(trans);
	if (req)
		qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf,
					     qlcnic_sriov_process_bc_cmd);
}
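
/* Response event handler. The QLC_BC_VF_SEND bit serializes this path with
 * the sending context; a sequence id mismatch discards the event. One
 * fragment is pulled per event, and resp_cmpl completes once the last
 * fragment of the response has arrived.
 */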
static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr,
					struct qlcnic_vf_info *vf)
{
	struct qlcnic_bc_trans *trans;
	u32 pay_size;

	if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
		return;

	trans = vf->send_cmd;

	if (trans == NULL)
		goto clear_send;

	if (trans->trans_id != hdr->seq_id)
		goto clear_send;

	pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
					       trans->curr_rsp_frag);
	qlcnic_sriov_pull_bc_msg(vf->adapter,
				 (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag),
				 (u32 *)(trans->rsp_pay + trans->curr_rsp_frag),
				 pay_size);
	if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
		goto clear_send;

	complete(&trans->resp_cmpl);

clear_send:
	clear_bit(QLC_BC_VF_SEND, &vf->state);
}

int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
				struct qlcnic_vf_info *vf,
				struct qlcnic_bc_trans *trans)
{
	struct qlcnic_trans_list *t_list = &vf->rcv_act;

	t_list->count++;
	list_add_tail(&trans->list, &t_list->wait_list);
	if (t_list->count == 1)
		qlcnic_sriov_schedule_bc_cmd(sriov, vf,
					     qlcnic_sriov_process_bc_cmd);
	return 0;
}

static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
				     struct qlcnic_vf_info *vf,
				     struct qlcnic_bc_trans *trans)
{
	struct qlcnic_trans_list *t_list = &vf->rcv_act;

	spin_lock(&t_list->lock);

	__qlcnic_sriov_add_act_list(sriov, vf, trans);

	spin_unlock(&t_list->lock);
	return 0;
}

static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov,
					      struct qlcnic_vf_info *vf,
					      struct qlcnic_bc_hdr *hdr)
{
	struct qlcnic_bc_trans *trans = NULL;
	struct list_head *node;
	u32 pay_size, curr_frag;
	u8 found = 0, active = 0;

	spin_lock(&vf->rcv_pend.lock);
	if (vf->rcv_pend.count > 0) {
		list_for_each(node, &vf->rcv_pend.wait_list) {
			trans = list_entry(node, struct qlcnic_bc_trans, list);
			if (trans->trans_id == hdr->seq_id) {
				found = 1;
				break;
			}
		}
	}

	if (found) {
		curr_frag = trans->curr_req_frag;
		pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
						       curr_frag);
		qlcnic_sriov_pull_bc_msg(vf->adapter,
					 (u32 *)(trans->req_hdr + curr_frag),
					 (u32 *)(trans->req_pay + curr_frag),
					 pay_size);
		trans->curr_req_frag++;
		if (trans->curr_req_frag >= hdr->num_frags) {
			vf->rcv_pend.count--;
			list_del(&trans->list);
			active = 1;
		}
	}
	spin_unlock(&vf->rcv_pend.lock);

	if (active)
		if (qlcnic_sriov_add_act_list(sriov, vf, trans))
			qlcnic_sriov_cleanup_transaction(trans);
}
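
/* Incoming command path. The first fragment of a message allocates a fresh
 * transaction; fragments with frag_num > 1 are matched against rcv_pend by
 * sequence id. Once every fragment has arrived, the transaction moves to
 * rcv_act and the per-VF worker is scheduled to produce the response.
 */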
static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
				       struct qlcnic_bc_hdr *hdr,
				       struct qlcnic_vf_info *vf)
{
	struct qlcnic_bc_trans *trans;
	struct qlcnic_adapter *adapter = vf->adapter;
	struct qlcnic_cmd_args cmd;
	u32 pay_size;
	int err;
	u8 cmd_op;

	if (adapter->need_fw_reset)
		return;

	if (!test_bit(QLC_BC_VF_STATE, &vf->state) &&
	    hdr->op_type != QLC_BC_CMD &&
	    hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT)
		return;

	if (hdr->frag_num > 1) {
		qlcnic_sriov_handle_pending_trans(sriov, vf, hdr);
		return;
	}

	memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
	cmd_op = hdr->cmd_op;
	if (qlcnic_sriov_alloc_bc_trans(&trans))
		return;

	if (hdr->op_type == QLC_BC_CMD)
		err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op);
	else
		err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op);

	if (err) {
		qlcnic_sriov_cleanup_transaction(trans);
		return;
	}

	cmd.op_type = hdr->op_type;
	if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id,
					QLC_BC_COMMAND)) {
		qlcnic_free_mbx_args(&cmd);
		qlcnic_sriov_cleanup_transaction(trans);
		return;
	}

	pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
					       trans->curr_req_frag);
	qlcnic_sriov_pull_bc_msg(vf->adapter,
				 (u32 *)(trans->req_hdr + trans->curr_req_frag),
				 (u32 *)(trans->req_pay + trans->curr_req_frag),
				 pay_size);
	trans->func_id = vf->pci_func;
	trans->vf = vf;
	trans->trans_id = hdr->seq_id;
	trans->curr_req_frag++;

	if (qlcnic_sriov_soft_flr_check(adapter, trans, vf))
		return;

	if (trans->curr_req_frag == trans->req_hdr->num_frags) {
		if (qlcnic_sriov_add_act_list(sriov, vf, trans)) {
			qlcnic_free_mbx_args(&cmd);
			qlcnic_sriov_cleanup_transaction(trans);
		}
	} else {
		spin_lock(&vf->rcv_pend.lock);
		list_add_tail(&trans->list, &vf->rcv_pend.wait_list);
		vf->rcv_pend.count++;
		spin_unlock(&vf->rcv_pend.lock);
	}
}

static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov,
					  struct qlcnic_vf_info *vf)
{
	struct qlcnic_bc_hdr hdr;
	u32 *ptr = (u32 *)&hdr;
	u8 msg_type, i;

	for (i = 2; i < 6; i++)
		ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i));
	msg_type = hdr.msg_type;

	switch (msg_type) {
	case QLC_BC_COMMAND:
		qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf);
		break;
	case QLC_BC_RESPONSE:
		qlcnic_sriov_handle_bc_resp(&hdr, vf);
		break;
	}
}

static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov,
					  struct qlcnic_vf_info *vf)
{
	struct qlcnic_adapter *adapter = vf->adapter;

	if (qlcnic_sriov_pf_check(adapter))
		qlcnic_sriov_pf_handle_flr(sriov, vf);
	else
		dev_err(&adapter->pdev->dev,
			"Invalid event to VF. VF should not get FLR event\n");
}
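
/* Decode one back-channel event word: bit 0 flags a message, bit 1 a
 * channel-free notification, bit 2 an FLR, and bits 4-11 carry the source
 * PCI function (see the qlcnic_sriov_*_check() helpers above).
 */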
void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event)
{
	struct qlcnic_vf_info *vf;
	struct qlcnic_sriov *sriov;
	int index;
	u8 pci_func;

	sriov = adapter->ahw->sriov;
	pci_func = qlcnic_sriov_target_func_id(event);
	index = qlcnic_sriov_func_to_index(adapter, pci_func);

	if (index < 0)
		return;

	vf = &sriov->vf_info[index];
	vf->pci_func = pci_func;

	if (qlcnic_sriov_channel_free_check(event))
		complete(&vf->ch_free_cmpl);

	if (qlcnic_sriov_flr_check(event)) {
		qlcnic_sriov_handle_flr_event(sriov, vf);
		return;
	}

	if (qlcnic_sriov_bc_msg_check(event))
		qlcnic_sriov_handle_msg_event(sriov, vf);
}

int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
{
	struct qlcnic_cmd_args cmd;
	int err;

	if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
		return 0;

	if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP))
		return -ENOMEM;

	if (enable)
		cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);

	err = qlcnic_83xx_issue_cmd(adapter, &cmd);

	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to %s bc events, err=%d\n",
			(enable ? "enable" : "disable"), err);
	}

	qlcnic_free_mbx_args(&cmd);
	return err;
}

static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
				     struct qlcnic_bc_trans *trans)
{
	u8 max = QLC_BC_CMD_MAX_RETRY_CNT;
	u32 state;

	state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
	if (state == QLC_83XX_IDC_DEV_READY) {
		msleep(20);
		clear_bit(QLC_BC_VF_CHANNEL, &trans->vf->state);
		trans->trans_state = QLC_INIT;
		if (++adapter->fw_fail_cnt > max)
			return -EIO;
		else
			return 0;
	}

	return -EIO;
}
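
/* VF mailbox entry point (hw_ops->mbx_cmd). Every command the VF issues is
 * wrapped in a back-channel transaction to the PF. CHANNEL_INIT commands are
 * retried after an adapter reset, since the PF driver may take a while to
 * respond again; a timeout marks the mailbox not ready and requests a
 * firmware reset.
 */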
static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
				  struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_mailbox *mbx = ahw->mailbox;
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_bc_trans *trans;
	int err;
	u32 rsp_data, opcode, mbx_err_code, rsp;
	u16 seq = ++adapter->ahw->sriov->bc.trans_counter;
	u8 func = ahw->pci_func;

	rsp = qlcnic_sriov_alloc_bc_trans(&trans);
	if (rsp)
		return rsp;

	rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
	if (rsp)
		goto cleanup_transaction;

retry:
	if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
		rsp = -EIO;
		QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
		      QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
		goto err_out;
	}

	err = qlcnic_sriov_send_bc_cmd(adapter, trans, func);
	if (err) {
		dev_err(dev, "MBX command 0x%x timed out for VF %d\n",
			(cmd->req.arg[0] & 0xffff), func);
		rsp = QLCNIC_RCODE_TIMEOUT;

		/* After an adapter reset the PF driver may take some time to
		 * respond to the VF's request, so retry the request up to the
		 * maximum retry count.
		 */
		if ((trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
		    !qlcnic_sriov_retry_bc_cmd(adapter, trans))
			goto retry;

		goto err_out;
	}

	rsp_data = cmd->rsp.arg[0];
	mbx_err_code = QLCNIC_MBX_STATUS(rsp_data);
	opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]);

	if ((mbx_err_code == QLCNIC_MBX_RSP_OK) ||
	    (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
		rsp = QLCNIC_RCODE_SUCCESS;
	} else {
		rsp = mbx_err_code;
		if (!rsp)
			rsp = 1;
		dev_err(dev,
			"MBX command 0x%x failed with err:0x%x for VF %d\n",
			opcode, mbx_err_code, func);
	}

err_out:
	if (rsp == QLCNIC_RCODE_TIMEOUT) {
		ahw->reset_context = 1;
		adapter->need_fw_reset = 1;
		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
	}

cleanup_transaction:
	qlcnic_sriov_cleanup_transaction(trans);
	return rsp;
}

static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter,
					u8 cmd_op)
{
	struct qlcnic_cmd_args cmd;
	struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
	int ret;

	if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
		return -ENOMEM;

	ret = qlcnic_issue_cmd(adapter, &cmd);
	if (ret) {
		dev_err(&adapter->pdev->dev,
			"Failed bc channel %s %d\n", cmd_op ? "term" : "init",
			ret);
		goto out;
	}

	cmd_op = (cmd.rsp.arg[0] & 0xff);
	if (cmd.rsp.arg[0] >> 25 == 2) {
		/* Free the mailbox args on this path too */
		ret = 2;
		goto out;
	}

	if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
		set_bit(QLC_BC_VF_STATE, &vf->state);
	else
		clear_bit(QLC_BC_VF_STATE, &vf->state);

out:
	qlcnic_free_mbx_args(&cmd);
	return ret;
}
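
/* Rebuild multicast filters: splice the VF's pending MAC list off under the
 * netdev address lock, then add the broadcast and each multicast address
 * for VLAN 0 or, when guest VLANs are active, for every configured VLAN id.
 */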
static void qlcnic_vf_add_mc_list(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_mac_vlan_list *cur;
	struct list_head *head, tmp_list;
	struct qlcnic_vf_info *vf;
	u16 vlan_id;
	int i;

	static const u8 bcast_addr[ETH_ALEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};

	vf = &adapter->ahw->sriov->vf_info[0];
	INIT_LIST_HEAD(&tmp_list);
	head = &adapter->vf_mc_list;
	netif_addr_lock_bh(netdev);

	while (!list_empty(head)) {
		cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
		list_move(&cur->list, &tmp_list);
	}

	netif_addr_unlock_bh(netdev);

	while (!list_empty(&tmp_list)) {
		cur = list_entry((&tmp_list)->next,
				 struct qlcnic_mac_vlan_list, list);
		if (!qlcnic_sriov_check_any_vlan(vf)) {
			qlcnic_nic_add_mac(adapter, bcast_addr, 0);
			qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
		} else {
			mutex_lock(&vf->vlan_list_lock);
			for (i = 0; i < sriov->num_allowed_vlans; i++) {
				vlan_id = vf->sriov_vlans[i];
				if (vlan_id) {
					qlcnic_nic_add_mac(adapter, bcast_addr,
							   vlan_id);
					qlcnic_nic_add_mac(adapter,
							   cur->mac_addr,
							   vlan_id);
				}
			}
			mutex_unlock(&vf->vlan_list_lock);
			if (qlcnic_84xx_check(adapter)) {
				qlcnic_nic_add_mac(adapter, bcast_addr, 0);
				qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
			}
		}
		list_del(&cur->list);
		kfree(cur);
	}
}

void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
{
	struct list_head *head = &bc->async_list;
	struct qlcnic_async_work_list *entry;

	while (!list_empty(head)) {
		entry = list_entry(head->next,
				   struct qlcnic_async_work_list, list);
		cancel_work_sync(&entry->work);
		list_del(&entry->list);
		kfree(entry);
	}
}

static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	u32 mode = VPORT_MISS_MODE_DROP;

	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
		return;

	if (netdev->flags & IFF_PROMISC) {
		if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
			mode = VPORT_MISS_MODE_ACCEPT_ALL;
	} else if ((netdev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(netdev) > ahw->max_mc_count)) {
		mode = VPORT_MISS_MODE_ACCEPT_MULTI;
	}

	if (qlcnic_sriov_vf_check(adapter))
		qlcnic_vf_add_mc_list(netdev);

	qlcnic_nic_set_promisc(adapter, mode);
}

static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
{
	struct qlcnic_async_work_list *entry;
	struct net_device *netdev;

	entry = container_of(work, struct qlcnic_async_work_list, work);
	netdev = (struct net_device *)entry->ptr;

	qlcnic_sriov_vf_set_multi(netdev);
}
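
/* Work entries on the async list are recycled: scan for one whose work item
 * is no longer pending and reuse it, allocating a new entry only when every
 * existing one is still queued.
 */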
static struct qlcnic_async_work_list *
qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
{
	struct list_head *node;
	struct qlcnic_async_work_list *entry = NULL;
	u8 empty = 0;

	list_for_each(node, &bc->async_list) {
		entry = list_entry(node, struct qlcnic_async_work_list, list);
		if (!work_pending(&entry->work)) {
			empty = 1;
			break;
		}
	}

	if (!empty) {
		entry = kzalloc(sizeof(struct qlcnic_async_work_list),
				GFP_ATOMIC);
		if (entry == NULL)
			return NULL;
		list_add_tail(&entry->list, &bc->async_list);
	}

	return entry;
}

static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
						work_func_t func, void *data)
{
	struct qlcnic_async_work_list *entry = NULL;

	entry = qlcnic_sriov_get_free_node_async_work(bc);
	if (!entry)
		return;

	entry->ptr = data;
	INIT_WORK(&entry->work, func);
	queue_work(bc->bc_async_wq, &entry->work);
}

void qlcnic_sriov_vf_schedule_multi(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;

	if (adapter->need_fw_reset)
		return;

	qlcnic_sriov_schedule_bc_async_work(bc, qlcnic_sriov_handle_async_multi,
					    netdev);
}

static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
{
	int err;

	adapter->need_fw_reset = 0;
	qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
	qlcnic_83xx_enable_mbx_interrupt(adapter);

	err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
	if (err)
		return err;

	err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
	if (err)
		goto err_out_cleanup_bc_intr;

	err = qlcnic_sriov_vf_init_driver(adapter);
	if (err)
		goto err_out_term_channel;

	return 0;

err_out_term_channel:
	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);

err_out_cleanup_bc_intr:
	qlcnic_sriov_cfg_bc_intr(adapter, 0);
	return err;
}

static void qlcnic_sriov_vf_attach(struct qlcnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (netif_running(netdev)) {
		if (!qlcnic_up(adapter, netdev))
			qlcnic_restore_indev_addr(netdev, NETDEV_UP);
	}

	netif_device_attach(netdev);
}

static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_intrpt_config *intr_tbl = ahw->intr_tbl;
	struct net_device *netdev = adapter->netdev;
	u8 i, max_ints = ahw->num_msix - 1;

	netif_device_detach(netdev);
	qlcnic_83xx_detach_mailbox_work(adapter);
	qlcnic_83xx_disable_mbx_intr(adapter);

	if (netif_running(netdev))
		qlcnic_down(adapter, netdev);

	for (i = 0; i < max_ints; i++) {
		intr_tbl[i].id = i;
		intr_tbl[i].enabled = 0;
		intr_tbl[i].src = 0;
	}
	ahw->reset_context = 0;
}

static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct device *dev = &adapter->pdev->dev;
	struct qlc_83xx_idc *idc = &ahw->idc;
	u8 func = ahw->pci_func;
	u32 state;

	if ((idc->prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
	    (idc->prev_state == QLC_83XX_IDC_DEV_INIT)) {
		if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
			qlcnic_sriov_vf_attach(adapter);
			adapter->fw_fail_cnt = 0;
			dev_info(dev,
				 "%s: Reinitialization of VF 0x%x done after FW reset\n",
				 __func__, func);
		} else {
			dev_err(dev,
				"%s: Reinitialization of VF 0x%x failed after FW reset\n",
				__func__, func);
			state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
			dev_info(dev, "Current state 0x%x after FW reset\n",
				 state);
		}
	}

	return 0;
}
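
/* Context reset escalation. The first two attempts only flag a firmware
 * reset and wait to see whether firmware is really hung; once the count
 * passes QLC_83XX_VF_RESET_FAIL_THRESH (8) the interface is shut down for
 * good.
 */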
static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_mailbox *mbx = ahw->mailbox;
	struct device *dev = &adapter->pdev->dev;
	struct qlc_83xx_idc *idc = &ahw->idc;
	u8 func = ahw->pci_func;
	u32 state;

	adapter->reset_ctx_cnt++;

	/* Skip the context reset and check if FW is hung */
	if (adapter->reset_ctx_cnt < 3) {
		adapter->need_fw_reset = 1;
		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
		dev_info(dev,
			 "Resetting context, wait here to check if FW is in failed state\n");
		return 0;
	}

	/* If the number of resets exceeds the threshold, just fail the VF. */
	if (adapter->reset_ctx_cnt > QLC_83XX_VF_RESET_FAIL_THRESH) {
		clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
		adapter->tx_timeo_cnt = 0;
		adapter->fw_fail_cnt = 0;
		adapter->reset_ctx_cnt = 0;
		qlcnic_sriov_vf_detach(adapter);
		dev_err(dev,
			"Device context resets have exceeded the threshold, device interface will be shutdown\n");
		return -EIO;
	}

	dev_info(dev, "Resetting context of VF 0x%x\n", func);
	dev_info(dev, "%s: Context reset count %d for VF 0x%x\n",
		 __func__, adapter->reset_ctx_cnt, func);
	set_bit(__QLCNIC_RESETTING, &adapter->state);
	adapter->need_fw_reset = 1;
	clear_bit(QLC_83XX_MBX_READY, &mbx->status);
	qlcnic_sriov_vf_detach(adapter);
	adapter->need_fw_reset = 0;

	if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
		qlcnic_sriov_vf_attach(adapter);
		adapter->tx_timeo_cnt = 0;
		adapter->reset_ctx_cnt = 0;
		adapter->fw_fail_cnt = 0;
		dev_info(dev, "Done resetting context for VF 0x%x\n", func);
	} else {
		dev_err(dev, "%s: Reinitialization of VF 0x%x failed\n",
			__func__, func);
		state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
		dev_info(dev, "%s: Current state 0x%x\n", __func__, state);
	}

	return 0;
}

static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	int ret = 0;

	if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY)
		ret = qlcnic_sriov_vf_handle_dev_ready(adapter);
	else if (ahw->reset_context)
		ret = qlcnic_sriov_vf_handle_context_reset(adapter);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return ret;
}

static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
{
	struct qlc_83xx_idc *idc = &adapter->ahw->idc;

	dev_err(&adapter->pdev->dev, "Device is in failed state\n");
	if (idc->prev_state == QLC_83XX_IDC_DEV_READY)
		qlcnic_sriov_vf_detach(adapter);

	clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return -EIO;
}

static int
qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
{
	struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
	struct qlc_83xx_idc *idc = &adapter->ahw->idc;

	dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
	if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
		set_bit(__QLCNIC_RESETTING, &adapter->state);
		adapter->tx_timeo_cnt = 0;
		adapter->reset_ctx_cnt = 0;
		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
		qlcnic_sriov_vf_detach(adapter);
	}

	return 0;
}

static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
{
	struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
	struct qlc_83xx_idc *idc = &adapter->ahw->idc;
	u8 func = adapter->ahw->pci_func;

	if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
		dev_err(&adapter->pdev->dev,
			"Firmware hang detected by VF 0x%x\n", func);
		set_bit(__QLCNIC_RESETTING, &adapter->state);
		adapter->tx_timeo_cnt = 0;
		adapter->reset_ctx_cnt = 0;
		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
		qlcnic_sriov_vf_detach(adapter);
	}
	return 0;
}

static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
{
	dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n",
		__func__);
	return 0;
}
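
/* IDC poll worker: reread the device state, dispatch to the handler for
 * that state, and reschedule after idc->delay for as long as the module
 * stays loaded and no handler has reported a fatal error.
 */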
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
{
	struct qlcnic_adapter *adapter;
	struct qlc_83xx_idc *idc;
	int ret = 0;

	adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
	idc = &adapter->ahw->idc;
	idc->curr_state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);

	switch (idc->curr_state) {
	case QLC_83XX_IDC_DEV_READY:
		ret = qlcnic_sriov_vf_idc_ready_state(adapter);
		break;
	case QLC_83XX_IDC_DEV_NEED_RESET:
	case QLC_83XX_IDC_DEV_INIT:
		ret = qlcnic_sriov_vf_idc_init_reset_state(adapter);
		break;
	case QLC_83XX_IDC_DEV_NEED_QUISCENT:
		ret = qlcnic_sriov_vf_idc_need_quiescent_state(adapter);
		break;
	case QLC_83XX_IDC_DEV_FAILED:
		ret = qlcnic_sriov_vf_idc_failed_state(adapter);
		break;
	case QLC_83XX_IDC_DEV_QUISCENT:
		break;
	default:
		ret = qlcnic_sriov_vf_idc_unknown_state(adapter);
	}

	idc->prev_state = idc->curr_state;
	if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
		qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
				     idc->delay);
}

static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
{
	while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		msleep(20);

	clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	cancel_delayed_work_sync(&adapter->fw_work);
}

static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
				      struct qlcnic_vf_info *vf, u16 vlan_id)
{
	int i, err = -EINVAL;

	if (!vf->sriov_vlans)
		return err;

	mutex_lock(&vf->vlan_list_lock);

	for (i = 0; i < sriov->num_allowed_vlans; i++) {
		if (vf->sriov_vlans[i] == vlan_id) {
			err = 0;
			break;
		}
	}

	mutex_unlock(&vf->vlan_list_lock);
	return err;
}

static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
					   struct qlcnic_vf_info *vf)
{
	int err = 0;

	mutex_lock(&vf->vlan_list_lock);

	if (vf->num_vlan >= sriov->num_allowed_vlans)
		err = -EINVAL;

	mutex_unlock(&vf->vlan_list_lock);
	return err;
}

static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_adapter *adapter,
					  u16 vid, u8 enable)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_vf_info *vf;
	bool vlan_exist;
	u8 allowed = 0;
	int i;

	vf = &adapter->ahw->sriov->vf_info[0];
	vlan_exist = qlcnic_sriov_check_any_vlan(vf);
	if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
		return -EINVAL;

	if (enable) {
		if (qlcnic_83xx_vf_check(adapter) && vlan_exist)
			return -EINVAL;

		if (qlcnic_sriov_validate_num_vlans(sriov, vf))
			return -EINVAL;

		if (sriov->any_vlan) {
			for (i = 0; i < sriov->num_allowed_vlans; i++) {
				if (sriov->allowed_vlans[i] == vid)
					allowed = 1;
			}

			if (!allowed)
				return -EINVAL;
		}
	} else {
		if (!vlan_exist || qlcnic_sriov_check_vlan_id(sriov, vf, vid))
			return -EINVAL;
	}

	return 0;
}

static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
					enum qlcnic_vlan_operations opcode)
{
	struct qlcnic_adapter *adapter = vf->adapter;
	struct qlcnic_sriov *sriov;

	sriov = adapter->ahw->sriov;

	if (!vf->sriov_vlans)
		return;

	mutex_lock(&vf->vlan_list_lock);

	switch (opcode) {
	case QLC_VLAN_ADD:
		qlcnic_sriov_add_vlan_id(sriov, vf, vlan_id);
		break;
	case QLC_VLAN_DELETE:
		qlcnic_sriov_del_vlan_id(sriov, vf, vlan_id);
		break;
	default:
		netdev_err(adapter->netdev, "Invalid VLAN operation\n");
	}

	mutex_unlock(&vf->vlan_list_lock);
}
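
/* Guest VLAN add/delete for the VF: validate the request against the ACL,
 * send QLCNIC_BC_CMD_CFG_GUEST_VLAN with arg[1] = enable | vid << 16, then
 * rebuild the MAC filter list for the new VLAN set.
 */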
int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
				   u16 vid, u8 enable)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_vf_info *vf;
	struct qlcnic_cmd_args cmd;
	int ret;

	if (vid == 0)
		return 0;

	vf = &adapter->ahw->sriov->vf_info[0];
	ret = qlcnic_sriov_validate_vlan_cfg(adapter, vid, enable);
	if (ret)
		return ret;

	ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd,
					     QLCNIC_BC_CMD_CFG_GUEST_VLAN);
	if (ret)
		return ret;

	cmd.req.arg[1] = (enable & 1) | vid << 16;

	qlcnic_sriov_cleanup_async_list(&sriov->bc);
	ret = qlcnic_issue_cmd(adapter, &cmd);
	if (ret) {
		dev_err(&adapter->pdev->dev,
			"Failed to configure guest VLAN, err=%d\n", ret);
	} else {
		qlcnic_free_mac_list(adapter);

		if (enable)
			qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
		else
			qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);

		qlcnic_set_multi(adapter->netdev);
	}

	qlcnic_free_mbx_args(&cmd);
	return ret;
}

static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
{
	struct list_head *head = &adapter->mac_list;
	struct qlcnic_mac_vlan_list *cur;

	while (!list_empty(head)) {
		cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
		qlcnic_sre_macaddr_change(adapter, cur->mac_addr, cur->vlan_id,
					  QLCNIC_MAC_DEL);
		list_del(&cur->list);
		kfree(cur);
	}
}

static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
{
	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	int retval;

	netif_device_detach(netdev);
	qlcnic_cancel_idc_work(adapter);

	if (netif_running(netdev))
		qlcnic_down(adapter, netdev);

	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
	qlcnic_sriov_cfg_bc_intr(adapter, 0);
	qlcnic_83xx_disable_mbx_intr(adapter);
	cancel_delayed_work_sync(&adapter->idc_aen_work);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	return 0;
}

static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
{
	struct qlc_83xx_idc *idc = &adapter->ahw->idc;
	struct net_device *netdev = adapter->netdev;
	int err;

	set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
	qlcnic_83xx_enable_mbx_interrupt(adapter);
	err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
	if (err)
		return err;

	err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
	if (!err) {
		if (netif_running(netdev)) {
			err = qlcnic_up(adapter, netdev);
			if (!err)
				qlcnic_restore_indev_addr(netdev, NETDEV_UP);
		}
	}

	netif_device_attach(netdev);
	qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
			     idc->delay);
	return err;
}

void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_vf_info *vf;
	int i;

	for (i = 0; i < sriov->num_vfs; i++) {
		vf = &sriov->vf_info[i];
		vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
					  sizeof(*vf->sriov_vlans), GFP_KERNEL);
	}
}

void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_vf_info *vf;
	int i;

	for (i = 0; i < sriov->num_vfs; i++) {
		vf = &sriov->vf_info[i];
		kfree(vf->sriov_vlans);
		vf->sriov_vlans = NULL;
	}
}

void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *sriov,
			      struct qlcnic_vf_info *vf, u16 vlan_id)
{
	int i;

	for (i = 0; i < sriov->num_allowed_vlans; i++) {
		if (!vf->sriov_vlans[i]) {
			vf->sriov_vlans[i] = vlan_id;
			vf->num_vlan++;
			return;
		}
	}
}

void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *sriov,
			      struct qlcnic_vf_info *vf, u16 vlan_id)
{
	int i;

	for (i = 0; i < sriov->num_allowed_vlans; i++) {
		if (vf->sriov_vlans[i] == vlan_id) {
			vf->sriov_vlans[i] = 0;
			vf->num_vlan--;
			return;
		}
	}
}

bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
{
	bool err = false;

	mutex_lock(&vf->vlan_list_lock);

	if (vf->num_vlan)
		err = true;

	mutex_unlock(&vf->vlan_list_lock);
	return err;
}