/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2012 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sriov.h"

/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}

static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
				u8 update)
{
	/* acking a VF sb through the PF - use the GRC */
	u32 ctl;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
	mmiowb();
	barrier();

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();
}

/* VFOP - VF slow-path operation support */

#define BNX2X_VFOP_FILTER_ADD_CNT_MAX	0x10000

/* VFOP operations states */
enum bnx2x_vfop_qctor_state {
	BNX2X_VFOP_QCTOR_INIT,
	BNX2X_VFOP_QCTOR_SETUP,
	BNX2X_VFOP_QCTOR_INT_EN
};

enum bnx2x_vfop_vlan_mac_state {
	BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
	BNX2X_VFOP_VLAN_MAC_CLEAR,
	BNX2X_VFOP_VLAN_MAC_CHK_DONE,
	BNX2X_VFOP_MAC_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST_0
};

enum bnx2x_vfop_qsetup_state {
	BNX2X_VFOP_QSETUP_CTOR,
	BNX2X_VFOP_QSETUP_VLAN0,
	BNX2X_VFOP_QSETUP_DONE
};

enum bnx2x_vfop_mcast_state {
	BNX2X_VFOP_MCAST_DEL,
	BNX2X_VFOP_MCAST_ADD,
	BNX2X_VFOP_MCAST_CHK_DONE
};

enum bnx2x_vfop_rxmode_state {
	BNX2X_VFOP_RXMODE_CONFIG,
	BNX2X_VFOP_RXMODE_DONE
};

#define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0)

void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	DP(BNX2X_MSG_IOV,
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->tx.sb_cq_index,
	   init_params->tx.hc_rate,
	   setup_params->flags,
	   setup_params->txq_params.traffic_type);
}

void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->rx.sb_cq_index,
	   init_params->rx.hc_rate,
	   setup_params->gen_params.mtu,
	   rxq_params->buf_sz,
	   rxq_params->sge_buf_sz,
	   rxq_params->max_sges_pkt,
	   rxq_params->tpa_agg_sz,
	   setup_params->flags,
	   rxq_params->drop_flags,
	   rxq_params->cache_line_log);
}

void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vfop_qctor_params *p,
			   unsigned long q_type)
{
	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

	/* INIT */

	/* Enable host coalescing in the transition to INIT state */
	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

	/* FW SB ID */
	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

	/* context */
	init_p->cxts[0] = q->cxt;

	/* SETUP */

	/* Setup-op general parameters */
	setup_p->gen_params.spcl_id = vf->sp_cl_id;
	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);

	/* Setup-op pause params:
	 * Nothing to do, the pause thresholds are set by default to 0 which
	 * effectively turns off the feature for this queue. We don't want
	 * one queue (VF) to interfere with another queue (another VF)
	 */
	if (vf->cfg_flags & VF_CFG_FW_FC)
		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
			  vf->abs_vfid);
	/* Setup-op flags:
	 * collect statistics, zero statistics, local-switching, security,
	 * OV for Flex10, RSS and MCAST for leading
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

	/* for VFs, enable tx switching, bd coherency, and mac address
	 * anti-spoofing
	 */
	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

	if (vfq_is_leading(q)) {
		__set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
		__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
	}

	/* Setup-op rx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
	}

	/* Setup-op tx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	}
}

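/* Note on the VFOP handlers below: each handler is re-entered on ramrod
 * completion with vfop->state already set to the next step, so a single
 * logical operation may span several invocations.  The
 * bnx2x_vfop_finalize() macro (see bnx2x_sriov.h) routes control after
 * every step - to op_err on error, to op_pending while a ramrod is
 * outstanding, to op_done on VFOP_DONE - and on VFOP_CONT with rc == 0
 * it simply falls through to the next case, which is why the switch
 * statements below carry no break statements.
 */
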
/* VFOP queue construction */
static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qctor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QCTOR_INIT:

		/* has this queue already been opened? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
			DP(BNX2X_MSG_IOV,
			   "Entered qctor but queue was already up. Aborting gracefully\n");
			goto op_done;
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_SETUP;

		q_params->cmd = BNX2X_Q_CMD_INIT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_SETUP:
		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_INT_EN;

		/* copy pre-prepared setup params to the queue-state params */
		vfop->op_p->qctor.qstate.params.setup =
			vfop->op_p->qctor.prep_qsetup;

		q_params->cmd = BNX2X_Q_CMD_SETUP;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_INT_EN:

		/* enable interrupts */
		bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
				    USTORM_ID, 0, IGU_INT_ENABLE, 0);
		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
op_done:
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qctor.qid = qid;
		vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);

		bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
				 bnx2x_vfop_qctor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
					     cmd->block);
	}
	return -ENOMEM;
}

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
	if (vf) {
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;
		++vf_sb_count(vf);
	}
}

/* VFOP MAC/VLAN helpers */
static inline void bnx2x_vfop_credit(struct bnx2x *bp,
				     struct bnx2x_vfop *vfop,
				     struct bnx2x_vlan_mac_obj *obj)
{
	struct bnx2x_vfop_args_filters *args = &vfop->args.filters;

	/* update credit only if there is no error
	 * and a valid credit counter
	 */
	if (!vfop->rc && args->credit) {
		int cnt = 0;
		struct list_head *pos;

		list_for_each(pos, &obj->head)
			cnt++;

		atomic_set(args->credit, cnt);
	}
}

static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
				   struct bnx2x_vfop_filter *pos,
				   struct bnx2x_vlan_mac_data *user_req)
{
	user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
		BNX2X_VLAN_MAC_DEL;

	switch (pos->type) {
	case BNX2X_VFOP_FILTER_MAC:
		memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
		break;
	case BNX2X_VFOP_FILTER_VLAN:
		user_req->u.vlan.vlan = pos->vid;
		break;
	default:
		BNX2X_ERR("Invalid filter type, skipping\n");
		return 1;
	}
	return 0;
}

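/* vlan 0 handling: the leading queue keeps vlan 0 configured so that
 * untagged traffic keeps flowing while no user vlan list is applied.
 * The helper below adds or removes that rule; the VLAN_CONFIG_LIST
 * states in bnx2x_vfop_vlan_mac() remove it before applying a user
 * list and re-add it if the resulting list ends up empty
 * (BNX2X_VFOP_VLAN_CONFIG_LIST_0).
 */
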
static int
bnx2x_vfop_config_vlan0(struct bnx2x *bp,
			struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
			bool add)
{
	int rc;

	vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
		BNX2X_VLAN_MAC_DEL;
	vlan_mac->user_req.u.vlan.vlan = 0;

	rc = bnx2x_config_vlan_mac(bp, vlan_mac);
	if (rc == -EEXIST)
		rc = 0;
	return rc;
}

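/* bnx2x_vfop_config_list() applies a list of filter commands one by
 * one, moving the rules it managed to apply onto a rollback list.  On
 * entry filters->add_cnt caps how many net additions are allowed; on
 * exit it holds the number of rules actually added (or 0 after a
 * rollback).
 */
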
static int bnx2x_vfop_config_list(struct bnx2x *bp,
				  struct bnx2x_vfop_filters *filters,
				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
{
	struct bnx2x_vfop_filter *pos, *tmp;
	struct list_head rollback_list, *filters_list = &filters->head;
	struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
	int rc = 0, cnt = 0;

	INIT_LIST_HEAD(&rollback_list);

	list_for_each_entry_safe(pos, tmp, filters_list, link) {
		if (bnx2x_vfop_set_user_req(bp, pos, user_req))
			continue;

		rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (rc >= 0) {
			cnt += pos->add ? 1 : -1;
			list_del(&pos->link);
			list_add(&pos->link, &rollback_list);
			rc = 0;
		} else if (rc == -EEXIST) {
			rc = 0;
		} else {
			BNX2X_ERR("Failed to add a new vlan_mac command\n");
			break;
		}
	}

	/* rollback if error or too many rules added */
	if (rc || cnt > filters->add_cnt) {
		BNX2X_ERR("error or too many rules added. Performing rollback\n");
		list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
			pos->add = !pos->add; /* reverse op */
			bnx2x_vfop_set_user_req(bp, pos, user_req);
			bnx2x_config_vlan_mac(bp, vlan_mac);
			list_del(&pos->link);
		}
		cnt = 0;
		if (!rc)
			rc = -EINVAL;
	}
	filters->add_cnt = cnt;
	return rc;
}

/* VFOP set VLAN/MAC */
static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
	struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
	struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;

	enum bnx2x_vfop_vlan_mac_state state = vfop->state;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	bnx2x_vfop_reset_wq(vf);

	switch (state) {
	case BNX2X_VFOP_VLAN_MAC_CLEAR:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do delete */
		vfop->rc = obj->delete_all(bp, obj,
					   &vlan_mac->user_req.vlan_mac_flags,
					   &vlan_mac->ramrod_flags);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do config */
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (vfop->rc == -EEXIST)
			vfop->rc = 0;

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
		vfop->rc = !!obj->raw.check_pending(&obj->raw);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MAC_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do list config */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (vfop->rc)
			goto op_err;

		set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0;

		/* remove vlan0 - could be no-op */
		vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
		if (vfop->rc)
			goto op_err;

		/* Do vlan list config. If this operation fails we try to
		 * restore vlan0 to keep the queue in working order
		 */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (!vfop->rc) {
			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		}
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */

	case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		if (list_empty(&obj->head))
			/* add vlan0 */
			vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
op_done:
	kfree(filters);
	bnx2x_vfop_credit(bp, vfop, obj);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

struct bnx2x_vfop_vlan_mac_flags {
	bool drv_only;
	bool dont_consume;
	bool single_cmd;
	bool add;
};

static void
bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
				struct bnx2x_vfop_vlan_mac_flags *flags)
{
	struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;

	memset(ramrod, 0, sizeof(*ramrod));

	/* ramrod flags */
	if (flags->drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
	if (flags->single_cmd)
		set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);

	/* mac_vlan flags */
	if (flags->dont_consume)
		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);

	/* cmd */
	ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
}

static inline void
bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
			   struct bnx2x_vfop_vlan_mac_flags *flags)
{
	bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
	set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
}

int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    struct bnx2x_vfop_filters *macs,
			    int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = macs,
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false, /* don't care since only the items in the
				       * filters list affect the sp operation,
				       * not the list itself
				       */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    int qid, u16 vid, bool add)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = false,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = add,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
		ramrod->user_req.u.vlan.vlan = vid;

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
			     struct bnx2x_virtf *vf,
			     struct bnx2x_vfop_cmd *cmd,
			     struct bnx2x_vfop_filters *vlans,
			     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = vlans,
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false, /* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
			atomic_read(filters.credit);

		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue setup (queue constructor + set vlan 0) */
static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qctor.qid;
	enum bnx2x_vfop_qsetup_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_qsetup,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QSETUP_CTOR:
		/* init the queue ctor command */
		vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
		vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QSETUP_VLAN0:
		/* skip if non-leading or FPGA/EMU */
		if (qid)
			goto op_done;

		/* init the queue set-vlan command (for vlan 0) */
		vfop->state = BNX2X_VFOP_QSETUP_DONE;
		vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QSETUP_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}

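/* Note: the sub-commands issued above pass bnx2x_vfop_qsetup itself as
 * their completion callback (cmd.done), so this handler is re-entered
 * when each child operation finishes and resumes from vfop->state.
 */
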
int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qctor.qid = qid;

		bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
				 bnx2x_vfop_qsetup, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP multi-casts */
static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
	struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
	struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
	enum bnx2x_vfop_mcast_state state = vfop->state;
	int i;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_MCAST_DEL:
		/* clear existing mcasts */
		vfop->state = BNX2X_VFOP_MCAST_ADD;
		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_MCAST_ADD:
		if (raw->check_pending(raw))
			goto op_pending;

		if (args->mc_num) {
			/* update mcast list on the ramrod params */
			INIT_LIST_HEAD(&mcast->mcast_list);
			for (i = 0; i < args->mc_num; i++)
				list_add_tail(&(args->mc[i].link),
					      &mcast->mcast_list);
			/* add new mcasts */
			vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
			vfop->rc = bnx2x_config_mcast(bp, mcast,
						      BNX2X_MCAST_CMD_ADD);
		}
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MCAST_CHK_DONE:
		vfop->rc = raw->check_pending(raw) ? 1 : 0;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
op_done:
	kfree(args->mc);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

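/* The mcast flow above is always "delete then add": the existing mcast
 * configuration is cleared first, and the new list, if any, is only
 * configured once the delete ramrod has completed.  The command wrapper
 * below merely stages the list; the ramrods are issued from the state
 * machine.
 */
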
int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 bnx2x_mac_addr_t *mcasts,
			 int mcast_num, bool drv_only)
{
	struct bnx2x_vfop *vfop = NULL;
	size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
	struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
					   NULL;

	if (!mc_sz || mc) {
		vfop = bnx2x_vfop_add(bp, vf);
		if (vfop) {
			int i;
			struct bnx2x_mcast_ramrod_params *ramrod =
				&vf->op_params.mcast;

			/* set ramrod params */
			memset(ramrod, 0, sizeof(*ramrod));
			ramrod->mcast_obj = &vf->mcast_obj;
			if (drv_only)
				set_bit(RAMROD_DRV_CLR_ONLY,
					&ramrod->ramrod_flags);

			/* copy mcasts pointers */
			vfop->args.mc_list.mc_num = mcast_num;
			vfop->args.mc_list.mc = mc;
			for (i = 0; i < mcast_num; i++)
				mc[i].mac = mcasts[i];

			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
					 bnx2x_vfop_mcast, cmd->done);
			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
						     cmd->block);
		} else {
			kfree(mc);
		}
	}
	return -ENOMEM;
}

/* VFOP rx-mode */
static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
	enum bnx2x_vfop_rxmode_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_RXMODE_CONFIG:
		/* next state */
		vfop->state = BNX2X_VFOP_RXMODE_DONE;

		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
		BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
op_done:
	case BNX2X_VFOP_RXMODE_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid, unsigned long accept_flags)
{
	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_rx_mode_ramrod_params *ramrod =
			&vf->op_params.rx_mode;

		memset(ramrod, 0, sizeof(*ramrod));

		/* Prepare ramrod parameters */
		ramrod->cid = vfq->cid;
		ramrod->cl_id = vfq_cl_id(vf, vfq);
		ramrod->rx_mode_obj = &bp->rx_mode_obj;
		ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);

		ramrod->rx_accept_flags = accept_flags;
		ramrod->tx_accept_flags = accept_flags;
		ramrod->pstate = &vf->filter_state;
		ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

		set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
		set_bit(RAMROD_RX, &ramrod->ramrod_flags);
		set_bit(RAMROD_TX, &ramrod->ramrod_flags);

		ramrod->rdata =
			bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
		ramrod->rdata_mapping =
			bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);

		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
				 bnx2x_vfop_rxmode, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* called only on E1H or E2.
 * When pretending to be PF, the pretend value is the function number 0...7
 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
 * combination
 */
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
{
	u32 pretend_reg;

	if (CHIP_IS_E1H(bp) && pretend_func_val > E1H_FUNC_MAX)
		return -1;

	/* get my own pretend register */
	pretend_reg = bnx2x_get_pretend_reg(bp);
	REG_WR(bp, pretend_reg, pretend_func_val);
	REG_RD(bp, pretend_reg);
	return 0;
}

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. this routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}

static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;
	u32 val;

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	if (vf->cfg_flags & VF_CFG_INT_SIMD)
		val |= IGU_VF_CONF_SINGLE_ISR_EN;
	val &= ~IGU_VF_CONF_PARENT_MASK;
	val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT;	/* parent PF */
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	DP(BNX2X_MSG_IOV,
	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
	   vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
				       false /* VF */);

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
				    IGU_INT_DISABLE, 1);
	}
}

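/* Illustrative pretend usage, as in bnx2x_vf_igu_reset() above and
 * bnx2x_vf_enable_traffic() below.  GRC accesses issued between the two
 * calls execute on behalf of the pretended function rather than the PF:
 *
 *	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
 *	REG_WR(bp, some_per_function_reg, val);
 *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));	(un-pretend)
 */
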
void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* Reset vf in IGU; interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		goto unknown_dev;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);

unknown_dev:
	BNX2X_ERR("Unknown device\n");
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Wait 100ms */
	msleep(100);

	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}

/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
{
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;

	/* divvy up vlan rules */
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
}

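/* Worked example of the vlan divvy above (illustrative numbers): with a
 * 253 entry vlan pool and 64 VFs, vlan_count is first rounded down to a
 * power of two (1 << ilog2(253) = 128) and each VF is then granted
 * 128 / 64 = 2 vlan filter rules.
 */
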
/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflects the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set VFs starting CID. If it's > 0 the preceding CIDs belong to
	 * the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size  0 - *B, 4 - 128B. We set it here to match
	 * the PF doorbell size although the 2 are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
	       BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the number of VF allowed doorbells to the full DQ range */
	REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);

	/* set the VF doorbell threshold */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}

void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
	if (!IS_SRIOV(bp))
		return;

	REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}

static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}

static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		do_div(size, iov->total);
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}

static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}

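/* The CAM scan below attributes IGU status blocks to their owners: the
 * first SB found for a given VF fixes its igu_base_id, and every
 * further hit bumps vf_sb_count (see bnx2x_vf_set_igu_info() above).
 */
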
static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (!(fid & IGU_FID_ENCODE_IS_PF))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));

		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		    (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
}

static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}

static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}

static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
{
	int i;
	u8 queue_count = 0;

	if (IS_SRIOV(bp))
		for_each_vf(bp, i)
			queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);

	return queue_count;
}

/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i, qcount;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify this is a PF */
	if (IS_VF(bp))
		return 0;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX)
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");

	err = -EIO;
	/* verify ari is enabled */
	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n");
		return err;
	}

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
		return err;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hypervisor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs */
	if (iov->total == 0)
		goto failed;

	/* calculate the actual number of VFs */
	iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);

	/* allocate the vf array */
	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	bnx2x_get_vf_igu_cam_info(bp);

	/* get the total queue count and allocate the global queue arrays */
	qcount = bnx2x_iov_get_max_queue_count(bp);

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
				 GFP_KERNEL);
	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}

void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}

void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];
		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);
}

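/* Note for the allocator below: VF context memory is carved into ILT
 * page sized chunks - each BP_VF_CXT_PAGE() holds at most
 * CDU_ILT_PAGE_SZ bytes and tot_size is consumed chunk by chunk, so
 * trailing pages may legitimately end up empty.
 */
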
int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	size_t tot_size;
	int i, rc = 0;

	if (!IS_SRIOV(bp))
		return rc;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

		if (cxt->size) {
			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
		} else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
			tot_size);
	BP_VFDB(bp)->sp_dma.size = tot_size;

	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
			tot_size);
	BP_VF_MBX_DMA(bp)->size = tot_size;

	return 0;

alloc_mem_err:
	return -ENOMEM;
}

static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
	unsigned long q_type = 0;

	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

	/* Queue State object */
	bnx2x_init_queue_obj(bp, &q->sp_obj,
			     cl_id, &q->cid, 1, func_id,
			     bnx2x_vf_sp(bp, vf, q_data),
			     bnx2x_vf_sp_map(bp, vf, q_data),
			     q_type);

	DP(BNX2X_MSG_IOV,
	   "initialized vf %d's queue object. func id set to %d\n",
	   vf->abs_vfid, q->sp_obj.func_id);

	/* mac/vlan objects are per queue, but only those
	 * that belong to the leading queue are initialized
	 */
	if (vfq_is_leading(q)) {
		/* mac */
		bnx2x_init_mac_obj(bp, &q->mac_obj,
				   cl_id, q->cid, func_id,
				   bnx2x_vf_sp(bp, vf, mac_rdata),
				   bnx2x_vf_sp_map(bp, vf, mac_rdata),
				   BNX2X_FILTER_MAC_PENDING,
				   &vf->filter_state,
				   BNX2X_OBJ_TYPE_RX_TX,
				   &bp->macs_pool);
		/* vlan */
		bnx2x_init_vlan_obj(bp, &q->vlan_obj,
				    cl_id, q->cid, func_id,
				    bnx2x_vf_sp(bp, vf, vlan_rdata),
				    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
				    BNX2X_FILTER_VLAN_PENDING,
				    &vf->filter_state,
				    BNX2X_OBJ_TYPE_RX_TX,
				    &bp->vlans_pool);

		/* mcast */
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
				     q->cid, func_id, func_id,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		vf->leading_rss = cl_id;
	}
}

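/* CID layout reminder for the init flow below: every VF owns a
 * contiguous window of BNX2X_CIDS_PER_VF connection ids, so VF 'vfid'
 * starts at BNX2X_FIRST_VF_CID +
 * (first_vf_in_pf + vfid) * BNX2X_CIDS_PER_VF.
 */
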
/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
	int vfid, qcount, i;

	if (!IS_SRIOV(bp)) {
		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
		return 0;
	}

	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);

	/* initialize vf database */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
			BNX2X_CIDS_PER_VF;

		union cdu_context *base_cxt = (union cdu_context *)
			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
			(base_vf_cid & (ILT_PAGE_CIDS-1));

		DP(BNX2X_MSG_IOV,
		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);

		/* init statically provisioned resources */
		bnx2x_iov_static_resc(bp, &vf->alloc_resc);

		/* queues are initialized during VF-ACQUIRE */

		/* reserve the vf vlan credit */
		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));

		vf->filter_state = 0;
		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);

		/* init mcast object - This object will be re-initialized
		 * during VF-ACQUIRE with the proper cl_id and cid.
		 * It needs to be initialized here so that it can be safely
		 * handled by a subsequent FLR flow.
		 */
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
				     0xFF, 0xFF, 0xFF,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		/* set the mailbox message addresses */
		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
			 MBX_MSG_ALIGNED_SIZE);

		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
			vfid * MBX_MSG_ALIGNED_SIZE;

		/* Enable vf mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	}

	/* Final VF init */
	qcount = 0;
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		/* fill in the BDF and bars */
		vf->bus = bnx2x_vf_bus(bp, i);
		vf->devfn = bnx2x_vf_devfn(bp, i);
		bnx2x_vf_set_bars(bp, vf);

		DP(BNX2X_MSG_IOV,
		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
		   vf->abs_vfid, vf->bus, vf->devfn,
		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
		   (unsigned)vf->bars[2].bar, vf->bars[2].size);

		/* set local queue arrays */
		vf->vfqs = &bp->vfdb->vfqs[qcount];
		qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
	}

	return 0;
}

/* called by bnx2x_init_hw_func, returns the next ilt line */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
{
	int i;
	struct bnx2x_ilt *ilt = BP_ILT(bp);

	if (!IS_SRIOV(bp))
		return line;

	/* set vfs ilt lines */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);

		ilt->lines[line+i].page = hw_cxt->addr;
		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
	}
	return line + i;
}

static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
{
	return ((cid >= BNX2X_FIRST_VF_CID) &&
		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
}

static
void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
					struct bnx2x_vf_queue *vfq,
					union event_ring_elem *elem)
{
	unsigned long ramrod_flags = 0;
	int rc = 0;

	/* Always push next commands out, don't wait here */
	set_bit(RAMROD_CONT, &ramrod_flags);

	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
	case BNX2X_FILTER_MAC_PENDING:
		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
					   &ramrod_flags);
		break;
	case BNX2X_FILTER_VLAN_PENDING:
		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
					    &ramrod_flags);
		break;
	default:
		BNX2X_ERR("Unsupported classification command: %d\n",
			  elem->message.data.eth_event.echo);
		return;
	}
	if (rc < 0)
		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
	else if (rc > 0)
		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
}

static
void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
			       struct bnx2x_virtf *vf)
{
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc;

	rparam.mcast_obj = &vf->mcast_obj;
	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
		if (rc < 0)
			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
				  rc);
	}
}

static
void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
				 struct bnx2x_virtf *vf)
{
	smp_mb__before_clear_bit();
	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	smp_mb__after_clear_bit();
}

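/* Sketch of the vf_cid decode used below, under the assumptions stated
 * in the in-line comments (the vfid is embedded in the cid and there
 * are at most 64 VFs per path): the low BNX2X_VF_CID_WND bits of the
 * cid select the queue within the VF, and the following six bits
 * select the abs_vfid.
 */
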
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
{
	struct bnx2x_virtf *vf;
	int qidx = 0, abs_vfid;
	u8 opcode;
	u16 cid = 0xffff;

	if (!IS_SRIOV(bp))
		return 1;

	/* first get the cid - the only events we handle here are cfc-delete
	 * and set-mac completion
	 */
	opcode = elem->message.opcode;

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		cid = SW_CID((__force __le32)
			     elem->message.data.cfc_del_event.cid);
		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
	case EVENT_RING_OPCODE_MULTICAST_RULES:
	case EVENT_RING_OPCODE_FILTERS_RULES:
		cid = (elem->message.data.eth_event.echo &
		       BNX2X_SWCID_MASK);
		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		abs_vfid = elem->message.data.vf_flr_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	default:
		return 1;
	}

	/* check if the cid is the VF range */
	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
		return 1;
	}

	/* extract vf and rxq index from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. the max number of VFs (per path) is 64
	 */
	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
get_vf:
	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf) {
		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
			  cid, abs_vfid);
		return 0;
	}

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
		   vf->abs_vfid, qidx);
		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
						       &vfq_get(vf,
								qidx)->sp_obj,
						       BNX2X_Q_CMD_CFC_DEL);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
		break;
	case EVENT_RING_OPCODE_MULTICAST_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_mcast_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_FILTERS_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_filters_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
		   vf->abs_vfid);
		/* Do nothing for now */
		break;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n",
		   vf->abs_vfid);
		/* Do nothing for now */
		break;
	}
	/* SRIOV: reschedule any 'in_progress' operations */
	bnx2x_iov_sp_event(bp, cid, false);

	return 0;
}

static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
{
	/* extract the vf from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. the max number of VFs (per path) is 64
	 */
	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
}

void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
				struct bnx2x_queue_sp_obj **q_obj)
{
	struct bnx2x_virtf *vf;

	if (!IS_SRIOV(bp))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);

	if (vf) {
		/* extract queue index from vf_cid - relies on the following:
		 * 1. vfid on cid reflects the true abs_vfid
		 * 2. the max number of VFs (per path) is 64
		 */
		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
	} else {
		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
	}
}

void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
{
	struct bnx2x_virtf *vf;

	/* check if the cid is the VF range */
	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);
	if (vf) {
		/* set in_progress flag */
		atomic_set(&vf->op_in_progress, 1);
		if (queue_work)
			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
	}
}

void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index, num_queues_req;
	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;
	u8 stats_count = 0;
	bool is_fcoe = false;

	if (!IS_SRIOV(bp))
		return;

	if (!NO_FCOE(bp))
		is_fcoe = true;

	/* fcoe adds one global request and one queue request */
	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
		(is_fcoe ? 0 : 1);

	DP(BNX2X_MSG_IOV,
	   "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
	   BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
	   first_queue_query_index + num_queues_req);

	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
		num_queues_req * sizeof(struct per_queue_stats);

	cur_query_entry = &bp->fw_stats_req->
		query[first_queue_query_index + num_queues_req];

	for_each_vf(bp, i) {
		int j;
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (vf->state != VF_ENABLED) {
			DP(BNX2X_MSG_IOV,
			   "vf %d not enabled so no stats for it\n",
			   vf->abs_vfid);
			continue;
		}

		DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
		for_each_vfq(vf, j) {
			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);

			/* collect stats from active queues only */
			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
			    BNX2X_Q_LOGICAL_STATE_STOPPED)
				continue;

			/* create stats query entry for this queue */
			cur_query_entry->kind = STATS_TYPE_QUEUE;
			cur_query_entry->index = vfq_cl_id(vf, rxq);
			cur_query_entry->funcID =
				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
			cur_query_entry->address.hi =
				cpu_to_le32(U64_HI(vf->fw_stat_map));
			cur_query_entry->address.lo =
				cpu_to_le32(U64_LO(vf->fw_stat_map));
			DP(BNX2X_MSG_IOV,
			   "added address %x %x for vf %d queue %d client %d\n",
			   cur_query_entry->address.hi,
			   cur_query_entry->address.lo, cur_query_entry->funcID,
			   j, cur_query_entry->index);
			cur_query_entry++;
			cur_data_offset += sizeof(struct per_queue_stats);
			stats_count++;
		}
	}
	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
}

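/* Note: the VF queue queries built above are appended right after the
 * PF queue (and optional FCoE) entries in the statistics request, and
 * hdr.cmd_num is grown by the number of VF queue entries added.
 */
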
void bnx2x_iov_sp_task(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;
	/* Iterate over all VFs and invoke state transition for VFs with
	 * 'in-progress' slow-path operations
	 */
	DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (!list_empty(&vf->op_list_head) &&
		    atomic_read(&vf->op_in_progress)) {
			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
			bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
		}
	}
}

static inline
struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
{
	int i;
	struct bnx2x_virtf *vf = NULL;

	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);
		if (stat_id >= vf->igu_base_id &&
		    stat_id < vf->igu_base_id + vf_sb_count(vf))
			break;
	}
	return vf;
}

/* VF API helpers */
static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
				u8 enable)
{
	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;

	REG_WR(bp, reg, val);
}

u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
		     BNX2X_VF_MAX_QUEUES);
}

static
int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
			    struct vf_pf_resc_request *req_resc)
{
	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);

	return ((req_resc->num_rxqs <= rxq_cnt) &&
		(req_resc->num_txqs <= txq_cnt) &&
		(req_resc->num_sbs <= vf_sb_count(vf)) &&
		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
}

/* CORE VF API */
int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
		     struct vf_pf_resc_request *resc)
{
	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
		BNX2X_CIDS_PER_VF;

	union cdu_context *base_cxt = (union cdu_context *)
		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
		(base_vf_cid & (ILT_PAGE_CIDS-1));
	int i;

	/* if state is 'acquired' the VF was not released or FLR'd, in
	 * this case the returned resources match the already acquired
	 * resources. Verify that the requested numbers do not exceed the
	 * already acquired numbers.
	 */
	if (vf->state == VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
		   vf->abs_vfid);

		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
				  vf->abs_vfid);
			return -EINVAL;
		}
		return 0;
	}

	/* Otherwise vf state must be 'free' or 'reset' */
	if (vf->state != VF_FREE && vf->state != VF_RESET) {
		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
			  vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* static allocation:
	 * the global maximum numbers are fixed per VF. Fail the request if
	 * the requested numbers exceed these globals.
	 */
	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
		DP(BNX2X_MSG_IOV,
		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
		/* set the max resource in the vf */
		return -ENOMEM;
	}

	/* Set resources counters - 0 request means max available */
	vf_sb_count(vf) = resc->num_sbs;
	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	if (resc->num_mac_filters)
		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
	if (resc->num_vlan_filters)
		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;

	DP(BNX2X_MSG_IOV,
	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
	   vf_sb_count(vf), vf_rxq_count(vf),
	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
	   vf_vlan_rules_cnt(vf));

	/* Initialize the queues */
	if (!vf->vfqs) {
		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
		return -EINVAL;
	}

	for_each_vfq(vf, i) {
		struct bnx2x_vf_queue *q = vfq_get(vf, i);

		if (!q) {
			DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
			return -EINVAL;
		}

		q->index = i;
		q->cxt = &((base_cxt + i)->eth);
		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;

		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
		   vf->abs_vfid, i, q->index, q->cid, q->cxt);

		/* init SP objects */
		bnx2x_vfq_init(bp, vf, q);
	}
	vf->state = VF_ACQUIRED;
	return 0;
}
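
/* Worked example of the "0 request means max available" semantics above
 * (hypothetical numbers; assumes bnx2x_vf_max_queue_cnt() evaluates to 4,
 * i.e. vf_sb_count == 4 and both BNX2X_CIDS_PER_VF and BNX2X_VF_MAX_QUEUES
 * are at least 4): a request with num_sbs = 4, num_rxqs = 0, num_txqs = 2
 * yields
 *
 *	vf_sb_count(vf)  = 4	(taken verbatim from the request)
 *	vf_rxq_count(vf) = 4	(0 request -> max available)
 *	vf_txq_count(vf) = 2	(explicit request is honored)
 *
 * mac/vlan filter counts are only overwritten when requested non-zero.
 */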
int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
{
	struct bnx2x_func_init_params func_init = {0};
	u16 flags = 0;
	int i;

	/* the sb resources are initialized at this point, do the
	 * FW/HW initializations
	 */
	for_each_vf_sb(vf, i)
		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));

	/* Sanity checks */
	if (vf->state != VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
		   vf->abs_vfid, vf->state);
		return -EINVAL;
	}
	/* FLR cleanup epilogue */
	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
		return -EBUSY;

	/* reset IGU VF statistics: MSIX */
	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);

	/* vf init */
	if (vf->cfg_flags & VF_CFG_STATS)
		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);

	if (vf->cfg_flags & VF_CFG_TPA)
		flags |= FUNC_FLG_TPA;

	if (is_vf_multi(vf))
		flags |= FUNC_FLG_RSS;

	/* function setup */
	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
	func_init.fw_stat_map = vf->fw_stat_map;
	func_init.spq_map = vf->spq_map;
	func_init.spq_prod = 0;
	bnx2x_func_init(bp, &func_init);

	/* Enable the vf */
	bnx2x_vf_enable_access(bp, vf->abs_vfid);
	bnx2x_vf_enable_traffic(bp, vf);

	/* queue protection table */
	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), true);

	vf->state = VF_ENABLED;

	return 0;
}
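
/* Example of the flags computation in bnx2x_vf_init() above (purely
 * illustrative): a VF whose cfg_flags contain VF_CFG_STATS | VF_CFG_TPA
 * and for which is_vf_multi() is true is initialized with
 *
 *	flags = FUNC_FLG_STATS | FUNC_FLG_SPQ | FUNC_FLG_TPA | FUNC_FLG_RSS
 *
 * before the function parameters are handed to bnx2x_func_init().
 */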
void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      enum channel_tlvs tlv)
{
	/* lock the channel */
	mutex_lock(&vf->op_mutex);

	/* record the locking op */
	vf->op_current = tlv;

	/* log the lock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
	   vf->abs_vfid, tlv);
}

void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
				enum channel_tlvs expected_tlv)
{
	WARN(expected_tlv != vf->op_current,
	     "lock mismatch: expected %d found %d", expected_tlv,
	     vf->op_current);

	/* unlock the channel */
	mutex_unlock(&vf->op_mutex);

	/* log the unlock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
	   vf->abs_vfid, vf->op_current);

	/* clear the locking op */
	vf->op_current = CHANNEL_TLV_NONE;
}
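
/* Illustrative pairing of the channel lock/unlock above (a sketch only;
 * the real callers live in the VF-PF mailbox code and the TLV value
 * depends on the request being served - CHANNEL_TLV_ACQUIRE is assumed
 * here as an example):
 *
 *	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_ACQUIRE);
 *	... serve the acquire request ...
 *	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_ACQUIRE);
 *
 * Passing the same TLV to both calls lets the WARN() in the unlock path
 * catch a mismatched or missing lock.
 */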