1 /* bnx2x_sriov.c: Broadcom Everest network driver. 2 * 3 * Copyright 2009-2013 Broadcom Corporation 4 * 5 * Unless you and Broadcom execute a separate written software license 6 * agreement governing use of this software, this software is licensed to you 7 * under the terms of the GNU General Public License version 2, available 8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). 9 * 10 * Notwithstanding the above, under no circumstances may you combine this 11 * software in any way with any other Broadcom software provided under a 12 * license other than the GPL, without Broadcom's express prior written 13 * consent. 14 * 15 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 16 * Written by: Shmulik Ravid <shmulikr@broadcom.com> 17 * Ariel Elior <ariele@broadcom.com> 18 * 19 */ 20 #include "bnx2x.h" 21 #include "bnx2x_init.h" 22 #include "bnx2x_cmn.h" 23 #include "bnx2x_sp.h" 24 #include <linux/crc32.h> 25 #include <linux/if_vlan.h> 26 27 /* General service functions */ 28 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, 29 u16 pf_id) 30 { 31 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), 32 pf_id); 33 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), 34 pf_id); 35 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), 36 pf_id); 37 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), 38 pf_id); 39 } 40 41 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, 42 u8 enable) 43 { 44 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), 45 enable); 46 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), 47 enable); 48 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid), 49 enable); 50 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), 51 enable); 52 } 53 54 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) 55 { 56 int idx; 57 58 for_each_vf(bp, idx) 59 if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid) 
60 break; 61 return idx; 62 } 63 64 static 65 struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) 66 { 67 u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid); 68 return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL; 69 } 70 71 static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf, 72 u8 igu_sb_id, u8 segment, u16 index, u8 op, 73 u8 update) 74 { 75 /* acking a VF sb through the PF - use the GRC */ 76 u32 ctl; 77 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 78 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 79 u32 func_encode = vf->abs_vfid; 80 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id; 81 struct igu_regular cmd_data = {0}; 82 83 cmd_data.sb_id_and_flags = 84 ((index << IGU_REGULAR_SB_INDEX_SHIFT) | 85 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | 86 (update << IGU_REGULAR_BUPDATE_SHIFT) | 87 (op << IGU_REGULAR_ENABLE_INT_SHIFT)); 88 89 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | 90 func_encode << IGU_CTRL_REG_FID_SHIFT | 91 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; 92 93 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 94 cmd_data.sb_id_and_flags, igu_addr_data); 95 REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags); 96 mmiowb(); 97 barrier(); 98 99 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 100 ctl, igu_addr_ctl); 101 REG_WR(bp, igu_addr_ctl, ctl); 102 mmiowb(); 103 barrier(); 104 } 105 106 static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp, 107 struct bnx2x_virtf *vf, 108 bool print_err) 109 { 110 if (!bnx2x_leading_vfq(vf, sp_initialized)) { 111 if (print_err) 112 BNX2X_ERR("Slowpath objects not yet initialized!\n"); 113 else 114 DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n"); 115 return false; 116 } 117 return true; 118 } 119 120 /* VFOP - VF slow-path operation support */ 121 122 #define BNX2X_VFOP_FILTER_ADD_CNT_MAX 0x10000 123 124 /* VFOP operations states */ 125 enum bnx2x_vfop_qctor_state { 126 BNX2X_VFOP_QCTOR_INIT, 127 
BNX2X_VFOP_QCTOR_SETUP, 128 BNX2X_VFOP_QCTOR_INT_EN 129 }; 130 131 enum bnx2x_vfop_qdtor_state { 132 BNX2X_VFOP_QDTOR_HALT, 133 BNX2X_VFOP_QDTOR_TERMINATE, 134 BNX2X_VFOP_QDTOR_CFCDEL, 135 BNX2X_VFOP_QDTOR_DONE 136 }; 137 138 enum bnx2x_vfop_vlan_mac_state { 139 BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE, 140 BNX2X_VFOP_VLAN_MAC_CLEAR, 141 BNX2X_VFOP_VLAN_MAC_CHK_DONE, 142 BNX2X_VFOP_MAC_CONFIG_LIST, 143 BNX2X_VFOP_VLAN_CONFIG_LIST, 144 BNX2X_VFOP_VLAN_CONFIG_LIST_0 145 }; 146 147 enum bnx2x_vfop_qsetup_state { 148 BNX2X_VFOP_QSETUP_CTOR, 149 BNX2X_VFOP_QSETUP_VLAN0, 150 BNX2X_VFOP_QSETUP_DONE 151 }; 152 153 enum bnx2x_vfop_mcast_state { 154 BNX2X_VFOP_MCAST_DEL, 155 BNX2X_VFOP_MCAST_ADD, 156 BNX2X_VFOP_MCAST_CHK_DONE 157 }; 158 enum bnx2x_vfop_qflr_state { 159 BNX2X_VFOP_QFLR_CLR_VLAN, 160 BNX2X_VFOP_QFLR_CLR_MAC, 161 BNX2X_VFOP_QFLR_TERMINATE, 162 BNX2X_VFOP_QFLR_DONE 163 }; 164 165 enum bnx2x_vfop_flr_state { 166 BNX2X_VFOP_FLR_QUEUES, 167 BNX2X_VFOP_FLR_HW 168 }; 169 170 enum bnx2x_vfop_close_state { 171 BNX2X_VFOP_CLOSE_QUEUES, 172 BNX2X_VFOP_CLOSE_HW 173 }; 174 175 enum bnx2x_vfop_rxmode_state { 176 BNX2X_VFOP_RXMODE_CONFIG, 177 BNX2X_VFOP_RXMODE_DONE 178 }; 179 180 enum bnx2x_vfop_qteardown_state { 181 BNX2X_VFOP_QTEARDOWN_RXMODE, 182 BNX2X_VFOP_QTEARDOWN_CLR_VLAN, 183 BNX2X_VFOP_QTEARDOWN_CLR_MAC, 184 BNX2X_VFOP_QTEARDOWN_CLR_MCAST, 185 BNX2X_VFOP_QTEARDOWN_QDTOR, 186 BNX2X_VFOP_QTEARDOWN_DONE 187 }; 188 189 enum bnx2x_vfop_rss_state { 190 BNX2X_VFOP_RSS_CONFIG, 191 BNX2X_VFOP_RSS_DONE 192 }; 193 194 enum bnx2x_vfop_tpa_state { 195 BNX2X_VFOP_TPA_CONFIG, 196 BNX2X_VFOP_TPA_DONE 197 }; 198 199 #define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0) 200 201 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, 202 struct bnx2x_queue_init_params *init_params, 203 struct bnx2x_queue_setup_params *setup_params, 204 u16 q_idx, u16 sb_idx) 205 { 206 DP(BNX2X_MSG_IOV, 207 "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, 
traffic-type=%d", 208 vf->abs_vfid, 209 q_idx, 210 sb_idx, 211 init_params->tx.sb_cq_index, 212 init_params->tx.hc_rate, 213 setup_params->flags, 214 setup_params->txq_params.traffic_type); 215 } 216 217 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, 218 struct bnx2x_queue_init_params *init_params, 219 struct bnx2x_queue_setup_params *setup_params, 220 u16 q_idx, u16 sb_idx) 221 { 222 struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params; 223 224 DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n" 225 "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n", 226 vf->abs_vfid, 227 q_idx, 228 sb_idx, 229 init_params->rx.sb_cq_index, 230 init_params->rx.hc_rate, 231 setup_params->gen_params.mtu, 232 rxq_params->buf_sz, 233 rxq_params->sge_buf_sz, 234 rxq_params->max_sges_pkt, 235 rxq_params->tpa_agg_sz, 236 setup_params->flags, 237 rxq_params->drop_flags, 238 rxq_params->cache_line_log); 239 } 240 241 void bnx2x_vfop_qctor_prep(struct bnx2x *bp, 242 struct bnx2x_virtf *vf, 243 struct bnx2x_vf_queue *q, 244 struct bnx2x_vfop_qctor_params *p, 245 unsigned long q_type) 246 { 247 struct bnx2x_queue_init_params *init_p = &p->qstate.params.init; 248 struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup; 249 250 /* INIT */ 251 252 /* Enable host coalescing in the transition to INIT state */ 253 if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags)) 254 __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags); 255 256 if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags)) 257 __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags); 258 259 /* FW SB ID */ 260 init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx); 261 init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx); 262 263 /* context */ 264 init_p->cxts[0] = q->cxt; 265 266 /* SETUP */ 267 268 /* Setup-op general parameters */ 269 setup_p->gen_params.spcl_id = vf->sp_cl_id; 270 setup_p->gen_params.stat_id = vfq_stat_id(vf, q); 271 
272 /* Setup-op pause params: 273 * Nothing to do, the pause thresholds are set by default to 0 which 274 * effectively turns off the feature for this queue. We don't want 275 * one queue (VF) to interfering with another queue (another VF) 276 */ 277 if (vf->cfg_flags & VF_CFG_FW_FC) 278 BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n", 279 vf->abs_vfid); 280 /* Setup-op flags: 281 * collect statistics, zero statistics, local-switching, security, 282 * OV for Flex10, RSS and MCAST for leading 283 */ 284 if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags)) 285 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags); 286 287 /* for VFs, enable tx switching, bd coherency, and mac address 288 * anti-spoofing 289 */ 290 __set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags); 291 __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags); 292 __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags); 293 294 /* Setup-op rx parameters */ 295 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) { 296 struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params; 297 298 rxq_p->cl_qzone_id = vfq_qzone_id(vf, q); 299 rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx); 300 rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid); 301 302 if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags)) 303 rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES; 304 } 305 306 /* Setup-op tx parameters */ 307 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) { 308 setup_p->txq_params.tss_leading_cl_id = vf->leading_rss; 309 setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx); 310 } 311 } 312 313 /* VFOP queue construction */ 314 static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf) 315 { 316 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 317 struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor; 318 struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate; 319 enum bnx2x_vfop_qctor_state state = vfop->state; 320 321 bnx2x_vfop_reset_wq(vf); 322 323 if (vfop->rc < 0) 324 goto op_err; 325 326 DP(BNX2X_MSG_IOV, "vf[%d] 
STATE: %d\n", vf->abs_vfid, state); 327 328 switch (state) { 329 case BNX2X_VFOP_QCTOR_INIT: 330 331 /* has this queue already been opened? */ 332 if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == 333 BNX2X_Q_LOGICAL_STATE_ACTIVE) { 334 DP(BNX2X_MSG_IOV, 335 "Entered qctor but queue was already up. Aborting gracefully\n"); 336 goto op_done; 337 } 338 339 /* next state */ 340 vfop->state = BNX2X_VFOP_QCTOR_SETUP; 341 342 q_params->cmd = BNX2X_Q_CMD_INIT; 343 vfop->rc = bnx2x_queue_state_change(bp, q_params); 344 345 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); 346 347 case BNX2X_VFOP_QCTOR_SETUP: 348 /* next state */ 349 vfop->state = BNX2X_VFOP_QCTOR_INT_EN; 350 351 /* copy pre-prepared setup params to the queue-state params */ 352 vfop->op_p->qctor.qstate.params.setup = 353 vfop->op_p->qctor.prep_qsetup; 354 355 q_params->cmd = BNX2X_Q_CMD_SETUP; 356 vfop->rc = bnx2x_queue_state_change(bp, q_params); 357 358 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); 359 360 case BNX2X_VFOP_QCTOR_INT_EN: 361 362 /* enable interrupts */ 363 bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx), 364 USTORM_ID, 0, IGU_INT_ENABLE, 0); 365 goto op_done; 366 default: 367 bnx2x_vfop_default(state); 368 } 369 op_err: 370 BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n", 371 vf->abs_vfid, args->qid, q_params->cmd, vfop->rc); 372 op_done: 373 bnx2x_vfop_end(bp, vf, vfop); 374 op_pending: 375 return; 376 } 377 378 static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp, 379 struct bnx2x_virtf *vf, 380 struct bnx2x_vfop_cmd *cmd, 381 int qid) 382 { 383 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 384 385 if (vfop) { 386 vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); 387 388 vfop->args.qctor.qid = qid; 389 vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx); 390 391 bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT, 392 bnx2x_vfop_qctor, cmd->done); 393 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor, 394 cmd->block); 395 } 396 return -ENOMEM; 397 } 398 399 /* VFOP queue 
destruction */ 400 static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf) 401 { 402 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 403 struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor; 404 struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate; 405 enum bnx2x_vfop_qdtor_state state = vfop->state; 406 407 bnx2x_vfop_reset_wq(vf); 408 409 if (vfop->rc < 0) 410 goto op_err; 411 412 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); 413 414 switch (state) { 415 case BNX2X_VFOP_QDTOR_HALT: 416 417 /* has this queue already been stopped? */ 418 if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == 419 BNX2X_Q_LOGICAL_STATE_STOPPED) { 420 DP(BNX2X_MSG_IOV, 421 "Entered qdtor but queue was already stopped. Aborting gracefully\n"); 422 423 /* next state */ 424 vfop->state = BNX2X_VFOP_QDTOR_DONE; 425 426 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); 427 } 428 429 /* next state */ 430 vfop->state = BNX2X_VFOP_QDTOR_TERMINATE; 431 432 q_params->cmd = BNX2X_Q_CMD_HALT; 433 vfop->rc = bnx2x_queue_state_change(bp, q_params); 434 435 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); 436 437 case BNX2X_VFOP_QDTOR_TERMINATE: 438 /* next state */ 439 vfop->state = BNX2X_VFOP_QDTOR_CFCDEL; 440 441 q_params->cmd = BNX2X_Q_CMD_TERMINATE; 442 vfop->rc = bnx2x_queue_state_change(bp, q_params); 443 444 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); 445 446 case BNX2X_VFOP_QDTOR_CFCDEL: 447 /* next state */ 448 vfop->state = BNX2X_VFOP_QDTOR_DONE; 449 450 q_params->cmd = BNX2X_Q_CMD_CFC_DEL; 451 vfop->rc = bnx2x_queue_state_change(bp, q_params); 452 453 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); 454 op_err: 455 BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n", 456 vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc); 457 op_done: 458 case BNX2X_VFOP_QDTOR_DONE: 459 /* invalidate the context */ 460 if (qdtor->cxt) { 461 qdtor->cxt->ustorm_ag_context.cdu_usage = 0; 462 qdtor->cxt->xstorm_ag_context.cdu_reserved = 0; 463 } 464 bnx2x_vfop_end(bp, 
vf, vfop); 465 return; 466 default: 467 bnx2x_vfop_default(state); 468 } 469 op_pending: 470 return; 471 } 472 473 static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp, 474 struct bnx2x_virtf *vf, 475 struct bnx2x_vfop_cmd *cmd, 476 int qid) 477 { 478 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 479 480 if (vfop) { 481 struct bnx2x_queue_state_params *qstate = 482 &vf->op_params.qctor.qstate; 483 484 memset(qstate, 0, sizeof(*qstate)); 485 qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj); 486 487 vfop->args.qdtor.qid = qid; 488 vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt); 489 490 bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT, 491 bnx2x_vfop_qdtor, cmd->done); 492 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor, 493 cmd->block); 494 } else { 495 BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid); 496 return -ENOMEM; 497 } 498 } 499 500 static void 501 bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid) 502 { 503 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 504 if (vf) { 505 /* the first igu entry belonging to VFs of this PF */ 506 if (!BP_VFDB(bp)->first_vf_igu_entry) 507 BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id; 508 509 /* the first igu entry belonging to this VF */ 510 if (!vf_sb_count(vf)) 511 vf->igu_base_id = igu_sb_id; 512 513 ++vf_sb_count(vf); 514 ++vf->sb_count; 515 } 516 BP_VFDB(bp)->vf_sbs_pool++; 517 } 518 519 /* VFOP MAC/VLAN helpers */ 520 static inline void bnx2x_vfop_credit(struct bnx2x *bp, 521 struct bnx2x_vfop *vfop, 522 struct bnx2x_vlan_mac_obj *obj) 523 { 524 struct bnx2x_vfop_args_filters *args = &vfop->args.filters; 525 526 /* update credit only if there is no error 527 * and a valid credit counter 528 */ 529 if (!vfop->rc && args->credit) { 530 struct list_head *pos; 531 int read_lock; 532 int cnt = 0; 533 534 read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj); 535 if (read_lock) 536 DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n"); 537 538 list_for_each(pos, &obj->head) 539 cnt++; 
540 541 if (!read_lock) 542 bnx2x_vlan_mac_h_read_unlock(bp, obj); 543 544 atomic_set(args->credit, cnt); 545 } 546 } 547 548 static int bnx2x_vfop_set_user_req(struct bnx2x *bp, 549 struct bnx2x_vfop_filter *pos, 550 struct bnx2x_vlan_mac_data *user_req) 551 { 552 user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD : 553 BNX2X_VLAN_MAC_DEL; 554 555 switch (pos->type) { 556 case BNX2X_VFOP_FILTER_MAC: 557 memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN); 558 break; 559 case BNX2X_VFOP_FILTER_VLAN: 560 user_req->u.vlan.vlan = pos->vid; 561 break; 562 default: 563 BNX2X_ERR("Invalid filter type, skipping\n"); 564 return 1; 565 } 566 return 0; 567 } 568 569 static int bnx2x_vfop_config_list(struct bnx2x *bp, 570 struct bnx2x_vfop_filters *filters, 571 struct bnx2x_vlan_mac_ramrod_params *vlan_mac) 572 { 573 struct bnx2x_vfop_filter *pos, *tmp; 574 struct list_head rollback_list, *filters_list = &filters->head; 575 struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req; 576 int rc = 0, cnt = 0; 577 578 INIT_LIST_HEAD(&rollback_list); 579 580 list_for_each_entry_safe(pos, tmp, filters_list, link) { 581 if (bnx2x_vfop_set_user_req(bp, pos, user_req)) 582 continue; 583 584 rc = bnx2x_config_vlan_mac(bp, vlan_mac); 585 if (rc >= 0) { 586 cnt += pos->add ? 1 : -1; 587 list_move(&pos->link, &rollback_list); 588 rc = 0; 589 } else if (rc == -EEXIST) { 590 rc = 0; 591 } else { 592 BNX2X_ERR("Failed to add a new vlan_mac command\n"); 593 break; 594 } 595 } 596 597 /* rollback if error or too many rules added */ 598 if (rc || cnt > filters->add_cnt) { 599 BNX2X_ERR("error or too many rules added. 
Performing rollback\n"); 600 list_for_each_entry_safe(pos, tmp, &rollback_list, link) { 601 pos->add = !pos->add; /* reverse op */ 602 bnx2x_vfop_set_user_req(bp, pos, user_req); 603 bnx2x_config_vlan_mac(bp, vlan_mac); 604 list_del(&pos->link); 605 } 606 cnt = 0; 607 if (!rc) 608 rc = -EINVAL; 609 } 610 filters->add_cnt = cnt; 611 return rc; 612 } 613 614 /* VFOP set VLAN/MAC */ 615 static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf) 616 { 617 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 618 struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac; 619 struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj; 620 struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter; 621 622 enum bnx2x_vfop_vlan_mac_state state = vfop->state; 623 624 if (vfop->rc < 0) 625 goto op_err; 626 627 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); 628 629 bnx2x_vfop_reset_wq(vf); 630 631 switch (state) { 632 case BNX2X_VFOP_VLAN_MAC_CLEAR: 633 /* next state */ 634 vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; 635 636 /* do delete */ 637 vfop->rc = obj->delete_all(bp, obj, 638 &vlan_mac->user_req.vlan_mac_flags, 639 &vlan_mac->ramrod_flags); 640 641 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); 642 643 case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE: 644 /* next state */ 645 vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; 646 647 /* do config */ 648 vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); 649 if (vfop->rc == -EEXIST) 650 vfop->rc = 0; 651 652 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); 653 654 case BNX2X_VFOP_VLAN_MAC_CHK_DONE: 655 vfop->rc = !!obj->raw.check_pending(&obj->raw); 656 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); 657 658 case BNX2X_VFOP_MAC_CONFIG_LIST: 659 /* next state */ 660 vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; 661 662 /* do list config */ 663 vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); 664 if (vfop->rc) 665 goto op_err; 666 667 set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); 668 vfop->rc = 
bnx2x_config_vlan_mac(bp, vlan_mac); 669 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); 670 671 case BNX2X_VFOP_VLAN_CONFIG_LIST: 672 /* next state */ 673 vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; 674 675 /* do list config */ 676 vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); 677 if (!vfop->rc) { 678 set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); 679 vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); 680 } 681 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); 682 683 default: 684 bnx2x_vfop_default(state); 685 } 686 op_err: 687 BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc); 688 op_done: 689 kfree(filters); 690 bnx2x_vfop_credit(bp, vfop, obj); 691 bnx2x_vfop_end(bp, vf, vfop); 692 op_pending: 693 return; 694 } 695 696 struct bnx2x_vfop_vlan_mac_flags { 697 bool drv_only; 698 bool dont_consume; 699 bool single_cmd; 700 bool add; 701 }; 702 703 static void 704 bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod, 705 struct bnx2x_vfop_vlan_mac_flags *flags) 706 { 707 struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req; 708 709 memset(ramrod, 0, sizeof(*ramrod)); 710 711 /* ramrod flags */ 712 if (flags->drv_only) 713 set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags); 714 if (flags->single_cmd) 715 set_bit(RAMROD_EXEC, &ramrod->ramrod_flags); 716 717 /* mac_vlan flags */ 718 if (flags->dont_consume) 719 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags); 720 721 /* cmd */ 722 ureq->cmd = flags->add ? 
BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL; 723 } 724 725 static inline void 726 bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod, 727 struct bnx2x_vfop_vlan_mac_flags *flags) 728 { 729 bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags); 730 set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags); 731 } 732 733 static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp, 734 struct bnx2x_virtf *vf, 735 struct bnx2x_vfop_cmd *cmd, 736 int qid, bool drv_only) 737 { 738 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 739 740 if (vfop) { 741 struct bnx2x_vfop_args_filters filters = { 742 .multi_filter = NULL, /* single */ 743 .credit = NULL, /* consume credit */ 744 }; 745 struct bnx2x_vfop_vlan_mac_flags flags = { 746 .drv_only = drv_only, 747 .dont_consume = (filters.credit != NULL), 748 .single_cmd = true, 749 .add = false /* don't care */, 750 }; 751 struct bnx2x_vlan_mac_ramrod_params *ramrod = 752 &vf->op_params.vlan_mac; 753 754 /* set ramrod params */ 755 bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); 756 757 /* set object */ 758 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); 759 760 /* set extra args */ 761 vfop->args.filters = filters; 762 763 bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR, 764 bnx2x_vfop_vlan_mac, cmd->done); 765 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, 766 cmd->block); 767 } 768 return -ENOMEM; 769 } 770 771 int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp, 772 struct bnx2x_virtf *vf, 773 struct bnx2x_vfop_cmd *cmd, 774 struct bnx2x_vfop_filters *macs, 775 int qid, bool drv_only) 776 { 777 struct bnx2x_vfop *vfop; 778 779 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) 780 return -EINVAL; 781 782 vfop = bnx2x_vfop_add(bp, vf); 783 if (vfop) { 784 struct bnx2x_vfop_args_filters filters = { 785 .multi_filter = macs, 786 .credit = NULL, /* consume credit */ 787 }; 788 struct bnx2x_vfop_vlan_mac_flags flags = { 789 .drv_only = drv_only, 790 .dont_consume = (filters.credit != NULL), 791 .single_cmd = false, 792 .add = 
false, /* don't care since only the items in the 793 * filters list affect the sp operation, 794 * not the list itself 795 */ 796 }; 797 struct bnx2x_vlan_mac_ramrod_params *ramrod = 798 &vf->op_params.vlan_mac; 799 800 /* set ramrod params */ 801 bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); 802 803 /* set object */ 804 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); 805 806 /* set extra args */ 807 filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX; 808 vfop->args.filters = filters; 809 810 bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST, 811 bnx2x_vfop_vlan_mac, cmd->done); 812 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, 813 cmd->block); 814 } 815 return -ENOMEM; 816 } 817 818 static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp, 819 struct bnx2x_virtf *vf, 820 struct bnx2x_vfop_cmd *cmd, 821 int qid, u16 vid, bool add) 822 { 823 struct bnx2x_vfop *vfop; 824 825 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) 826 return -EINVAL; 827 828 vfop = bnx2x_vfop_add(bp, vf); 829 if (vfop) { 830 struct bnx2x_vfop_args_filters filters = { 831 .multi_filter = NULL, /* single command */ 832 .credit = &bnx2x_vfq(vf, qid, vlan_count), 833 }; 834 struct bnx2x_vfop_vlan_mac_flags flags = { 835 .drv_only = false, 836 .dont_consume = (filters.credit != NULL), 837 .single_cmd = true, 838 .add = add, 839 }; 840 struct bnx2x_vlan_mac_ramrod_params *ramrod = 841 &vf->op_params.vlan_mac; 842 843 /* set ramrod params */ 844 bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); 845 ramrod->user_req.u.vlan.vlan = vid; 846 847 /* set object */ 848 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); 849 850 /* set extra args */ 851 vfop->args.filters = filters; 852 853 bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE, 854 bnx2x_vfop_vlan_mac, cmd->done); 855 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, 856 cmd->block); 857 } 858 return -ENOMEM; 859 } 860 861 static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp, 862 struct bnx2x_virtf *vf, 863 struct 
bnx2x_vfop_cmd *cmd, 864 int qid, bool drv_only) 865 { 866 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 867 868 if (vfop) { 869 struct bnx2x_vfop_args_filters filters = { 870 .multi_filter = NULL, /* single command */ 871 .credit = &bnx2x_vfq(vf, qid, vlan_count), 872 }; 873 struct bnx2x_vfop_vlan_mac_flags flags = { 874 .drv_only = drv_only, 875 .dont_consume = (filters.credit != NULL), 876 .single_cmd = true, 877 .add = false, /* don't care */ 878 }; 879 struct bnx2x_vlan_mac_ramrod_params *ramrod = 880 &vf->op_params.vlan_mac; 881 882 /* set ramrod params */ 883 bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); 884 885 /* set object */ 886 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); 887 888 /* set extra args */ 889 vfop->args.filters = filters; 890 891 bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR, 892 bnx2x_vfop_vlan_mac, cmd->done); 893 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, 894 cmd->block); 895 } 896 return -ENOMEM; 897 } 898 899 int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp, 900 struct bnx2x_virtf *vf, 901 struct bnx2x_vfop_cmd *cmd, 902 struct bnx2x_vfop_filters *vlans, 903 int qid, bool drv_only) 904 { 905 struct bnx2x_vfop *vfop; 906 907 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) 908 return -EINVAL; 909 910 vfop = bnx2x_vfop_add(bp, vf); 911 if (vfop) { 912 struct bnx2x_vfop_args_filters filters = { 913 .multi_filter = vlans, 914 .credit = &bnx2x_vfq(vf, qid, vlan_count), 915 }; 916 struct bnx2x_vfop_vlan_mac_flags flags = { 917 .drv_only = drv_only, 918 .dont_consume = (filters.credit != NULL), 919 .single_cmd = false, 920 .add = false, /* don't care */ 921 }; 922 struct bnx2x_vlan_mac_ramrod_params *ramrod = 923 &vf->op_params.vlan_mac; 924 925 /* set ramrod params */ 926 bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); 927 928 /* set object */ 929 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); 930 931 /* set extra args */ 932 filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) - 933 atomic_read(filters.credit); 
934 935 vfop->args.filters = filters; 936 937 bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST, 938 bnx2x_vfop_vlan_mac, cmd->done); 939 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, 940 cmd->block); 941 } 942 return -ENOMEM; 943 } 944 945 /* VFOP queue setup (queue constructor + set vlan 0) */ 946 static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf) 947 { 948 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 949 int qid = vfop->args.qctor.qid; 950 enum bnx2x_vfop_qsetup_state state = vfop->state; 951 struct bnx2x_vfop_cmd cmd = { 952 .done = bnx2x_vfop_qsetup, 953 .block = false, 954 }; 955 956 if (vfop->rc < 0) 957 goto op_err; 958 959 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); 960 961 switch (state) { 962 case BNX2X_VFOP_QSETUP_CTOR: 963 /* init the queue ctor command */ 964 vfop->state = BNX2X_VFOP_QSETUP_VLAN0; 965 vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid); 966 if (vfop->rc) 967 goto op_err; 968 return; 969 970 case BNX2X_VFOP_QSETUP_VLAN0: 971 /* skip if non-leading or FPGA/EMU*/ 972 if (qid) 973 goto op_done; 974 975 /* init the queue set-vlan command (for vlan 0) */ 976 vfop->state = BNX2X_VFOP_QSETUP_DONE; 977 vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true); 978 if (vfop->rc) 979 goto op_err; 980 return; 981 op_err: 982 BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc); 983 op_done: 984 case BNX2X_VFOP_QSETUP_DONE: 985 vf->cfg_flags |= VF_CFG_VLAN; 986 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN, 987 BNX2X_MSG_IOV); 988 bnx2x_vfop_end(bp, vf, vfop); 989 return; 990 default: 991 bnx2x_vfop_default(state); 992 } 993 } 994 995 int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp, 996 struct bnx2x_virtf *vf, 997 struct bnx2x_vfop_cmd *cmd, 998 int qid) 999 { 1000 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 1001 1002 if (vfop) { 1003 vfop->args.qctor.qid = qid; 1004 1005 bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR, 1006 bnx2x_vfop_qsetup, cmd->done); 1007 return 
bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup, 1008 cmd->block); 1009 } 1010 return -ENOMEM; 1011 } 1012 1013 /* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */ 1014 static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf) 1015 { 1016 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 1017 int qid = vfop->args.qx.qid; 1018 enum bnx2x_vfop_qflr_state state = vfop->state; 1019 struct bnx2x_queue_state_params *qstate; 1020 struct bnx2x_vfop_cmd cmd; 1021 1022 bnx2x_vfop_reset_wq(vf); 1023 1024 if (vfop->rc < 0) 1025 goto op_err; 1026 1027 DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state); 1028 1029 cmd.done = bnx2x_vfop_qflr; 1030 cmd.block = false; 1031 1032 switch (state) { 1033 case BNX2X_VFOP_QFLR_CLR_VLAN: 1034 /* vlan-clear-all: driver-only, don't consume credit */ 1035 vfop->state = BNX2X_VFOP_QFLR_CLR_MAC; 1036 1037 /* the vlan_mac vfop will re-schedule us */ 1038 vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true); 1039 if (vfop->rc) 1040 goto op_err; 1041 return; 1042 1043 case BNX2X_VFOP_QFLR_CLR_MAC: 1044 /* mac-clear-all: driver only consume credit */ 1045 vfop->state = BNX2X_VFOP_QFLR_TERMINATE; 1046 /* the vlan_mac vfop will re-schedule us */ 1047 vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true); 1048 if (vfop->rc) 1049 goto op_err; 1050 return; 1051 1052 case BNX2X_VFOP_QFLR_TERMINATE: 1053 qstate = &vfop->op_p->qctor.qstate; 1054 memset(qstate , 0, sizeof(*qstate)); 1055 qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj); 1056 vfop->state = BNX2X_VFOP_QFLR_DONE; 1057 1058 DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n", 1059 vf->abs_vfid, qstate->q_obj->state); 1060 1061 if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) { 1062 qstate->q_obj->state = BNX2X_Q_STATE_STOPPED; 1063 qstate->cmd = BNX2X_Q_CMD_TERMINATE; 1064 vfop->rc = bnx2x_queue_state_change(bp, qstate); 1065 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND); 1066 } else { 1067 goto op_done; 1068 } 1069 1070 op_err: 
1071 BNX2X_ERR("QFLR[%d:%d] error: rc %d\n", 1072 vf->abs_vfid, qid, vfop->rc); 1073 op_done: 1074 case BNX2X_VFOP_QFLR_DONE: 1075 bnx2x_vfop_end(bp, vf, vfop); 1076 return; 1077 default: 1078 bnx2x_vfop_default(state); 1079 } 1080 op_pending: 1081 return; 1082 } 1083 1084 static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp, 1085 struct bnx2x_virtf *vf, 1086 struct bnx2x_vfop_cmd *cmd, 1087 int qid) 1088 { 1089 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 1090 1091 if (vfop) { 1092 vfop->args.qx.qid = qid; 1093 if ((qid == LEADING_IDX) && 1094 bnx2x_validate_vf_sp_objs(bp, vf, false)) 1095 bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN, 1096 bnx2x_vfop_qflr, cmd->done); 1097 else 1098 bnx2x_vfop_opset(BNX2X_VFOP_QFLR_TERMINATE, 1099 bnx2x_vfop_qflr, cmd->done); 1100 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr, 1101 cmd->block); 1102 } 1103 return -ENOMEM; 1104 } 1105 1106 /* VFOP multi-casts */ 1107 static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf) 1108 { 1109 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 1110 struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast; 1111 struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw; 1112 struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list; 1113 enum bnx2x_vfop_mcast_state state = vfop->state; 1114 int i; 1115 1116 bnx2x_vfop_reset_wq(vf); 1117 1118 if (vfop->rc < 0) 1119 goto op_err; 1120 1121 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); 1122 1123 switch (state) { 1124 case BNX2X_VFOP_MCAST_DEL: 1125 /* clear existing mcasts */ 1126 vfop->state = (args->mc_num) ? 
BNX2X_VFOP_MCAST_ADD
			      : BNX2X_VFOP_MCAST_CHK_DONE;
		/* the DEL ramrod removes the previously-configured list, so
		 * it is sized by the old mcast_list_len; the new length is
		 * recorded on the vf for the next reconfiguration
		 */
		mcast->mcast_list_len = vf->mcast_list_len;
		vf->mcast_list_len = args->mc_num;
		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_MCAST_ADD:
		if (raw->check_pending(raw))
			goto op_pending;

		/* update mcast list on the ramrod params */
		INIT_LIST_HEAD(&mcast->mcast_list);
		for (i = 0; i < args->mc_num; i++)
			list_add_tail(&(args->mc[i].link),
				      &mcast->mcast_list);
		mcast->mcast_list_len = args->mc_num;

		/* add new mcasts */
		vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
		vfop->rc = bnx2x_config_mcast(bp, mcast,
					      BNX2X_MCAST_CMD_ADD);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MCAST_CHK_DONE:
		vfop->rc = raw->check_pending(raw) ? 1 : 0;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
op_done:
	/* the mc array is owned by this vfop - release it on completion */
	kfree(args->mc);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

/* Build and launch a multicast reconfiguration vfop. A private copy of
 * the mcast list is allocated here; it is freed by bnx2x_vfop_mcast()
 * on the op_done/op_err path (or right here when vfop_add fails).
 */
int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 bnx2x_mac_addr_t *mcasts,
			 int mcast_num, bool drv_only)
{
	struct bnx2x_vfop *vfop = NULL;
	size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
	struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
						   NULL;

	if (!mc_sz || mc) {
		vfop = bnx2x_vfop_add(bp, vf);
		if (vfop) {
			int i;
			struct bnx2x_mcast_ramrod_params *ramrod =
				&vf->op_params.mcast;

			/* set ramrod params */
			memset(ramrod, 0, sizeof(*ramrod));
			ramrod->mcast_obj = &vf->mcast_obj;
			if (drv_only)
				set_bit(RAMROD_DRV_CLR_ONLY,
					&ramrod->ramrod_flags);

			/* copy mcasts pointers */
			vfop->args.mc_list.mc_num = mcast_num;
			vfop->args.mc_list.mc = mc;
			for (i = 0; i < mcast_num; i++)
				mc[i].mac = mcasts[i];

			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
					 bnx2x_vfop_mcast, cmd->done);
			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
						     cmd->block);
		} else {
			kfree(mc);
		}
	}
	return -ENOMEM;
}

/* VFOP rx-mode */
static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
	enum bnx2x_vfop_rxmode_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_RXMODE_CONFIG:
		/* next state */
		vfop->state = BNX2X_VFOP_RXMODE_DONE;

		/* record the accept flags in vfdb so hypervisor can modify them
		 * if necessary
		 */
		bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
			ramrod->rx_accept_flags;
		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
		BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
op_done:
	case BNX2X_VFOP_RXMODE_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

static void bnx2x_vf_prep_rx_mode(struct
bnx2x *bp, u8 qid,
				  struct bnx2x_rx_mode_ramrod_params *ramrod,
				  struct bnx2x_virtf *vf,
				  unsigned long accept_flags)
{
	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);

	memset(ramrod, 0, sizeof(*ramrod));
	ramrod->cid = vfq->cid;
	ramrod->cl_id = vfq_cl_id(vf, vfq);
	ramrod->rx_mode_obj = &bp->rx_mode_obj;
	ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
	/* the same accept mask is applied to both the rx and tx sides */
	ramrod->rx_accept_flags = accept_flags;
	ramrod->tx_accept_flags = accept_flags;
	ramrod->pstate = &vf->filter_state;
	ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	set_bit(RAMROD_RX, &ramrod->ramrod_flags);
	set_bit(RAMROD_TX, &ramrod->ramrod_flags);

	ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
}

int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid, unsigned long accept_flags)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_rx_mode_ramrod_params *ramrod =
			&vf->op_params.rx_mode;

		bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);

		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
				 bnx2x_vfop_rxmode, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
 * queue destructor)
 */
static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qteardown_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	/* each sub-operation below re-enters this function via cmd.done */
	cmd.done = bnx2x_vfop_qdown;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QTEARDOWN_RXMODE:
		/* Drop all */
		if (bnx2x_validate_vf_sp_objs(bp, vf, true))
			vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
		else
			vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
		vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
		/* vlan-clear-all: don't consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
		/* mac-clear-all: consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MCAST;
		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_MCAST:
		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_QDTOR:
		/* run the queue destruction flow */
		DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
		vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
		DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
		vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
		DP(BNX2X_MSG_IOV, "returned from cmd\n");
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);

	case BNX2X_VFOP_QTEARDOWN_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}

int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	/* for non
leading queues skip directly to qdown state */
	if (vfop) {
		vfop->args.qx.qid = qid;
		bnx2x_vfop_opset(qid == LEADING_IDX ?
				 BNX2X_VFOP_QTEARDOWN_RXMODE :
				 BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
				 cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
					     cmd->block);
	}

	return -ENOMEM;
}

/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

/* Clear the 'was-error' indication for this VF in the PGLUE block.
 * The indications are tracked 32 VFs per register, so select the
 * register by group (>> 5) and the bit by abs_vfid & 0x1f.
 */
static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}

/* Reset the VF's view of the IGU: clear masks/PBA, (re)enable the VF in
 * IGU_REG_VF_CONFIGURATION, and clear every status block's producer
 * memory and state machine. Register writes are done while pretending
 * to be the VF.
 */
static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;
	u32 val;

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	if (vf->cfg_flags & VF_CFG_INT_SIMD)
		val |= IGU_VF_CONF_SINGLE_ISR_EN;
	val &= ~IGU_VF_CONF_PARENT_MASK;
	/* parent PF of this VF */
	val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	DP(BNX2X_MSG_IOV,
	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
	   vf->abs_vfid, val);

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
				       false /* VF */);

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
				    IGU_INT_DISABLE, 1);
	}
}

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors*/
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static void bnx2x_vf_enable_traffic(struct bnx2x *bp,
struct bnx2x_virtf *vf) 1496 { 1497 /* Reset vf in IGU interrupts are still disabled */ 1498 bnx2x_vf_igu_reset(bp, vf); 1499 1500 /* pretend to enable the vf with the PBF */ 1501 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); 1502 REG_WR(bp, PBF_REG_DISABLE_VF, 0); 1503 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 1504 } 1505 1506 static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid) 1507 { 1508 struct pci_dev *dev; 1509 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 1510 1511 if (!vf) 1512 return false; 1513 1514 dev = pci_get_bus_and_slot(vf->bus, vf->devfn); 1515 if (dev) 1516 return bnx2x_is_pcie_pending(dev); 1517 return false; 1518 } 1519 1520 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) 1521 { 1522 /* Verify no pending pci transactions */ 1523 if (bnx2x_vf_is_pcie_pending(bp, abs_vfid)) 1524 BNX2X_ERR("PCIE Transactions still pending\n"); 1525 1526 return 0; 1527 } 1528 1529 /* must be called after the number of PF queues and the number of VFs are 1530 * both known 1531 */ 1532 static void 1533 bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) 1534 { 1535 struct vf_pf_resc_request *resc = &vf->alloc_resc; 1536 u16 vlan_count = 0; 1537 1538 /* will be set only during VF-ACQUIRE */ 1539 resc->num_rxqs = 0; 1540 resc->num_txqs = 0; 1541 1542 /* no credit calculations for macs (just yet) */ 1543 resc->num_mac_filters = 1; 1544 1545 /* divvy up vlan rules */ 1546 vlan_count = bp->vlans_pool.check(&bp->vlans_pool); 1547 vlan_count = 1 << ilog2(vlan_count); 1548 resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp); 1549 1550 /* no real limitation */ 1551 resc->num_mc_filters = 0; 1552 1553 /* num_sbs already set */ 1554 resc->num_sbs = vf->sb_count; 1555 } 1556 1557 /* FLR routines: */ 1558 static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) 1559 { 1560 /* reset the state variables */ 1561 bnx2x_iov_static_resc(bp, vf); 1562 vf->state = VF_FREE; 1563 } 1564 1565 static void 
bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	/* DQ usage counter */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
					"DQ VF usage counter timed out",
					poll_cnt);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* FW cleanup command - poll for the results */
	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
				   poll_cnt))
		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

	/* verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);
}

/* VF FLR state machine: FLR each rx queue in turn, drop multicasts,
 * then run the HW/FW final cleanup and release the VF's resources.
 */
static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
	enum bnx2x_vfop_flr_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_flr,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_FLR_QUEUES:
		/* the cleanup operations are valid if and only if the VF
		 * was first acquired.
		 */
		if (++(qx->qid) < vf_rxq_count(vf)) {
			/* qid was seeded with -1 by bnx2x_vfop_flr_cmd() */
			vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
						       qx->qid);
			if (vfop->rc)
				goto op_err;
			return;
		}
		/* remove multicasts */
		vfop->state = BNX2X_VFOP_FLR_HW;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
						0, true);
		if (vfop->rc)
			goto op_err;
		return;
	case BNX2X_VFOP_FLR_HW:

		/* dispatch final cleanup and wait for HW queues to flush */
		bnx2x_vf_flr_clnup_hw(bp, vf);

		/* release VF resources */
		bnx2x_vf_free_resc(bp, vf);

		/* re-open the mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);

		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	vf->flr_clnup_stage = VF_FLR_ACK;
	bnx2x_vfop_end(bp, vf, vfop);
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
}

static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
			      struct bnx2x_virtf *vf,
			      vfop_handler_t done)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		vfop->args.qx.qid = -1; /* loop */
		bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
				 bnx2x_vfop_flr, done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
	}
	return -ENOMEM;
}

/* Walk the VF table starting after prev_vf and run the FLR state
 * machine on the next VF marked for cleanup; this function is also the
 * 'done' callback of that state machine, so the walk resumes here when
 * each VF finishes. When no VFs remain, ack the FLR bits to the MCP.
 */
static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
{
	int i = prev_vf ? prev_vf->index + 1 : 0;
	struct bnx2x_virtf *vf;

	/* find next VF to cleanup */
next_vf_to_clean:
	for (;
	     i < BNX2X_NR_VIRTFN(bp) &&
	     (bnx2x_vf(bp, i, state) != VF_RESET ||
	      bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
	     i++)
		;

	DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
	   BNX2X_NR_VIRTFN(bp));

	if (i < BNX2X_NR_VIRTFN(bp)) {
		vf = BP_VF(bp, i);

		/* lock the vf pf channel */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

		/* invoke the VF FLR SM */
		if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
			BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
				  vf->abs_vfid);

			/* mark the VF to be ACKED and continue */
			vf->flr_clnup_stage = VF_FLR_ACK;
			goto next_vf_to_clean;
		}
		return;
	}

	/* we are done, update vf records */
	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);

		if (vf->flr_clnup_stage != VF_FLR_ACK)
			continue;

		vf->flr_clnup_stage = VF_FLR_EPILOG;
	}

	/* Acknowledge the handled VFs.
	 * we are acknowledge all the vfs which an flr was requested for, even
	 * if amongst them there are such that we never opened, since the mcp
	 * will interrupt us immediately again if we only ack some of the bits,
	 * resulting in an endless loop. This can happen for example in KVM
	 * where an 'all ones' flr request is sometimes given by hyper visor
	 */
	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
			  bp->vfdb->flrd_vfs[i]);

	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

	/* clear the acked bits - better yet if the MCP implemented
	 * write to clear semantics
	 */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}

void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
	int i;

	/* Read FLR'd VFs */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

	DP(BNX2X_MSG_MCP,
	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);
		u32 reset = 0;

		/* each dword of flrd_vfs covers 32 absolute VF ids */
		if (vf->abs_vfid < 32)
			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
		else
			reset = bp->vfdb->flrd_vfs[1] &
				(1 << (vf->abs_vfid - 32));

		if (reset) {
			/* set as reset and ready for cleanup */
			vf->state = VF_RESET;
			vf->flr_clnup_stage = VF_FLR_CLN;

			DP(BNX2X_MSG_IOV,
			   "Initiating Final cleanup for VF %d\n",
			   vf->abs_vfid);
		}
	}

	/* do the FLR cleanup for all marked VFs*/
	bnx2x_vf_flr_clnup(bp, NULL);
}

/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflect the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set VFs starting CID.
If its > 0 the preceding CIDs are belong to
	 * the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match
	 * the Pf doorbell size although the 2 are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the VF doorbell threshold */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}

void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	/* when SR-IOV is present, turn off DMAE backward compatibility */
	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}

/* Derive a VF's PCI bus number from the PF's devfn plus the SR-IOV
 * VF offset/stride (routing-ID arithmetic, see PCI SR-IOV spec).
 */
static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

/* Derive a VF's devfn - same routing-ID arithmetic as bnx2x_vf_bus() */
static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}

/* Compute this VF's slice of each SR-IOV BAR. BARs are walked in pairs
 * (i += 2) - presumably because they are 64-bit BARs; TODO confirm.
 */
static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		/* the IOV resource covers all VFs - each gets an equal cut */
		size /= iov->total;
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}

static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}

/* Scan the IGU CAM and record, for each VF belonging to this PF, which
 * IGU status blocks it owns. Relies on PF entries preceding their VFs'
 * entries in the CAM (current_pf tracks the most recent PF seen).
 */
static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid, current_pf = 0;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (fid & IGU_FID_ENCODE_IS_PF)
			current_pf = fid & IGU_FID_PF_NUM_MASK;
		else if (current_pf == BP_FUNC(bp))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));
		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ?
(fid & IGU_FID_PF_NUM_MASK) :
		    (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
	DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
}

/* Free the VF database; safe to call on a partially constructed vfdb
 * since all members were kzalloc'ed and kfree(NULL) is a no-op.
 */
static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}

/* Read the SR-IOV extended capability registers out of PCI config space
 * into the driver-private bnx2x_sriov structure.
 */
static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}

/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify is pf */
	if (IS_VF(bp))
		return 0;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
		return 0;
	}

	err = -EIO;
	/* verify ari is enabled */
	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
		return 0;
	}

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
		return 0;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hyper-visor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs*/
	if (iov->total == 0)
		goto failed;

	iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);

	DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
	   num_vfs_param, iov->nr_virtfn);

	/* allocate the vf array */
	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	bnx2x_get_vf_igu_cam_info(bp);

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kzalloc(
		BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
		GFP_KERNEL);

	DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);

	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	/* releases whatever subset of the vfdb was allocated */
	__bnx2x_iov_free_vfdb(bp);
	return err;
}

void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	int vf_idx;

	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	DP(BNX2X_MSG_IOV, "about to call disable sriov\n");
	pci_disable_sriov(bp->pdev);
	DP(BNX2X_MSG_IOV, "sriov disabled\n");

	/* disable access to all VFs - reverses bnx2x_vf_enable_access()'s
	 * internal-enable for every VF this PF owns
	 */
	for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
		bnx2x_pretend_func(bp,
				   HW_VF_HANDLE(bp,
						bp->vfdb->sriov.first_vf_in_pf +
						vf_idx));
		DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
		   bp->vfdb->sriov.first_vf_in_pf + vf_idx);
		bnx2x_vf_enable_internal(bp, 0);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
	}

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}

void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];
		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);

	BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
		       BP_VF_BULLETIN_DMA(bp)->mapping,
		       BP_VF_BULLETIN_DMA(bp)->size);
}

int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	size_t tot_size;
	int i, rc = 0;

	if (!IS_SRIOV(bp))
		return rc;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	/* spread the context memory over ILT-page-sized chunks */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

		if (cxt->size) {
			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
		} else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping, 2134 tot_size); 2135 BP_VFDB(bp)->sp_dma.size = tot_size; 2136 2137 /* allocate mailboxes */ 2138 tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; 2139 BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping, 2140 tot_size); 2141 BP_VF_MBX_DMA(bp)->size = tot_size; 2142 2143 /* allocate local bulletin boards */ 2144 tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE; 2145 BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr, 2146 &BP_VF_BULLETIN_DMA(bp)->mapping, tot_size); 2147 BP_VF_BULLETIN_DMA(bp)->size = tot_size; 2148 2149 return 0; 2150 2151 alloc_mem_err: 2152 return -ENOMEM; 2153 } 2154 2155 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, 2156 struct bnx2x_vf_queue *q) 2157 { 2158 u8 cl_id = vfq_cl_id(vf, q); 2159 u8 func_id = FW_VF_HANDLE(vf->abs_vfid); 2160 unsigned long q_type = 0; 2161 2162 set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); 2163 set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); 2164 2165 /* Queue State object */ 2166 bnx2x_init_queue_obj(bp, &q->sp_obj, 2167 cl_id, &q->cid, 1, func_id, 2168 bnx2x_vf_sp(bp, vf, q_data), 2169 bnx2x_vf_sp_map(bp, vf, q_data), 2170 q_type); 2171 2172 /* sp indication is set only when vlan/mac/etc. are initialized */ 2173 q->sp_initialized = false; 2174 2175 DP(BNX2X_MSG_IOV, 2176 "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n", 2177 vf->abs_vfid, q->sp_obj.func_id, q->cid); 2178 } 2179 2180 /* called by bnx2x_nic_load */ 2181 int bnx2x_iov_nic_init(struct bnx2x *bp) 2182 { 2183 int vfid; 2184 2185 if (!IS_SRIOV(bp)) { 2186 DP(BNX2X_MSG_IOV, "vfdb was not allocated\n"); 2187 return 0; 2188 } 2189 2190 DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn); 2191 2192 /* let FLR complete ... 
*/
	msleep(100);

	/* initialize vf database */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		/* first CID of this VF within the VF CID range */
		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
			BNX2X_CIDS_PER_VF;

		union cdu_context *base_cxt = (union cdu_context *)
			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
			(base_vf_cid & (ILT_PAGE_CIDS-1));

		DP(BNX2X_MSG_IOV,
		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);

		/* init statically provisioned resources */
		bnx2x_iov_static_resc(bp, vf);

		/* queues are initialized during VF-ACQUIRE */

		/* reserve the vf vlan credit */
		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));

		vf->filter_state = 0;
		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);

		/* init mcast object - This object will be re-initialized
		 * during VF-ACQUIRE with the proper cl_id and cid.
		 * It needs to be initialized here so that it can be safely
		 * handled by a subsequent FLR flow.
		 */
		vf->mcast_list_len = 0;
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
				     0xFF, 0xFF, 0xFF,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		/* set the mailbox message addresses */
		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
			 MBX_MSG_ALIGNED_SIZE);

		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
			vfid * MBX_MSG_ALIGNED_SIZE;

		/* Enable vf mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	}

	/* Final VF init */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		/* fill in the BDF and bars */
		vf->bus = bnx2x_vf_bus(bp, vfid);
		vf->devfn = bnx2x_vf_devfn(bp, vfid);
		bnx2x_vf_set_bars(bp, vf);

		DP(BNX2X_MSG_IOV,
		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
		   vf->abs_vfid, vf->bus, vf->devfn,
		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
	}

	return 0;
}

/* called by bnx2x_chip_cleanup - release every VF (blocking) */
int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return 0;

	/* release all the VFs */
	for_each_vf(bp, i)
		bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */

	return 0;
}

/* called by bnx2x_init_hw_func, returns the next ilt line */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
{
	int i;
	struct bnx2x_ilt *ilt = BP_ILT(bp);

	if (!IS_SRIOV(bp))
		return line;

	/* set vfs ilt lines */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);

		ilt->lines[line+i].page = hw_cxt->addr;
ilt->lines[line+i].page_mapping = hw_cxt->mapping;
		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
	}
	return line + i;
}

/* true iff cid falls inside the CID window reserved for VFs */
static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
{
	return ((cid >= BNX2X_FIRST_VF_CID) &&
		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
}

/* Complete a MAC/VLAN classification ramrod on a VF queue and push any
 * further pending commands out (RAMROD_CONT).
 */
static
void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
					struct bnx2x_vf_queue *vfq,
					union event_ring_elem *elem)
{
	unsigned long ramrod_flags = 0;
	int rc = 0;

	/* Always push next commands out, don't wait here */
	set_bit(RAMROD_CONT, &ramrod_flags);

	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
	case BNX2X_FILTER_MAC_PENDING:
		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
					   &ramrod_flags);
		break;
	case BNX2X_FILTER_VLAN_PENDING:
		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
					    &ramrod_flags);
		break;
	default:
		/* NOTE(review): echo is a FW field printed with %d - confirm
		 * width/endianness is correct on big-endian builds.
		 */
		BNX2X_ERR("Unsupported classification command: %d\n",
			  elem->message.data.eth_event.echo);
		return;
	}
	if (rc < 0)
		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
	else if (rc > 0)
		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
}

/* Complete a VF multicast ramrod and kick any pending mcast commands */
static
void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
			       struct bnx2x_virtf *vf)
{
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc;

	rparam.mcast_obj = &vf->mcast_obj;
	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
		if (rc < 0)
			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
				  rc);
	}
}

/* Clear the rx-mode pending bit in the VF's filter state */
static
void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
				 struct bnx2x_virtf *vf)
{
	smp_mb__before_clear_bit();
	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	smp_mb__after_clear_bit();
}

/* Dispatch an event-queue element that belongs to a VF.
 * Returns 1 when the element is not VF related (caller must handle it),
 * 0 when it was consumed here.
 */
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
{
	struct bnx2x_virtf *vf;
	int qidx = 0, abs_vfid;
	u8 opcode;
	u16 cid = 0xffff;

	if (!IS_SRIOV(bp))
		return 1;

	/* first get the cid - the only events we handle here are cfc-delete
	 * and set-mac completion
	 */
	opcode = elem->message.opcode;

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		cid = SW_CID((__force __le32)
			     elem->message.data.cfc_del_event.cid);
		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
	case EVENT_RING_OPCODE_MULTICAST_RULES:
	case EVENT_RING_OPCODE_FILTERS_RULES:
		cid = (elem->message.data.eth_event.echo &
		       BNX2X_SWCID_MASK);
		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		abs_vfid = elem->message.data.vf_flr_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
		BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
			  abs_vfid,
			  elem->message.data.malicious_vf_event.err_id);
		goto get_vf;
	default:
		return 1;
	}

	/* check if the cid is the VF range */
	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
		return 1;
	}

	/* extract vf and rxq index from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2.
The max number of VFs (per path) is 64
	 */
	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
get_vf:
	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf) {
		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
			  cid, abs_vfid);
		return 0;
	}

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
		   vf->abs_vfid, qidx);
		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
						       &vfq_get(vf,
								qidx)->sp_obj,
						       BNX2X_Q_CMD_CFC_DEL);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
		break;
	case EVENT_RING_OPCODE_MULTICAST_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_mcast_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_FILTERS_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_filters_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		/* Do nothing for now */
		return 0;
	}
	/* SRIOV: reschedule any 'in_progress' operations */
	bnx2x_iov_sp_event(bp, cid, false);

	return 0;
}

/* Map a VF cid to its bnx2x_virtf, or NULL when none matches */
static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
{
	/* extract the vf from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. The max number of VFs (per path) is 64
	 */
	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
}

/* Resolve the queue state object for a VF cid; *q_obj is left
 * untouched when no VF matches.
 */
void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
				struct bnx2x_queue_sp_obj **q_obj)
{
	struct bnx2x_virtf *vf;

	if (!IS_SRIOV(bp))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);

	if (vf) {
		/* extract queue index from vf_cid - relies on the following:
		 * 1. vfid on cid reflects the true abs_vfid
		 * 2. The max number of VFs (per path) is 64
		 */
		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
	} else {
		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
	}
}

/* Mark the VF owning vf_cid as having a slow-path op in progress and
 * optionally queue the PF slow-path task to advance it.
 */
void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
{
	struct bnx2x_virtf *vf;

	/* check if the cid is the VF range */
	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);
	if (vf) {
		/* set in_progress flag */
		atomic_set(&vf->op_in_progress, 1);
		if (queue_work)
			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
	}
}

/* Append per-VF-queue statistics query entries after the PF's own
 * entries in the FW stats request, and bump hdr.cmd_num accordingly.
 */
void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index, num_queues_req;
	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;
	u8 stats_count = 0;
	bool is_fcoe = false;

	if (!IS_SRIOV(bp))
		return;

	if (!NO_FCOE(bp))
		is_fcoe = true;

	/* fcoe adds one global request and one queue request */
	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
		(is_fcoe ? 0 : 1);

	DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
	       "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. 
Will add queries on top of that\n",
	       BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
	       first_queue_query_index + num_queues_req);

	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
		num_queues_req * sizeof(struct per_queue_stats);

	cur_query_entry = &bp->fw_stats_req->
		query[first_queue_query_index + num_queues_req];

	for_each_vf(bp, i) {
		int j;
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (vf->state != VF_ENABLED) {
			DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
			       "vf %d not enabled so no stats for it\n",
			       vf->abs_vfid);
			continue;
		}

		DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
		for_each_vfq(vf, j) {
			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);

			dma_addr_t q_stats_addr =
				vf->fw_stat_map + j * vf->stats_stride;

			/* collect stats for active queues only */
			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
			    BNX2X_Q_LOGICAL_STATE_STOPPED)
				continue;

			/* create stats query entry for this queue */
			cur_query_entry->kind = STATS_TYPE_QUEUE;
			cur_query_entry->index = vfq_stat_id(vf, rxq);
			cur_query_entry->funcID =
				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
			cur_query_entry->address.hi =
				cpu_to_le32(U64_HI(q_stats_addr));
			cur_query_entry->address.lo =
				cpu_to_le32(U64_LO(q_stats_addr));
			DP(BNX2X_MSG_IOV,
			   "added address %x %x for vf %d queue %d client %d\n",
			   cur_query_entry->address.hi,
			   cur_query_entry->address.lo, cur_query_entry->funcID,
			   j, cur_query_entry->index);
			cur_query_entry++;
			cur_data_offset += sizeof(struct per_queue_stats);
			stats_count++;

			/* all stats are coalesced to the leading queue */
			if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
				break;
		}
	}
	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
}

/* Run pending VF slow-path operations: invoke the state transition of
 * every VF whose op_in_progress flag is set.
 */
void
bnx2x_iov_sp_task(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;
	/* Iterate over all VFs and invoke state transition for VFs with
	 * 'in-progress' slow-path operations
	 */
	DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_SP),
	       "searching for pending vf operations\n");
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (!vf) {
			BNX2X_ERR("VF was null! skipping...\n");
			continue;
		}

		if (!list_empty(&vf->op_list_head) &&
		    atomic_read(&vf->op_in_progress)) {
			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
			bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
		}
	}
}

/* Find the VF whose IGU SB range contains stat_id. Returns NULL only
 * when there are no VFs; when no range matches, the last VF iterated
 * is returned.
 */
static inline
struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
{
	int i;
	struct bnx2x_virtf *vf = NULL;

	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);
		if (stat_id >= vf->igu_base_id &&
		    stat_id < vf->igu_base_id + vf_sb_count(vf))
			break;
	}
	return vf;
}

/* VF API helpers */

/* Set or clear a queue-zone permission table entry for a VF queue */
static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
				u8 enable)
{
	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
	u32 val = enable ?
(abs_vfid | (1 << 6)) : 0;

	REG_WR(bp, reg, val);
}

/* Clear all queue-zone permission entries belonging to this VF */
static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;

	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), false);
}

/* Disable the VF in the IGU - done while pretending to be the VF's
 * function, then the PF context is restored.
 */
static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 val;

	/* clear the VF configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
		 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

/* Max queues a VF may use: bounded by its SB count, the per-VF CID
 * budget and the global per-VF queue limit.
 */
u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
		     BNX2X_VF_MAX_QUEUES);
}

/* true iff the requested resources fit within what this VF may use
 * (a zero rxq/txq count in the vf means "max available").
 */
static
int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
			    struct vf_pf_resc_request *req_resc)
{
	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);

	return ((req_resc->num_rxqs <= rxq_cnt) &&
		(req_resc->num_txqs <= txq_cnt) &&
		(req_resc->num_sbs <= vf_sb_count(vf)) &&
		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
}

/* CORE VF API */

/* First stage of the VF-ACQUIRE flow: validate and record the VF's
 * resource request, then initialize its queue objects. Moves the VF
 * to VF_ACQUIRED. Returns 0 on success or a negative errno.
 */
int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
		     struct vf_pf_resc_request *resc)
{
	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
		BNX2X_CIDS_PER_VF;

	union cdu_context *base_cxt = (union cdu_context *)
		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
		(base_vf_cid & (ILT_PAGE_CIDS-1));
	int i;

	/* if state is 'acquired' the VF was not released or FLR'd, in
	 * this case the returned resources match the acquired already
	 * acquired resources. Verify that the requested numbers do
	 * not exceed the already acquired numbers.
	 */
	if (vf->state == VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
		   vf->abs_vfid);

		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= then previously acquired numbers\n",
				  vf->abs_vfid);
			return -EINVAL;
		}
		return 0;
	}

	/* Otherwise vf state must be 'free' or 'reset' */
	if (vf->state != VF_FREE && vf->state != VF_RESET) {
		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
			  vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* static allocation:
	 * the global maximum number are fixed per VF. Fail the request if
	 * requested number exceed these globals
	 */
	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
		DP(BNX2X_MSG_IOV,
		   "cannot fulfill vf resource request. 
Placing maximal available values in response\n");
		/* set the max resource in the vf */
		return -ENOMEM;
	}

	/* Set resources counters - 0 request means max available */
	vf_sb_count(vf) = resc->num_sbs;
	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	if (resc->num_mac_filters)
		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
	if (resc->num_vlan_filters)
		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;

	DP(BNX2X_MSG_IOV,
	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
	   vf_sb_count(vf), vf_rxq_count(vf),
	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
	   vf_vlan_rules_cnt(vf));

	/* Initialize the queues */
	if (!vf->vfqs) {
		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
		return -EINVAL;
	}

	for_each_vfq(vf, i) {
		struct bnx2x_vf_queue *q = vfq_get(vf, i);

		if (!q) {
			BNX2X_ERR("q number %d was not allocated\n", i);
			return -EINVAL;
		}

		q->index = i;
		q->cxt = &((base_cxt + i)->eth);
		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;

		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
		   vf->abs_vfid, i, q->index, q->cid, q->cxt);

		/* init SP objects */
		bnx2x_vfq_init(bp, vf, q);
	}
	vf->state = VF_ACQUIRED;
	return 0;
}

/* Second stage of VF bring-up: init the VF's status blocks, FW
 * function and queue permission table, then move it to VF_ENABLED and
 * post the bulletin board. Returns 0 or a negative errno.
 */
int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
{
	struct bnx2x_func_init_params func_init = {0};
	u16 flags = 0;
	int i;

	/* the sb resources are initialized at this point, do the
	 * FW/HW initializations
	 */
	for_each_vf_sb(vf, i)
		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));

	/* Sanity checks */
	if (vf->state != VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV,
		   "VF[%d] is not in VF_ACQUIRED, but %d\n",
		   vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* let FLR complete ... */
	msleep(100);

	/* FLR cleanup epilogue */
	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
		return -EBUSY;

	/* reset IGU VF statistics: MSIX */
	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0);

	/* vf init */
	if (vf->cfg_flags & VF_CFG_STATS)
		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);

	if (vf->cfg_flags & VF_CFG_TPA)
		flags |= FUNC_FLG_TPA;

	if (is_vf_multi(vf))
		flags |= FUNC_FLG_RSS;

	/* function setup */
	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
	func_init.fw_stat_map = vf->fw_stat_map;
	func_init.spq_map = vf->spq_map;
	func_init.spq_prod = 0;
	bnx2x_func_init(bp, &func_init);

	/* Enable the vf */
	bnx2x_vf_enable_access(bp, vf->abs_vfid);
	bnx2x_vf_enable_traffic(bp, vf);

	/* queue protection table */
	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), true);

	vf->state = VF_ENABLED;

	/* update vf bulletin board */
	bnx2x_post_vf_bulletin(bp, vf->index);

	return 0;
}

/* cookie for deferring a VF state change until stats are quiesced */
struct set_vf_state_cookie {
	struct bnx2x_virtf *vf;
	u8 state;
};

/* callback run by bnx2x_stats_safe_exec to apply the deferred state */
static void bnx2x_set_vf_state(void *cookie)
{
	struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;

	p->vf->state = p->state;
}

/* VFOP close (teardown the queues, delete mcasts and close HW) */
static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
	enum bnx2x_vfop_close_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done =
bnx2x_vfop_close,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_CLOSE_QUEUES:

		/* tear down one queue per invocation until all are done,
		 * then fall through (via vfop_finalize) to CLOSE_HW
		 */
		if (++(qx->qid) < vf_rxq_count(vf)) {
			vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
			if (vfop->rc)
				goto op_err;
			return;
		}
		vfop->state = BNX2X_VFOP_CLOSE_HW;
		vfop->rc = 0;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_CLOSE_HW:

		/* disable the interrupts */
		DP(BNX2X_MSG_IOV, "disabling igu\n");
		bnx2x_vf_igu_disable(bp, vf);

		/* disable the VF */
		DP(BNX2X_MSG_IOV, "clearing qtbl\n");
		bnx2x_vf_clr_qtbl(bp, vf);

		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:

	/* need to make sure there are no outstanding stats ramrods which may
	 * cause the device to access the VF's stats buffer which it will free
	 * as soon as we return from the close flow.
	 */
	{
		struct set_vf_state_cookie cookie;

		cookie.vf = vf;
		cookie.state = VF_ACQUIRED;
		bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
	}

	DP(BNX2X_MSG_IOV, "set state to acquired\n");
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	/* Not supported at the moment; Exists for macros only */
	return;
}

/* Queue a VFOP close command; qid starts at -1 and loops over all
 * rx queues inside bnx2x_vfop_close.
 */
int bnx2x_vfop_close_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		vfop->args.qx.qid = -1; /* loop */
		bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
				 bnx2x_vfop_close, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VF release can be called either: 1. The VF was acquired but
 * not enabled 2. the vf was enabled or in the process of being
 * enabled
 */
static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_release,
		.block = false,
	};

	DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
	   vf->state == VF_FREE ? "Free" :
	   vf->state == VF_ACQUIRED ? "Acquired" :
	   vf->state == VF_ENABLED ? "Enabled" :
	   vf->state == VF_RESET ? "Reset" :
	   "Unknown");

	switch (vf->state) {
	case VF_ENABLED:
		vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
		if (vfop->rc)
			goto op_err;
		return;

	case VF_ACQUIRED:
		DP(BNX2X_MSG_IOV, "about to free resources\n");
		bnx2x_vf_free_resc(bp, vf);
		DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
		goto op_done;

	case VF_FREE:
	case VF_RESET:
		/* do nothing */
		goto op_done;
	default:
		bnx2x_vfop_default(vf->state);
	}
op_err:
	BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	bnx2x_vfop_end(bp, vf, vfop);
}

/* VFOP RSS configuration state machine */
static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	enum bnx2x_vfop_rss_state state;

	if (!vfop) {
		BNX2X_ERR("vfop was null\n");
		return;
	}

	state = vfop->state;
	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_RSS_CONFIG:
		/* next state */
		vfop->state = BNX2X_VFOP_RSS_DONE;
		bnx2x_config_rss(bp, &vfop->op_p->rss);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
BNX2X_ERR("RSS error: rc %d\n", vfop->rc);
op_done:
	case BNX2X_VFOP_RSS_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

/* Queue a VFOP release command; dispatch is driven by vf->state */
int bnx2x_vfop_release_cmd(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		bnx2x_vfop_opset(-1, /* use vf->state */
				 bnx2x_vfop_release, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release,
					     cmd->block);
	}
	return -ENOMEM;
}

/* Queue a VFOP RSS configuration command */
int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
		       struct bnx2x_virtf *vf,
		       struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss,
				 cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP tpa update, send update on all queues */
static void bnx2x_vfop_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_tpa *tpa_args = &vfop->args.tpa;
	enum bnx2x_vfop_tpa_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d:%d] STATE: %d\n",
	   vf->abs_vfid, tpa_args->qid,
	   state);

	switch (state) {
	case BNX2X_VFOP_TPA_CONFIG:

		/* one update ramrod per rx queue, re-entered per queue */
		if (tpa_args->qid < vf_rxq_count(vf)) {
			struct bnx2x_queue_state_params *qstate =
				&vf->op_params.qstate;

			qstate->q_obj = &bnx2x_vfq(vf, tpa_args->qid, sp_obj);

			/* The only thing that changes for the ramrod params
			 * between calls is the sge_map
			 */
			qstate->params.update_tpa.sge_map =
				tpa_args->sge_map[tpa_args->qid];

			DP(BNX2X_MSG_IOV, "sge_addr[%d] %08x:%08x\n",
			   tpa_args->qid,
			   U64_HI(qstate->params.update_tpa.sge_map),
			   U64_LO(qstate->params.update_tpa.sge_map));
			qstate->cmd = BNX2X_Q_CMD_UPDATE_TPA;
			vfop->rc = bnx2x_queue_state_change(bp, qstate);

			tpa_args->qid++;
			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
		}
		vfop->state = BNX2X_VFOP_TPA_DONE;
		vfop->rc = 0;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
		BNX2X_ERR("TPA update error: rc %d\n", vfop->rc);
op_done:
	case BNX2X_VFOP_TPA_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

/* Queue a VFOP TPA-update command covering all VF rx queues; the sge
 * addresses are copied out of the VF's TPA TLV.
 */
int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
		       struct bnx2x_virtf *vf,
		       struct bnx2x_vfop_cmd *cmd,
		       struct vfpf_tpa_tlv *tpa_tlv)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qx.qid = 0; /* loop */
		memcpy(&vfop->args.tpa.sge_map,
		       tpa_tlv->tpa_client_info.sge_addr,
		       sizeof(vfop->args.tpa.sge_map));
		bnx2x_vfop_opset(BNX2X_VFOP_TPA_CONFIG,
				 bnx2x_vfop_tpa, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_tpa,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VF release ~ VF close + VF release-resources
 * Release is the ultimate SW shutdown and is called whenever an
 * irrecoverable error is encountered.
*/
void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
{
	struct bnx2x_vfop_cmd cmd = {
		.done = NULL,
		.block = block,
	};
	int rc;

	DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);

	rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
	if (rc)
		WARN(rc,
		     "VF[%d] Failed to allocate resources for release op- rc=%d\n",
		     vf->abs_vfid, rc);
}

/* compose the VF's bus/devfn into a single sbdf word */
static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
				     struct bnx2x_virtf *vf, u32 *sbdf)
{
	*sbdf = vf->devfn | (vf->bus << 8);
}

/* Take the VF<->PF channel mutex and record which TLV holds it */
void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      enum channel_tlvs tlv)
{
	/* we don't lock the channel for unsupported tlvs */
	if (!bnx2x_tlv_supported(tlv)) {
		BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
		return;
	}

	/* lock the channel */
	mutex_lock(&vf->op_mutex);

	/* record the locking op */
	vf->op_current = tlv;

	/* log the lock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
	   vf->abs_vfid, tlv);
}

/* Release the VF<->PF channel mutex; warns when the unlocking TLV does
 * not match the one that took the lock.
 */
void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
				enum channel_tlvs expected_tlv)
{
	/* NOTE(review): current_tlv is captured but never used, and the
	 * DP below prints op_current *after* it was reset to
	 * CHANNEL_TLV_NONE - it likely should print current_tlv/expected_tlv.
	 */
	enum channel_tlvs current_tlv;

	if (!vf) {
		BNX2X_ERR("VF was %p\n", vf);
		return;
	}

	current_tlv = vf->op_current;

	/* we don't unlock the channel for unsupported tlvs */
	if (!bnx2x_tlv_supported(expected_tlv))
		return;

	WARN(expected_tlv != vf->op_current,
	     "lock mismatch: expected %d found %d", expected_tlv,
	     vf->op_current);

	/* clear the locking op */
	vf->op_current = CHANNEL_TLV_NONE;

	/* unlock the channel */
	mutex_unlock(&vf->op_mutex);

	/* log the unlock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
	   vf->abs_vfid, vf->op_current);
}

/* Toggle Tx switching on all PF eth queues via queue-update ramrods;
 * no-op when the flag already matches or the PF is not fully up.
 */
static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
{
	struct bnx2x_queue_state_params q_params;
	u32 prev_flags;
	int i, rc;

	/* Verify changes are needed and record current Tx switching state */
	prev_flags = bp->flags;
	if (enable)
		bp->flags |= TX_SWITCHING;
	else
		bp->flags &= ~TX_SWITCHING;
	if (prev_flags == bp->flags)
		return 0;

	/* Verify state enables the sending of queue ramrods */
	if ((bp->state != BNX2X_STATE_OPEN) ||
	    (bnx2x_get_q_logical_state(bp,
				       &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
	     BNX2X_Q_LOGICAL_STATE_ACTIVE))
		return 0;

	/* send q. update ramrod to configure Tx switching */
	memset(&q_params, 0, sizeof(q_params));
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
	q_params.cmd = BNX2X_Q_CMD_UPDATE;
	__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
		  &q_params.params.update.update_flags);
	if (enable)
		__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
			  &q_params.params.update.update_flags);
	else
		__clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
			    &q_params.params.update.update_flags);

	/* send the ramrod on all the queues of the PF */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* Set the appropriate Queue object */
		q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

		/* Update the Queue state */
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to configure Tx switching\n");
			return rc;
		}
	}

	DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
	return 0;
}

/* PCI sriov_configure callback: set the number of active VFs (0
 * disables SR-IOV). Requires the PF to be up.
 */
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));

	if (!IS_SRIOV(bp)) {
		BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. 
Check dmesg for errors in probe stage\n"); 3272 return -EINVAL; 3273 } 3274 3275 DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", 3276 num_vfs_param, BNX2X_NR_VIRTFN(bp)); 3277 3278 /* HW channel is only operational when PF is up */ 3279 if (bp->state != BNX2X_STATE_OPEN) { 3280 BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n"); 3281 return -EINVAL; 3282 } 3283 3284 /* we are always bound by the total_vfs in the configuration space */ 3285 if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) { 3286 BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n", 3287 num_vfs_param, BNX2X_NR_VIRTFN(bp)); 3288 num_vfs_param = BNX2X_NR_VIRTFN(bp); 3289 } 3290 3291 bp->requested_nr_virtfn = num_vfs_param; 3292 if (num_vfs_param == 0) { 3293 bnx2x_set_pf_tx_switching(bp, false); 3294 pci_disable_sriov(dev); 3295 return 0; 3296 } else { 3297 return bnx2x_enable_sriov(bp); 3298 } 3299 } 3300 3301 #define IGU_ENTRY_SIZE 4 3302 3303 int bnx2x_enable_sriov(struct bnx2x *bp) 3304 { 3305 int rc = 0, req_vfs = bp->requested_nr_virtfn; 3306 int vf_idx, sb_idx, vfq_idx, qcount, first_vf; 3307 u32 igu_entry, address; 3308 u16 num_vf_queues; 3309 3310 if (req_vfs == 0) 3311 return 0; 3312 3313 first_vf = bp->vfdb->sriov.first_vf_in_pf; 3314 3315 /* statically distribute vf sb pool between VFs */ 3316 num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES, 3317 BP_VFDB(bp)->vf_sbs_pool / req_vfs); 3318 3319 /* zero previous values learned from igu cam */ 3320 for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) { 3321 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); 3322 3323 vf->sb_count = 0; 3324 vf_sb_count(BP_VF(bp, vf_idx)) = 0; 3325 } 3326 bp->vfdb->vf_sbs_pool = 0; 3327 3328 /* prepare IGU cam */ 3329 sb_idx = BP_VFDB(bp)->first_vf_igu_entry; 3330 address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE; 3331 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { 3332 for (vfq_idx = 0; vfq_idx < num_vf_queues; 
vfq_idx++) { 3333 igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT | 3334 vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT | 3335 IGU_REG_MAPPING_MEMORY_VALID; 3336 DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n", 3337 sb_idx, vf_idx); 3338 REG_WR(bp, address, igu_entry); 3339 sb_idx++; 3340 address += IGU_ENTRY_SIZE; 3341 } 3342 } 3343 3344 /* Reinitialize vf database according to igu cam */ 3345 bnx2x_get_vf_igu_cam_info(bp); 3346 3347 DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n", 3348 BP_VFDB(bp)->vf_sbs_pool, num_vf_queues); 3349 3350 qcount = 0; 3351 for_each_vf(bp, vf_idx) { 3352 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); 3353 3354 /* set local queue arrays */ 3355 vf->vfqs = &bp->vfdb->vfqs[qcount]; 3356 qcount += vf_sb_count(vf); 3357 bnx2x_iov_static_resc(bp, vf); 3358 } 3359 3360 /* prepare msix vectors in VF configuration space - the value in the 3361 * PCI configuration space should be the index of the last entry, 3362 * namely one less than the actual size of the table 3363 */ 3364 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { 3365 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); 3366 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, 3367 num_vf_queues - 1); 3368 DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n", 3369 vf_idx, num_vf_queues - 1); 3370 } 3371 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 3372 3373 /* enable sriov. This will probe all the VFs, and consequentially cause 3374 * the "acquire" messages to appear on the VF PF channel. 
3375 */ 3376 DP(BNX2X_MSG_IOV, "about to call enable sriov\n"); 3377 bnx2x_disable_sriov(bp); 3378 3379 rc = bnx2x_set_pf_tx_switching(bp, true); 3380 if (rc) 3381 return rc; 3382 3383 rc = pci_enable_sriov(bp->pdev, req_vfs); 3384 if (rc) { 3385 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); 3386 return rc; 3387 } 3388 DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs); 3389 return req_vfs; 3390 } 3391 3392 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) 3393 { 3394 int vfidx; 3395 struct pf_vf_bulletin_content *bulletin; 3396 3397 DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n"); 3398 for_each_vf(bp, vfidx) { 3399 bulletin = BP_VF_BULLETIN(bp, vfidx); 3400 if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN) 3401 bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0); 3402 } 3403 } 3404 3405 void bnx2x_disable_sriov(struct bnx2x *bp) 3406 { 3407 pci_disable_sriov(bp->pdev); 3408 } 3409 3410 static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, 3411 struct bnx2x_virtf **vf, 3412 struct pf_vf_bulletin_content **bulletin) 3413 { 3414 if (bp->state != BNX2X_STATE_OPEN) { 3415 BNX2X_ERR("vf ndo called though PF is down\n"); 3416 return -EINVAL; 3417 } 3418 3419 if (!IS_SRIOV(bp)) { 3420 BNX2X_ERR("vf ndo called though sriov is disabled\n"); 3421 return -EINVAL; 3422 } 3423 3424 if (vfidx >= BNX2X_NR_VIRTFN(bp)) { 3425 BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n", 3426 vfidx, BNX2X_NR_VIRTFN(bp)); 3427 return -EINVAL; 3428 } 3429 3430 /* init members */ 3431 *vf = BP_VF(bp, vfidx); 3432 *bulletin = BP_VF_BULLETIN(bp, vfidx); 3433 3434 if (!*vf) { 3435 BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n", 3436 vfidx); 3437 return -EINVAL; 3438 } 3439 3440 if (!(*vf)->vfqs) { 3441 BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? 
vfidx was %d\n", 3442 vfidx); 3443 return -EINVAL; 3444 } 3445 3446 if (!*bulletin) { 3447 BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n", 3448 vfidx); 3449 return -EINVAL; 3450 } 3451 3452 return 0; 3453 } 3454 3455 int bnx2x_get_vf_config(struct net_device *dev, int vfidx, 3456 struct ifla_vf_info *ivi) 3457 { 3458 struct bnx2x *bp = netdev_priv(dev); 3459 struct bnx2x_virtf *vf = NULL; 3460 struct pf_vf_bulletin_content *bulletin = NULL; 3461 struct bnx2x_vlan_mac_obj *mac_obj; 3462 struct bnx2x_vlan_mac_obj *vlan_obj; 3463 int rc; 3464 3465 /* sanity and init */ 3466 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 3467 if (rc) 3468 return rc; 3469 mac_obj = &bnx2x_leading_vfq(vf, mac_obj); 3470 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); 3471 if (!mac_obj || !vlan_obj) { 3472 BNX2X_ERR("VF partially initialized\n"); 3473 return -EINVAL; 3474 } 3475 3476 ivi->vf = vfidx; 3477 ivi->qos = 0; 3478 ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */ 3479 ivi->spoofchk = 1; /*always enabled */ 3480 if (vf->state == VF_ENABLED) { 3481 /* mac and vlan are in vlan_mac objects */ 3482 if (bnx2x_validate_vf_sp_objs(bp, vf, false)) { 3483 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, 3484 0, ETH_ALEN); 3485 vlan_obj->get_n_elements(bp, vlan_obj, 1, 3486 (u8 *)&ivi->vlan, 0, 3487 VLAN_HLEN); 3488 } 3489 } else { 3490 /* mac */ 3491 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) 3492 /* mac configured by ndo so its in bulletin board */ 3493 memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); 3494 else 3495 /* function has not been loaded yet. Show mac as 0s */ 3496 memset(&ivi->mac, 0, ETH_ALEN); 3497 3498 /* vlan */ 3499 if (bulletin->valid_bitmap & (1 << VLAN_VALID)) 3500 /* vlan configured by ndo so its in bulletin board */ 3501 memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN); 3502 else 3503 /* function has not been loaded yet. 
Show vlans as 0s */ 3504 memset(&ivi->vlan, 0, VLAN_HLEN); 3505 } 3506 3507 return 0; 3508 } 3509 3510 /* New mac for VF. Consider these cases: 3511 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and 3512 * supply at acquire. 3513 * 2. VF has already been acquired but has not yet initialized - store in local 3514 * bulletin board. mac will be posted on VF bulletin board after VF init. VF 3515 * will configure this mac when it is ready. 3516 * 3. VF has already initialized but has not yet setup a queue - post the new 3517 * mac on VF's bulletin board right now. VF will configure this mac when it 3518 * is ready. 3519 * 4. VF has already set a queue - delete any macs already configured for this 3520 * queue and manually config the new mac. 3521 * In any event, once this function has been called refuse any attempts by the 3522 * VF to configure any mac for itself except for this mac. In case of a race 3523 * where the VF fails to see the new post on its bulletin board before sending a 3524 * mac configuration request, the PF will simply fail the request and VF can try 3525 * again after consulting its bulletin board. 3526 */ 3527 int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) 3528 { 3529 struct bnx2x *bp = netdev_priv(dev); 3530 int rc, q_logical_state; 3531 struct bnx2x_virtf *vf = NULL; 3532 struct pf_vf_bulletin_content *bulletin = NULL; 3533 3534 /* sanity and init */ 3535 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 3536 if (rc) 3537 return rc; 3538 if (!is_valid_ether_addr(mac)) { 3539 BNX2X_ERR("mac address invalid\n"); 3540 return -EINVAL; 3541 } 3542 3543 /* update PF's copy of the VF's bulletin. 
Will no longer accept mac 3544 * configuration requests from vf unless match this mac 3545 */ 3546 bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID; 3547 memcpy(bulletin->mac, mac, ETH_ALEN); 3548 3549 /* Post update on VF's bulletin board */ 3550 rc = bnx2x_post_vf_bulletin(bp, vfidx); 3551 if (rc) { 3552 BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx); 3553 return rc; 3554 } 3555 3556 q_logical_state = 3557 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); 3558 if (vf->state == VF_ENABLED && 3559 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { 3560 /* configure the mac in device on this vf's queue */ 3561 unsigned long ramrod_flags = 0; 3562 struct bnx2x_vlan_mac_obj *mac_obj; 3563 3564 /* User should be able to see failure reason in system logs */ 3565 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) 3566 return -EINVAL; 3567 3568 /* must lock vfpf channel to protect against vf flows */ 3569 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 3570 3571 /* remove existing eth macs */ 3572 mac_obj = &bnx2x_leading_vfq(vf, mac_obj); 3573 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); 3574 if (rc) { 3575 BNX2X_ERR("failed to delete eth macs\n"); 3576 rc = -EINVAL; 3577 goto out; 3578 } 3579 3580 /* remove existing uc list macs */ 3581 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true); 3582 if (rc) { 3583 BNX2X_ERR("failed to delete uc_list macs\n"); 3584 rc = -EINVAL; 3585 goto out; 3586 } 3587 3588 /* configure the new mac to device */ 3589 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 3590 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, 3591 BNX2X_ETH_MAC, &ramrod_flags); 3592 3593 out: 3594 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 3595 } 3596 3597 return 0; 3598 } 3599 3600 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) 3601 { 3602 struct bnx2x_queue_state_params q_params = {NULL}; 3603 struct bnx2x_vlan_mac_ramrod_params ramrod_param; 3604 struct 
bnx2x_queue_update_params *update_params; 3605 struct pf_vf_bulletin_content *bulletin = NULL; 3606 struct bnx2x_rx_mode_ramrod_params rx_ramrod; 3607 struct bnx2x *bp = netdev_priv(dev); 3608 struct bnx2x_vlan_mac_obj *vlan_obj; 3609 unsigned long vlan_mac_flags = 0; 3610 unsigned long ramrod_flags = 0; 3611 struct bnx2x_virtf *vf = NULL; 3612 unsigned long accept_flags; 3613 int rc; 3614 3615 /* sanity and init */ 3616 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 3617 if (rc) 3618 return rc; 3619 3620 if (vlan > 4095) { 3621 BNX2X_ERR("illegal vlan value %d\n", vlan); 3622 return -EINVAL; 3623 } 3624 3625 DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n", 3626 vfidx, vlan, 0); 3627 3628 /* update PF's copy of the VF's bulletin. No point in posting the vlan 3629 * to the VF since it doesn't have anything to do with it. But it useful 3630 * to store it here in case the VF is not up yet and we can only 3631 * configure the vlan later when it does. Treat vlan id 0 as remove the 3632 * Host tag. 3633 */ 3634 if (vlan > 0) 3635 bulletin->valid_bitmap |= 1 << VLAN_VALID; 3636 else 3637 bulletin->valid_bitmap &= ~(1 << VLAN_VALID); 3638 bulletin->vlan = vlan; 3639 3640 /* is vf initialized and queue set up? 
*/ 3641 if (vf->state != VF_ENABLED || 3642 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != 3643 BNX2X_Q_LOGICAL_STATE_ACTIVE) 3644 return rc; 3645 3646 /* User should be able to see error in system logs */ 3647 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) 3648 return -EINVAL; 3649 3650 /* must lock vfpf channel to protect against vf flows */ 3651 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3652 3653 /* remove existing vlans */ 3654 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 3655 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); 3656 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, 3657 &ramrod_flags); 3658 if (rc) { 3659 BNX2X_ERR("failed to delete vlans\n"); 3660 rc = -EINVAL; 3661 goto out; 3662 } 3663 3664 /* need to remove/add the VF's accept_any_vlan bit */ 3665 accept_flags = bnx2x_leading_vfq(vf, accept_flags); 3666 if (vlan) 3667 clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); 3668 else 3669 set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); 3670 3671 bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, 3672 accept_flags); 3673 bnx2x_leading_vfq(vf, accept_flags) = accept_flags; 3674 bnx2x_config_rx_mode(bp, &rx_ramrod); 3675 3676 /* configure the new vlan to device */ 3677 memset(&ramrod_param, 0, sizeof(ramrod_param)); 3678 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 3679 ramrod_param.vlan_mac_obj = vlan_obj; 3680 ramrod_param.ramrod_flags = ramrod_flags; 3681 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 3682 &ramrod_param.user_req.vlan_mac_flags); 3683 ramrod_param.user_req.u.vlan.vlan = vlan; 3684 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; 3685 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); 3686 if (rc) { 3687 BNX2X_ERR("failed to configure vlan\n"); 3688 rc = -EINVAL; 3689 goto out; 3690 } 3691 3692 /* send queue update ramrod to configure default vlan and silent 3693 * vlan removal 3694 */ 3695 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 3696 q_params.cmd = BNX2X_Q_CMD_UPDATE; 3697 q_params.q_obj = 
&bnx2x_leading_vfq(vf, sp_obj); 3698 update_params = &q_params.params.update; 3699 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, 3700 &update_params->update_flags); 3701 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, 3702 &update_params->update_flags); 3703 if (vlan == 0) { 3704 /* if vlan is 0 then we want to leave the VF traffic 3705 * untagged, and leave the incoming traffic untouched 3706 * (i.e. do not remove any vlan tags). 3707 */ 3708 __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, 3709 &update_params->update_flags); 3710 __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 3711 &update_params->update_flags); 3712 } else { 3713 /* configure default vlan to vf queue and set silent 3714 * vlan removal (the vf remains unaware of this vlan). 3715 */ 3716 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, 3717 &update_params->update_flags); 3718 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 3719 &update_params->update_flags); 3720 update_params->def_vlan = vlan; 3721 update_params->silent_removal_value = 3722 vlan & VLAN_VID_MASK; 3723 update_params->silent_removal_mask = VLAN_VID_MASK; 3724 } 3725 3726 /* Update the Queue state */ 3727 rc = bnx2x_queue_state_change(bp, &q_params); 3728 if (rc) { 3729 BNX2X_ERR("Failed to configure default VLAN\n"); 3730 goto out; 3731 } 3732 3733 3734 /* clear the flag indicating that this VF needs its vlan 3735 * (will only be set if the HV configured the Vlan before vf was 3736 * up and we were called because the VF came up later 3737 */ 3738 out: 3739 vf->cfg_flags &= ~VF_CFG_VLAN; 3740 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3741 3742 return rc; 3743 } 3744 3745 /* crc is the first field in the bulletin board. Compute the crc over the 3746 * entire bulletin board excluding the crc field itself. Use the length field 3747 * as the Bulletin Board was posted by a PF with possibly a different version 3748 * from the vf which will sample it. Therefore, the length is computed by the 3749 * PF and the used blindly by the VF. 
3750 */ 3751 u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, 3752 struct pf_vf_bulletin_content *bulletin) 3753 { 3754 return crc32(BULLETIN_CRC_SEED, 3755 ((u8 *)bulletin) + sizeof(bulletin->crc), 3756 bulletin->length - sizeof(bulletin->crc)); 3757 } 3758 3759 /* Check for new posts on the bulletin board */ 3760 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) 3761 { 3762 struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content; 3763 int attempts; 3764 3765 /* bulletin board hasn't changed since last sample */ 3766 if (bp->old_bulletin.version == bulletin.version) 3767 return PFVF_BULLETIN_UNCHANGED; 3768 3769 /* validate crc of new bulletin board */ 3770 if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) { 3771 /* sampling structure in mid post may result with corrupted data 3772 * validate crc to ensure coherency. 3773 */ 3774 for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) { 3775 bulletin = bp->pf2vf_bulletin->content; 3776 if (bulletin.crc == bnx2x_crc_vf_bulletin(bp, 3777 &bulletin)) 3778 break; 3779 BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n", 3780 bulletin.crc, 3781 bnx2x_crc_vf_bulletin(bp, &bulletin)); 3782 } 3783 if (attempts >= BULLETIN_ATTEMPTS) { 3784 BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. 
Aborting\n", 3785 attempts); 3786 return PFVF_BULLETIN_CRC_ERR; 3787 } 3788 } 3789 3790 /* the mac address in bulletin board is valid and is new */ 3791 if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID && 3792 !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) { 3793 /* update new mac to net device */ 3794 memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN); 3795 } 3796 3797 /* the vlan in bulletin board is valid and is new */ 3798 if (bulletin.valid_bitmap & 1 << VLAN_VALID) 3799 memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN); 3800 3801 /* copy new bulletin board to bp */ 3802 bp->old_bulletin = bulletin; 3803 3804 return PFVF_BULLETIN_UPDATED; 3805 } 3806 3807 void bnx2x_timer_sriov(struct bnx2x *bp) 3808 { 3809 bnx2x_sample_bulletin(bp); 3810 3811 /* if channel is down we need to self destruct */ 3812 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) 3813 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, 3814 BNX2X_MSG_IOV); 3815 } 3816 3817 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) 3818 { 3819 /* vf doorbells are embedded within the regview */ 3820 return bp->regview + PXP_VF_ADDR_DB_START; 3821 } 3822 3823 int bnx2x_vf_pci_alloc(struct bnx2x *bp) 3824 { 3825 mutex_init(&bp->vf2pf_mutex); 3826 3827 /* allocate vf2pf mailbox for vf to pf channel */ 3828 BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping, 3829 sizeof(struct bnx2x_vf_mbx_msg)); 3830 3831 /* allocate pf 2 vf bulletin board */ 3832 BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping, 3833 sizeof(union pf_vf_bulletin)); 3834 3835 return 0; 3836 3837 alloc_mem_err: 3838 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, 3839 sizeof(struct bnx2x_vf_mbx_msg)); 3840 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, 3841 sizeof(union pf_vf_bulletin)); 3842 return -ENOMEM; 3843 } 3844 3845 void bnx2x_iov_channel_down(struct bnx2x *bp) 3846 { 3847 int vf_idx; 3848 struct pf_vf_bulletin_content *bulletin; 3849 3850 if (!IS_SRIOV(bp)) 3851 
return; 3852 3853 for_each_vf(bp, vf_idx) { 3854 /* locate this VFs bulletin board and update the channel down 3855 * bit 3856 */ 3857 bulletin = BP_VF_BULLETIN(bp, vf_idx); 3858 bulletin->valid_bitmap |= 1 << CHANNEL_DOWN; 3859 3860 /* update vf bulletin board */ 3861 bnx2x_post_vf_bulletin(bp, vf_idx); 3862 } 3863 } 3864