/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>

/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}

static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}
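/* Ack a VF status block on the VF's behalf. The PF issues the ack
 * through the GRC-mapped IGU command registers: first the regular ack
 * word (index, segment, update and interrupt-enable bits), then a
 * control word carrying the command address and the VF's function id.
 */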
static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
				u8 update)
{
	/* acking a VF sb through the PF - use the GRC */
	u32 ctl;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
	mmiowb();
	barrier();

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();
}

static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      bool print_err)
{
	if (!bnx2x_leading_vfq(vf, sp_initialized)) {
		if (print_err)
			BNX2X_ERR("Slowpath objects not yet initialized!\n");
		else
			DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
		return false;
	}
	return true;
}

/* VFOP - VF slow-path operation support */

#define BNX2X_VFOP_FILTER_ADD_CNT_MAX	0x10000

/* VFOP operations states */
enum bnx2x_vfop_qctor_state {
	BNX2X_VFOP_QCTOR_INIT,
	BNX2X_VFOP_QCTOR_SETUP,
	BNX2X_VFOP_QCTOR_INT_EN
};

enum bnx2x_vfop_qdtor_state {
	BNX2X_VFOP_QDTOR_HALT,
	BNX2X_VFOP_QDTOR_TERMINATE,
	BNX2X_VFOP_QDTOR_CFCDEL,
	BNX2X_VFOP_QDTOR_DONE
};

enum bnx2x_vfop_vlan_mac_state {
	BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
	BNX2X_VFOP_VLAN_MAC_CLEAR,
	BNX2X_VFOP_VLAN_MAC_CHK_DONE,
	BNX2X_VFOP_MAC_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST_0
};

enum bnx2x_vfop_qsetup_state {
	BNX2X_VFOP_QSETUP_CTOR,
	BNX2X_VFOP_QSETUP_VLAN0,
	BNX2X_VFOP_QSETUP_DONE
};

enum bnx2x_vfop_mcast_state {
	BNX2X_VFOP_MCAST_DEL,
	BNX2X_VFOP_MCAST_ADD,
	BNX2X_VFOP_MCAST_CHK_DONE
};

enum bnx2x_vfop_qflr_state {
	BNX2X_VFOP_QFLR_CLR_VLAN,
	BNX2X_VFOP_QFLR_CLR_MAC,
	BNX2X_VFOP_QFLR_TERMINATE,
	BNX2X_VFOP_QFLR_DONE
};

enum bnx2x_vfop_flr_state {
	BNX2X_VFOP_FLR_QUEUES,
	BNX2X_VFOP_FLR_HW
};

enum bnx2x_vfop_close_state {
	BNX2X_VFOP_CLOSE_QUEUES,
	BNX2X_VFOP_CLOSE_HW
};

enum bnx2x_vfop_rxmode_state {
	BNX2X_VFOP_RXMODE_CONFIG,
	BNX2X_VFOP_RXMODE_DONE
};

enum bnx2x_vfop_qteardown_state {
	BNX2X_VFOP_QTEARDOWN_RXMODE,
	BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
	BNX2X_VFOP_QTEARDOWN_CLR_MAC,
	BNX2X_VFOP_QTEARDOWN_CLR_MCAST,
	BNX2X_VFOP_QTEARDOWN_QDTOR,
	BNX2X_VFOP_QTEARDOWN_DONE
};

enum bnx2x_vfop_rss_state {
	BNX2X_VFOP_RSS_CONFIG,
	BNX2X_VFOP_RSS_DONE
};

enum bnx2x_vfop_tpa_state {
	BNX2X_VFOP_TPA_CONFIG,
	BNX2X_VFOP_TPA_DONE
};

#define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0)
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	DP(BNX2X_MSG_IOV,
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->tx.sb_cq_index,
	   init_params->tx.hc_rate,
	   setup_params->flags,
	   setup_params->txq_params.traffic_type);
}

void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->rx.sb_cq_index,
	   init_params->rx.hc_rate,
	   setup_params->gen_params.mtu,
	   rxq_params->buf_sz,
	   rxq_params->sge_buf_sz,
	   rxq_params->max_sges_pkt,
	   rxq_params->tpa_agg_sz,
	   setup_params->flags,
	   rxq_params->drop_flags,
	   rxq_params->cache_line_log);
}
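/* Pre-build both the INIT and SETUP ramrod parameters for a VF queue.
 * The result is stashed in the per-VF op params and consumed later by
 * the queue-construction state machine below; nothing is sent to the
 * chip here.
 */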
void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vfop_qctor_params *p,
			   unsigned long q_type)
{
	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

	/* INIT */

	/* Enable host coalescing in the transition to INIT state */
	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

	/* FW SB ID */
	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

	/* context */
	init_p->cxts[0] = q->cxt;

	/* SETUP */

	/* Setup-op general parameters */
	setup_p->gen_params.spcl_id = vf->sp_cl_id;
	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);

	/* Setup-op pause params:
	 * Nothing to do, the pause thresholds are set by default to 0 which
	 * effectively turns off the feature for this queue. We don't want
	 * one queue (VF) to interfere with another queue (another VF).
	 */
	if (vf->cfg_flags & VF_CFG_FW_FC)
		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
			  vf->abs_vfid);
	/* Setup-op flags:
	 * collect statistics, zero statistics, local-switching, security,
	 * OV for Flex10, RSS and MCAST for leading
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

	/* for VFs, enable tx switching, bd coherency, and mac address
	 * anti-spoofing
	 */
	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

	/* Setup-op rx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
	}

	/* Setup-op tx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	}
}
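/* The VFOP state machines below share one convention: each handler is
 * re-entered (via the 'done' callback) after the slow-path completion
 * it triggered, switches on vfop->state, and uses bnx2x_vfop_finalize()
 * either to fall through to the next case or to return and wait.  The
 * op_err/op_done/op_pending labels abort, complete or suspend the
 * operation respectively; the missing 'break' statements are therefore
 * deliberate.
 *
 * Illustrative only (not taken from this file) - issuing an op
 * asynchronously and regaining control in a hypothetical
 * 'my_done_handler':
 *
 *	struct bnx2x_vfop_cmd cmd = {
 *		.done	= my_done_handler,
 *		.block	= false,
 *	};
 *	rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, qid);
 */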
/* VFOP queue construction */
static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qctor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QCTOR_INIT:

		/* has this queue already been opened? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
			DP(BNX2X_MSG_IOV,
			   "Entered qctor but queue was already up. Aborting gracefully\n");
			goto op_done;
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_SETUP;

		q_params->cmd = BNX2X_Q_CMD_INIT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_SETUP:
		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_INT_EN;

		/* copy pre-prepared setup params to the queue-state params */
		vfop->op_p->qctor.qstate.params.setup =
			vfop->op_p->qctor.prep_qsetup;

		q_params->cmd = BNX2X_Q_CMD_SETUP;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_INT_EN:

		/* enable interrupts */
		bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
				    USTORM_ID, 0, IGU_INT_ENABLE, 0);
		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
op_done:
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qctor.qid = qid;
		vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);

		bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
				 bnx2x_vfop_qctor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue destruction */
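/* Tear a queue down by walking the HALT -> TERMINATE -> CFC_DEL ramrod
 * sequence; a queue that is already STOPPED skips straight to DONE,
 * where the CDU context is invalidated.
 */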
static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qdtor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QDTOR_HALT:

		/* has this queue already been stopped? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_STOPPED) {
			DP(BNX2X_MSG_IOV,
			   "Entered qdtor but queue was already stopped. Aborting gracefully\n");

			/* next state */
			vfop->state = BNX2X_VFOP_QDTOR_DONE;

			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;

		q_params->cmd = BNX2X_Q_CMD_HALT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_TERMINATE:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;

		q_params->cmd = BNX2X_Q_CMD_TERMINATE;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_CFCDEL:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_DONE;

		q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
	BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
op_done:
	case BNX2X_VFOP_QDTOR_DONE:
		/* invalidate the context */
		if (qdtor->cxt) {
			qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
			qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
		}
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_queue_state_params *qstate =
			&vf->op_params.qctor.qstate;

		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qdtor.qid = qid;
		vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);

		bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
				 bnx2x_vfop_qdtor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
					     cmd->block);
	} else {
		BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid);
		return -ENOMEM;
	}
}

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
	if (vf) {
		/* the first igu entry belonging to VFs of this PF */
		if (!BP_VFDB(bp)->first_vf_igu_entry)
			BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;

		/* the first igu entry belonging to this VF */
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;

		++vf_sb_count(vf);
		++vf->sb_count;
	}
	BP_VFDB(bp)->vf_sbs_pool++;
}

/* VFOP MAC/VLAN helpers */
static inline void bnx2x_vfop_credit(struct bnx2x *bp,
				     struct bnx2x_vfop *vfop,
				     struct bnx2x_vlan_mac_obj *obj)
{
	struct bnx2x_vfop_args_filters *args = &vfop->args.filters;

	/* update credit only if there is no error
	 * and a valid credit counter
	 */
	if (!vfop->rc && args->credit) {
		struct list_head *pos;
		int read_lock;
		int cnt = 0;

		read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
		if (read_lock)
			DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");

		list_for_each(pos, &obj->head)
			cnt++;

		if (!read_lock)
			bnx2x_vlan_mac_h_read_unlock(bp, obj);

		atomic_set(args->credit, cnt);
	}
}
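/* Translate one queued filter element into a vlan_mac ramrod request
 * (bnx2x_vfop_set_user_req) and apply a whole list of them
 * (bnx2x_vfop_config_list).  On any failure, or if more than
 * filters->add_cnt rules ended up added, everything already applied is
 * rolled back by replaying each element with the opposite command.
 */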
static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
				   struct bnx2x_vfop_filter *pos,
				   struct bnx2x_vlan_mac_data *user_req)
{
	user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
		BNX2X_VLAN_MAC_DEL;

	switch (pos->type) {
	case BNX2X_VFOP_FILTER_MAC:
		memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
		break;
	case BNX2X_VFOP_FILTER_VLAN:
		user_req->u.vlan.vlan = pos->vid;
		break;
	default:
		BNX2X_ERR("Invalid filter type, skipping\n");
		return 1;
	}
	return 0;
}

static int bnx2x_vfop_config_list(struct bnx2x *bp,
				  struct bnx2x_vfop_filters *filters,
				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
{
	struct bnx2x_vfop_filter *pos, *tmp;
	struct list_head rollback_list, *filters_list = &filters->head;
	struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
	int rc = 0, cnt = 0;

	INIT_LIST_HEAD(&rollback_list);

	list_for_each_entry_safe(pos, tmp, filters_list, link) {
		if (bnx2x_vfop_set_user_req(bp, pos, user_req))
			continue;

		rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (rc >= 0) {
			cnt += pos->add ? 1 : -1;
			list_move(&pos->link, &rollback_list);
			rc = 0;
		} else if (rc == -EEXIST) {
			rc = 0;
		} else {
			BNX2X_ERR("Failed to add a new vlan_mac command\n");
			break;
		}
	}

	/* rollback if error or too many rules added */
	if (rc || cnt > filters->add_cnt) {
		BNX2X_ERR("error or too many rules added. Performing rollback\n");
		list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
			pos->add = !pos->add;	/* reverse op */
			bnx2x_vfop_set_user_req(bp, pos, user_req);
			bnx2x_config_vlan_mac(bp, vlan_mac);
			list_del(&pos->link);
		}
		cnt = 0;
		if (!rc)
			rc = -EINVAL;
	}
	filters->add_cnt = cnt;
	return rc;
}

/* VFOP set VLAN/MAC */
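/* One state machine serves all the VLAN/MAC flows: delete-all (CLEAR),
 * a single add/del (CONFIG_SINGLE) and list configuration for MACs and
 * VLANs.  All entry states converge on CHK_DONE, which polls the
 * vlan_mac object for pending ramrods before completing the op.
 */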
static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
	struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
	struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;

	enum bnx2x_vfop_vlan_mac_state state = vfop->state;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	bnx2x_vfop_reset_wq(vf);

	switch (state) {
	case BNX2X_VFOP_VLAN_MAC_CLEAR:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do delete */
		vfop->rc = obj->delete_all(bp, obj,
					   &vlan_mac->user_req.vlan_mac_flags,
					   &vlan_mac->ramrod_flags);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do config */
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (vfop->rc == -EEXIST)
			vfop->rc = 0;

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
		vfop->rc = !!obj->raw.check_pending(&obj->raw);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MAC_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do list config */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (vfop->rc)
			goto op_err;

		set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_VLAN_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do list config */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (!vfop->rc) {
			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		}
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
op_done:
	kfree(filters);
	bnx2x_vfop_credit(bp, vfop, obj);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

struct bnx2x_vfop_vlan_mac_flags {
	bool drv_only;
	bool dont_consume;
	bool single_cmd;
	bool add;
};

static void
bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
				struct bnx2x_vfop_vlan_mac_flags *flags)
{
	struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;

	memset(ramrod, 0, sizeof(*ramrod));

	/* ramrod flags */
	if (flags->drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
	if (flags->single_cmd)
		set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);

	/* mac_vlan flags */
	if (flags->dont_consume)
		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);

	/* cmd */
	ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
}

static inline void
bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
			   struct bnx2x_vfop_vlan_mac_flags *flags)
{
	bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
	set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
}

static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
				     struct bnx2x_virtf *vf,
				     struct bnx2x_vfop_cmd *cmd,
				     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single */
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false /* don't care */,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}
int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    struct bnx2x_vfop_filters *macs,
			    int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop;

	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
		return -EINVAL;

	vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = macs,
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false, /* don't care since only the items in the
				       * filters list affect the sp operation,
				       * not the list itself
				       */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
				   struct bnx2x_virtf *vf,
				   struct bnx2x_vfop_cmd *cmd,
				   int qid, u16 vid, bool add)
{
	struct bnx2x_vfop *vfop;

	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
		return -EINVAL;

	vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL, /* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = false,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = add,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
		ramrod->user_req.u.vlan.vlan = vid;

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      struct bnx2x_vfop_cmd *cmd,
				      int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL, /* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false, /* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}
int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
			     struct bnx2x_virtf *vf,
			     struct bnx2x_vfop_cmd *cmd,
			     struct bnx2x_vfop_filters *vlans,
			     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop;

	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
		return -EINVAL;

	vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = vlans,
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false, /* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
			atomic_read(filters.credit);

		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue setup (queue constructor + set vlan 0) */
static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qctor.qid;
	enum bnx2x_vfop_qsetup_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_qsetup,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QSETUP_CTOR:
		/* init the queue ctor command */
		vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
		vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QSETUP_VLAN0:
		/* skip if non-leading or FPGA/EMU */
		if (qid)
			goto op_done;

		/* init the queue set-vlan command (for vlan 0) */
		vfop->state = BNX2X_VFOP_QSETUP_DONE;
		vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QSETUP_DONE:
		vf->cfg_flags |= VF_CFG_VLAN;
		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
				       BNX2X_MSG_IOV);
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}

int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qctor.qid = qid;

		bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
				 bnx2x_vfop_qsetup, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
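/* FLR-time queue cleanup: VLANs and MACs are cleared with driver-only
 * ramrods (RAMROD_DRV_CLR_ONLY), and the queue is then TERMINATEd
 * unless it is already in RESET.  Non-leading queues, or a VF whose
 * slow-path objects were never initialized, enter directly at
 * TERMINATE.
 */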
static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qflr_state state = vfop->state;
	struct bnx2x_queue_state_params *qstate;
	struct bnx2x_vfop_cmd cmd;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qflr;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QFLR_CLR_VLAN:
		/* vlan-clear-all: driver-only, don't consume credit */
		vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;

		/* the vlan_mac vfop will re-schedule us */
		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QFLR_CLR_MAC:
		/* mac-clear-all: driver only, consume credit */
		vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
		/* the vlan_mac vfop will re-schedule us */
		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QFLR_TERMINATE:
		qstate = &vfop->op_p->qctor.qstate;
		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
		vfop->state = BNX2X_VFOP_QFLR_DONE;

		DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
		   vf->abs_vfid, qstate->q_obj->state);

		if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
			qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
			qstate->cmd = BNX2X_Q_CMD_TERMINATE;
			vfop->rc = bnx2x_queue_state_change(bp, qstate);
			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
		} else {
			goto op_done;
		}

op_err:
	BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QFLR_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
			       struct bnx2x_virtf *vf,
			       struct bnx2x_vfop_cmd *cmd,
			       int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qx.qid = qid;
		if ((qid == LEADING_IDX) &&
		    bnx2x_validate_vf_sp_objs(bp, vf, false))
			bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
					 bnx2x_vfop_qflr, cmd->done);
		else
			bnx2x_vfop_opset(BNX2X_VFOP_QFLR_TERMINATE,
					 bnx2x_vfop_qflr, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP multi-casts */
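/* Multicast configuration is a full replace: the DEL state first drops
 * the previously configured list, then the ADD state installs the new
 * one (if any), and CHK_DONE waits for the mcast object to go idle.
 */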
static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
	struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
	struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
	enum bnx2x_vfop_mcast_state state = vfop->state;
	int i;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_MCAST_DEL:
		/* clear existing mcasts */
		vfop->state = (args->mc_num) ? BNX2X_VFOP_MCAST_ADD
					     : BNX2X_VFOP_MCAST_CHK_DONE;
		mcast->mcast_list_len = vf->mcast_list_len;
		vf->mcast_list_len = args->mc_num;
		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_MCAST_ADD:
		if (raw->check_pending(raw))
			goto op_pending;

		/* update mcast list on the ramrod params */
		INIT_LIST_HEAD(&mcast->mcast_list);
		for (i = 0; i < args->mc_num; i++)
			list_add_tail(&(args->mc[i].link),
				      &mcast->mcast_list);
		mcast->mcast_list_len = args->mc_num;

		/* add new mcasts */
		vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
		vfop->rc = bnx2x_config_mcast(bp, mcast,
					      BNX2X_MCAST_CMD_ADD);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MCAST_CHK_DONE:
		vfop->rc = raw->check_pending(raw) ? 1 : 0;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
op_done:
	kfree(args->mc);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 bnx2x_mac_addr_t *mcasts,
			 int mcast_num, bool drv_only)
{
	struct bnx2x_vfop *vfop = NULL;
	size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
	struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
					   NULL;

	if (!mc_sz || mc) {
		vfop = bnx2x_vfop_add(bp, vf);
		if (vfop) {
			int i;
			struct bnx2x_mcast_ramrod_params *ramrod =
				&vf->op_params.mcast;

			/* set ramrod params */
			memset(ramrod, 0, sizeof(*ramrod));
			ramrod->mcast_obj = &vf->mcast_obj;
			if (drv_only)
				set_bit(RAMROD_DRV_CLR_ONLY,
					&ramrod->ramrod_flags);

			/* copy mcast addresses into the list elements */
			vfop->args.mc_list.mc_num = mcast_num;
			vfop->args.mc_list.mc = mc;
			for (i = 0; i < mcast_num; i++)
				mc[i].mac = mcasts[i];

			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
					 bnx2x_vfop_mcast, cmd->done);
			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
						     cmd->block);
		} else {
			kfree(mc);
		}
	}
	return -ENOMEM;
}

/* VFOP rx-mode */
static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
	enum bnx2x_vfop_rxmode_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_RXMODE_CONFIG:
		/* next state */
		vfop->state = BNX2X_VFOP_RXMODE_DONE;

		/* record the accept flags in vfdb so hypervisor can modify them
		 * if necessary
		 */
		bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
			ramrod->rx_accept_flags;
		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
	BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
op_done:
	case BNX2X_VFOP_RXMODE_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
				  struct bnx2x_rx_mode_ramrod_params *ramrod,
				  struct bnx2x_virtf *vf,
				  unsigned long accept_flags)
{
	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);

	memset(ramrod, 0, sizeof(*ramrod));
	ramrod->cid = vfq->cid;
	ramrod->cl_id = vfq_cl_id(vf, vfq);
	ramrod->rx_mode_obj = &bp->rx_mode_obj;
	ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
	ramrod->rx_accept_flags = accept_flags;
	ramrod->tx_accept_flags = accept_flags;
	ramrod->pstate = &vf->filter_state;
	ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	set_bit(RAMROD_RX, &ramrod->ramrod_flags);
	set_bit(RAMROD_TX, &ramrod->ramrod_flags);

	ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
}
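/* qid here is relative to the VF.  The same accept_flags value is
 * applied to both the RX and TX sides; passing 0 yields a drop-all
 * rx-mode, which is how the queue tear-down flow below quiesces a
 * queue before removing its filters.
 */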
int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid, unsigned long accept_flags)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_rx_mode_ramrod_params *ramrod =
			&vf->op_params.rx_mode;

		bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);

		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
				 bnx2x_vfop_rxmode, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
 * queue destructor)
 */
static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qteardown_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qdown;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QTEARDOWN_RXMODE:
		/* Drop all */
		if (bnx2x_validate_vf_sp_objs(bp, vf, true))
			vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
		else
			vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
		vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
		/* vlan-clear-all: don't consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
		/* mac-clear-all: consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MCAST;
		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_MCAST:
		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_QDTOR:
		/* run the queue destruction flow */
		DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
		vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
		DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
		vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
		DP(BNX2X_MSG_IOV, "returned from cmd\n");
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);

	case BNX2X_VFOP_QTEARDOWN_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}
int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	/* for non-leading queues skip directly to the queue-destructor state */
	if (vfop) {
		vfop->args.qx.qid = qid;
		bnx2x_vfop_opset(qid == LEADING_IDX ?
				 BNX2X_VFOP_QTEARDOWN_RXMODE :
				 BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
				 cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
					     cmd->block);
	}

	return -ENOMEM;
}

/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}
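/* Program the VF's IGU view while pretending to be the VF: clear all
 * SB masks and PBA bits, enable the function in MSI/MSI-X mode and set
 * its parent PF, then (back as the PF) zero every SB's producer memory,
 * reset the SB state machines and leave the SBs with interrupts
 * disabled - bnx2x_vfop_qctor re-enables them per queue later.
 */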
static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;
	u32 val;

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	if (vf->cfg_flags & VF_CFG_INT_SIMD)
		val |= IGU_VF_CONF_SINGLE_ISR_EN;
	val &= ~IGU_VF_CONF_PARENT_MASK;
	val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	DP(BNX2X_MSG_IOV,
	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
	   vf->abs_vfid, val);

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
				       false /* VF */);

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
				    IGU_INT_DISABLE, 1);
	}
}

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* Reset the VF in the IGU - interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		return false;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}
/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct vf_pf_resc_request *resc = &vf->alloc_resc;
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;

	/* divvy up vlan rules */
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
	resc->num_sbs = vf->sb_count;
}

/* FLR routines: */
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* reset the state variables */
	bnx2x_iov_static_resc(bp, vf);
	vf->state = VF_FREE;
}

static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	/* DQ usage counter */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
					"DQ VF usage counter timed out",
					poll_cnt);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* FW cleanup command - poll for the results */
	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
				   poll_cnt))
		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

	/* verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);
}
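/* Per-VF FLR state machine: walk every rx queue through the qflr flow,
 * remove the VF's multicasts, then run the HW cleanup (DQ usage
 * counter, FW final cleanup command, TX flush), free the VF's
 * resources and re-open its mailbox.  On exit the VF is marked
 * VF_FLR_ACK and the PF-VF channel lock is released.
 */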
static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
	enum bnx2x_vfop_flr_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_flr,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_FLR_QUEUES:
		/* the cleanup operations are valid if and only if the VF
		 * was first acquired.
		 */
		if (++(qx->qid) < vf_rxq_count(vf)) {
			vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
						       qx->qid);
			if (vfop->rc)
				goto op_err;
			return;
		}
		/* remove multicasts */
		vfop->state = BNX2X_VFOP_FLR_HW;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
						0, true);
		if (vfop->rc)
			goto op_err;
		return;
	case BNX2X_VFOP_FLR_HW:

		/* dispatch final cleanup and wait for HW queues to flush */
		bnx2x_vf_flr_clnup_hw(bp, vf);

		/* release VF resources */
		bnx2x_vf_free_resc(bp, vf);

		/* re-open the mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);

		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	vf->flr_clnup_stage = VF_FLR_ACK;
	bnx2x_vfop_end(bp, vf, vfop);
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
}

static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
			      struct bnx2x_virtf *vf,
			      vfop_handler_t done)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		vfop->args.qx.qid = -1; /* loop */
		bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
				 bnx2x_vfop_flr, done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
	}
	return -ENOMEM;
}

static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
{
	int i = prev_vf ? prev_vf->index + 1 : 0;
	struct bnx2x_virtf *vf;

	/* find next VF to cleanup */
next_vf_to_clean:
	for (;
	     i < BNX2X_NR_VIRTFN(bp) &&
	     (bnx2x_vf(bp, i, state) != VF_RESET ||
	      bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
	     i++)
		;

	DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
	   BNX2X_NR_VIRTFN(bp));

	if (i < BNX2X_NR_VIRTFN(bp)) {
		vf = BP_VF(bp, i);

		/* lock the vf pf channel */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

		/* invoke the VF FLR SM */
		if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
			BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
				  vf->abs_vfid);

			/* mark the VF to be ACKED and continue */
			vf->flr_clnup_stage = VF_FLR_ACK;
			goto next_vf_to_clean;
		}
		return;
	}

	/* we are done, update vf records */
	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);

		if (vf->flr_clnup_stage != VF_FLR_ACK)
			continue;

		vf->flr_clnup_stage = VF_FLR_EPILOG;
	}

	/* Acknowledge the handled VFs.
	 * We acknowledge all the VFs for which an FLR was requested, even
	 * ones we never opened, since if we only ack some of the bits the
	 * MCP will interrupt us again immediately, resulting in an endless
	 * loop. This can happen for example in KVM where an 'all ones' FLR
	 * request is sometimes given by the hypervisor.
	 */
	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
			  bp->vfdb->flrd_vfs[i]);

	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

	/* clear the acked bits - better yet if the MCP implemented
	 * write to clear semantics
	 */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}

void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
	int i;

	/* Read FLR'd VFs */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

	DP(BNX2X_MSG_MCP,
	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);
		u32 reset = 0;

		if (vf->abs_vfid < 32)
			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
		else
			reset = bp->vfdb->flrd_vfs[1] &
				(1 << (vf->abs_vfid - 32));

		if (reset) {
			/* set as reset and ready for cleanup */
			vf->state = VF_RESET;
			vf->flr_clnup_stage = VF_FLR_CLN;

			DP(BNX2X_MSG_IOV,
			   "Initiating Final cleanup for VF %d\n",
			   vf->abs_vfid);
		}
	}

	/* do the FLR cleanup for all marked VFs */
	bnx2x_vf_flr_clnup(bp, NULL);
}

/* IOV global initialization routines */
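/* The DORQ (doorbell queue) is programmed so that a doorbell's CID can
 * be derived directly from the issuing VF: VF CIDs start at
 * BNX2X_FIRST_VF_CID and each VF owns a contiguous window of
 * 2^BNX2X_VF_CID_WND CIDs.  For example, with a 2-bit window, VF n's
 * doorbells would map to CIDs BNX2X_FIRST_VF_CID + 4n .. + 4n + 3.
 */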
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflect the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set VFs starting CID. If it is > 0 the preceding CIDs belong to
	 * the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size  0 - *B, 4 - 128B. We set it here to match
	 * the PF doorbell size although the 2 are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the VF doorbell threshold */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}

void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}

static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}

static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		size /= iov->total;
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}

static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}
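/* Scan the IGU CAM in normal mode.  The scan relies on the CAM listing
 * a PF's entry before the entries of its VFs: each PF entry updates
 * 'current_pf', and every VF entry that follows our own PF's entry is
 * registered against the matching VF via bnx2x_vf_set_igu_info().
 */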
static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid, current_pf = 0;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (fid & IGU_FID_ENCODE_IS_PF)
			current_pf = fid & IGU_FID_PF_NUM_MASK;
		else if (current_pf == BP_FUNC(bp))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));
		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		    (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
	DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
}

static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}

static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}
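/* One-time SR-IOV initialization.  This bails out quietly (returning 0,
 * i.e. no VFs) unless all preconditions hold: we are a PF on an E2+
 * chip with an SR-IOV capability, num_vfs_param is non-zero, the PF L2
 * CIDs fit below BNX2X_FIRST_VF_CID, interrupt mode is MSI-X, ARI
 * forwarding is enabled on the upstream bridge and the IGU is in
 * normal (non-backward-compatible) mode.
 */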
Abort SRIOV\n", 1959 BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID); 1960 return 0; 1961 } 1962 1963 /* SRIOV can be enabled only with MSIX */ 1964 if (int_mode_param == BNX2X_INT_MODE_MSI || 1965 int_mode_param == BNX2X_INT_MODE_INTX) { 1966 BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n"); 1967 return 0; 1968 } 1969 1970 err = -EIO; 1971 /* verify ari is enabled */ 1972 if (!bnx2x_ari_enabled(bp->pdev)) { 1973 BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n"); 1974 return 0; 1975 } 1976 1977 /* verify igu is in normal mode */ 1978 if (CHIP_INT_MODE_IS_BC(bp)) { 1979 BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n"); 1980 return 0; 1981 } 1982 1983 /* allocate the vfs database */ 1984 bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL); 1985 if (!bp->vfdb) { 1986 BNX2X_ERR("failed to allocate vf database\n"); 1987 err = -ENOMEM; 1988 goto failed; 1989 } 1990 1991 /* get the sriov info - Linux already collected all the pertinent 1992 * information, however the sriov structure is for the private use 1993 * of the pci module. Also we want this information regardless 1994 * of the hyper-visor. 1995 */ 1996 iov = &(bp->vfdb->sriov); 1997 err = bnx2x_sriov_info(bp, iov); 1998 if (err) 1999 goto failed; 2000 2001 /* SR-IOV capability was enabled but there are no VFs*/ 2002 if (iov->total == 0) 2003 goto failed; 2004 2005 iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param); 2006 2007 DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n", 2008 num_vfs_param, iov->nr_virtfn); 2009 2010 /* allocate the vf array */ 2011 bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) * 2012 BNX2X_NR_VIRTFN(bp), GFP_KERNEL); 2013 if (!bp->vfdb->vfs) { 2014 BNX2X_ERR("failed to allocate vf array\n"); 2015 err = -ENOMEM; 2016 goto failed; 2017 } 2018 2019 /* Initial VF init - index and abs_vfid - nr_virtfn must be set */ 2020 for_each_vf(bp, i) { 2021 bnx2x_vf(bp, i, index) = i; 2022 bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; 2023 bnx2x_vf(bp, i, state) = VF_FREE; 2024 INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head)); 2025 mutex_init(&bnx2x_vf(bp, i, op_mutex)); 2026 bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; 2027 } 2028 2029 /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ 2030 bnx2x_get_vf_igu_cam_info(bp); 2031 2032 /* allocate the queue arrays for all VFs */ 2033 bp->vfdb->vfqs = kzalloc( 2034 BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue), 2035 GFP_KERNEL); 2036 2037 DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs); 2038 2039 if (!bp->vfdb->vfqs) { 2040 BNX2X_ERR("failed to allocate vf queue array\n"); 2041 err = -ENOMEM; 2042 goto failed; 2043 } 2044 2045 return 0; 2046 failed: 2047 DP(BNX2X_MSG_IOV, "Failed err=%d\n", err); 2048 __bnx2x_iov_free_vfdb(bp); 2049 return err; 2050 } 2051 2052 void bnx2x_iov_remove_one(struct bnx2x *bp) 2053 { 2054 int vf_idx; 2055 2056 /* if SRIOV is not enabled there's nothing to do */ 2057 if (!IS_SRIOV(bp)) 2058 return; 2059 2060 DP(BNX2X_MSG_IOV, "about to call disable sriov\n"); 2061 pci_disable_sriov(bp->pdev); 2062 DP(BNX2X_MSG_IOV, "sriov disabled\n"); 2063 2064 /* disable access to all VFs */ 2065 for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) { 2066 bnx2x_pretend_func(bp, 2067 HW_VF_HANDLE(bp, 2068 bp->vfdb->sriov.first_vf_in_pf + 2069 vf_idx)); 2070 DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n", 2071 bp->vfdb->sriov.first_vf_in_pf + vf_idx); 2072 bnx2x_vf_enable_internal(bp, 0); 2073 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 
2074 } 2075 2076 /* free vf database */ 2077 __bnx2x_iov_free_vfdb(bp); 2078 } 2079 2080 void bnx2x_iov_free_mem(struct bnx2x *bp) 2081 { 2082 int i; 2083 2084 if (!IS_SRIOV(bp)) 2085 return; 2086 2087 /* free vfs hw contexts */ 2088 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 2089 struct hw_dma *cxt = &bp->vfdb->context[i]; 2090 BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size); 2091 } 2092 2093 BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr, 2094 BP_VFDB(bp)->sp_dma.mapping, 2095 BP_VFDB(bp)->sp_dma.size); 2096 2097 BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr, 2098 BP_VF_MBX_DMA(bp)->mapping, 2099 BP_VF_MBX_DMA(bp)->size); 2100 2101 BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr, 2102 BP_VF_BULLETIN_DMA(bp)->mapping, 2103 BP_VF_BULLETIN_DMA(bp)->size); 2104 } 2105 2106 int bnx2x_iov_alloc_mem(struct bnx2x *bp) 2107 { 2108 size_t tot_size; 2109 int i, rc = 0; 2110 2111 if (!IS_SRIOV(bp)) 2112 return rc; 2113 2114 /* allocate vfs hw contexts */ 2115 tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) * 2116 BNX2X_CIDS_PER_VF * sizeof(union cdu_context); 2117 2118 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 2119 struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i); 2120 cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ); 2121 2122 if (cxt->size) { 2123 cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size); 2124 if (!cxt->addr) 2125 goto alloc_mem_err; 2126 } else { 2127 cxt->addr = NULL; 2128 cxt->mapping = 0; 2129 } 2130 tot_size -= cxt->size; 2131 } 2132 2133 /* allocate vfs ramrods dma memory - client_init and set_mac */ 2134 tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp); 2135 BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping, 2136 tot_size); 2137 if (!BP_VFDB(bp)->sp_dma.addr) 2138 goto alloc_mem_err; 2139 BP_VFDB(bp)->sp_dma.size = tot_size; 2140 2141 /* allocate mailboxes */ 2142 tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; 2143 BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping, 2144 tot_size); 2145 if (!BP_VF_MBX_DMA(bp)->addr) 2146 goto alloc_mem_err; 2147 2148 BP_VF_MBX_DMA(bp)->size = tot_size; 2149 2150 /* allocate local bulletin boards */ 2151 tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE; 2152 BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping, 2153 tot_size); 2154 if (!BP_VF_BULLETIN_DMA(bp)->addr) 2155 goto alloc_mem_err; 2156 2157 BP_VF_BULLETIN_DMA(bp)->size = tot_size; 2158 2159 return 0; 2160 2161 alloc_mem_err: 2162 return -ENOMEM; 2163 } 2164 2165 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, 2166 struct bnx2x_vf_queue *q) 2167 { 2168 u8 cl_id = vfq_cl_id(vf, q); 2169 u8 func_id = FW_VF_HANDLE(vf->abs_vfid); 2170 unsigned long q_type = 0; 2171 2172 set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); 2173 set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); 2174 2175 /* Queue State object */ 2176 bnx2x_init_queue_obj(bp, &q->sp_obj, 2177 cl_id, &q->cid, 1, func_id, 2178 bnx2x_vf_sp(bp, vf, q_data), 2179 bnx2x_vf_sp_map(bp, vf, q_data), 2180 q_type); 2181 2182 /* sp indication is set only when vlan/mac/etc. are initialized */ 2183 q->sp_initialized = false; 2184 2185 DP(BNX2X_MSG_IOV, 2186 "initialized vf %d's queue object. func id set to %d. 
cid set to 0x%x\n", 2187 vf->abs_vfid, q->sp_obj.func_id, q->cid); 2188 } 2189 2190 /* called by bnx2x_nic_load */ 2191 int bnx2x_iov_nic_init(struct bnx2x *bp) 2192 { 2193 int vfid; 2194 2195 if (!IS_SRIOV(bp)) { 2196 DP(BNX2X_MSG_IOV, "vfdb was not allocated\n"); 2197 return 0; 2198 } 2199 2200 DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn); 2201 2202 /* let FLR complete ... */ 2203 msleep(100); 2204 2205 /* initialize vf database */ 2206 for_each_vf(bp, vfid) { 2207 struct bnx2x_virtf *vf = BP_VF(bp, vfid); 2208 2209 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) * 2210 BNX2X_CIDS_PER_VF; 2211 2212 union cdu_context *base_cxt = (union cdu_context *) 2213 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + 2214 (base_vf_cid & (ILT_PAGE_CIDS-1)); 2215 2216 DP(BNX2X_MSG_IOV, 2217 "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n", 2218 vf->abs_vfid, vf_sb_count(vf), base_vf_cid, 2219 BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt); 2220 2221 /* init statically provisioned resources */ 2222 bnx2x_iov_static_resc(bp, vf); 2223 2224 /* queues are initialized during VF-ACQUIRE */ 2225 2226 /* reserve the vf vlan credit */ 2227 bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf)); 2228 2229 vf->filter_state = 0; 2230 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); 2231 2232 /* init mcast object - This object will be re-initialized 2233 * during VF-ACQUIRE with the proper cl_id and cid. 2234 * It needs to be initialized here so that it can be safely 2235 * handled by a subsequent FLR flow. 2236 */ 2237 vf->mcast_list_len = 0; 2238 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF, 2239 0xFF, 0xFF, 0xFF, 2240 bnx2x_vf_sp(bp, vf, mcast_rdata), 2241 bnx2x_vf_sp_map(bp, vf, mcast_rdata), 2242 BNX2X_FILTER_MCAST_PENDING, 2243 &vf->filter_state, 2244 BNX2X_OBJ_TYPE_RX_TX); 2245 2246 /* set the mailbox message addresses */ 2247 BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *) 2248 (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid * 2249 MBX_MSG_ALIGNED_SIZE); 2250 2251 BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping + 2252 vfid * MBX_MSG_ALIGNED_SIZE; 2253 2254 /* Enable vf mailbox */ 2255 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); 2256 } 2257 2258 /* Final VF init */ 2259 for_each_vf(bp, vfid) { 2260 struct bnx2x_virtf *vf = BP_VF(bp, vfid); 2261 2262 /* fill in the BDF and bars */ 2263 vf->bus = bnx2x_vf_bus(bp, vfid); 2264 vf->devfn = bnx2x_vf_devfn(bp, vfid); 2265 bnx2x_vf_set_bars(bp, vf); 2266 2267 DP(BNX2X_MSG_IOV, 2268 "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n", 2269 vf->abs_vfid, vf->bus, vf->devfn, 2270 (unsigned)vf->bars[0].bar, vf->bars[0].size, 2271 (unsigned)vf->bars[1].bar, vf->bars[1].size, 2272 (unsigned)vf->bars[2].bar, vf->bars[2].size); 2273 } 2274 2275 return 0; 2276 } 2277 2278 /* called by bnx2x_chip_cleanup */ 2279 int bnx2x_iov_chip_cleanup(struct bnx2x *bp) 2280 { 2281 int i; 2282 2283 if (!IS_SRIOV(bp)) 2284 return 0; 2285 2286 /* release all the VFs */ 2287 for_each_vf(bp, i) 2288 bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */ 2289 2290 return 0; 2291 } 2292 2293 /* called by bnx2x_init_hw_func, returns the next ilt line */ 2294 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) 2295 { 2296 int i; 2297 struct bnx2x_ilt *ilt = BP_ILT(bp); 2298 2299 if (!IS_SRIOV(bp)) 2300 return line; 2301 2302 /* set vfs ilt lines */ 2303 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 2304 struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i); 2305 2306 ilt->lines[line+i].page = 
hw_cxt->addr; 2307 ilt->lines[line+i].page_mapping = hw_cxt->mapping; 2308 ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */ 2309 } 2310 return line + i; 2311 } 2312 2313 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid) 2314 { 2315 return ((cid >= BNX2X_FIRST_VF_CID) && 2316 ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS)); 2317 } 2318 2319 static 2320 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, 2321 struct bnx2x_vf_queue *vfq, 2322 union event_ring_elem *elem) 2323 { 2324 unsigned long ramrod_flags = 0; 2325 int rc = 0; 2326 2327 /* Always push next commands out, don't wait here */ 2328 set_bit(RAMROD_CONT, &ramrod_flags); 2329 2330 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { 2331 case BNX2X_FILTER_MAC_PENDING: 2332 rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, 2333 &ramrod_flags); 2334 break; 2335 case BNX2X_FILTER_VLAN_PENDING: 2336 rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem, 2337 &ramrod_flags); 2338 break; 2339 default: 2340 BNX2X_ERR("Unsupported classification command: %d\n", 2341 elem->message.data.eth_event.echo); 2342 return; 2343 } 2344 if (rc < 0) 2345 BNX2X_ERR("Failed to schedule new commands: %d\n", rc); 2346 else if (rc > 0) 2347 DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n"); 2348 } 2349 2350 static 2351 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp, 2352 struct bnx2x_virtf *vf) 2353 { 2354 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 2355 int rc; 2356 2357 rparam.mcast_obj = &vf->mcast_obj; 2358 vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw); 2359 2360 /* If there are pending mcast commands - send them */ 2361 if (vf->mcast_obj.check_pending(&vf->mcast_obj)) { 2362 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 2363 if (rc < 0) 2364 BNX2X_ERR("Failed to send pending mcast commands: %d\n", 2365 rc); 2366 } 2367 } 2368 2369 static 2370 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, 2371 struct bnx2x_virtf *vf) 2372 { 2373 smp_mb__before_clear_bit(); 2374 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); 2375 smp_mb__after_clear_bit(); 2376 } 2377 2378 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) 2379 { 2380 struct bnx2x_virtf *vf; 2381 int qidx = 0, abs_vfid; 2382 u8 opcode; 2383 u16 cid = 0xffff; 2384 2385 if (!IS_SRIOV(bp)) 2386 return 1; 2387 2388 /* first get the cid - the only events we handle here are cfc-delete 2389 * and set-mac completion 2390 */ 2391 opcode = elem->message.opcode; 2392 2393 switch (opcode) { 2394 case EVENT_RING_OPCODE_CFC_DEL: 2395 cid = SW_CID((__force __le32) 2396 elem->message.data.cfc_del_event.cid); 2397 DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid); 2398 break; 2399 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 2400 case EVENT_RING_OPCODE_MULTICAST_RULES: 2401 case EVENT_RING_OPCODE_FILTERS_RULES: 2402 cid = (elem->message.data.eth_event.echo & 2403 BNX2X_SWCID_MASK); 2404 DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); 2405 break; 2406 case EVENT_RING_OPCODE_VF_FLR: 2407 abs_vfid = elem->message.data.vf_flr_event.vf_id; 2408 DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n", 2409 abs_vfid); 2410 goto get_vf; 2411 case EVENT_RING_OPCODE_MALICIOUS_VF: 2412 abs_vfid = elem->message.data.malicious_vf_event.vf_id; 2413 BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n", 2414 abs_vfid, 2415 elem->message.data.malicious_vf_event.err_id); 2416 goto get_vf; 2417 default: 2418 return 1; 2419 } 2420 2421 /* check if the cid is in the VF range */ 2422 if
(!bnx2x_iov_is_vf_cid(bp, cid)) { 2423 DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid); 2424 return 1; 2425 } 2426 2427 /* extract vf and rxq index from vf_cid - relies on the following: 2428 * 1. vfid on cid reflects the true abs_vfid 2429 * 2. The max number of VFs (per path) is 64 2430 */ 2431 qidx = cid & ((1 << BNX2X_VF_CID_WND)-1); 2432 abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 2433 get_vf: 2434 vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 2435 2436 if (!vf) { 2437 BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n", 2438 cid, abs_vfid); 2439 return 0; 2440 } 2441 2442 switch (opcode) { 2443 case EVENT_RING_OPCODE_CFC_DEL: 2444 DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n", 2445 vf->abs_vfid, qidx); 2446 vfq_get(vf, qidx)->sp_obj.complete_cmd(bp, 2447 &vfq_get(vf, 2448 qidx)->sp_obj, 2449 BNX2X_Q_CMD_CFC_DEL); 2450 break; 2451 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 2452 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n", 2453 vf->abs_vfid, qidx); 2454 bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem); 2455 break; 2456 case EVENT_RING_OPCODE_MULTICAST_RULES: 2457 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n", 2458 vf->abs_vfid, qidx); 2459 bnx2x_vf_handle_mcast_eqe(bp, vf); 2460 break; 2461 case EVENT_RING_OPCODE_FILTERS_RULES: 2462 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n", 2463 vf->abs_vfid, qidx); 2464 bnx2x_vf_handle_filters_eqe(bp, vf); 2465 break; 2466 case EVENT_RING_OPCODE_VF_FLR: 2467 case EVENT_RING_OPCODE_MALICIOUS_VF: 2468 /* Do nothing for now */ 2469 return 0; 2470 } 2471 /* SRIOV: reschedule any 'in_progress' operations */ 2472 bnx2x_iov_sp_event(bp, cid, false); 2473 2474 return 0; 2475 } 2476 2477 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) 2478 { 2479 /* extract the vf from vf_cid - relies on the following: 2480 * 1. vfid on cid reflects the true abs_vfid 2481 * 2. The max number of VFs (per path) is 64 2482 */ 2483 int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 2484 return bnx2x_vf_by_abs_fid(bp, abs_vfid); 2485 } 2486 2487 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, 2488 struct bnx2x_queue_sp_obj **q_obj) 2489 { 2490 struct bnx2x_virtf *vf; 2491 2492 if (!IS_SRIOV(bp)) 2493 return; 2494 2495 vf = bnx2x_vf_by_cid(bp, vf_cid); 2496 2497 if (vf) { 2498 /* extract queue index from vf_cid - relies on the following: 2499 * 1. vfid on cid reflects the true abs_vfid 2500 * 2. 
The max number of VFs (per path) is 64 2501 */ 2502 int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1); 2503 *q_obj = &bnx2x_vfq(vf, q_index, sp_obj); 2504 } else { 2505 BNX2X_ERR("No vf matching cid %d\n", vf_cid); 2506 } 2507 } 2508 2509 void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work) 2510 { 2511 struct bnx2x_virtf *vf; 2512 2513 /* check if the cid is in the VF range */ 2514 if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid)) 2515 return; 2516 2517 vf = bnx2x_vf_by_cid(bp, vf_cid); 2518 if (vf) { 2519 /* set in_progress flag */ 2520 atomic_set(&vf->op_in_progress, 1); 2521 if (queue_work) 2522 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); 2523 } 2524 } 2525 2526 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) 2527 { 2528 int i; 2529 int first_queue_query_index, num_queues_req; 2530 dma_addr_t cur_data_offset; 2531 struct stats_query_entry *cur_query_entry; 2532 u8 stats_count = 0; 2533 bool is_fcoe = false; 2534 2535 if (!IS_SRIOV(bp)) 2536 return; 2537 2538 if (!NO_FCOE(bp)) 2539 is_fcoe = true; 2540 2541 /* fcoe adds one global request and one queue request */ 2542 num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe; 2543 first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 2544 (is_fcoe ? 0 : 1); 2545 2546 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), 2547 "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n", 2548 BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, 2549 first_queue_query_index + num_queues_req); 2550 2551 cur_data_offset = bp->fw_stats_data_mapping + 2552 offsetof(struct bnx2x_fw_stats_data, queue_stats) + 2553 num_queues_req * sizeof(struct per_queue_stats); 2554 2555 cur_query_entry = &bp->fw_stats_req-> 2556 query[first_queue_query_index + num_queues_req]; 2557 2558 for_each_vf(bp, i) { 2559 int j; 2560 struct bnx2x_virtf *vf = BP_VF(bp, i); 2561 2562 if (vf->state != VF_ENABLED) { 2563 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), 2564 "vf %d not enabled so no stats for it\n", 2565 vf->abs_vfid); 2566 continue; 2567 } 2568 2569 DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid); 2570 for_each_vfq(vf, j) { 2571 struct bnx2x_vf_queue *rxq = vfq_get(vf, j); 2572 2573 dma_addr_t q_stats_addr = 2574 vf->fw_stat_map + j * vf->stats_stride; 2575 2576 /* collect stats from active queues only */ 2577 if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) == 2578 BNX2X_Q_LOGICAL_STATE_STOPPED) 2579 continue; 2580 2581 /* create stats query entry for this queue */ 2582 cur_query_entry->kind = STATS_TYPE_QUEUE; 2583 cur_query_entry->index = vfq_stat_id(vf, rxq); 2584 cur_query_entry->funcID = 2585 cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid)); 2586 cur_query_entry->address.hi = 2587 cpu_to_le32(U64_HI(q_stats_addr)); 2588 cur_query_entry->address.lo = 2589 cpu_to_le32(U64_LO(q_stats_addr)); 2590 DP(BNX2X_MSG_IOV, 2591 "added address %x %x for vf %d queue %d client %d\n", 2592 cur_query_entry->address.hi, 2593 cur_query_entry->address.lo, cur_query_entry->funcID, 2594 j, cur_query_entry->index); 2595 cur_query_entry++; 2596 cur_data_offset += sizeof(struct per_queue_stats); 2597 stats_count++; 2598 2599 /* all stats are coalesced to the leading queue */ 2600 if (vf->cfg_flags & VF_CFG_STATS_COALESCE) 2601 break; 2602 } 2603 } 2604 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; 2605 } 2606 2607 void bnx2x_iov_sp_task(struct bnx2x *bp) 2608 { 2609 int i; 2610 2611 if (!IS_SRIOV(bp)) 2612 return; 2613 /* Iterate over all VFs
and invoke state transition for VFs with 2614 * 'in-progress' slow-path operations 2615 */ 2616 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_SP), 2617 "searching for pending vf operations\n"); 2618 for_each_vf(bp, i) { 2619 struct bnx2x_virtf *vf = BP_VF(bp, i); 2620 2621 if (!vf) { 2622 BNX2X_ERR("VF was null! skipping...\n"); 2623 continue; 2624 } 2625 2626 if (!list_empty(&vf->op_list_head) && 2627 atomic_read(&vf->op_in_progress)) { 2628 DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i); 2629 bnx2x_vfop_cur(bp, vf)->transition(bp, vf); 2630 } 2631 } 2632 } 2633 2634 static inline 2635 struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id) 2636 { 2637 int i; 2638 struct bnx2x_virtf *vf = NULL; 2639 2640 for_each_vf(bp, i) { 2641 vf = BP_VF(bp, i); 2642 if (stat_id >= vf->igu_base_id && 2643 stat_id < vf->igu_base_id + vf_sb_count(vf)) 2644 break; 2645 } 2646 return vf; 2647 } 2648 2649 /* VF API helpers */ 2650 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid, 2651 u8 enable) 2652 { 2653 u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4; 2654 u32 val = enable ? (abs_vfid | (1 << 6)) : 0; 2655 2656 REG_WR(bp, reg, val); 2657 } 2658 2659 static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf) 2660 { 2661 int i; 2662 2663 for_each_vfq(vf, i) 2664 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, 2665 vfq_qzone_id(vf, vfq_get(vf, i)), false); 2666 } 2667 2668 static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf) 2669 { 2670 u32 val; 2671 2672 /* clear the VF configuration - pretend */ 2673 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); 2674 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); 2675 val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN | 2676 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK); 2677 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); 2678 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 2679 } 2680 2681 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf) 2682 { 2683 return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF), 2684 BNX2X_VF_MAX_QUEUES); 2685 } 2686 2687 static 2688 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf, 2689 struct vf_pf_resc_request *req_resc) 2690 { 2691 u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); 2692 u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); 2693 2694 return ((req_resc->num_rxqs <= rxq_cnt) && 2695 (req_resc->num_txqs <= txq_cnt) && 2696 (req_resc->num_sbs <= vf_sb_count(vf)) && 2697 (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) && 2698 (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf))); 2699 } 2700 2701 /* CORE VF API */ 2702 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, 2703 struct vf_pf_resc_request *resc) 2704 { 2705 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) * 2706 BNX2X_CIDS_PER_VF; 2707 2708 union cdu_context *base_cxt = (union cdu_context *) 2709 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + 2710 (base_vf_cid & (ILT_PAGE_CIDS-1)); 2711 int i; 2712 2713 /* if state is 'acquired' the VF was not released or FLR'd, in 2714 * this case the returned resources match the already 2715 * acquired resources. Verify that the requested numbers do 2716 * not exceed the already acquired numbers.
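 * (A VF still in the 'acquired' state keeps its original allocation, so
 * a larger request cannot be honored without an intervening release or
 * FLR.)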
2717 */ 2718 if (vf->state == VF_ACQUIRED) { 2719 DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n", 2720 vf->abs_vfid); 2721 2722 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { 2723 BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n", 2724 vf->abs_vfid); 2725 return -EINVAL; 2726 } 2727 return 0; 2728 } 2729 2730 /* Otherwise vf state must be 'free' or 'reset' */ 2731 if (vf->state != VF_FREE && vf->state != VF_RESET) { 2732 BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n", 2733 vf->abs_vfid, vf->state); 2734 return -EINVAL; 2735 } 2736 2737 /* static allocation: 2738 * the global maximum numbers are fixed per VF. Fail the request if the 2739 * requested numbers exceed these globals 2740 */ 2741 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { 2742 DP(BNX2X_MSG_IOV, 2743 "cannot fulfill vf resource request. Placing maximal available values in response\n"); 2744 /* set the max resource in the vf */ 2745 return -ENOMEM; 2746 } 2747 2748 /* Set resource counters - a 0 request means max available */ 2749 vf_sb_count(vf) = resc->num_sbs; 2750 vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf); 2751 vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf); 2752 if (resc->num_mac_filters) 2753 vf_mac_rules_cnt(vf) = resc->num_mac_filters; 2754 if (resc->num_vlan_filters) 2755 vf_vlan_rules_cnt(vf) = resc->num_vlan_filters; 2756 2757 DP(BNX2X_MSG_IOV, 2758 "Fulfilling vf request: sb count %d, rx_count %d, tx_count %d, mac_rules_count %d, vlan_rules_count %d\n", 2759 vf_sb_count(vf), vf_rxq_count(vf), 2760 vf_txq_count(vf), vf_mac_rules_cnt(vf), 2761 vf_vlan_rules_cnt(vf)); 2762 2763 /* Initialize the queues */ 2764 if (!vf->vfqs) { 2765 DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n"); 2766 return -EINVAL; 2767 } 2768 2769 for_each_vfq(vf, i) { 2770 struct bnx2x_vf_queue *q = vfq_get(vf, i); 2771 2772 if (!q) { 2773 BNX2X_ERR("q number %d was not allocated\n", i); 2774 return -EINVAL; 2775 } 2776 2777 q->index = i; 2778 q->cxt = &((base_cxt + i)->eth); 2779 q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i; 2780 2781 DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n", 2782 vf->abs_vfid, i, q->index, q->cid, q->cxt); 2783 2784 /* init SP objects */ 2785 bnx2x_vfq_init(bp, vf, q); 2786 } 2787 vf->state = VF_ACQUIRED; 2788 return 0; 2789 } 2790 2791 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) 2792 { 2793 struct bnx2x_func_init_params func_init = {0}; 2794 u16 flags = 0; 2795 int i; 2796 2797 /* the sb resources are initialized at this point, do the 2798 * FW/HW initializations 2799 */ 2800 for_each_vf_sb(vf, i) 2801 bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true, 2802 vf_igu_sb(vf, i), vf_igu_sb(vf, i)); 2803 2804 /* Sanity checks */ 2805 if (vf->state != VF_ACQUIRED) { 2806 DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n", 2807 vf->abs_vfid, vf->state); 2808 return -EINVAL; 2809 } 2810 2811 /* let FLR complete ...
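 * (the 100ms sleep below gives any FLR still in flight time to drain
 * before the VF's resources are touched)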
*/ 2812 msleep(100); 2813 2814 /* FLR cleanup epilogue */ 2815 if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid)) 2816 return -EBUSY; 2817 2818 /* reset IGU VF statistics: MSIX */ 2819 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0); 2820 2821 /* vf init */ 2822 if (vf->cfg_flags & VF_CFG_STATS) 2823 flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ); 2824 2825 if (vf->cfg_flags & VF_CFG_TPA) 2826 flags |= FUNC_FLG_TPA; 2827 2828 if (is_vf_multi(vf)) 2829 flags |= FUNC_FLG_RSS; 2830 2831 /* function setup */ 2832 func_init.func_flgs = flags; 2833 func_init.pf_id = BP_FUNC(bp); 2834 func_init.func_id = FW_VF_HANDLE(vf->abs_vfid); 2835 func_init.fw_stat_map = vf->fw_stat_map; 2836 func_init.spq_map = vf->spq_map; 2837 func_init.spq_prod = 0; 2838 bnx2x_func_init(bp, &func_init); 2839 2840 /* Enable the vf */ 2841 bnx2x_vf_enable_access(bp, vf->abs_vfid); 2842 bnx2x_vf_enable_traffic(bp, vf); 2843 2844 /* queue protection table */ 2845 for_each_vfq(vf, i) 2846 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, 2847 vfq_qzone_id(vf, vfq_get(vf, i)), true); 2848 2849 vf->state = VF_ENABLED; 2850 2851 /* update vf bulletin board */ 2852 bnx2x_post_vf_bulletin(bp, vf->index); 2853 2854 return 0; 2855 } 2856 2857 struct set_vf_state_cookie { 2858 struct bnx2x_virtf *vf; 2859 u8 state; 2860 }; 2861 2862 static void bnx2x_set_vf_state(void *cookie) 2863 { 2864 struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie; 2865 2866 p->vf->state = p->state; 2867 } 2868 2869 /* VFOP close (teardown the queues, delete mcasts and close HW) */ 2870 static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) 2871 { 2872 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 2873 struct bnx2x_vfop_args_qx *qx = &vfop->args.qx; 2874 enum bnx2x_vfop_close_state state = vfop->state; 2875 struct bnx2x_vfop_cmd cmd = { 2876 .done = bnx2x_vfop_close, 2877 .block = false, 2878 }; 2879 2880 if (vfop->rc < 0) 2881 goto op_err; 2882 2883 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); 2884 2885 switch (state) { 2886 case BNX2X_VFOP_CLOSE_QUEUES: 2887 2888 if (++(qx->qid) < vf_rxq_count(vf)) { 2889 vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid); 2890 if (vfop->rc) 2891 goto op_err; 2892 return; 2893 } 2894 vfop->state = BNX2X_VFOP_CLOSE_HW; 2895 vfop->rc = 0; 2896 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); 2897 2898 case BNX2X_VFOP_CLOSE_HW: 2899 2900 /* disable the interrupts */ 2901 DP(BNX2X_MSG_IOV, "disabling igu\n"); 2902 bnx2x_vf_igu_disable(bp, vf); 2903 2904 /* disable the VF */ 2905 DP(BNX2X_MSG_IOV, "clearing qtbl\n"); 2906 bnx2x_vf_clr_qtbl(bp, vf); 2907 2908 goto op_done; 2909 default: 2910 bnx2x_vfop_default(state); 2911 } 2912 op_err: 2913 BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc); 2914 op_done: 2915 2916 /* need to make sure there are no outstanding stats ramrods which may 2917 * cause the device to access the VF's stats buffer, which it will free 2918 * as soon as we return from the close flow.
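 * The state change is therefore done under the statistics lock, via
 * bnx2x_stats_safe_exec() below.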
2919 */ 2920 { 2921 struct set_vf_state_cookie cookie; 2922 2923 cookie.vf = vf; 2924 cookie.state = VF_ACQUIRED; 2925 bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); 2926 } 2927 2928 DP(BNX2X_MSG_IOV, "set state to acquired\n"); 2929 bnx2x_vfop_end(bp, vf, vfop); 2930 op_pending: 2931 /* Not supported at the moment; Exists for macros only */ 2932 return; 2933 } 2934 2935 int bnx2x_vfop_close_cmd(struct bnx2x *bp, 2936 struct bnx2x_virtf *vf, 2937 struct bnx2x_vfop_cmd *cmd) 2938 { 2939 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 2940 if (vfop) { 2941 vfop->args.qx.qid = -1; /* loop */ 2942 bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES, 2943 bnx2x_vfop_close, cmd->done); 2944 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close, 2945 cmd->block); 2946 } 2947 return -ENOMEM; 2948 } 2949 2950 /* VF release can be called either: 1. The VF was acquired but 2951 * not enabled 2. the vf was enabled or in the process of being 2952 * enabled 2953 */ 2954 static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf) 2955 { 2956 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 2957 struct bnx2x_vfop_cmd cmd = { 2958 .done = bnx2x_vfop_release, 2959 .block = false, 2960 }; 2961 2962 DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); 2963 2964 if (vfop->rc < 0) 2965 goto op_err; 2966 2967 DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid, 2968 vf->state == VF_FREE ? "Free" : 2969 vf->state == VF_ACQUIRED ? "Acquired" : 2970 vf->state == VF_ENABLED ? "Enabled" : 2971 vf->state == VF_RESET ? "Reset" : 2972 "Unknown"); 2973 2974 switch (vf->state) { 2975 case VF_ENABLED: 2976 vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd); 2977 if (vfop->rc) 2978 goto op_err; 2979 return; 2980 2981 case VF_ACQUIRED: 2982 DP(BNX2X_MSG_IOV, "about to free resources\n"); 2983 bnx2x_vf_free_resc(bp, vf); 2984 DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); 2985 goto op_done; 2986 2987 case VF_FREE: 2988 case VF_RESET: 2989 /* do nothing */ 2990 goto op_done; 2991 default: 2992 bnx2x_vfop_default(vf->state); 2993 } 2994 op_err: 2995 BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc); 2996 op_done: 2997 bnx2x_vfop_end(bp, vf, vfop); 2998 } 2999 3000 static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf) 3001 { 3002 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 3003 enum bnx2x_vfop_rss_state state; 3004 3005 if (!vfop) { 3006 BNX2X_ERR("vfop was null\n"); 3007 return; 3008 } 3009 3010 state = vfop->state; 3011 bnx2x_vfop_reset_wq(vf); 3012 3013 if (vfop->rc < 0) 3014 goto op_err; 3015 3016 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); 3017 3018 switch (state) { 3019 case BNX2X_VFOP_RSS_CONFIG: 3020 /* next state */ 3021 vfop->state = BNX2X_VFOP_RSS_DONE; 3022 bnx2x_config_rss(bp, &vfop->op_p->rss); 3023 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); 3024 op_err: 3025 BNX2X_ERR("RSS error: rc %d\n", vfop->rc); 3026 op_done: 3027 case BNX2X_VFOP_RSS_DONE: 3028 bnx2x_vfop_end(bp, vf, vfop); 3029 return; 3030 default: 3031 bnx2x_vfop_default(state); 3032 } 3033 op_pending: 3034 return; 3035 } 3036 3037 int bnx2x_vfop_release_cmd(struct bnx2x *bp, 3038 struct bnx2x_virtf *vf, 3039 struct bnx2x_vfop_cmd *cmd) 3040 { 3041 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 3042 if (vfop) { 3043 bnx2x_vfop_opset(-1, /* use vf->state */ 3044 bnx2x_vfop_release, cmd->done); 3045 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release, 3046 cmd->block); 3047 } 3048 return -ENOMEM; 3049 } 3050 3051 int bnx2x_vfop_rss_cmd(struct bnx2x *bp, 3052 struct bnx2x_virtf *vf, 3053 
struct bnx2x_vfop_cmd *cmd) 3054 { 3055 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 3056 3057 if (vfop) { 3058 bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss, 3059 cmd->done); 3060 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss, 3061 cmd->block); 3062 } 3063 return -ENOMEM; 3064 } 3065 3066 /* VFOP tpa update, send update on all queues */ 3067 static void bnx2x_vfop_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf) 3068 { 3069 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 3070 struct bnx2x_vfop_args_tpa *tpa_args = &vfop->args.tpa; 3071 enum bnx2x_vfop_tpa_state state = vfop->state; 3072 3073 bnx2x_vfop_reset_wq(vf); 3074 3075 if (vfop->rc < 0) 3076 goto op_err; 3077 3078 DP(BNX2X_MSG_IOV, "vf[%d:%d] STATE: %d\n", 3079 vf->abs_vfid, tpa_args->qid, 3080 state); 3081 3082 switch (state) { 3083 case BNX2X_VFOP_TPA_CONFIG: 3084 3085 if (tpa_args->qid < vf_rxq_count(vf)) { 3086 struct bnx2x_queue_state_params *qstate = 3087 &vf->op_params.qstate; 3088 3089 qstate->q_obj = &bnx2x_vfq(vf, tpa_args->qid, sp_obj); 3090 3091 /* The only thing that changes for the ramrod params 3092 * between calls is the sge_map 3093 */ 3094 qstate->params.update_tpa.sge_map = 3095 tpa_args->sge_map[tpa_args->qid]; 3096 3097 DP(BNX2X_MSG_IOV, "sge_addr[%d] %08x:%08x\n", 3098 tpa_args->qid, 3099 U64_HI(qstate->params.update_tpa.sge_map), 3100 U64_LO(qstate->params.update_tpa.sge_map)); 3101 qstate->cmd = BNX2X_Q_CMD_UPDATE_TPA; 3102 vfop->rc = bnx2x_queue_state_change(bp, qstate); 3103 3104 tpa_args->qid++; 3105 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); 3106 } 3107 vfop->state = BNX2X_VFOP_TPA_DONE; 3108 vfop->rc = 0; 3109 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); 3110 op_err: 3111 BNX2X_ERR("TPA update error: rc %d\n", vfop->rc); 3112 op_done: 3113 case BNX2X_VFOP_TPA_DONE: 3114 bnx2x_vfop_end(bp, vf, vfop); 3115 return; 3116 default: 3117 bnx2x_vfop_default(state); 3118 } 3119 op_pending: 3120 return; 3121 } 3122 3123 int bnx2x_vfop_tpa_cmd(struct bnx2x *bp, 3124 struct bnx2x_virtf *vf, 3125 struct bnx2x_vfop_cmd *cmd, 3126 struct vfpf_tpa_tlv *tpa_tlv) 3127 { 3128 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 3129 3130 if (vfop) { 3131 vfop->args.qx.qid = 0; /* loop */ 3132 memcpy(&vfop->args.tpa.sge_map, 3133 tpa_tlv->tpa_client_info.sge_addr, 3134 sizeof(vfop->args.tpa.sge_map)); 3135 bnx2x_vfop_opset(BNX2X_VFOP_TPA_CONFIG, 3136 bnx2x_vfop_tpa, cmd->done); 3137 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_tpa, 3138 cmd->block); 3139 } 3140 return -ENOMEM; 3141 } 3142 3143 /* VF release ~ VF close + VF release-resources 3144 * Release is the ultimate SW shutdown and is called whenever an 3145 * irrecoverable error is encountered. 
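 * The flow dispatches on vf->state below: an ENABLED VF is closed first,
 * an ACQUIRED VF only has its resources freed, and FREE/RESET VFs need
 * no work.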
3146 */ 3147 void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block) 3148 { 3149 struct bnx2x_vfop_cmd cmd = { 3150 .done = NULL, 3151 .block = block, 3152 }; 3153 int rc; 3154 3155 DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid); 3156 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); 3157 3158 rc = bnx2x_vfop_release_cmd(bp, vf, &cmd); 3159 if (rc) 3160 WARN(rc, 3161 "VF[%d] Failed to allocate resources for release op - rc=%d\n", 3162 vf->abs_vfid, rc); 3163 } 3164 3165 static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp, 3166 struct bnx2x_virtf *vf, u32 *sbdf) 3167 { 3168 *sbdf = vf->devfn | (vf->bus << 8); 3169 } 3170 3171 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 3172 enum channel_tlvs tlv) 3173 { 3174 /* we don't lock the channel for unsupported tlvs */ 3175 if (!bnx2x_tlv_supported(tlv)) { 3176 BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n"); 3177 return; 3178 } 3179 3180 /* lock the channel */ 3181 mutex_lock(&vf->op_mutex); 3182 3183 /* record the locking op */ 3184 vf->op_current = tlv; 3185 3186 /* log the lock */ 3187 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n", 3188 vf->abs_vfid, tlv); 3189 } 3190 3191 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 3192 enum channel_tlvs expected_tlv) 3193 { 3194 enum channel_tlvs current_tlv; 3195 3196 if (!vf) { 3197 BNX2X_ERR("VF was %p\n", vf); 3198 return; 3199 } 3200 3201 current_tlv = vf->op_current; 3202 3203 /* we don't unlock the channel for unsupported tlvs */ 3204 if (!bnx2x_tlv_supported(expected_tlv)) 3205 return; 3206 3207 WARN(expected_tlv != vf->op_current, 3208 "lock mismatch: expected %d found %d", expected_tlv, 3209 vf->op_current); 3210 3211 /* clear the locking op */ 3212 vf->op_current = CHANNEL_TLV_NONE; 3213 3214 /* unlock the channel */ 3215 mutex_unlock(&vf->op_mutex); 3216 3217 /* log the unlock - use the tlv sampled before it was cleared */ 3218 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n", 3219 vf->abs_vfid, current_tlv); 3220 } 3221 3222 static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable) 3223 { 3224 struct bnx2x_queue_state_params q_params; 3225 u32 prev_flags; 3226 int i, rc; 3227 3228 /* Verify changes are needed and record current Tx switching state */ 3229 prev_flags = bp->flags; 3230 if (enable) 3231 bp->flags |= TX_SWITCHING; 3232 else 3233 bp->flags &= ~TX_SWITCHING; 3234 if (prev_flags == bp->flags) 3235 return 0; 3236 3237 /* Verify state enables the sending of queue ramrods */ 3238 if ((bp->state != BNX2X_STATE_OPEN) || 3239 (bnx2x_get_q_logical_state(bp, 3240 &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) != 3241 BNX2X_Q_LOGICAL_STATE_ACTIVE)) 3242 return 0; 3243 3244 /* send q.
update ramrod to configure Tx switching */ 3245 memset(&q_params, 0, sizeof(q_params)); 3246 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 3247 q_params.cmd = BNX2X_Q_CMD_UPDATE; 3248 __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, 3249 &q_params.params.update.update_flags); 3250 if (enable) 3251 __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING, 3252 &q_params.params.update.update_flags); 3253 else 3254 __clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING, 3255 &q_params.params.update.update_flags); 3256 3257 /* send the ramrod on all the queues of the PF */ 3258 for_each_eth_queue(bp, i) { 3259 struct bnx2x_fastpath *fp = &bp->fp[i]; 3260 3261 /* Set the appropriate Queue object */ 3262 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 3263 3264 /* Update the Queue state */ 3265 rc = bnx2x_queue_state_change(bp, &q_params); 3266 if (rc) { 3267 BNX2X_ERR("Failed to configure Tx switching\n"); 3268 return rc; 3269 } 3270 } 3271 3272 DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled"); 3273 return 0; 3274 } 3275 3276 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) 3277 { 3278 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); 3279 3280 if (!IS_SRIOV(bp)) { 3281 BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n"); 3282 return -EINVAL; 3283 } 3284 3285 DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", 3286 num_vfs_param, BNX2X_NR_VIRTFN(bp)); 3287 3288 /* HW channel is only operational when PF is up */ 3289 if (bp->state != BNX2X_STATE_OPEN) { 3290 BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n"); 3291 return -EINVAL; 3292 } 3293 3294 /* we are always bound by the total_vfs in the configuration space */ 3295 if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) { 3296 BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n", 3297 num_vfs_param, BNX2X_NR_VIRTFN(bp)); 3298 num_vfs_param = BNX2X_NR_VIRTFN(bp); 3299 } 3300 3301 bp->requested_nr_virtfn = num_vfs_param; 3302 if (num_vfs_param == 0) { 3303 bnx2x_set_pf_tx_switching(bp, false); 3304 pci_disable_sriov(dev); 3305 return 0; 3306 } else { 3307 return bnx2x_enable_sriov(bp); 3308 } 3309 } 3310 3311 #define IGU_ENTRY_SIZE 4 3312 3313 int bnx2x_enable_sriov(struct bnx2x *bp) 3314 { 3315 int rc = 0, req_vfs = bp->requested_nr_virtfn; 3316 int vf_idx, sb_idx, vfq_idx, qcount, first_vf; 3317 u32 igu_entry, address; 3318 u16 num_vf_queues; 3319 3320 if (req_vfs == 0) 3321 return 0; 3322 3323 first_vf = bp->vfdb->sriov.first_vf_in_pf; 3324 3325 /* statically distribute vf sb pool between VFs */ 3326 num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES, 3327 BP_VFDB(bp)->vf_sbs_pool / req_vfs); 3328 3329 /* zero previous values learned from igu cam */ 3330 for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) { 3331 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); 3332 3333 vf->sb_count = 0; 3334 vf_sb_count(BP_VF(bp, vf_idx)) = 0; 3335 } 3336 bp->vfdb->vf_sbs_pool = 0; 3337 3338 /* prepare IGU cam */ 3339 sb_idx = BP_VFDB(bp)->first_vf_igu_entry; 3340 address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE; 3341 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { 3342 for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) { 3343 igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT | 3344 vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT | 3345 IGU_REG_MAPPING_MEMORY_VALID; 3346 DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n", 3347 sb_idx, vf_idx); 3348 REG_WR(bp, address, igu_entry); 3349 sb_idx++; 3350 
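/* each IGU CAM entry is IGU_ENTRY_SIZE (4) bytes wide; step to the
 * next one
 */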
address += IGU_ENTRY_SIZE; 3351 } 3352 } 3353 3354 /* Reinitialize vf database according to igu cam */ 3355 bnx2x_get_vf_igu_cam_info(bp); 3356 3357 DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n", 3358 BP_VFDB(bp)->vf_sbs_pool, num_vf_queues); 3359 3360 qcount = 0; 3361 for_each_vf(bp, vf_idx) { 3362 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); 3363 3364 /* set local queue arrays */ 3365 vf->vfqs = &bp->vfdb->vfqs[qcount]; 3366 qcount += vf_sb_count(vf); 3367 bnx2x_iov_static_resc(bp, vf); 3368 } 3369 3370 /* prepare msix vectors in VF configuration space - the value in the 3371 * PCI configuration space should be the index of the last entry, 3372 * namely one less than the actual size of the table 3373 */ 3374 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { 3375 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); 3376 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, 3377 num_vf_queues - 1); 3378 DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n", 3379 vf_idx, num_vf_queues - 1); 3380 } 3381 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 3382 3383 /* enable sriov. This will probe all the VFs, and consequently cause 3384 * the "acquire" messages to appear on the VF PF channel. 3385 */ 3386 DP(BNX2X_MSG_IOV, "about to call enable sriov\n"); 3387 bnx2x_disable_sriov(bp); 3388 3389 rc = bnx2x_set_pf_tx_switching(bp, true); 3390 if (rc) 3391 return rc; 3392 3393 rc = pci_enable_sriov(bp->pdev, req_vfs); 3394 if (rc) { 3395 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); 3396 return rc; 3397 } 3398 DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs); 3399 return req_vfs; 3400 } 3401 3402 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) 3403 { 3404 int vfidx; 3405 struct pf_vf_bulletin_content *bulletin; 3406 3407 DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n"); 3408 for_each_vf(bp, vfidx) { 3409 bulletin = BP_VF_BULLETIN(bp, vfidx); 3410 if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN) 3411 bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0); 3412 } 3413 } 3414 3415 void bnx2x_disable_sriov(struct bnx2x *bp) 3416 { 3417 pci_disable_sriov(bp->pdev); 3418 } 3419 3420 static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, 3421 struct bnx2x_virtf **vf, 3422 struct pf_vf_bulletin_content **bulletin) 3423 { 3424 if (bp->state != BNX2X_STATE_OPEN) { 3425 BNX2X_ERR("vf ndo called even though the PF is down\n"); 3426 return -EINVAL; 3427 } 3428 3429 if (!IS_SRIOV(bp)) { 3430 BNX2X_ERR("vf ndo called even though sriov is disabled\n"); 3431 return -EINVAL; 3432 } 3433 3434 if (vfidx >= BNX2X_NR_VIRTFN(bp)) { 3435 BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n", 3436 vfidx, BNX2X_NR_VIRTFN(bp)); 3437 return -EINVAL; 3438 } 3439 3440 /* init members */ 3441 *vf = BP_VF(bp, vfidx); 3442 *bulletin = BP_VF_BULLETIN(bp, vfidx); 3443 3444 if (!*vf) { 3445 BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n", 3446 vfidx); 3447 return -EINVAL; 3448 } 3449 3450 if (!(*vf)->vfqs) { 3451 BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n", 3452 vfidx); 3453 return -EINVAL; 3454 } 3455 3456 if (!*bulletin) { 3457 BNX2X_ERR("vf ndo called but Bulletin Board struct is null.
vfidx was %d\n", 3458 vfidx); 3459 return -EINVAL; 3460 } 3461 3462 return 0; 3463 } 3464 3465 int bnx2x_get_vf_config(struct net_device *dev, int vfidx, 3466 struct ifla_vf_info *ivi) 3467 { 3468 struct bnx2x *bp = netdev_priv(dev); 3469 struct bnx2x_virtf *vf = NULL; 3470 struct pf_vf_bulletin_content *bulletin = NULL; 3471 struct bnx2x_vlan_mac_obj *mac_obj; 3472 struct bnx2x_vlan_mac_obj *vlan_obj; 3473 int rc; 3474 3475 /* sanity and init */ 3476 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 3477 if (rc) 3478 return rc; 3479 mac_obj = &bnx2x_leading_vfq(vf, mac_obj); 3480 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); 3481 if (!mac_obj || !vlan_obj) { 3482 BNX2X_ERR("VF partially initialized\n"); 3483 return -EINVAL; 3484 } 3485 3486 ivi->vf = vfidx; 3487 ivi->qos = 0; 3488 ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */ 3489 ivi->spoofchk = 1; /*always enabled */ 3490 if (vf->state == VF_ENABLED) { 3491 /* mac and vlan are in vlan_mac objects */ 3492 if (bnx2x_validate_vf_sp_objs(bp, vf, false)) { 3493 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, 3494 0, ETH_ALEN); 3495 vlan_obj->get_n_elements(bp, vlan_obj, 1, 3496 (u8 *)&ivi->vlan, 0, 3497 VLAN_HLEN); 3498 } 3499 } else { 3500 /* mac */ 3501 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) 3502 /* mac configured by ndo so its in bulletin board */ 3503 memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); 3504 else 3505 /* function has not been loaded yet. Show mac as 0s */ 3506 memset(&ivi->mac, 0, ETH_ALEN); 3507 3508 /* vlan */ 3509 if (bulletin->valid_bitmap & (1 << VLAN_VALID)) 3510 /* vlan configured by ndo so its in bulletin board */ 3511 memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN); 3512 else 3513 /* function has not been loaded yet. Show vlans as 0s */ 3514 memset(&ivi->vlan, 0, VLAN_HLEN); 3515 } 3516 3517 return 0; 3518 } 3519 3520 /* New mac for VF. Consider these cases: 3521 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and 3522 * supply at acquire. 3523 * 2. VF has already been acquired but has not yet initialized - store in local 3524 * bulletin board. mac will be posted on VF bulletin board after VF init. VF 3525 * will configure this mac when it is ready. 3526 * 3. VF has already initialized but has not yet setup a queue - post the new 3527 * mac on VF's bulletin board right now. VF will configure this mac when it 3528 * is ready. 3529 * 4. VF has already set a queue - delete any macs already configured for this 3530 * queue and manually config the new mac. 3531 * In any event, once this function has been called refuse any attempts by the 3532 * VF to configure any mac for itself except for this mac. In case of a race 3533 * where the VF fails to see the new post on its bulletin board before sending a 3534 * mac configuration request, the PF will simply fail the request and VF can try 3535 * again after consulting its bulletin board. 3536 */ 3537 int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) 3538 { 3539 struct bnx2x *bp = netdev_priv(dev); 3540 int rc, q_logical_state; 3541 struct bnx2x_virtf *vf = NULL; 3542 struct pf_vf_bulletin_content *bulletin = NULL; 3543 3544 /* sanity and init */ 3545 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 3546 if (rc) 3547 return rc; 3548 if (!is_valid_ether_addr(mac)) { 3549 BNX2X_ERR("mac address invalid\n"); 3550 return -EINVAL; 3551 } 3552 3553 /* update PF's copy of the VF's bulletin. 
Will no longer accept mac 3554 * configuration requests from the vf unless they match this mac 3555 */ 3556 bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID; 3557 memcpy(bulletin->mac, mac, ETH_ALEN); 3558 3559 /* Post update on VF's bulletin board */ 3560 rc = bnx2x_post_vf_bulletin(bp, vfidx); 3561 if (rc) { 3562 BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx); 3563 return rc; 3564 } 3565 3566 q_logical_state = 3567 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); 3568 if (vf->state == VF_ENABLED && 3569 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { 3570 /* configure the mac in device on this vf's queue */ 3571 unsigned long ramrod_flags = 0; 3572 struct bnx2x_vlan_mac_obj *mac_obj; 3573 3574 /* User should be able to see failure reason in system logs */ 3575 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) 3576 return -EINVAL; 3577 3578 /* must lock vfpf channel to protect against vf flows */ 3579 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 3580 3581 /* remove existing eth macs */ 3582 mac_obj = &bnx2x_leading_vfq(vf, mac_obj); 3583 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); 3584 if (rc) { 3585 BNX2X_ERR("failed to delete eth macs\n"); 3586 rc = -EINVAL; 3587 goto out; 3588 } 3589 3590 /* remove existing uc list macs */ 3591 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true); 3592 if (rc) { 3593 BNX2X_ERR("failed to delete uc_list macs\n"); 3594 rc = -EINVAL; 3595 goto out; 3596 } 3597 3598 /* configure the new mac to device */ 3599 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 3600 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, 3601 BNX2X_ETH_MAC, &ramrod_flags); 3602 3603 out: 3604 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 3605 } 3606 3607 return 0; 3608 } 3609 3610 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) 3611 { 3612 struct bnx2x_queue_state_params q_params = {NULL}; 3613 struct bnx2x_vlan_mac_ramrod_params ramrod_param; 3614 struct bnx2x_queue_update_params *update_params; 3615 struct pf_vf_bulletin_content *bulletin = NULL; 3616 struct bnx2x_rx_mode_ramrod_params rx_ramrod; 3617 struct bnx2x *bp = netdev_priv(dev); 3618 struct bnx2x_vlan_mac_obj *vlan_obj; 3619 unsigned long vlan_mac_flags = 0; 3620 unsigned long ramrod_flags = 0; 3621 struct bnx2x_virtf *vf = NULL; 3622 unsigned long accept_flags; 3623 int rc; 3624 3625 /* sanity and init */ 3626 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 3627 if (rc) 3628 return rc; 3629 3630 if (vlan > 4095) { 3631 BNX2X_ERR("illegal vlan value %d\n", vlan); 3632 return -EINVAL; 3633 } 3634 3635 DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n", 3636 vfidx, vlan, 0); 3637 3638 /* update PF's copy of the VF's bulletin. No point in posting the vlan 3639 * to the VF since it doesn't have anything to do with it. But it is 3640 * useful to store it here in case the VF is not up yet and we can only 3641 * configure the vlan later when it does come up. Treat vlan id 0 as a 3642 * request to remove the host tag. 3643 */ 3644 if (vlan > 0) 3645 bulletin->valid_bitmap |= 1 << VLAN_VALID; 3646 else 3647 bulletin->valid_bitmap &= ~(1 << VLAN_VALID); 3648 bulletin->vlan = vlan; 3649 3650 /* is vf initialized and queue set up?
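 * (if not, return here; the vlan recorded in the bulletin above is
 * applied once the VF comes up)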
*/ 3651 if (vf->state != VF_ENABLED || 3652 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != 3653 BNX2X_Q_LOGICAL_STATE_ACTIVE) 3654 return rc; 3655 3656 /* User should be able to see error in system logs */ 3657 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) 3658 return -EINVAL; 3659 3660 /* must lock vfpf channel to protect against vf flows */ 3661 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3662 3663 /* remove existing vlans */ 3664 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 3665 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); 3666 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, 3667 &ramrod_flags); 3668 if (rc) { 3669 BNX2X_ERR("failed to delete vlans\n"); 3670 rc = -EINVAL; 3671 goto out; 3672 } 3673 3674 /* need to remove/add the VF's accept_any_vlan bit */ 3675 accept_flags = bnx2x_leading_vfq(vf, accept_flags); 3676 if (vlan) 3677 clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); 3678 else 3679 set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); 3680 3681 bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, 3682 accept_flags); 3683 bnx2x_leading_vfq(vf, accept_flags) = accept_flags; 3684 bnx2x_config_rx_mode(bp, &rx_ramrod); 3685 3686 /* configure the new vlan to device */ 3687 memset(&ramrod_param, 0, sizeof(ramrod_param)); 3688 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 3689 ramrod_param.vlan_mac_obj = vlan_obj; 3690 ramrod_param.ramrod_flags = ramrod_flags; 3691 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 3692 &ramrod_param.user_req.vlan_mac_flags); 3693 ramrod_param.user_req.u.vlan.vlan = vlan; 3694 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; 3695 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); 3696 if (rc) { 3697 BNX2X_ERR("failed to configure vlan\n"); 3698 rc = -EINVAL; 3699 goto out; 3700 } 3701 3702 /* send queue update ramrod to configure default vlan and silent 3703 * vlan removal 3704 */ 3705 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 3706 q_params.cmd = BNX2X_Q_CMD_UPDATE; 3707 q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); 3708 update_params = &q_params.params.update; 3709 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, 3710 &update_params->update_flags); 3711 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, 3712 &update_params->update_flags); 3713 if (vlan == 0) { 3714 /* if vlan is 0 then we want to leave the VF traffic 3715 * untagged, and leave the incoming traffic untouched 3716 * (i.e. do not remove any vlan tags). 3717 */ 3718 __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, 3719 &update_params->update_flags); 3720 __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 3721 &update_params->update_flags); 3722 } else { 3723 /* configure default vlan to vf queue and set silent 3724 * vlan removal (the vf remains unaware of this vlan). 
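 * Silent removal strips the matching tag on receive without reporting
 * it, so the VF sees untagged traffic in both directions.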
3725 */ 3726 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, 3727 &update_params->update_flags); 3728 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 3729 &update_params->update_flags); 3730 update_params->def_vlan = vlan; 3731 update_params->silent_removal_value = 3732 vlan & VLAN_VID_MASK; 3733 update_params->silent_removal_mask = VLAN_VID_MASK; 3734 } 3735 3736 /* Update the Queue state */ 3737 rc = bnx2x_queue_state_change(bp, &q_params); 3738 if (rc) { 3739 BNX2X_ERR("Failed to configure default VLAN\n"); 3740 goto out; 3741 } 3742 3743 3744 /* clear the flag indicating that this VF needs its vlan 3745 * (it will only be set if the HV configured the vlan before the vf was 3746 * up and we were called because the VF came up later) 3747 */ 3748 out: 3749 vf->cfg_flags &= ~VF_CFG_VLAN; 3750 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3751 3752 return rc; 3753 } 3754 3755 /* crc is the first field in the bulletin board. Compute the crc over the 3756 * entire bulletin board excluding the crc field itself. Use the length field 3757 * as the Bulletin Board was posted by a PF with possibly a different version 3758 * from the vf which will sample it. Therefore, the length is computed by the 3759 * PF and then used blindly by the VF. 3760 */ 3761 u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, 3762 struct pf_vf_bulletin_content *bulletin) 3763 { 3764 return crc32(BULLETIN_CRC_SEED, 3765 ((u8 *)bulletin) + sizeof(bulletin->crc), 3766 bulletin->length - sizeof(bulletin->crc)); 3767 } 3768 3769 /* Check for new posts on the bulletin board */ 3770 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) 3771 { 3772 struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content; 3773 int attempts; 3774 3775 /* bulletin board hasn't changed since last sample */ 3776 if (bp->old_bulletin.version == bulletin.version) 3777 return PFVF_BULLETIN_UNCHANGED; 3778 3779 /* validate crc of new bulletin board */ 3780 if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) { 3781 /* sampling the structure mid-post may result in corrupted 3782 * data; validate the crc to ensure coherency. 3783 */ 3784 for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) { 3785 bulletin = bp->pf2vf_bulletin->content; 3786 if (bulletin.crc == bnx2x_crc_vf_bulletin(bp, 3787 &bulletin)) 3788 break; 3789 BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n", 3790 bulletin.crc, 3791 bnx2x_crc_vf_bulletin(bp, &bulletin)); 3792 } 3793 if (attempts >= BULLETIN_ATTEMPTS) { 3794 BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times.
Aborting\n", 3795 attempts); 3796 return PFVF_BULLETIN_CRC_ERR; 3797 } 3798 } 3799 3800 /* the mac address in bulletin board is valid and is new */ 3801 if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID && 3802 !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) { 3803 /* update new mac to net device */ 3804 memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN); 3805 } 3806 3807 /* the vlan in bulletin board is valid and is new */ 3808 if (bulletin.valid_bitmap & 1 << VLAN_VALID) 3809 memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN); 3810 3811 /* copy new bulletin board to bp */ 3812 bp->old_bulletin = bulletin; 3813 3814 return PFVF_BULLETIN_UPDATED; 3815 } 3816 3817 void bnx2x_timer_sriov(struct bnx2x *bp) 3818 { 3819 bnx2x_sample_bulletin(bp); 3820 3821 /* if channel is down we need to self destruct */ 3822 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) 3823 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, 3824 BNX2X_MSG_IOV); 3825 } 3826 3827 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) 3828 { 3829 /* vf doorbells are embedded within the regview */ 3830 return bp->regview + PXP_VF_ADDR_DB_START; 3831 } 3832 3833 int bnx2x_vf_pci_alloc(struct bnx2x *bp) 3834 { 3835 mutex_init(&bp->vf2pf_mutex); 3836 3837 /* allocate vf2pf mailbox for vf to pf channel */ 3838 bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping, 3839 sizeof(struct bnx2x_vf_mbx_msg)); 3840 if (!bp->vf2pf_mbox) 3841 goto alloc_mem_err; 3842 3843 /* allocate pf 2 vf bulletin board */ 3844 bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping, 3845 sizeof(union pf_vf_bulletin)); 3846 if (!bp->pf2vf_bulletin) 3847 goto alloc_mem_err; 3848 3849 return 0; 3850 3851 alloc_mem_err: 3852 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, 3853 sizeof(struct bnx2x_vf_mbx_msg)); 3854 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, 3855 sizeof(union pf_vf_bulletin)); 3856 return -ENOMEM; 3857 } 3858 3859 void bnx2x_iov_channel_down(struct bnx2x *bp) 3860 { 3861 int vf_idx; 3862 struct pf_vf_bulletin_content *bulletin; 3863 3864 if (!IS_SRIOV(bp)) 3865 return; 3866 3867 for_each_vf(bp, vf_idx) { 3868 /* locate this VFs bulletin board and update the channel down 3869 * bit 3870 */ 3871 bulletin = BP_VF_BULLETIN(bp, vf_idx); 3872 bulletin->valid_bitmap |= 1 << CHANNEL_DOWN; 3873 3874 /* update vf bulletin board */ 3875 bnx2x_post_vf_bulletin(bp, vf_idx); 3876 } 3877 } 3878