/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>

/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}

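/* If no VF matches, the for_each_vf() scan above runs idx up to
 * BNX2X_NR_VIRTFN(bp), so that value acts as a "not found" sentinel; the
 * range check below turns it into a NULL return.
 */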
static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
				u8 update)
{
	/* acking a VF sb through the PF - use the GRC */
	u32 ctl;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
	mmiowb();
	barrier();

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();
}

/* VFOP - VF slow-path operation support */

#define BNX2X_VFOP_FILTER_ADD_CNT_MAX		0x10000

/* VFOP operations states */
enum bnx2x_vfop_qctor_state {
	BNX2X_VFOP_QCTOR_INIT,
	BNX2X_VFOP_QCTOR_SETUP,
	BNX2X_VFOP_QCTOR_INT_EN
};

enum bnx2x_vfop_qdtor_state {
	BNX2X_VFOP_QDTOR_HALT,
	BNX2X_VFOP_QDTOR_TERMINATE,
	BNX2X_VFOP_QDTOR_CFCDEL,
	BNX2X_VFOP_QDTOR_DONE
};

enum bnx2x_vfop_vlan_mac_state {
	BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
	BNX2X_VFOP_VLAN_MAC_CLEAR,
	BNX2X_VFOP_VLAN_MAC_CHK_DONE,
	BNX2X_VFOP_MAC_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST_0
};

enum bnx2x_vfop_qsetup_state {
	BNX2X_VFOP_QSETUP_CTOR,
	BNX2X_VFOP_QSETUP_VLAN0,
	BNX2X_VFOP_QSETUP_DONE
};

enum bnx2x_vfop_mcast_state {
	BNX2X_VFOP_MCAST_DEL,
	BNX2X_VFOP_MCAST_ADD,
	BNX2X_VFOP_MCAST_CHK_DONE
};

enum bnx2x_vfop_qflr_state {
	BNX2X_VFOP_QFLR_CLR_VLAN,
	BNX2X_VFOP_QFLR_CLR_MAC,
	BNX2X_VFOP_QFLR_TERMINATE,
	BNX2X_VFOP_QFLR_DONE
};

enum bnx2x_vfop_flr_state {
	BNX2X_VFOP_FLR_QUEUES,
	BNX2X_VFOP_FLR_HW
};

enum bnx2x_vfop_close_state {
	BNX2X_VFOP_CLOSE_QUEUES,
	BNX2X_VFOP_CLOSE_HW
};

enum bnx2x_vfop_rxmode_state {
	BNX2X_VFOP_RXMODE_CONFIG,
	BNX2X_VFOP_RXMODE_DONE
};

enum bnx2x_vfop_qteardown_state {
	BNX2X_VFOP_QTEARDOWN_RXMODE,
	BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
	BNX2X_VFOP_QTEARDOWN_CLR_MAC,
	BNX2X_VFOP_QTEARDOWN_QDTOR,
	BNX2X_VFOP_QTEARDOWN_DONE
};

enum bnx2x_vfop_rss_state {
	BNX2X_VFOP_RSS_CONFIG,
	BNX2X_VFOP_RSS_DONE
};

#define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0)
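/* How the VFOP state machines below work (the bnx2x_vfop_opset(),
 * bnx2x_vfop_transition() and bnx2x_vfop_finalize() helpers are defined in
 * bnx2x_sriov.h): each handler is (re)entered with vfop->state recording the
 * current step. After issuing a step, bnx2x_vfop_finalize() routes control:
 * a negative rc jumps to the shared op_err label, a positive rc means a
 * ramrod completion is still pending (the handler returns and is re-invoked
 * on completion), and a completed step either ends the op at op_done or
 * arranges for the handler to run again in the next state. This is why the
 * case labels in these handlers intentionally have no break statements.
 */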

void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	DP(BNX2X_MSG_IOV,
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->tx.sb_cq_index,
	   init_params->tx.hc_rate,
	   setup_params->flags,
	   setup_params->txq_params.traffic_type);
}

void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->rx.sb_cq_index,
	   init_params->rx.hc_rate,
	   setup_params->gen_params.mtu,
	   rxq_params->buf_sz,
	   rxq_params->sge_buf_sz,
	   rxq_params->max_sges_pkt,
	   rxq_params->tpa_agg_sz,
	   setup_params->flags,
	   rxq_params->drop_flags,
	   rxq_params->cache_line_log);
}

void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vfop_qctor_params *p,
			   unsigned long q_type)
{
	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

	/* INIT */

	/* Enable host coalescing in the transition to INIT state */
	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

	/* FW SB ID */
	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

	/* context */
	init_p->cxts[0] = q->cxt;

	/* SETUP */

	/* Setup-op general parameters */
	setup_p->gen_params.spcl_id = vf->sp_cl_id;
	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);

	/* Setup-op pause params:
	 * Nothing to do, the pause thresholds are set by default to 0 which
	 * effectively turns off the feature for this queue. We don't want
	 * one queue (VF) to interfere with another queue (another VF)
	 */
	if (vf->cfg_flags & VF_CFG_FW_FC)
		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
			  vf->abs_vfid);
	/* Setup-op flags:
	 * collect statistics, zero statistics, local-switching, security,
	 * OV for Flex10, RSS and MCAST for leading
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

	/* for VFs, enable tx switching, bd coherency, and mac address
	 * anti-spoofing
	 */
	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

	/* Setup-op rx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
	}

	/* Setup-op tx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	}
}

/* VFOP queue construction */
static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qctor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QCTOR_INIT:

		/* has this queue already been opened? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
			DP(BNX2X_MSG_IOV,
			   "Entered qctor but queue was already up. Aborting gracefully\n");
			goto op_done;
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_SETUP;

		q_params->cmd = BNX2X_Q_CMD_INIT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_SETUP:
		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_INT_EN;

		/* copy pre-prepared setup params to the queue-state params */
		vfop->op_p->qctor.qstate.params.setup =
			vfop->op_p->qctor.prep_qsetup;

		q_params->cmd = BNX2X_Q_CMD_SETUP;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_INT_EN:

		/* enable interrupts */
		bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
				    USTORM_ID, 0, IGU_INT_ENABLE, 0);
		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
op_done:
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}
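/* A note on the *_cmd wrappers used from here on: struct bnx2x_vfop_cmd
 * carries the completion discipline for an operation - 'done' is installed
 * via bnx2x_vfop_opset() as the handler to invoke when the op completes
 * (which is how ops are chained), and 'block' is forwarded to
 * bnx2x_vfop_transition() to request blocking execution. Each wrapper
 * allocates a vfop, stashes the op-specific arguments and kicks the state
 * machine at its initial state.
 */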
static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qctor.qid = qid;
		vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);

		bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
				 bnx2x_vfop_qctor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue destruction */
static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qdtor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QDTOR_HALT:

		/* has this queue already been stopped? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_STOPPED) {
			DP(BNX2X_MSG_IOV,
			   "Entered qdtor but queue was already stopped. Aborting gracefully\n");

			/* next state */
			vfop->state = BNX2X_VFOP_QDTOR_DONE;

			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;

		q_params->cmd = BNX2X_Q_CMD_HALT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_TERMINATE:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;

		q_params->cmd = BNX2X_Q_CMD_TERMINATE;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_CFCDEL:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_DONE;

		q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
	BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
op_done:
	case BNX2X_VFOP_QDTOR_DONE:
		/* invalidate the context */
		if (qdtor->cxt) {
			qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
			qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
		}
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}
static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_queue_state_params *qstate =
			&vf->op_params.qctor.qstate;

		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qdtor.qid = qid;
		vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);

		bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
				 bnx2x_vfop_qdtor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
					     cmd->block);
	} else {
		BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid);
		return -ENOMEM;
	}
}
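/* Called once per matching IGU CAM entry (see bnx2x_get_vf_igu_cam_info()
 * below). Note that vf_sb_count(vf) is a macro from bnx2x_sriov.h that maps
 * onto the VF's resource-request accounting rather than onto the raw
 * vf->sb_count field, so the two increments below are deliberate, not a
 * duplicate.
 */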

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
	if (vf) {
		/* the first igu entry belonging to VFs of this PF */
		if (!BP_VFDB(bp)->first_vf_igu_entry)
			BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;

		/* the first igu entry belonging to this VF */
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;

		++vf_sb_count(vf);
		++vf->sb_count;
	}
	BP_VFDB(bp)->vf_sbs_pool++;
}

/* VFOP MAC/VLAN helpers */
static inline void bnx2x_vfop_credit(struct bnx2x *bp,
				     struct bnx2x_vfop *vfop,
				     struct bnx2x_vlan_mac_obj *obj)
{
	struct bnx2x_vfop_args_filters *args = &vfop->args.filters;

	/* update credit only if there is no error
	 * and a valid credit counter
	 */
	if (!vfop->rc && args->credit) {
		struct list_head *pos;
		int read_lock;
		int cnt = 0;

		read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
		if (read_lock)
			DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");

		list_for_each(pos, &obj->head)
			cnt++;

		if (!read_lock)
			bnx2x_vlan_mac_h_read_unlock(bp, obj);

		atomic_set(args->credit, cnt);
	}
}

static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
				   struct bnx2x_vfop_filter *pos,
				   struct bnx2x_vlan_mac_data *user_req)
{
	user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
		BNX2X_VLAN_MAC_DEL;

	switch (pos->type) {
	case BNX2X_VFOP_FILTER_MAC:
		memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
		break;
	case BNX2X_VFOP_FILTER_VLAN:
		user_req->u.vlan.vlan = pos->vid;
		break;
	default:
		BNX2X_ERR("Invalid filter type, skipping\n");
		return 1;
	}
	return 0;
}
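/* Apply a list of MAC/VLAN filters, one bnx2x_config_vlan_mac() call per
 * element. Filters that were applied successfully are moved onto a rollback
 * list; on failure (or if more than filters->add_cnt rules ended up added)
 * the moved entries are replayed with the inverse command to undo the
 * partial configuration. On success filters->add_cnt is updated to the net
 * number of rules added.
 */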
static int bnx2x_vfop_config_list(struct bnx2x *bp,
				  struct bnx2x_vfop_filters *filters,
				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
{
	struct bnx2x_vfop_filter *pos, *tmp;
	struct list_head rollback_list, *filters_list = &filters->head;
	struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
	int rc = 0, cnt = 0;

	INIT_LIST_HEAD(&rollback_list);

	list_for_each_entry_safe(pos, tmp, filters_list, link) {
		if (bnx2x_vfop_set_user_req(bp, pos, user_req))
			continue;

		rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (rc >= 0) {
			cnt += pos->add ? 1 : -1;
			list_move(&pos->link, &rollback_list);
			rc = 0;
		} else if (rc == -EEXIST) {
			rc = 0;
		} else {
			BNX2X_ERR("Failed to add a new vlan_mac command\n");
			break;
		}
	}

	/* rollback if error or too many rules added */
	if (rc || cnt > filters->add_cnt) {
		BNX2X_ERR("error or too many rules added. Performing rollback\n");
		list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
			pos->add = !pos->add;	/* reverse op */
			bnx2x_vfop_set_user_req(bp, pos, user_req);
			bnx2x_config_vlan_mac(bp, vlan_mac);
			list_del(&pos->link);
		}
		cnt = 0;
		if (!rc)
			rc = -EINVAL;
	}
	filters->add_cnt = cnt;
	return rc;
}

/* VFOP set VLAN/MAC */
static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
	struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
	struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;

	enum bnx2x_vfop_vlan_mac_state state = vfop->state;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	bnx2x_vfop_reset_wq(vf);

	switch (state) {
	case BNX2X_VFOP_VLAN_MAC_CLEAR:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do delete */
		vfop->rc = obj->delete_all(bp, obj,
					   &vlan_mac->user_req.vlan_mac_flags,
					   &vlan_mac->ramrod_flags);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do config */
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (vfop->rc == -EEXIST)
			vfop->rc = 0;

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
		vfop->rc = !!obj->raw.check_pending(&obj->raw);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MAC_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do list config */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (vfop->rc)
			goto op_err;

		set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do list config */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (!vfop->rc) {
			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		}
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
op_done:
	kfree(filters);
	bnx2x_vfop_credit(bp, vfop, obj);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}
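/* Flags controlling how the vlan/mac ramrod is built. Note the credit
 * convention used by the *_cmd wrappers below: when a 'credit' counter is
 * supplied in the filter args, BNX2X_DONT_CONSUME_CAM_CREDIT is set and the
 * driver tracks usage itself (bnx2x_vfop_credit() recounts the object's
 * filter list when the op completes); with a NULL counter the CAM credit is
 * consumed normally.
 */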
struct bnx2x_vfop_vlan_mac_flags {
	bool drv_only;
	bool dont_consume;
	bool single_cmd;
	bool add;
};

static void
bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
				struct bnx2x_vfop_vlan_mac_flags *flags)
{
	struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;

	memset(ramrod, 0, sizeof(*ramrod));

	/* ramrod flags */
	if (flags->drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
	if (flags->single_cmd)
		set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);

	/* mac_vlan flags */
	if (flags->dont_consume)
		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);

	/* cmd */
	ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
}

static inline void
bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
			   struct bnx2x_vfop_vlan_mac_flags *flags)
{
	bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
	set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
}

static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
				     struct bnx2x_virtf *vf,
				     struct bnx2x_vfop_cmd *cmd,
				     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single */
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false /* don't care */,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    struct bnx2x_vfop_filters *macs,
			    int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = macs,
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false, /* don't care since only the items in the
				       * filters list affect the sp operation,
				       * not the list itself
				       */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}
int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    int qid, u16 vid, bool add)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = false,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = add,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
		ramrod->user_req.u.vlan.vlan = vid;

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      struct bnx2x_vfop_cmd *cmd,
				      int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false,	/* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
			     struct bnx2x_virtf *vf,
			     struct bnx2x_vfop_cmd *cmd,
			     struct bnx2x_vfop_filters *vlans,
			     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = vlans,
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false,	/* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
			atomic_read(filters.credit);

		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue setup (queue constructor + set vlan 0) */
static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qctor.qid;
	enum bnx2x_vfop_qsetup_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_qsetup,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QSETUP_CTOR:
		/* init the queue ctor command */
		vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
		vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QSETUP_VLAN0:
		/* skip if non-leading or FPGA/EMU */
		if (qid)
			goto op_done;

		/* init the queue set-vlan command (for vlan 0) */
		vfop->state = BNX2X_VFOP_QSETUP_DONE;
		vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QSETUP_DONE:
		vf->cfg_flags |= VF_CFG_VLAN;
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
			&bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}

int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qctor.qid = qid;

		bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
				 bnx2x_vfop_qsetup, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
					     cmd->block);
	}
	return -ENOMEM;
}
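/* During an FLR the device has already torn the VF down, so the clean-up
 * flow below requests vlan/mac removal with drv_only set (RAMROD_DRV_CLR_ONLY,
 * i.e. update only the driver's filter objects) and may have to force the
 * queue object out of a stale logical state before issuing the final
 * TERMINATE command.
 */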
/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qflr_state state = vfop->state;
	struct bnx2x_queue_state_params *qstate;
	struct bnx2x_vfop_cmd cmd;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qflr;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QFLR_CLR_VLAN:
		/* vlan-clear-all: driver-only, don't consume credit */
		vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)))
			vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid,
							      true);
		if (vfop->rc)
			goto op_err;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QFLR_CLR_MAC:
		/* mac-clear-all: driver-only, consume credit */
		vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)))
			vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid,
							     true);
		DP(BNX2X_MSG_IOV,
		   "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
		   vf->abs_vfid, vfop->rc);
		if (vfop->rc)
			goto op_err;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QFLR_TERMINATE:
		qstate = &vfop->op_p->qctor.qstate;
		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
		vfop->state = BNX2X_VFOP_QFLR_DONE;

		DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
		   vf->abs_vfid, qstate->q_obj->state);

		if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
			qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
			qstate->cmd = BNX2X_Q_CMD_TERMINATE;
			vfop->rc = bnx2x_queue_state_change(bp, qstate);
			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
		} else {
			goto op_done;
		}

op_err:
	BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QFLR_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
			       struct bnx2x_virtf *vf,
			       struct bnx2x_vfop_cmd *cmd,
			       int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qx.qid = qid;
		bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
				 bnx2x_vfop_qflr, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP multi-casts */
static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
	struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
	struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
	enum bnx2x_vfop_mcast_state state = vfop->state;
	int i;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_MCAST_DEL:
		/* clear existing mcasts */
		vfop->state = BNX2X_VFOP_MCAST_ADD;
		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_MCAST_ADD:
		if (raw->check_pending(raw))
			goto op_pending;

		if (args->mc_num) {
			/* update mcast list on the ramrod params */
			INIT_LIST_HEAD(&mcast->mcast_list);
			for (i = 0; i < args->mc_num; i++)
				list_add_tail(&(args->mc[i].link),
					      &mcast->mcast_list);
			/* add new mcasts */
			vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
			vfop->rc = bnx2x_config_mcast(bp, mcast,
						      BNX2X_MCAST_CMD_ADD);
		}
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MCAST_CHK_DONE:
		vfop->rc = raw->check_pending(raw) ? 1 : 0;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
op_done:
	kfree(args->mc);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 bnx2x_mac_addr_t *mcasts,
			 int mcast_num, bool drv_only)
{
	struct bnx2x_vfop *vfop = NULL;
	size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
	struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
					   NULL;

	if (!mc_sz || mc) {
		vfop = bnx2x_vfop_add(bp, vf);
		if (vfop) {
			int i;
			struct bnx2x_mcast_ramrod_params *ramrod =
				&vf->op_params.mcast;

			/* set ramrod params */
			memset(ramrod, 0, sizeof(*ramrod));
			ramrod->mcast_obj = &vf->mcast_obj;
			if (drv_only)
				set_bit(RAMROD_DRV_CLR_ONLY,
					&ramrod->ramrod_flags);

			/* copy mcasts pointers */
			vfop->args.mc_list.mc_num = mcast_num;
			vfop->args.mc_list.mc = mc;
			for (i = 0; i < mcast_num; i++)
				mc[i].mac = mcasts[i];

			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
					 bnx2x_vfop_mcast, cmd->done);
			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
						     cmd->block);
		} else {
			kfree(mc);
		}
	}
	return -ENOMEM;
}

/* VFOP rx-mode */
static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
	enum bnx2x_vfop_rxmode_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_RXMODE_CONFIG:
		/* next state */
		vfop->state = BNX2X_VFOP_RXMODE_DONE;

		/* record the accept flags in vfdb so the hypervisor can
		 * modify them if necessary
		 */
		bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
			ramrod->rx_accept_flags;
		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
	BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
op_done:
	case BNX2X_VFOP_RXMODE_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
				  struct bnx2x_rx_mode_ramrod_params *ramrod,
				  struct bnx2x_virtf *vf,
				  unsigned long accept_flags)
{
	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);

	memset(ramrod, 0, sizeof(*ramrod));
	ramrod->cid = vfq->cid;
	ramrod->cl_id = vfq_cl_id(vf, vfq);
	ramrod->rx_mode_obj = &bp->rx_mode_obj;
	ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
	ramrod->rx_accept_flags = accept_flags;
	ramrod->tx_accept_flags = accept_flags;
	ramrod->pstate = &vf->filter_state;
	ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	set_bit(RAMROD_RX, &ramrod->ramrod_flags);
	set_bit(RAMROD_TX, &ramrod->ramrod_flags);

	ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
}

int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid, unsigned long accept_flags)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_rx_mode_ramrod_params *ramrod =
			&vf->op_params.rx_mode;

		bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);

		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
				 bnx2x_vfop_rxmode, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
 * queue destructor)
 */
static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qteardown_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qdown;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QTEARDOWN_RXMODE:
		/* Drop all */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
		vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
		/* vlan-clear-all: don't consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
		/* mac-clear-all: consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_QDTOR:
		/* run the queue destruction flow */
		DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
		vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
		DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
		vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
		DP(BNX2X_MSG_IOV, "returned from cmd\n");
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);

	case BNX2X_VFOP_QTEARDOWN_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}

int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	/* for non leading queues skip directly to qdown state */
	if (vfop) {
		vfop->args.qx.qid = qid;
		bnx2x_vfop_opset(qid == LEADING_IDX ?
				 BNX2X_VFOP_QTEARDOWN_RXMODE :
				 BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
				 cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
					     cmd->block);
	}

	return -ENOMEM;
}

/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}
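/* Clear this VF's 'was error' indication in PGLUE_B. The indications are
 * kept as a bitmap spread across four 32-bit clear registers (32 VFs per
 * register); the group index derived below from the path and abs_vfid
 * selects the register, and (abs_vfid & 0x1f) selects the bit within it.
 */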
static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}

static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;
	u32 val;

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	if (vf->cfg_flags & VF_CFG_INT_SIMD)
		val |= IGU_VF_CONF_SINGLE_ISR_EN;
	val &= ~IGU_VF_CONF_PARENT_MASK;
	val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT;	/* parent PF */
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	DP(BNX2X_MSG_IOV,
	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
	   vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
				       false /* VF */);

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
				    IGU_INT_DISABLE, 1);
	}
}

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		return false;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}

/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
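/* The vlan divvy below rounds the PF's vlan credit pool down to a power of
 * two before splitting it evenly between the VFs; e.g. (illustrative numbers
 * only) a pool of 60 credits with 8 VFs yields 1 << ilog2(60) = 32, i.e.
 * 4 vlan filters per VF.
 */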
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct vf_pf_resc_request *resc = &vf->alloc_resc;
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;

	/* divvy up vlan rules */
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
	resc->num_sbs = vf->sb_count;
}

/* FLR routines: */
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* reset the state variables */
	bnx2x_iov_static_resc(bp, vf);
	vf->state = VF_FREE;
}

static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	/* DQ usage counter */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
					"DQ VF usage counter timed out",
					poll_cnt);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* FW cleanup command - poll for the results */
	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
				   poll_cnt))
		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

	/* verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);
}

static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
	enum bnx2x_vfop_flr_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_flr,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_FLR_QUEUES:
		/* the cleanup operations are valid if and only if the VF
		 * was first acquired.
		 */
		if (++(qx->qid) < vf_rxq_count(vf)) {
			vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
						       qx->qid);
			if (vfop->rc)
				goto op_err;
			return;
		}
		/* remove multicasts */
		vfop->state = BNX2X_VFOP_FLR_HW;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
						0, true);
		if (vfop->rc)
			goto op_err;
		return;
	case BNX2X_VFOP_FLR_HW:

		/* dispatch final cleanup and wait for HW queues to flush */
		bnx2x_vf_flr_clnup_hw(bp, vf);

		/* release VF resources */
		bnx2x_vf_free_resc(bp, vf);

		/* re-open the mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);

		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	vf->flr_clnup_stage = VF_FLR_ACK;
	bnx2x_vfop_end(bp, vf, vfop);
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
}

static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
			      struct bnx2x_virtf *vf,
			      vfop_handler_t done)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		vfop->args.qx.qid = -1; /* loop */
		bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
				 bnx2x_vfop_flr, done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
	}
	return -ENOMEM;
}
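/* Walk all VFs pending FLR cleanup, one at a time. The per-VF cleanup is
 * asynchronous: bnx2x_vfop_flr_cmd() is started with this function as its
 * 'done' callback, so when a VF finishes we re-enter here with prev_vf set
 * and resume the scan from the next index. Only once no candidates remain
 * are the handled VFs ACKed back to the MCP.
 */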
static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
{
	int i = prev_vf ? prev_vf->index + 1 : 0;
	struct bnx2x_virtf *vf;

	/* find next VF to cleanup */
next_vf_to_clean:
	for (;
	     i < BNX2X_NR_VIRTFN(bp) &&
	     (bnx2x_vf(bp, i, state) != VF_RESET ||
	      bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
	     i++)
		;

	DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
	   BNX2X_NR_VIRTFN(bp));

	if (i < BNX2X_NR_VIRTFN(bp)) {
		vf = BP_VF(bp, i);

		/* lock the vf pf channel */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

		/* invoke the VF FLR SM */
		if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
			BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
				  vf->abs_vfid);

			/* mark the VF to be ACKED and continue */
			vf->flr_clnup_stage = VF_FLR_ACK;
			goto next_vf_to_clean;
		}
		return;
	}

	/* we are done, update vf records */
	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);

		if (vf->flr_clnup_stage != VF_FLR_ACK)
			continue;

		vf->flr_clnup_stage = VF_FLR_EPILOG;
	}

	/* Acknowledge the handled VFs.
	 * We ACK all the VFs for which an FLR was requested, even those we
	 * never opened, since the MCP will interrupt us immediately again if
	 * we only ack some of the bits, resulting in an endless loop. This
	 * can happen for example in KVM where an 'all ones' FLR request is
	 * sometimes given by the hypervisor.
	 */
	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
			  bp->vfdb->flrd_vfs[i]);

	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

	/* clear the acked bits - better yet if the MCP implemented
	 * write to clear semantics
	 */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}

void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
	int i;

	/* Read FLR'd VFs */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

	DP(BNX2X_MSG_MCP,
	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);
		u32 reset = 0;

		if (vf->abs_vfid < 32)
			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
		else
			reset = bp->vfdb->flrd_vfs[1] &
				(1 << (vf->abs_vfid - 32));

		if (reset) {
			/* set as reset and ready for cleanup */
			vf->state = VF_RESET;
			vf->flr_clnup_stage = VF_FLR_CLN;

			DP(BNX2X_MSG_IOV,
			   "Initiating Final cleanup for VF %d\n",
			   vf->abs_vfid);
		}
	}

	/* do the FLR cleanup for all marked VFs */
	bnx2x_vf_flr_clnup(bp, NULL);
}

/* IOV global initialization routines */
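/* A sketch of the CID layout implied by the register writes below (inferred
 * from the code, not a hardware-spec quote): VF doorbells are normalized so
 * that a VF's CIDs start at DORQ_REG_VF_NORM_CID_BASE (BNX2X_FIRST_VF_CID)
 * and each VF owns a window of 1 << BNX2X_VF_CID_WND CIDs, i.e. the CID
 * reflects the abs_vfid as the comment in bnx2x_iov_init_dq() states.
 */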
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflect the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set the VFs' starting CID. If it is > 0, the preceding CIDs belong
	 * to the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match
	 * the PF doorbell size although the two are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the VF doorbell threshold */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}

void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}
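/* Standard SR-IOV routing-ID arithmetic (per the PCIe SR-IOV spec): VF i of
 * a PF at devfn is addressed at devfn + offset + stride * i, where offset
 * and stride come from the SR-IOV capability read in
 * bnx2x_sriov_pci_cfg_info(); a carry past 0xff spills into the bus number,
 * which bnx2x_vf_bus() accounts for.
 */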
static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}

static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		size /= iov->total;
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}

static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}
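/* Walk the IGU CAM to discover the status blocks that belong to this PF's
 * VFs. CAM entries for a PF precede those of its VFs, so the scan tracks
 * the most recently seen PF and attributes subsequent VF entries to it;
 * each valid VF entry registers one SB via bnx2x_vf_set_igu_info().
 */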
static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid, current_pf = 0;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (fid & IGU_FID_ENCODE_IS_PF)
			current_pf = fid & IGU_FID_PF_NUM_MASK;
		else if (current_pf == BP_FUNC(bp))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));
		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		    (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
	DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
}

static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}

static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
		return 0;
	}

	err = -EIO;
	/* verify ari is enabled */
	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
		return 0;
	}

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
		return 0;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hypervisor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs */
	if (iov->total == 0)
		goto failed;

	iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);

	DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
	   num_vfs_param, iov->nr_virtfn);

	/* allocate the vf array */
	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	bnx2x_get_vf_igu_cam_info(bp);

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kzalloc(
		BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
		GFP_KERNEL);

	DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);

	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}

void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	int vf_idx;

	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	DP(BNX2X_MSG_IOV, "about to call disable sriov\n");
	pci_disable_sriov(bp->pdev);
	DP(BNX2X_MSG_IOV, "sriov disabled\n");

	/* disable access to all VFs */
	for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
		bnx2x_pretend_func(bp,
				   HW_VF_HANDLE(bp,
						bp->vfdb->sriov.first_vf_in_pf +
						vf_idx));
		DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
		   bp->vfdb->sriov.first_vf_in_pf + vf_idx);
		bnx2x_vf_enable_internal(bp, 0);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
	}

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}

void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];
		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);

	BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
		       BP_VF_BULLETIN_DMA(bp)->mapping,
		       BP_VF_BULLETIN_DMA(bp)->size);
}

int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	size_t tot_size;
	int i, rc = 0;

	if (!IS_SRIOV(bp))
		return rc;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

		if (cxt->size) {
			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
		} else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
			tot_size);
	BP_VFDB(bp)->sp_dma.size = tot_size;

	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
			tot_size);
	BP_VF_MBX_DMA(bp)->size = tot_size;

	/* allocate local bulletin boards */
	tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
	BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr,
			&BP_VF_BULLETIN_DMA(bp)->mapping, tot_size);
	BP_VF_BULLETIN_DMA(bp)->size = tot_size;

	return 0;

alloc_mem_err:
	return -ENOMEM;
}

static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
	unsigned long q_type = 0;

	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

	/* Queue State object */
	bnx2x_init_queue_obj(bp, &q->sp_obj,
			     cl_id, &q->cid, 1, func_id,
			     bnx2x_vf_sp(bp, vf, q_data),
			     bnx2x_vf_sp_map(bp, vf, q_data),
			     q_type);

	DP(BNX2X_MSG_IOV,
	   "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
	   vf->abs_vfid, q->sp_obj.func_id, q->cid);
}

/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
	int vfid;

	if (!IS_SRIOV(bp)) {
		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
		return 0;
	}

	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);

	/* let FLR complete ... */
	msleep(100);

	/* initialize vf database */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
			BNX2X_CIDS_PER_VF;

		union cdu_context *base_cxt = (union cdu_context *)
			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
			(base_vf_cid & (ILT_PAGE_CIDS-1));

		DP(BNX2X_MSG_IOV,
		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);

		/* init statically provisioned resources */
		bnx2x_iov_static_resc(bp, vf);

		/* queues are initialized during VF-ACQUIRE */

		/* reserve the vf vlan credit */
		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));

		vf->filter_state = 0;
		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);

		/* init mcast object - This object will be re-initialized
		 * during VF-ACQUIRE with the proper cl_id and cid.
		 * It needs to be initialized here so that it can be safely
		 * handled by a subsequent FLR flow.
		 */
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
				     0xFF, 0xFF, 0xFF,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		/* set the mailbox message addresses */
		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
			 MBX_MSG_ALIGNED_SIZE);

		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
			vfid * MBX_MSG_ALIGNED_SIZE;

		/* Enable vf mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	}

	/* Final VF init */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		/* fill in the BDF and bars */
		vf->bus = bnx2x_vf_bus(bp, vfid);
		vf->devfn = bnx2x_vf_devfn(bp, vfid);
		bnx2x_vf_set_bars(bp, vf);

		DP(BNX2X_MSG_IOV,
		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
		   vf->abs_vfid, vf->bus, vf->devfn,
		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
	}

	return 0;
}

/* called by bnx2x_chip_cleanup */
int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return 0;

	/* release all the VFs */
	for_each_vf(bp, i)
		bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */

	return 0;
}

/* called by bnx2x_init_hw_func, returns the next ilt line */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
{
	int i;
	struct bnx2x_ilt *ilt = BP_ILT(bp);

	if (!IS_SRIOV(bp))
		return line;

	/* set vfs ilt lines */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);

		ilt->lines[line+i].page = hw_cxt->addr;
		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
	}
	return line + i;
}
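
/* Worked example (a minimal sketch, hypothetical helper): VF context
 * memory is carved into ILT pages holding ILT_PAGE_CIDS contexts each,
 * so a VF cid maps to page cid / ILT_PAGE_CIDS at offset
 * cid & (ILT_PAGE_CIDS - 1), exactly as BP_VF_CXT_PAGE() is indexed in
 * bnx2x_iov_nic_init() above (ILT_PAGE_CIDS is a power of two, so the
 * mask is equivalent to a modulo).
 */
static inline void bnx2x_vf_cxt_locate_sketch(int base_vf_cid,
					      int *page, int *offset)
{
	*page = base_vf_cid / ILT_PAGE_CIDS;
	*offset = base_vf_cid & (ILT_PAGE_CIDS - 1);
}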

static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
{
	return ((cid >= BNX2X_FIRST_VF_CID) &&
		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
}

static
void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
					struct bnx2x_vf_queue *vfq,
					union event_ring_elem *elem)
{
	unsigned long ramrod_flags = 0;
	int rc = 0;

	/* Always push next commands out, don't wait here */
	set_bit(RAMROD_CONT, &ramrod_flags);

	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
	case BNX2X_FILTER_MAC_PENDING:
		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
					   &ramrod_flags);
		break;
	case BNX2X_FILTER_VLAN_PENDING:
		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
					    &ramrod_flags);
		break;
	default:
		BNX2X_ERR("Unsupported classification command: %d\n",
			  elem->message.data.eth_event.echo);
		return;
	}
	if (rc < 0)
		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
	else if (rc > 0)
		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
}

static
void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
			       struct bnx2x_virtf *vf)
{
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc;

	rparam.mcast_obj = &vf->mcast_obj;
	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
		if (rc < 0)
			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
				  rc);
	}
}

static
void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
				 struct bnx2x_virtf *vf)
{
	smp_mb__before_clear_bit();
	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	smp_mb__after_clear_bit();
}

int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
{
	struct bnx2x_virtf *vf;
	int qidx = 0, abs_vfid;
	u8 opcode;
	u16 cid = 0xffff;

	if (!IS_SRIOV(bp))
		return 1;

	/* first get the cid - the only events we handle here are cfc-delete
	 * and set-mac completion
	 */
	opcode = elem->message.opcode;

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		cid = SW_CID((__force __le32)
			     elem->message.data.cfc_del_event.cid);
		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
	case EVENT_RING_OPCODE_MULTICAST_RULES:
	case EVENT_RING_OPCODE_FILTERS_RULES:
		cid = (elem->message.data.eth_event.echo &
		       BNX2X_SWCID_MASK);
		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		abs_vfid = elem->message.data.vf_flr_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
		   abs_vfid, elem->message.data.malicious_vf_event.err_id);
		goto get_vf;
	default:
		return 1;
	}

	/* check if the cid is in the VF range */
	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
		return 1;
	}

	/* extract vf and rxq index from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. The max number of VFs (per path) is 64
	 */
	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
get_vf:
	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf) {
		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
			  cid, abs_vfid);
		return 0;
	}

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
		   vf->abs_vfid, qidx);
		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
						       &vfq_get(vf,
								qidx)->sp_obj,
						       BNX2X_Q_CMD_CFC_DEL);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
		break;
	case EVENT_RING_OPCODE_MULTICAST_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_mcast_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_FILTERS_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_filters_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
		   vf->abs_vfid);
		/* Do nothing for now */
		break;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n",
		   abs_vfid, elem->message.data.malicious_vf_event.err_id);
		/* Do nothing for now */
		break;
	}
	/* SRIOV: reschedule any 'in_progress' operations */
	bnx2x_iov_sp_event(bp, cid, false);

	return 0;
}

static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
{
	/* extract the vf from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. The max number of VFs (per path) is 64
	 */
	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
}
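
/* Sketch of the cid layout relied on above and in bnx2x_vf_by_cid(): the
 * low BNX2X_VF_CID_WND bits of a VF cid select the queue within the VF
 * and the bits above them select the abs_vfid (the per-path VF count,
 * BNX2X_MAX_NUM_OF_VFS, is a power of two).  As a worked example, if the
 * window were 4 bits wide, cid 0x25 would decode to abs_vfid 2, queue 5;
 * the real width is whatever BNX2X_VF_CID_WND defines.  Hypothetical
 * helper, not used by the driver:
 */
static inline void bnx2x_vf_cid_decode_sketch(u16 cid, int *abs_vfid,
					      int *qidx)
{
	*qidx = cid & ((1 << BNX2X_VF_CID_WND) - 1);
	*abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS - 1);
}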

void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
				struct bnx2x_queue_sp_obj **q_obj)
{
	struct bnx2x_virtf *vf;

	if (!IS_SRIOV(bp))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);

	if (vf) {
		/* extract queue index from vf_cid - relies on the following:
		 * 1. vfid on cid reflects the true abs_vfid
		 * 2. The max number of VFs (per path) is 64
		 */
		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
	} else {
		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
	}
}

void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
{
	struct bnx2x_virtf *vf;

	/* check if the cid is in the VF range */
	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);
	if (vf) {
		/* set in_progress flag */
		atomic_set(&vf->op_in_progress, 1);
		if (queue_work)
			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
	}
}

void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index, num_queues_req;
	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;
	u8 stats_count = 0;
	bool is_fcoe = false;

	if (!IS_SRIOV(bp))
		return;

	if (!NO_FCOE(bp))
		is_fcoe = true;

	/* fcoe adds one global request and one queue request */
	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
		(is_fcoe ? 0 : 1);

	DP(BNX2X_MSG_IOV,
	   "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
	   BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
	   first_queue_query_index + num_queues_req);

	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
		num_queues_req * sizeof(struct per_queue_stats);

	cur_query_entry = &bp->fw_stats_req->
		query[first_queue_query_index + num_queues_req];

	for_each_vf(bp, i) {
		int j;
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (vf->state != VF_ENABLED) {
			DP(BNX2X_MSG_IOV,
			   "vf %d not enabled so no stats for it\n",
			   vf->abs_vfid);
			continue;
		}

		DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
		for_each_vfq(vf, j) {
			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);

			dma_addr_t q_stats_addr =
				vf->fw_stat_map + j * vf->stats_stride;

			/* collect stats from active queues only */
			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
			    BNX2X_Q_LOGICAL_STATE_STOPPED)
				continue;

			/* create stats query entry for this queue */
			cur_query_entry->kind = STATS_TYPE_QUEUE;
			cur_query_entry->index = vfq_stat_id(vf, rxq);
			cur_query_entry->funcID =
				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
			cur_query_entry->address.hi =
				cpu_to_le32(U64_HI(q_stats_addr));
			cur_query_entry->address.lo =
				cpu_to_le32(U64_LO(q_stats_addr));
			DP(BNX2X_MSG_IOV,
			   "added address %x %x for vf %d queue %d client %d\n",
			   cur_query_entry->address.hi,
			   cur_query_entry->address.lo, cur_query_entry->funcID,
			   j, cur_query_entry->index);
			cur_query_entry++;
			cur_data_offset += sizeof(struct per_queue_stats);
			stats_count++;

			/* all stats are coalesced to the leading queue */
			if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
				break;
		}
	}
	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
}

void bnx2x_iov_sp_task(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;
	/* Iterate over all VFs and invoke state transition for VFs with
	 * 'in-progress' slow-path operations
	 */
	DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (!vf) {
			BNX2X_ERR("VF was null! skipping...\n");
			continue;
		}

		if (!list_empty(&vf->op_list_head) &&
		    atomic_read(&vf->op_in_progress)) {
			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
			bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
		}
	}
}

static inline
struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
{
	int i;
	struct bnx2x_virtf *vf = NULL;

	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);
		if (stat_id >= vf->igu_base_id &&
		    stat_id < vf->igu_base_id + vf_sb_count(vf))
			break;
	}
	return vf;
}

/* VF API helpers */
static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
				u8 enable)
{
	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;

	REG_WR(bp, reg, val);
}

static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;

	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), false);
}

static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 val;

	/* clear the VF configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
		 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
		     BNX2X_VF_MAX_QUEUES);
}

static
int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
			    struct vf_pf_resc_request *req_resc)
{
	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);

	return ((req_resc->num_rxqs <= rxq_cnt) &&
		(req_resc->num_txqs <= txq_cnt) &&
		(req_resc->num_sbs <= vf_sb_count(vf)) &&
		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
}
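
/* Note on the "? :" above: it is the GNU C conditional with an omitted
 * middle operand, so vf_rxq_count(vf) ? : max reads "use the count if it
 * is non-zero, else use the maximum".  The same zero-means-maximum
 * convention governs resource requests in bnx2x_vf_acquire() below, and
 * is equivalent to this hypothetical helper (a sketch, not used here):
 */
static inline u8 bnx2x_resc_or_max_sketch(u8 requested, u8 max)
{
	return requested ? requested : max;
}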

/* CORE VF API */
int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
		     struct vf_pf_resc_request *resc)
{
	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
		BNX2X_CIDS_PER_VF;

	union cdu_context *base_cxt = (union cdu_context *)
		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
		(base_vf_cid & (ILT_PAGE_CIDS-1));
	int i;

	/* if state is 'acquired' the VF was not released or FLR'd, in
	 * this case the returned resources match the already acquired
	 * resources. Verify that the requested numbers do not exceed the
	 * already acquired numbers.
	 */
	if (vf->state == VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
		   vf->abs_vfid);

		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
				  vf->abs_vfid);
			return -EINVAL;
		}
		return 0;
	}

	/* Otherwise vf state must be 'free' or 'reset' */
	if (vf->state != VF_FREE && vf->state != VF_RESET) {
		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
			  vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* static allocation:
	 * the global maximum numbers are fixed per VF. Fail the request if
	 * the requested numbers exceed these globals
	 */
	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
		DP(BNX2X_MSG_IOV,
		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
		/* set the max resource in the vf */
		return -ENOMEM;
	}

	/* Set resources counters - 0 request means max available */
	vf_sb_count(vf) = resc->num_sbs;
	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	if (resc->num_mac_filters)
		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
	if (resc->num_vlan_filters)
		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;

	DP(BNX2X_MSG_IOV,
	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
	   vf_sb_count(vf), vf_rxq_count(vf),
	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
	   vf_vlan_rules_cnt(vf));

	/* Initialize the queues */
	if (!vf->vfqs) {
		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
		return -EINVAL;
	}

	for_each_vfq(vf, i) {
		struct bnx2x_vf_queue *q = vfq_get(vf, i);

		if (!q) {
			BNX2X_ERR("q number %d was not allocated\n", i);
			return -EINVAL;
		}

		q->index = i;
		q->cxt = &((base_cxt + i)->eth);
		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;

		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
		   vf->abs_vfid, i, q->index, q->cid, q->cxt);

		/* init SP objects */
		bnx2x_vfq_init(bp, vf, q);
	}
	vf->state = VF_ACQUIRED;
	return 0;
}

int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
{
	struct bnx2x_func_init_params func_init = {0};
	u16 flags = 0;
	int i;

	/* the sb resources are initialized at this point, do the
	 * FW/HW initializations
	 */
	for_each_vf_sb(vf, i)
		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));

	/* Sanity checks */
	if (vf->state != VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
		   vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* let FLR complete ... */
	msleep(100);

	/* FLR cleanup epilogue */
	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
		return -EBUSY;

	/* reset IGU VF statistics: MSIX */
	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);

	/* vf init */
	if (vf->cfg_flags & VF_CFG_STATS)
		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);

	if (vf->cfg_flags & VF_CFG_TPA)
		flags |= FUNC_FLG_TPA;

	if (is_vf_multi(vf))
		flags |= FUNC_FLG_RSS;

	/* function setup */
	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
	func_init.fw_stat_map = vf->fw_stat_map;
	func_init.spq_map = vf->spq_map;
	func_init.spq_prod = 0;
	bnx2x_func_init(bp, &func_init);

	/* Enable the vf */
	bnx2x_vf_enable_access(bp, vf->abs_vfid);
	bnx2x_vf_enable_traffic(bp, vf);

	/* queue protection table */
	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), true);

	vf->state = VF_ENABLED;

	/* update vf bulletin board */
	bnx2x_post_vf_bulletin(bp, vf->index);

	return 0;
}

struct set_vf_state_cookie {
	struct bnx2x_virtf *vf;
	u8 state;
};

static void bnx2x_set_vf_state(void *cookie)
{
	struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;

	p->vf->state = p->state;
}

/* VFOP close (teardown the queues, delete mcasts and close HW) */
static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
	enum bnx2x_vfop_close_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_close,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_CLOSE_QUEUES:

		if (++(qx->qid) < vf_rxq_count(vf)) {
			vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
			if (vfop->rc)
				goto op_err;
			return;
		}

		/* remove multicasts */
		vfop->state = BNX2X_VFOP_CLOSE_HW;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_CLOSE_HW:

		/* disable the interrupts */
		DP(BNX2X_MSG_IOV, "disabling igu\n");
		bnx2x_vf_igu_disable(bp, vf);

		/* disable the VF */
		DP(BNX2X_MSG_IOV, "clearing qtbl\n");
		bnx2x_vf_clr_qtbl(bp, vf);

		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:

	/* need to make sure there are no outstanding stats ramrods which may
	 * cause the device to access the VF's stats buffer which it will free
	 * as soon as we return from the close flow.
	 */
	{
		struct set_vf_state_cookie cookie;

		cookie.vf = vf;
		cookie.state = VF_ACQUIRED;
		bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
	}

	DP(BNX2X_MSG_IOV, "set state to acquired\n");
	bnx2x_vfop_end(bp, vf, vfop);
}

int bnx2x_vfop_close_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		vfop->args.qx.qid = -1; /* loop */
		bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
				 bnx2x_vfop_close, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VF release can be called either: 1. The VF was acquired but
 * not enabled 2. the vf was enabled or in the process of being
 * enabled
 */
static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_release,
		.block = false,
	};

	DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
	   vf->state == VF_FREE ? "Free" :
	   vf->state == VF_ACQUIRED ? "Acquired" :
	   vf->state == VF_ENABLED ? "Enabled" :
	   vf->state == VF_RESET ? "Reset" :
	   "Unknown");

	switch (vf->state) {
	case VF_ENABLED:
		vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
		if (vfop->rc)
			goto op_err;
		return;

	case VF_ACQUIRED:
		DP(BNX2X_MSG_IOV, "about to free resources\n");
		bnx2x_vf_free_resc(bp, vf);
		DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
		goto op_done;

	case VF_FREE:
	case VF_RESET:
		/* do nothing */
		goto op_done;
	default:
		bnx2x_vfop_default(vf->state);
	}
op_err:
	BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	bnx2x_vfop_end(bp, vf, vfop);
}

static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	enum bnx2x_vfop_rss_state state;

	if (!vfop) {
		BNX2X_ERR("vfop was null\n");
		return;
	}

	state = vfop->state;
	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_RSS_CONFIG:
		/* next state */
		vfop->state = BNX2X_VFOP_RSS_DONE;
		bnx2x_config_rss(bp, &vfop->op_p->rss);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
		BNX2X_ERR("RSS error: rc %d\n", vfop->rc);
op_done:
	case BNX2X_VFOP_RSS_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

int bnx2x_vfop_release_cmd(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		bnx2x_vfop_opset(-1, /* use vf->state */
				 bnx2x_vfop_release, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
		       struct bnx2x_virtf *vf,
		       struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss,
				 cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VF release ~ VF close + VF release-resources
 * Release is the ultimate SW shutdown and is called whenever an
 * irrecoverable error is encountered.
 */
void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
{
	struct bnx2x_vfop_cmd cmd = {
		.done = NULL,
		.block = block,
	};
	int rc;

	DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);

	rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
	if (rc)
		WARN(rc,
		     "VF[%d] Failed to allocate resources for release op- rc=%d\n",
		     vf->abs_vfid, rc);
}

static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
				     struct bnx2x_virtf *vf, u32 *sbdf)
{
	*sbdf = vf->devfn | (vf->bus << 8);
}

static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
				     struct bnx2x_vf_bar_info *bar_info)
{
	int n;

	bar_info->nr_bars = bp->vfdb->sriov.nres;
	for (n = 0; n < bar_info->nr_bars; n++)
		bar_info->bars[n] = vf->bars[n];
}

void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      enum channel_tlvs tlv)
{
	/* we don't lock the channel for unsupported tlvs */
	if (!bnx2x_tlv_supported(tlv)) {
		BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
		return;
	}

	/* lock the channel */
	mutex_lock(&vf->op_mutex);

	/* record the locking op */
	vf->op_current = tlv;

	/* log the lock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
	   vf->abs_vfid, tlv);
}

void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
				enum channel_tlvs expected_tlv)
{
	enum channel_tlvs current_tlv;

	if (!vf) {
		BNX2X_ERR("VF was %p\n", vf);
		return;
	}

	current_tlv = vf->op_current;

	/* we don't unlock the channel for unsupported tlvs */
	if (!bnx2x_tlv_supported(expected_tlv))
		return;

	WARN(expected_tlv != current_tlv,
	     "lock mismatch: expected %d found %d", expected_tlv,
	     current_tlv);

	/* clear the locking op */
	vf->op_current = CHANNEL_TLV_NONE;

	/* unlock the channel */
	mutex_unlock(&vf->op_mutex);

	/* log the unlock - use the tlv sampled before it was cleared */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
	   vf->abs_vfid, current_tlv);
}

int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));

	if (!IS_SRIOV(bp)) {
		BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
		return -EINVAL;
	}

	DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
	   num_vfs_param, BNX2X_NR_VIRTFN(bp));

	/* HW channel is only operational when PF is up */
	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
		return -EINVAL;
	}

	/* we are always bound by the total_vfs in the configuration space */
	if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
			  num_vfs_param, BNX2X_NR_VIRTFN(bp));
		num_vfs_param = BNX2X_NR_VIRTFN(bp);
	}

	bp->requested_nr_virtfn = num_vfs_param;
	if (num_vfs_param == 0) {
		pci_disable_sriov(dev);
		return 0;
	} else {
		return bnx2x_enable_sriov(bp);
	}
}

#define IGU_ENTRY_SIZE 4

int bnx2x_enable_sriov(struct bnx2x *bp)
{
	int rc = 0, req_vfs = bp->requested_nr_virtfn;
	int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
	u32 igu_entry, address;
	u16 num_vf_queues;

	if (req_vfs == 0)
		return 0;

	first_vf = bp->vfdb->sriov.first_vf_in_pf;

	/* statically distribute vf sb pool between VFs */
	num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
			      BP_VFDB(bp)->vf_sbs_pool / req_vfs);

	/* zero previous values learned from igu cam */
	for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

		vf->sb_count = 0;
		vf_sb_count(BP_VF(bp, vf_idx)) = 0;
	}
	bp->vfdb->vf_sbs_pool = 0;

	/* prepare IGU cam */
	sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
	address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
		for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
			igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
				vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
				IGU_REG_MAPPING_MEMORY_VALID;
			DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
			   sb_idx, vf_idx);
			REG_WR(bp, address, igu_entry);
			sb_idx++;
			address += IGU_ENTRY_SIZE;
		}
	}

	/* Reinitialize vf database according to igu cam */
	bnx2x_get_vf_igu_cam_info(bp);

	DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
	   BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);

	qcount = 0;
	for_each_vf(bp, vf_idx) {
		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

		/* set local queue arrays */
		vf->vfqs = &bp->vfdb->vfqs[qcount];
		qcount += vf_sb_count(vf);
		bnx2x_iov_static_resc(bp, vf);
	}

	/* prepare msix vectors in VF configuration space - the value in the
	 * PCI configuration space should be the index of the last entry,
	 * namely one less than the actual size of the table
	 */
	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
		bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
		REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
		       num_vf_queues - 1);
		DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
		   vf_idx, num_vf_queues - 1);
	}
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
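
/* As the comment above notes, PCI encodes an MSI-X table of N vectors as
 * N - 1 in the control field, hence "num_vf_queues - 1".  A minimal
 * sketch of that encoding (hypothetical helper, not used here):
 */
static inline u16 bnx2x_msix_size_field_sketch(u16 num_vectors)
{
	return num_vectors - 1;	/* e.g. 8 vectors are encoded as 7 */
}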
3229 */ 3230 DP(BNX2X_MSG_IOV, "about to call enable sriov\n"); 3231 bnx2x_disable_sriov(bp); 3232 rc = pci_enable_sriov(bp->pdev, req_vfs); 3233 if (rc) { 3234 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); 3235 return rc; 3236 } 3237 DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs); 3238 return req_vfs; 3239 } 3240 3241 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) 3242 { 3243 int vfidx; 3244 struct pf_vf_bulletin_content *bulletin; 3245 3246 DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n"); 3247 for_each_vf(bp, vfidx) { 3248 bulletin = BP_VF_BULLETIN(bp, vfidx); 3249 if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN) 3250 bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0); 3251 } 3252 } 3253 3254 void bnx2x_disable_sriov(struct bnx2x *bp) 3255 { 3256 pci_disable_sriov(bp->pdev); 3257 } 3258 3259 static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, 3260 struct bnx2x_virtf **vf, 3261 struct pf_vf_bulletin_content **bulletin) 3262 { 3263 if (bp->state != BNX2X_STATE_OPEN) { 3264 BNX2X_ERR("vf ndo called though PF is down\n"); 3265 return -EINVAL; 3266 } 3267 3268 if (!IS_SRIOV(bp)) { 3269 BNX2X_ERR("vf ndo called though sriov is disabled\n"); 3270 return -EINVAL; 3271 } 3272 3273 if (vfidx >= BNX2X_NR_VIRTFN(bp)) { 3274 BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n", 3275 vfidx, BNX2X_NR_VIRTFN(bp)); 3276 return -EINVAL; 3277 } 3278 3279 /* init members */ 3280 *vf = BP_VF(bp, vfidx); 3281 *bulletin = BP_VF_BULLETIN(bp, vfidx); 3282 3283 if (!*vf) { 3284 BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n", 3285 vfidx); 3286 return -EINVAL; 3287 } 3288 3289 if (!(*vf)->vfqs) { 3290 BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n", 3291 vfidx); 3292 return -EINVAL; 3293 } 3294 3295 if (!*bulletin) { 3296 BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n", 3297 vfidx); 3298 return -EINVAL; 3299 } 3300 3301 return 0; 3302 } 3303 3304 int bnx2x_get_vf_config(struct net_device *dev, int vfidx, 3305 struct ifla_vf_info *ivi) 3306 { 3307 struct bnx2x *bp = netdev_priv(dev); 3308 struct bnx2x_virtf *vf = NULL; 3309 struct pf_vf_bulletin_content *bulletin = NULL; 3310 struct bnx2x_vlan_mac_obj *mac_obj; 3311 struct bnx2x_vlan_mac_obj *vlan_obj; 3312 int rc; 3313 3314 /* sanity and init */ 3315 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 3316 if (rc) 3317 return rc; 3318 mac_obj = &bnx2x_leading_vfq(vf, mac_obj); 3319 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); 3320 if (!mac_obj || !vlan_obj) { 3321 BNX2X_ERR("VF partially initialized\n"); 3322 return -EINVAL; 3323 } 3324 3325 ivi->vf = vfidx; 3326 ivi->qos = 0; 3327 ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */ 3328 ivi->spoofchk = 1; /*always enabled */ 3329 if (vf->state == VF_ENABLED) { 3330 /* mac and vlan are in vlan_mac objects */ 3331 if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj))) 3332 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, 3333 0, ETH_ALEN); 3334 if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj))) 3335 vlan_obj->get_n_elements(bp, vlan_obj, 1, 3336 (u8 *)&ivi->vlan, 0, 3337 VLAN_HLEN); 3338 } else { 3339 /* mac */ 3340 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) 3341 /* mac configured by ndo so its in bulletin board */ 3342 memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); 3343 else 3344 /* function has not been loaded yet. 
			memset(&ivi->mac, 0, ETH_ALEN);

		/* vlan */
		if (bulletin->valid_bitmap & (1 << VLAN_VALID))
			/* vlan configured by ndo so it's in the bulletin board */
			memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
		else
			/* function has not been loaded yet. Show vlans as 0s */
			memset(&ivi->vlan, 0, VLAN_HLEN);
	}

	return 0;
}

/* New mac for VF. Consider these cases:
 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
 *    supply at acquire.
 * 2. VF has already been acquired but has not yet initialized - store in local
 *    bulletin board. mac will be posted on VF bulletin board after VF init. VF
 *    will configure this mac when it is ready.
 * 3. VF has already initialized but has not yet setup a queue - post the new
 *    mac on VF's bulletin board right now. VF will configure this mac when it
 *    is ready.
 * 4. VF has already set a queue - delete any macs already configured for this
 *    queue and manually config the new mac.
 * In any event, once this function has been called refuse any attempts by the
 * VF to configure any mac for itself except for this mac. In case of a race
 * where the VF fails to see the new post on its bulletin board before sending a
 * mac configuration request, the PF will simply fail the request and VF can try
 * again after consulting its bulletin board.
 */
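
/* Usage note: this ndo is normally reached through the standard iproute2
 * path on the hypervisor, e.g. "ip link set <pf-dev> vf <n> mac <addr>";
 * the cases above describe how the PF reconciles such a request with the
 * VF's current state.
 */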
int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc, q_logical_state;
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;

	/* sanity and init */
	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
	if (rc)
		return rc;
	if (!is_valid_ether_addr(mac)) {
		BNX2X_ERR("mac address invalid\n");
		return -EINVAL;
	}

	/* update PF's copy of the VF's bulletin. Will no longer accept mac
	 * configuration requests from the vf unless they match this mac
	 */
	bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
	memcpy(bulletin->mac, mac, ETH_ALEN);

	/* Post update on VF's bulletin board */
	rc = bnx2x_post_vf_bulletin(bp, vfidx);
	if (rc) {
		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
		return rc;
	}

	q_logical_state =
		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
	if (vf->state == VF_ENABLED &&
	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		/* configure the mac in device on this vf's queue */
		unsigned long ramrod_flags = 0;
		struct bnx2x_vlan_mac_obj *mac_obj =
			&bnx2x_leading_vfq(vf, mac_obj);

		rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
		if (rc)
			return rc;

		/* must lock vfpf channel to protect against vf flows */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);

		/* remove existing eth macs */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete eth macs\n");
			rc = -EINVAL;
			goto out;
		}

		/* remove existing uc list macs */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete uc_list macs\n");
			rc = -EINVAL;
			goto out;
		}

		/* configure the new mac to device */
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
				  BNX2X_ETH_MAC, &ramrod_flags);

out:
		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
	}

	return 0;
}

int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
{
	struct bnx2x_queue_state_params q_params = {NULL};
	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
	struct bnx2x_queue_update_params *update_params;
	struct pf_vf_bulletin_content *bulletin = NULL;
	struct bnx2x_rx_mode_ramrod_params rx_ramrod;
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_vlan_mac_obj *vlan_obj;
	unsigned long vlan_mac_flags = 0;
	unsigned long ramrod_flags = 0;
	struct bnx2x_virtf *vf = NULL;
	unsigned long accept_flags;
	int rc;

	/* sanity and init */
	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
	if (rc)
		return rc;

	if (vlan > 4095) {
		BNX2X_ERR("illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
	   vfidx, vlan, 0);

	/* update PF's copy of the VF's bulletin. No point in posting the vlan
	 * to the VF since it doesn't have anything to do with it. But it is
	 * useful to store it here in case the VF is not up yet and we can
	 * only configure the vlan later when it comes up. Treat vlan id 0 as
	 * remove the Host tag.
	 */
	if (vlan > 0)
		bulletin->valid_bitmap |= 1 << VLAN_VALID;
	else
		bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
	bulletin->vlan = vlan;

	/* is vf initialized and queue set up? */
	if (vf->state != VF_ENABLED ||
	    bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
	    BNX2X_Q_LOGICAL_STATE_ACTIVE)
		return rc;

	/* configure the vlan in device on this vf's queue */
	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
	rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
	if (rc)
		return rc;

	/* must lock vfpf channel to protect against vf flows */
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);

	/* remove existing vlans */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
				  &ramrod_flags);
	if (rc) {
		BNX2X_ERR("failed to delete vlans\n");
		rc = -EINVAL;
		goto out;
	}

	/* need to remove/add the VF's accept_any_vlan bit */
	accept_flags = bnx2x_leading_vfq(vf, accept_flags);
	if (vlan)
		clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
	else
		set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);

	bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
			      accept_flags);
	bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
	bnx2x_config_rx_mode(bp, &rx_ramrod);

	/* configure the new vlan to device */
	memset(&ramrod_param, 0, sizeof(ramrod_param));
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	ramrod_param.vlan_mac_obj = vlan_obj;
	ramrod_param.ramrod_flags = ramrod_flags;
	set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		&ramrod_param.user_req.vlan_mac_flags);
	ramrod_param.user_req.u.vlan.vlan = vlan;
	ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
	if (rc) {
		BNX2X_ERR("failed to configure vlan\n");
		rc = -EINVAL;
		goto out;
	}

	/* send queue update ramrod to configure default vlan and silent
	 * vlan removal
	 */
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
	q_params.cmd = BNX2X_Q_CMD_UPDATE;
	q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
	update_params = &q_params.params.update;
	__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
		  &update_params->update_flags);
	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
		  &update_params->update_flags);
	if (vlan == 0) {
		/* if vlan is 0 then we want to leave the VF traffic
		 * untagged, and leave the incoming traffic untouched
		 * (i.e. do not remove any vlan tags).
		 */
		__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
			    &update_params->update_flags);
		__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
			    &update_params->update_flags);
	} else {
		/* configure default vlan to vf queue and set silent
		 * vlan removal (the vf remains unaware of this vlan).
		 */
		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
			  &update_params->update_flags);
		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
			  &update_params->update_flags);
		update_params->def_vlan = vlan;
		update_params->silent_removal_value =
			vlan & VLAN_VID_MASK;
		update_params->silent_removal_mask = VLAN_VID_MASK;
	}

	/* Update the Queue state */
	rc = bnx2x_queue_state_change(bp, &q_params);
	if (rc) {
		BNX2X_ERR("Failed to configure default VLAN\n");
		goto out;
	}

	/* clear the flag indicating that this VF needs its vlan
	 * (will only be set if the HV configured the vlan before the vf was
	 * up and we were called because the VF came up later)
	 */
out:
	vf->cfg_flags &= ~VF_CFG_VLAN;
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);

	return rc;
}

/* crc is the first field in the bulletin board. Compute the crc over the
 * entire bulletin board excluding the crc field itself. Use the length field
 * as the Bulletin Board was posted by a PF with possibly a different version
 * from the vf which will sample it. Therefore, the length is computed by the
 * PF and then used blindly by the VF.
 */
u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
			  struct pf_vf_bulletin_content *bulletin)
{
	return crc32(BULLETIN_CRC_SEED,
		     ((u8 *)bulletin) + sizeof(bulletin->crc),
		     bulletin->length - sizeof(bulletin->crc));
}
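
/* Sketch of the intended use: a reader of the bulletin samples the board
 * and trusts a copy only when the embedded crc matches a locally computed
 * one, as bnx2x_sample_bulletin() below does in its retry loop.
 * Hypothetical helper restating that check:
 */
static inline bool bnx2x_bulletin_crc_ok_sketch(struct bnx2x *bp,
					struct pf_vf_bulletin_content *b)
{
	return b->crc == bnx2x_crc_vf_bulletin(bp, b);
}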

/* Check for new posts on the bulletin board */
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
{
	struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
	int attempts;

	/* bulletin board hasn't changed since last sample */
	if (bp->old_bulletin.version == bulletin.version)
		return PFVF_BULLETIN_UNCHANGED;

	/* validate crc of new bulletin board */
	if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
		/* sampling the structure mid-post may result in corrupted
		 * data; validate the crc to ensure coherency.
		 */
		for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
			bulletin = bp->pf2vf_bulletin->content;
			if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
								  &bulletin))
				break;
			BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
				  bulletin.crc,
				  bnx2x_crc_vf_bulletin(bp, &bulletin));
		}
		if (attempts >= BULLETIN_ATTEMPTS) {
			BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
				  attempts);
			return PFVF_BULLETIN_CRC_ERR;
		}
	}

	/* the mac address in bulletin board is valid and is new */
	if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
	    memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
		/* update new mac to net device */
		memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
	}

	/* the vlan in bulletin board is valid and is new */
	if (bulletin.valid_bitmap & 1 << VLAN_VALID)
		memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);

	/* copy new bulletin board to bp */
	bp->old_bulletin = bulletin;

	return PFVF_BULLETIN_UPDATED;
}

void bnx2x_timer_sriov(struct bnx2x *bp)
{
	bnx2x_sample_bulletin(bp);

	/* if channel is down we need to self destruct */
	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
			&bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
	}
}

void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
{
	/* vf doorbells are embedded within the regview */
	return bp->regview + PXP_VF_ADDR_DB_START;
}

int bnx2x_vf_pci_alloc(struct bnx2x *bp)
{
	mutex_init(&bp->vf2pf_mutex);

	/* allocate vf2pf mailbox for vf to pf channel */
	BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
			sizeof(struct bnx2x_vf_mbx_msg));

	/* allocate pf 2 vf bulletin board */
	BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
			sizeof(union pf_vf_bulletin));

	return 0;

alloc_mem_err:
	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
		       sizeof(struct bnx2x_vf_mbx_msg));
	BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
		       sizeof(union pf_vf_bulletin));
	return -ENOMEM;
}

void bnx2x_iov_channel_down(struct bnx2x *bp)
{
	int vf_idx;
	struct pf_vf_bulletin_content *bulletin;

	if (!IS_SRIOV(bp))
		return;

	for_each_vf(bp, vf_idx) {
		/* locate this VF's bulletin board and update the channel down
		 * bit
		 */
		bulletin = BP_VF_BULLETIN(bp, vf_idx);
		bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;

		/* update vf bulletin board */
		bnx2x_post_vf_bulletin(bp, vf_idx);
	}
}