/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>

/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}

static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
				u8 update)
{
	/* acking a VF sb through the PF - use the GRC */
	u32 ctl;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
		((index << IGU_REGULAR_SB_INDEX_SHIFT) |
		 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
		 (update << IGU_REGULAR_BUPDATE_SHIFT) |
		 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
	mmiowb();
	barrier();

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();
}

/* VFOP - VF slow-path operation support */

#define BNX2X_VFOP_FILTER_ADD_CNT_MAX	0x10000

/* VFOP operations states */
enum bnx2x_vfop_qctor_state {
	BNX2X_VFOP_QCTOR_INIT,
	BNX2X_VFOP_QCTOR_SETUP,
	BNX2X_VFOP_QCTOR_INT_EN
};

enum bnx2x_vfop_qdtor_state {
	BNX2X_VFOP_QDTOR_HALT,
	BNX2X_VFOP_QDTOR_TERMINATE,
	BNX2X_VFOP_QDTOR_CFCDEL,
	BNX2X_VFOP_QDTOR_DONE
};

enum bnx2x_vfop_vlan_mac_state {
	BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
	BNX2X_VFOP_VLAN_MAC_CLEAR,
	BNX2X_VFOP_VLAN_MAC_CHK_DONE,
	BNX2X_VFOP_MAC_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST_0
};

enum bnx2x_vfop_qsetup_state {
	BNX2X_VFOP_QSETUP_CTOR,
	BNX2X_VFOP_QSETUP_VLAN0,
	BNX2X_VFOP_QSETUP_DONE
};

enum bnx2x_vfop_mcast_state {
	BNX2X_VFOP_MCAST_DEL,
	BNX2X_VFOP_MCAST_ADD,
	BNX2X_VFOP_MCAST_CHK_DONE
};

enum bnx2x_vfop_qflr_state {
	BNX2X_VFOP_QFLR_CLR_VLAN,
	BNX2X_VFOP_QFLR_CLR_MAC,
	BNX2X_VFOP_QFLR_TERMINATE,
	BNX2X_VFOP_QFLR_DONE
};

enum bnx2x_vfop_flr_state {
	BNX2X_VFOP_FLR_QUEUES,
	BNX2X_VFOP_FLR_HW
};

enum bnx2x_vfop_close_state {
	BNX2X_VFOP_CLOSE_QUEUES,
	BNX2X_VFOP_CLOSE_HW
};

enum bnx2x_vfop_rxmode_state {
	BNX2X_VFOP_RXMODE_CONFIG,
	BNX2X_VFOP_RXMODE_DONE
};

enum bnx2x_vfop_qteardown_state {
	BNX2X_VFOP_QTEARDOWN_RXMODE,
	BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
	BNX2X_VFOP_QTEARDOWN_CLR_MAC,
	BNX2X_VFOP_QTEARDOWN_CLR_MCAST,
	BNX2X_VFOP_QTEARDOWN_QDTOR,
	BNX2X_VFOP_QTEARDOWN_DONE
};

enum bnx2x_vfop_rss_state {
	BNX2X_VFOP_RSS_CONFIG,
	BNX2X_VFOP_RSS_DONE
};

#define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0)
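
/* Note on the VFOP pattern used throughout this file (added summary):
 * every handler below is a small state machine that switches on
 * vfop->state.  A case typically records the next state, posts a ramrod,
 * and then invokes bnx2x_vfop_finalize(), which either returns (the ramrod
 * completion later re-enters the handler through the command's 'done'
 * callback) or falls through to the next case when the result is already
 * known.  The op_err/op_done/op_pending labels terminate the operation and
 * hand control back via bnx2x_vfop_end().
 */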

void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	DP(BNX2X_MSG_IOV,
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->tx.sb_cq_index,
	   init_params->tx.hc_rate,
	   setup_params->flags,
	   setup_params->txq_params.traffic_type);
}

void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->rx.sb_cq_index,
	   init_params->rx.hc_rate,
	   setup_params->gen_params.mtu,
	   rxq_params->buf_sz,
	   rxq_params->sge_buf_sz,
	   rxq_params->max_sges_pkt,
	   rxq_params->tpa_agg_sz,
	   setup_params->flags,
	   rxq_params->drop_flags,
	   rxq_params->cache_line_log);
}

void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vfop_qctor_params *p,
			   unsigned long q_type)
{
	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

	/* INIT */

	/* Enable host coalescing in the transition to INIT state */
	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

	/* FW SB ID */
	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

	/* context */
	init_p->cxts[0] = q->cxt;

	/* SETUP */

	/* Setup-op general parameters */
	setup_p->gen_params.spcl_id = vf->sp_cl_id;
	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);

	/* Setup-op pause params:
	 * Nothing to do, the pause thresholds are set by default to 0 which
	 * effectively turns off the feature for this queue. We don't want
	 * one queue (VF) to interfere with another queue (another VF)
	 */
	if (vf->cfg_flags & VF_CFG_FW_FC)
		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
			  vf->abs_vfid);
	/* Setup-op flags:
	 * collect statistics, zero statistics, local-switching, security,
	 * OV for Flex10, RSS and MCAST for leading
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

	/* for VFs, enable tx switching, bd coherency, and mac address
	 * anti-spoofing
	 */
	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

	/* Setup-op rx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
	}

	/* Setup-op tx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	}
}
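
/* Queue construction below follows the generic bnx2x queue state machine
 * (added summary of the flow implemented by bnx2x_vfop_qctor()):
 * BNX2X_Q_CMD_INIT takes the queue object out of RESET, BNX2X_Q_CMD_SETUP
 * makes it ACTIVE using the parameters prepared above, and only then are
 * the VF's IGU interrupts for the queue's status block enabled.
 */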

/* VFOP queue construction */
static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qctor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QCTOR_INIT:

		/* has this queue already been opened? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
			DP(BNX2X_MSG_IOV,
			   "Entered qctor but queue was already up. Aborting gracefully\n");
			goto op_done;
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_SETUP;

		q_params->cmd = BNX2X_Q_CMD_INIT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_SETUP:
		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_INT_EN;

		/* copy pre-prepared setup params to the queue-state params */
		vfop->op_p->qctor.qstate.params.setup =
			vfop->op_p->qctor.prep_qsetup;

		q_params->cmd = BNX2X_Q_CMD_SETUP;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_INT_EN:

		/* enable interrupts */
		bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
				    USTORM_ID, 0, IGU_INT_ENABLE, 0);
		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
op_done:
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qctor.qid = qid;
		vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);

		bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
				 bnx2x_vfop_qctor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue destruction */
static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qdtor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QDTOR_HALT:

		/* has this queue already been stopped? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_STOPPED) {
			DP(BNX2X_MSG_IOV,
			   "Entered qdtor but queue was already stopped. Aborting gracefully\n");

			/* next state */
			vfop->state = BNX2X_VFOP_QDTOR_DONE;

			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;

		q_params->cmd = BNX2X_Q_CMD_HALT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_TERMINATE:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;

		q_params->cmd = BNX2X_Q_CMD_TERMINATE;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_CFCDEL:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_DONE;

		q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
	BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
op_done:
	case BNX2X_VFOP_QDTOR_DONE:
		/* invalidate the context */
		if (qdtor->cxt) {
			qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
			qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
		}
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_queue_state_params *qstate =
			&vf->op_params.qctor.qstate;

		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qdtor.qid = qid;
		vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);

		bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
				 bnx2x_vfop_qdtor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
					     cmd->block);
	} else {
		BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid);
		return -ENOMEM;
	}
}

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
	if (vf) {
		/* the first igu entry belonging to VFs of this PF */
		if (!BP_VFDB(bp)->first_vf_igu_entry)
			BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;

		/* the first igu entry belonging to this VF */
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;

		++vf_sb_count(vf);
		++vf->sb_count;
	}
	BP_VFDB(bp)->vf_sbs_pool++;
}

/* VFOP MAC/VLAN helpers */
static inline void bnx2x_vfop_credit(struct bnx2x *bp,
				     struct bnx2x_vfop *vfop,
				     struct bnx2x_vlan_mac_obj *obj)
{
	struct bnx2x_vfop_args_filters *args = &vfop->args.filters;

	/* update credit only if there is no error
	 * and a valid credit counter
	 */
	if (!vfop->rc && args->credit) {
		struct list_head *pos;
		int read_lock;
		int cnt = 0;

		read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
		if (read_lock)
			DP(BNX2X_MSG_SP, "Failed to take vlan mac read lock; continuing anyway\n");

		list_for_each(pos, &obj->head)
			cnt++;

		if (!read_lock)
			bnx2x_vlan_mac_h_read_unlock(bp, obj);

		atomic_set(args->credit, cnt);
	}
}

static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
				   struct bnx2x_vfop_filter *pos,
				   struct bnx2x_vlan_mac_data *user_req)
{
	user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
				   BNX2X_VLAN_MAC_DEL;

	switch (pos->type) {
	case BNX2X_VFOP_FILTER_MAC:
		memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
		break;
	case BNX2X_VFOP_FILTER_VLAN:
		user_req->u.vlan.vlan = pos->vid;
		break;
	default:
		BNX2X_ERR("Invalid filter type, skipping\n");
		return 1;
	}
	return 0;
}

static int bnx2x_vfop_config_list(struct bnx2x *bp,
				  struct bnx2x_vfop_filters *filters,
				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
{
	struct bnx2x_vfop_filter *pos, *tmp;
	struct list_head rollback_list, *filters_list = &filters->head;
	struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
	int rc = 0, cnt = 0;

	INIT_LIST_HEAD(&rollback_list);

	list_for_each_entry_safe(pos, tmp, filters_list, link) {
		if (bnx2x_vfop_set_user_req(bp, pos, user_req))
			continue;

		rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (rc >= 0) {
			cnt += pos->add ? 1 : -1;
			list_move(&pos->link, &rollback_list);
			rc = 0;
		} else if (rc == -EEXIST) {
			rc = 0;
		} else {
			BNX2X_ERR("Failed to add a new vlan_mac command\n");
			break;
		}
	}

	/* rollback if error or too many rules added */
	if (rc || cnt > filters->add_cnt) {
		BNX2X_ERR("error or too many rules added. Performing rollback\n");
		list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
			pos->add = !pos->add;	/* reverse op */
			bnx2x_vfop_set_user_req(bp, pos, user_req);
			bnx2x_config_vlan_mac(bp, vlan_mac);
			list_del(&pos->link);
		}
		cnt = 0;
		if (!rc)
			rc = -EINVAL;
	}
	filters->add_cnt = cnt;
	return rc;
}
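
/* Illustrative walk-through of bnx2x_vfop_config_list() (numbers are made
 * up): given a list of three ADD filters where the second already exists,
 * the first and third are applied and moved to the rollback list, the
 * -EEXIST is swallowed, and filters->add_cnt ends up as 2.  If any command
 * fails hard, or more than add_cnt rules were added, every entry on the
 * rollback list is re-issued with the opposite command.
 */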

/* VFOP set VLAN/MAC */
static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
	struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
	struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;

	enum bnx2x_vfop_vlan_mac_state state = vfop->state;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	bnx2x_vfop_reset_wq(vf);

	switch (state) {
	case BNX2X_VFOP_VLAN_MAC_CLEAR:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do delete */
		vfop->rc = obj->delete_all(bp, obj,
					   &vlan_mac->user_req.vlan_mac_flags,
					   &vlan_mac->ramrod_flags);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do config */
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (vfop->rc == -EEXIST)
			vfop->rc = 0;

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
		vfop->rc = !!obj->raw.check_pending(&obj->raw);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MAC_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do list config */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (vfop->rc)
			goto op_err;

		set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do list config */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (!vfop->rc) {
			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		}
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
op_done:
	kfree(filters);
	bnx2x_vfop_credit(bp, vfop, obj);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

struct bnx2x_vfop_vlan_mac_flags {
	bool drv_only;
	bool dont_consume;
	bool single_cmd;
	bool add;
};

static void
bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
				struct bnx2x_vfop_vlan_mac_flags *flags)
{
	struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;

	memset(ramrod, 0, sizeof(*ramrod));

	/* ramrod flags */
	if (flags->drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
	if (flags->single_cmd)
		set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);

	/* mac_vlan flags */
	if (flags->dont_consume)
		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);

	/* cmd */
	ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
}

static inline void
bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
			   struct bnx2x_vfop_vlan_mac_flags *flags)
{
	bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
	set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
}

static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
				     struct bnx2x_virtf *vf,
				     struct bnx2x_vfop_cmd *cmd,
				     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single */
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false /* don't care */,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    struct bnx2x_vfop_filters *macs,
			    int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = macs,
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false, /* don't care since only the items in the
				       * filters list affect the sp operation,
				       * not the list itself
				       */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
				   struct bnx2x_virtf *vf,
				   struct bnx2x_vfop_cmd *cmd,
				   int qid, u16 vid, bool add)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = false,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = add,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
		ramrod->user_req.u.vlan.vlan = vid;

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      struct bnx2x_vfop_cmd *cmd,
				      int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false, /* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}
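
/* For the VLAN list command below, add_cnt is derived from the per-queue
 * credit counter.  Illustrative numbers (not from the sources): if the VF
 * was allotted vf_vlan_rules_cnt(vf) == 16 rules and 10 are currently
 * configured (the credit counter tracks rules in use, see
 * bnx2x_vfop_credit()), at most 16 - 10 == 6 new rules may be added before
 * the rollback logic in bnx2x_vfop_config_list() kicks in.
 */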
int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
			     struct bnx2x_virtf *vf,
			     struct bnx2x_vfop_cmd *cmd,
			     struct bnx2x_vfop_filters *vlans,
			     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = vlans,
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false, /* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
			atomic_read(filters.credit);

		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue setup (queue constructor + set vlan 0) */
static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qctor.qid;
	enum bnx2x_vfop_qsetup_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_qsetup,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QSETUP_CTOR:
		/* init the queue ctor command */
		vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
		vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QSETUP_VLAN0:
		/* skip if non-leading or FPGA/EMU */
		if (qid)
			goto op_done;

		/* init the queue set-vlan command (for vlan 0) */
		vfop->state = BNX2X_VFOP_QSETUP_DONE;
		vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QSETUP_DONE:
		vf->cfg_flags |= VF_CFG_VLAN;
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
			&bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}

int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qctor.qid = qid;

		bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
				 bnx2x_vfop_qsetup, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qflr_state state = vfop->state;
	struct bnx2x_queue_state_params *qstate;
	struct bnx2x_vfop_cmd cmd;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qflr;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QFLR_CLR_VLAN:
		/* vlan-clear-all: driver-only, don't consume credit */
		vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)))
			vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid,
							      true);
		if (vfop->rc)
			goto op_err;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QFLR_CLR_MAC:
		/* mac-clear-all: driver-only, consume credit */
		vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)))
			vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid,
							     true);
		DP(BNX2X_MSG_IOV,
		   "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
		   vf->abs_vfid, vfop->rc);
		if (vfop->rc)
			goto op_err;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QFLR_TERMINATE:
		qstate = &vfop->op_p->qctor.qstate;
		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
		vfop->state = BNX2X_VFOP_QFLR_DONE;

		DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
		   vf->abs_vfid, qstate->q_obj->state);

		if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
			qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
			qstate->cmd = BNX2X_Q_CMD_TERMINATE;
			vfop->rc = bnx2x_queue_state_change(bp, qstate);
			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
		} else {
			goto op_done;
		}

op_err:
	BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QFLR_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
			       struct bnx2x_virtf *vf,
			       struct bnx2x_vfop_cmd *cmd,
			       int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qx.qid = qid;
		bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
				 bnx2x_vfop_qflr, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP multi-casts */
static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
	struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
	struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
	enum bnx2x_vfop_mcast_state state = vfop->state;
	int i;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_MCAST_DEL:
		/* clear existing mcasts */
		vfop->state = (args->mc_num) ? BNX2X_VFOP_MCAST_ADD
					     : BNX2X_VFOP_MCAST_CHK_DONE;
		mcast->mcast_list_len = vf->mcast_list_len;
		vf->mcast_list_len = args->mc_num;
		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_MCAST_ADD:
		if (raw->check_pending(raw))
			goto op_pending;

		/* update mcast list on the ramrod params */
		INIT_LIST_HEAD(&mcast->mcast_list);
		for (i = 0; i < args->mc_num; i++)
			list_add_tail(&(args->mc[i].link),
				      &mcast->mcast_list);
		mcast->mcast_list_len = args->mc_num;

		/* add new mcasts */
		vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
		vfop->rc = bnx2x_config_mcast(bp, mcast,
					      BNX2X_MCAST_CMD_ADD);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MCAST_CHK_DONE:
		vfop->rc = raw->check_pending(raw) ? 1 : 0;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
op_done:
	kfree(args->mc);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 bnx2x_mac_addr_t *mcasts,
			 int mcast_num, bool drv_only)
{
	struct bnx2x_vfop *vfop = NULL;
	size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
	struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
					   NULL;

	if (!mc_sz || mc) {
		vfop = bnx2x_vfop_add(bp, vf);
		if (vfop) {
			int i;
			struct bnx2x_mcast_ramrod_params *ramrod =
				&vf->op_params.mcast;

			/* set ramrod params */
			memset(ramrod, 0, sizeof(*ramrod));
			ramrod->mcast_obj = &vf->mcast_obj;
			if (drv_only)
				set_bit(RAMROD_DRV_CLR_ONLY,
					&ramrod->ramrod_flags);

			/* copy mcasts pointers */
			vfop->args.mc_list.mc_num = mcast_num;
			vfop->args.mc_list.mc = mc;
			for (i = 0; i < mcast_num; i++)
				mc[i].mac = mcasts[i];

			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
					 bnx2x_vfop_mcast, cmd->done);
			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
						     cmd->block);
		} else {
			kfree(mc);
		}
	}
	return -ENOMEM;
}

/* VFOP rx-mode */
static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
	enum bnx2x_vfop_rxmode_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_RXMODE_CONFIG:
		/* next state */
		vfop->state = BNX2X_VFOP_RXMODE_DONE;

		/* record the accept flags in vfdb so hypervisor can modify them
		 * if necessary
		 */
		bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
			ramrod->rx_accept_flags;
		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
		BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
op_done:
	case BNX2X_VFOP_RXMODE_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
				  struct bnx2x_rx_mode_ramrod_params *ramrod,
				  struct bnx2x_virtf *vf,
				  unsigned long accept_flags)
{
	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);

	memset(ramrod, 0, sizeof(*ramrod));
	ramrod->cid = vfq->cid;
	ramrod->cl_id = vfq_cl_id(vf, vfq);
	ramrod->rx_mode_obj = &bp->rx_mode_obj;
	ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
	ramrod->rx_accept_flags = accept_flags;
	ramrod->tx_accept_flags = accept_flags;
	ramrod->pstate = &vf->filter_state;
	ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	set_bit(RAMROD_RX, &ramrod->ramrod_flags);
	set_bit(RAMROD_TX, &ramrod->ramrod_flags);

	ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
}
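
/* Note (added): the accept_flags value handed to bnx2x_vfop_rxmode_cmd()
 * is applied to both the rx and tx sides by bnx2x_vf_prep_rx_mode() above,
 * and is recorded in the vf queue so the hypervisor can later rewrite it.
 * Passing 0 programs a "drop all" classification - this is how the queue
 * teardown flow below quiesces traffic before removing filters.
 */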
int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid, unsigned long accept_flags)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_rx_mode_ramrod_params *ramrod =
			&vf->op_params.rx_mode;

		bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);

		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
				 bnx2x_vfop_rxmode, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
 * queue destructor)
 */
static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qteardown_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qdown;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QTEARDOWN_RXMODE:
		/* Drop all */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
		vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
		/* vlan-clear-all: don't consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
		/* mac-clear-all: consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MCAST;
		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_MCAST:
		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_QDTOR:
		/* run the queue destruction flow */
		DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
		vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
		DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
		vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
		DP(BNX2X_MSG_IOV, "returned from cmd\n");
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);

	case BNX2X_VFOP_QTEARDOWN_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}

int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	/* for non-leading queues skip directly to qdown state */
	if (vfop) {
		vfop->args.qx.qid = qid;
		bnx2x_vfop_opset(qid == LEADING_IDX ?
				 BNX2X_VFOP_QTEARDOWN_RXMODE :
				 BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
				 cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
					     cmd->block);
	}

	return -ENOMEM;
}

/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}

static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;
	u32 val;

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	if (vf->cfg_flags & VF_CFG_INT_SIMD)
		val |= IGU_VF_CONF_SINGLE_ISR_EN;
	val &= ~IGU_VF_CONF_PARENT_MASK;
	val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT;	/* parent PF */
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	DP(BNX2X_MSG_IOV,
	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
	   vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
				       false /* VF */);

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
				    IGU_INT_DISABLE, 1);
	}
}

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);
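
	/* Note (added): bnx2x_pretend_func() makes subsequent GRC accesses
	 * carry the VF's function id, so the PGLUE internal-enable write
	 * below takes effect for the VF rather than for the PF issuing it.
	 * Whoever pretends must restore the PF's own id afterwards
	 * (BP_ABS_FUNC), as done here.
	 */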
	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		return false;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}

/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct vf_pf_resc_request *resc = &vf->alloc_resc;
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;

	/* divvy up vlan rules */
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
	resc->num_sbs = vf->sb_count;
}

/* FLR routines: */
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* reset the state variables */
	bnx2x_iov_static_resc(bp, vf);
	vf->state = VF_FREE;
}

static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	/* DQ usage counter */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
					"DQ VF usage counter timed out",
					poll_cnt);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* FW cleanup command - poll for the results */
	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
				   poll_cnt))
		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

	/* verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);
}

static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
	enum bnx2x_vfop_flr_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_flr,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_FLR_QUEUES:
		/* the cleanup operations are valid if and only if the VF
		 * was first acquired.
		 */
		if (++(qx->qid) < vf_rxq_count(vf)) {
			vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
						       qx->qid);
			if (vfop->rc)
				goto op_err;
			return;
		}
		/* remove multicasts */
		vfop->state = BNX2X_VFOP_FLR_HW;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
						0, true);
		if (vfop->rc)
			goto op_err;
		return;
	case BNX2X_VFOP_FLR_HW:

		/* dispatch final cleanup and wait for HW queues to flush */
		bnx2x_vf_flr_clnup_hw(bp, vf);

		/* release VF resources */
		bnx2x_vf_free_resc(bp, vf);

		/* re-open the mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);

		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	vf->flr_clnup_stage = VF_FLR_ACK;
	bnx2x_vfop_end(bp, vf, vfop);
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
}

static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
			      struct bnx2x_virtf *vf,
			      vfop_handler_t done)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		vfop->args.qx.qid = -1; /* loop */
		bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
				 bnx2x_vfop_flr, done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
	}
	return -ENOMEM;
}

static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
{
	int i = prev_vf ? prev_vf->index + 1 : 0;
	struct bnx2x_virtf *vf;

	/* find next VF to cleanup */
next_vf_to_clean:
	for (;
	     i < BNX2X_NR_VIRTFN(bp) &&
	     (bnx2x_vf(bp, i, state) != VF_RESET ||
	      bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
	     i++)
		;

	DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
	   BNX2X_NR_VIRTFN(bp));

	if (i < BNX2X_NR_VIRTFN(bp)) {
		vf = BP_VF(bp, i);

		/* lock the vf pf channel */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

		/* invoke the VF FLR SM */
		if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
			BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
				  vf->abs_vfid);

			/* mark the VF to be ACKED and continue */
			vf->flr_clnup_stage = VF_FLR_ACK;
			goto next_vf_to_clean;
		}
		return;
	}

	/* we are done, update vf records */
	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);

		if (vf->flr_clnup_stage != VF_FLR_ACK)
			continue;

		vf->flr_clnup_stage = VF_FLR_EPILOG;
	}

	/* Acknowledge the handled VFs.
	 * We acknowledge all the VFs for which an FLR was requested, even
	 * those we never opened, since the mcp will interrupt us immediately
	 * again if we only ack some of the bits, resulting in an endless
	 * loop. This can happen for example in KVM where an 'all ones' FLR
	 * request is sometimes given by the hypervisor
	 */
	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
			  bp->vfdb->flrd_vfs[i]);

	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

	/* clear the acked bits - better yet if the MCP implemented
	 * write to clear semantics
	 */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}

void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
	int i;

	/* Read FLR'd VFs */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

	DP(BNX2X_MSG_MCP,
	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);
		u32 reset = 0;

		if (vf->abs_vfid < 32)
			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
		else
			reset = bp->vfdb->flrd_vfs[1] &
				(1 << (vf->abs_vfid - 32));

		if (reset) {
			/* set as reset and ready for cleanup */
			vf->state = VF_RESET;
			vf->flr_clnup_stage = VF_FLR_CLN;

			DP(BNX2X_MSG_IOV,
			   "Initiating Final cleanup for VF %d\n",
			   vf->abs_vfid);
		}
	}

	/* do the FLR cleanup for all marked VFs */
	bnx2x_vf_flr_clnup(bp, NULL);
}
/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflect the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set VFs starting CID. If it's > 0, the preceding CIDs belong to
	 * the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size  0 - *B, 4 - 128B. We set it here to match
	 * the PF doorbell size although the 2 are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the VF doorbell threshold */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}

void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}
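
/* The two helpers below implement the standard SR-IOV routing-id
 * arithmetic: VF i lives at (PF devfn + VF offset + VF stride * i), with
 * any overflow past 8 bits spilling into the bus number.  Illustrative
 * numbers (not from the sources): a PF at devfn 0x08 with offset 0x80 and
 * stride 2 puts VF 3 at devfn 0x08 + 0x80 + 6 == 0x8e on the same bus.
 */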
static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}

static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		size /= iov->total;
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}

static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}

static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid, current_pf = 0;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (fid & IGU_FID_ENCODE_IS_PF)
			current_pf = fid & IGU_FID_PF_NUM_MASK;
		else if (current_pf == BP_FUNC(bp))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));
		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		    (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
	DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
}

static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}

static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}
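
/* Summary of the checks bnx2x_iov_init_one() performs before enabling
 * SR-IOV (added): the function must be a PF on an E2+ chip with the SR-IOV
 * capability present, num_vfs_param must be non-zero, the PF's L2 CIDs
 * must not overlap the VF CID range, the interrupt mode must allow MSI-X,
 * ARI forwarding must be enabled on the upstream bridge, and the IGU must
 * be in normal (non-backward-compatible) mode.  Failing any check leaves
 * SR-IOV disabled without failing the probe.
 */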
Abort SRIOV\n", 1946 BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID); 1947 return 0; 1948 } 1949
1950 /* SRIOV can be enabled only with MSIX */ 1951 if (int_mode_param == BNX2X_INT_MODE_MSI || 1952 int_mode_param == BNX2X_INT_MODE_INTX) { 1953 BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n"); 1954 return 0; 1955 } 1956
1957 err = -EIO; 1958 /* verify ari is enabled */ 1959 if (!bnx2x_ari_enabled(bp->pdev)) { 1960 BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV cannot be enabled\n"); 1961 return 0; 1962 } 1963
1964 /* verify igu is in normal mode */ 1965 if (CHIP_INT_MODE_IS_BC(bp)) { 1966 BNX2X_ERR("IGU not in normal mode, SRIOV cannot be enabled\n"); 1967 return 0; 1968 } 1969
1970 /* allocate the vfs database */ 1971 bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL); 1972 if (!bp->vfdb) { 1973 BNX2X_ERR("failed to allocate vf database\n"); 1974 err = -ENOMEM; 1975 goto failed; 1976 } 1977
1978 /* get the sriov info - Linux already collected all the pertinent 1979 * information, however the sriov structure is for the private use 1980 * of the pci module. Also we want this information regardless 1981 * of the hypervisor. 1982 */ 1983 iov = &(bp->vfdb->sriov); 1984 err = bnx2x_sriov_info(bp, iov); 1985 if (err) 1986 goto failed; 1987
1988 /* SR-IOV capability was enabled but there are no VFs */ 1989 if (iov->total == 0) 1990 goto failed; 1991
1992 iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param); 1993
1994 DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n", 1995 num_vfs_param, iov->nr_virtfn); 1996
1997 /* allocate the vf array */ 1998 bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) * 1999 BNX2X_NR_VIRTFN(bp), GFP_KERNEL); 2000 if (!bp->vfdb->vfs) { 2001 BNX2X_ERR("failed to allocate vf array\n"); 2002 err = -ENOMEM; 2003 goto failed; 2004 } 2005
2006 /* Initial VF init - index and abs_vfid - nr_virtfn must be set */ 2007 for_each_vf(bp, i) { 2008 bnx2x_vf(bp, i, index) = i; 2009 bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; 2010 bnx2x_vf(bp, i, state) = VF_FREE; 2011 INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head)); 2012 mutex_init(&bnx2x_vf(bp, i, op_mutex)); 2013 bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; 2014 } 2015
2016 /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ 2017 bnx2x_get_vf_igu_cam_info(bp); 2018
2019 /* allocate the queue arrays for all VFs */ 2020 bp->vfdb->vfqs = kzalloc( 2021 BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue), 2022 GFP_KERNEL); 2023
2024 DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs); 2025
2026 if (!bp->vfdb->vfqs) { 2027 BNX2X_ERR("failed to allocate vf queue array\n"); 2028 err = -ENOMEM; 2029 goto failed; 2030 } 2031
2032 return 0; 2033 failed: 2034 DP(BNX2X_MSG_IOV, "Failed err=%d\n", err); 2035 __bnx2x_iov_free_vfdb(bp); 2036 return err; 2037 } 2038
2039 void bnx2x_iov_remove_one(struct bnx2x *bp) 2040 { 2041 int vf_idx; 2042
2043 /* if SRIOV is not enabled there's nothing to do */ 2044 if (!IS_SRIOV(bp)) 2045 return; 2046
2047 DP(BNX2X_MSG_IOV, "about to call disable sriov\n"); 2048 pci_disable_sriov(bp->pdev); 2049 DP(BNX2X_MSG_IOV, "sriov disabled\n"); 2050
2051 /* disable access to all VFs */ 2052 for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) { 2053 bnx2x_pretend_func(bp, 2054 HW_VF_HANDLE(bp, 2055 bp->vfdb->sriov.first_vf_in_pf + 2056 vf_idx)); 2057 DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n", 2058 bp->vfdb->sriov.first_vf_in_pf + vf_idx); 2059 bnx2x_vf_enable_internal(bp, 0); 2060 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
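		/* Every pretend above is bracketed: GRC accesses made after
		 * bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid)) execute
		 * in the VF's context, and the PF context is restored with
		 * bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)). The restore is
		 * mandatory - a PF left pretending to be a VF would misdirect
		 * every subsequent register access.
		 */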
2061 } 2062 2063 /* free vf database */ 2064 __bnx2x_iov_free_vfdb(bp); 2065 } 2066 2067 void bnx2x_iov_free_mem(struct bnx2x *bp) 2068 { 2069 int i; 2070 2071 if (!IS_SRIOV(bp)) 2072 return; 2073 2074 /* free vfs hw contexts */ 2075 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 2076 struct hw_dma *cxt = &bp->vfdb->context[i]; 2077 BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size); 2078 } 2079 2080 BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr, 2081 BP_VFDB(bp)->sp_dma.mapping, 2082 BP_VFDB(bp)->sp_dma.size); 2083 2084 BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr, 2085 BP_VF_MBX_DMA(bp)->mapping, 2086 BP_VF_MBX_DMA(bp)->size); 2087 2088 BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr, 2089 BP_VF_BULLETIN_DMA(bp)->mapping, 2090 BP_VF_BULLETIN_DMA(bp)->size); 2091 } 2092 2093 int bnx2x_iov_alloc_mem(struct bnx2x *bp) 2094 { 2095 size_t tot_size; 2096 int i, rc = 0; 2097 2098 if (!IS_SRIOV(bp)) 2099 return rc; 2100 2101 /* allocate vfs hw contexts */ 2102 tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) * 2103 BNX2X_CIDS_PER_VF * sizeof(union cdu_context); 2104 2105 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 2106 struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i); 2107 cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ); 2108 2109 if (cxt->size) { 2110 BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size); 2111 } else { 2112 cxt->addr = NULL; 2113 cxt->mapping = 0; 2114 } 2115 tot_size -= cxt->size; 2116 } 2117 2118 /* allocate vfs ramrods dma memory - client_init and set_mac */ 2119 tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp); 2120 BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping, 2121 tot_size); 2122 BP_VFDB(bp)->sp_dma.size = tot_size; 2123 2124 /* allocate mailboxes */ 2125 tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; 2126 BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping, 2127 tot_size); 2128 BP_VF_MBX_DMA(bp)->size = tot_size; 2129 2130 /* allocate local bulletin boards */ 2131 tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE; 2132 BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr, 2133 &BP_VF_BULLETIN_DMA(bp)->mapping, tot_size); 2134 BP_VF_BULLETIN_DMA(bp)->size = tot_size; 2135 2136 return 0; 2137 2138 alloc_mem_err: 2139 return -ENOMEM; 2140 } 2141 2142 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, 2143 struct bnx2x_vf_queue *q) 2144 { 2145 u8 cl_id = vfq_cl_id(vf, q); 2146 u8 func_id = FW_VF_HANDLE(vf->abs_vfid); 2147 unsigned long q_type = 0; 2148 2149 set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); 2150 set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); 2151 2152 /* Queue State object */ 2153 bnx2x_init_queue_obj(bp, &q->sp_obj, 2154 cl_id, &q->cid, 1, func_id, 2155 bnx2x_vf_sp(bp, vf, q_data), 2156 bnx2x_vf_sp_map(bp, vf, q_data), 2157 q_type); 2158 2159 DP(BNX2X_MSG_IOV, 2160 "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n", 2161 vf->abs_vfid, q->sp_obj.func_id, q->cid); 2162 } 2163 2164 /* called by bnx2x_nic_load */ 2165 int bnx2x_iov_nic_init(struct bnx2x *bp) 2166 { 2167 int vfid; 2168 2169 if (!IS_SRIOV(bp)) { 2170 DP(BNX2X_MSG_IOV, "vfdb was not allocated\n"); 2171 return 0; 2172 } 2173 2174 DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn); 2175 2176 /* let FLR complete ... 
*/ 2177 msleep(100); 2178 2179 /* initialize vf database */ 2180 for_each_vf(bp, vfid) { 2181 struct bnx2x_virtf *vf = BP_VF(bp, vfid); 2182 2183 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) * 2184 BNX2X_CIDS_PER_VF; 2185 2186 union cdu_context *base_cxt = (union cdu_context *) 2187 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + 2188 (base_vf_cid & (ILT_PAGE_CIDS-1)); 2189 2190 DP(BNX2X_MSG_IOV, 2191 "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n", 2192 vf->abs_vfid, vf_sb_count(vf), base_vf_cid, 2193 BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt); 2194 2195 /* init statically provisioned resources */ 2196 bnx2x_iov_static_resc(bp, vf); 2197 2198 /* queues are initialized during VF-ACQUIRE */ 2199 2200 /* reserve the vf vlan credit */ 2201 bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf)); 2202 2203 vf->filter_state = 0; 2204 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); 2205 2206 /* init mcast object - This object will be re-initialized 2207 * during VF-ACQUIRE with the proper cl_id and cid. 2208 * It needs to be initialized here so that it can be safely 2209 * handled by a subsequent FLR flow. 2210 */ 2211 vf->mcast_list_len = 0; 2212 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF, 2213 0xFF, 0xFF, 0xFF, 2214 bnx2x_vf_sp(bp, vf, mcast_rdata), 2215 bnx2x_vf_sp_map(bp, vf, mcast_rdata), 2216 BNX2X_FILTER_MCAST_PENDING, 2217 &vf->filter_state, 2218 BNX2X_OBJ_TYPE_RX_TX); 2219 2220 /* set the mailbox message addresses */ 2221 BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *) 2222 (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid * 2223 MBX_MSG_ALIGNED_SIZE); 2224 2225 BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping + 2226 vfid * MBX_MSG_ALIGNED_SIZE; 2227 2228 /* Enable vf mailbox */ 2229 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); 2230 } 2231 2232 /* Final VF init */ 2233 for_each_vf(bp, vfid) { 2234 struct bnx2x_virtf *vf = BP_VF(bp, vfid); 2235 2236 /* fill in the BDF and bars */ 2237 vf->bus = bnx2x_vf_bus(bp, vfid); 2238 vf->devfn = bnx2x_vf_devfn(bp, vfid); 2239 bnx2x_vf_set_bars(bp, vf); 2240 2241 DP(BNX2X_MSG_IOV, 2242 "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n", 2243 vf->abs_vfid, vf->bus, vf->devfn, 2244 (unsigned)vf->bars[0].bar, vf->bars[0].size, 2245 (unsigned)vf->bars[1].bar, vf->bars[1].size, 2246 (unsigned)vf->bars[2].bar, vf->bars[2].size); 2247 } 2248 2249 return 0; 2250 } 2251 2252 /* called by bnx2x_chip_cleanup */ 2253 int bnx2x_iov_chip_cleanup(struct bnx2x *bp) 2254 { 2255 int i; 2256 2257 if (!IS_SRIOV(bp)) 2258 return 0; 2259 2260 /* release all the VFs */ 2261 for_each_vf(bp, i) 2262 bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */ 2263 2264 return 0; 2265 } 2266 2267 /* called by bnx2x_init_hw_func, returns the next ilt line */ 2268 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) 2269 { 2270 int i; 2271 struct bnx2x_ilt *ilt = BP_ILT(bp); 2272 2273 if (!IS_SRIOV(bp)) 2274 return line; 2275 2276 /* set vfs ilt lines */ 2277 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 2278 struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i); 2279 2280 ilt->lines[line+i].page = hw_cxt->addr; 2281 ilt->lines[line+i].page_mapping = hw_cxt->mapping; 2282 ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */ 2283 } 2284 return line + i; 2285 } 2286 2287 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid) 2288 { 2289 return ((cid >= BNX2X_FIRST_VF_CID) && 2290 ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS)); 2291 } 2292 2293 static 2294 void 
bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, 2295 struct bnx2x_vf_queue *vfq, 2296 union event_ring_elem *elem) 2297 { 2298 unsigned long ramrod_flags = 0; 2299 int rc = 0; 2300 2301 /* Always push next commands out, don't wait here */ 2302 set_bit(RAMROD_CONT, &ramrod_flags); 2303 2304 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { 2305 case BNX2X_FILTER_MAC_PENDING: 2306 rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, 2307 &ramrod_flags); 2308 break; 2309 case BNX2X_FILTER_VLAN_PENDING: 2310 rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem, 2311 &ramrod_flags); 2312 break; 2313 default: 2314 BNX2X_ERR("Unsupported classification command: %d\n", 2315 elem->message.data.eth_event.echo); 2316 return; 2317 } 2318 if (rc < 0) 2319 BNX2X_ERR("Failed to schedule new commands: %d\n", rc); 2320 else if (rc > 0) 2321 DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n"); 2322 } 2323 2324 static 2325 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp, 2326 struct bnx2x_virtf *vf) 2327 { 2328 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 2329 int rc; 2330 2331 rparam.mcast_obj = &vf->mcast_obj; 2332 vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw); 2333 2334 /* If there are pending mcast commands - send them */ 2335 if (vf->mcast_obj.check_pending(&vf->mcast_obj)) { 2336 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 2337 if (rc < 0) 2338 BNX2X_ERR("Failed to send pending mcast commands: %d\n", 2339 rc); 2340 } 2341 } 2342 2343 static 2344 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, 2345 struct bnx2x_virtf *vf) 2346 { 2347 smp_mb__before_clear_bit(); 2348 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); 2349 smp_mb__after_clear_bit(); 2350 } 2351 2352 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) 2353 { 2354 struct bnx2x_virtf *vf; 2355 int qidx = 0, abs_vfid; 2356 u8 opcode; 2357 u16 cid = 0xffff; 2358 2359 if (!IS_SRIOV(bp)) 2360 return 1; 2361 2362 /* first get the cid - the only events we handle here are cfc-delete 2363 * and set-mac completion 2364 */ 2365 opcode = elem->message.opcode; 2366 2367 switch (opcode) { 2368 case EVENT_RING_OPCODE_CFC_DEL: 2369 cid = SW_CID((__force __le32) 2370 elem->message.data.cfc_del_event.cid); 2371 DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid); 2372 break; 2373 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 2374 case EVENT_RING_OPCODE_MULTICAST_RULES: 2375 case EVENT_RING_OPCODE_FILTERS_RULES: 2376 cid = (elem->message.data.eth_event.echo & 2377 BNX2X_SWCID_MASK); 2378 DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); 2379 break; 2380 case EVENT_RING_OPCODE_VF_FLR: 2381 abs_vfid = elem->message.data.vf_flr_event.vf_id; 2382 DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n", 2383 abs_vfid); 2384 goto get_vf; 2385 case EVENT_RING_OPCODE_MALICIOUS_VF: 2386 abs_vfid = elem->message.data.malicious_vf_event.vf_id; 2387 DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n", 2388 abs_vfid, elem->message.data.malicious_vf_event.err_id); 2389 goto get_vf; 2390 default: 2391 return 1; 2392 } 2393 2394 /* check if the cid is the VF range */ 2395 if (!bnx2x_iov_is_vf_cid(bp, cid)) { 2396 DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid); 2397 return 1; 2398 } 2399 2400 /* extract vf and rxq index from vf_cid - relies on the following: 2401 * 1. vfid on cid reflects the true abs_vfid 2402 * 2. 
The max number of VFs (per path) is 64 2403 */ 2404 qidx = cid & ((1 << BNX2X_VF_CID_WND)-1); 2405 abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 2406 get_vf: 2407 vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 2408 2409 if (!vf) { 2410 BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n", 2411 cid, abs_vfid); 2412 return 0; 2413 } 2414 2415 switch (opcode) { 2416 case EVENT_RING_OPCODE_CFC_DEL: 2417 DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n", 2418 vf->abs_vfid, qidx); 2419 vfq_get(vf, qidx)->sp_obj.complete_cmd(bp, 2420 &vfq_get(vf, 2421 qidx)->sp_obj, 2422 BNX2X_Q_CMD_CFC_DEL); 2423 break; 2424 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 2425 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n", 2426 vf->abs_vfid, qidx); 2427 bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem); 2428 break; 2429 case EVENT_RING_OPCODE_MULTICAST_RULES: 2430 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n", 2431 vf->abs_vfid, qidx); 2432 bnx2x_vf_handle_mcast_eqe(bp, vf); 2433 break; 2434 case EVENT_RING_OPCODE_FILTERS_RULES: 2435 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n", 2436 vf->abs_vfid, qidx); 2437 bnx2x_vf_handle_filters_eqe(bp, vf); 2438 break; 2439 case EVENT_RING_OPCODE_VF_FLR: 2440 DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n", 2441 vf->abs_vfid); 2442 /* Do nothing for now */ 2443 break; 2444 case EVENT_RING_OPCODE_MALICIOUS_VF: 2445 DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n", 2446 abs_vfid, elem->message.data.malicious_vf_event.err_id); 2447 /* Do nothing for now */ 2448 break; 2449 } 2450 /* SRIOV: reschedule any 'in_progress' operations */ 2451 bnx2x_iov_sp_event(bp, cid, false); 2452 2453 return 0; 2454 } 2455 2456 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) 2457 { 2458 /* extract the vf from vf_cid - relies on the following: 2459 * 1. vfid on cid reflects the true abs_vfid 2460 * 2. The max number of VFs (per path) is 64 2461 */ 2462 int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 2463 return bnx2x_vf_by_abs_fid(bp, abs_vfid); 2464 } 2465 2466 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, 2467 struct bnx2x_queue_sp_obj **q_obj) 2468 { 2469 struct bnx2x_virtf *vf; 2470 2471 if (!IS_SRIOV(bp)) 2472 return; 2473 2474 vf = bnx2x_vf_by_cid(bp, vf_cid); 2475 2476 if (vf) { 2477 /* extract queue index from vf_cid - relies on the following: 2478 * 1. vfid on cid reflects the true abs_vfid 2479 * 2. 
The max number of VFs (per path) is 64 2480 */ 2481 int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1); 2482 *q_obj = &bnx2x_vfq(vf, q_index, sp_obj); 2483 } else { 2484 BNX2X_ERR("No vf matching cid %d\n", vf_cid); 2485 } 2486 } 2487
2488 void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work) 2489 { 2490 struct bnx2x_virtf *vf; 2491
2492 /* check if the cid is in the VF range */ 2493 if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid)) 2494 return; 2495
2496 vf = bnx2x_vf_by_cid(bp, vf_cid); 2497 if (vf) { 2498 /* set in_progress flag */ 2499 atomic_set(&vf->op_in_progress, 1); 2500 if (queue_work) 2501 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); 2502 } 2503 } 2504
2505 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) 2506 { 2507 int i; 2508 int first_queue_query_index, num_queues_req; 2509 dma_addr_t cur_data_offset; 2510 struct stats_query_entry *cur_query_entry; 2511 u8 stats_count = 0; 2512 bool is_fcoe = false; 2513
2514 if (!IS_SRIOV(bp)) 2515 return; 2516
2517 if (!NO_FCOE(bp)) 2518 is_fcoe = true; 2519
2520 /* fcoe adds one global request and one queue request */ 2521 num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe; 2522 first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 2523 (is_fcoe ? 0 : 1); 2524
2525 DP(BNX2X_MSG_IOV, 2526 "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n", 2527 BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, 2528 first_queue_query_index + num_queues_req); 2529
2530 cur_data_offset = bp->fw_stats_data_mapping + 2531 offsetof(struct bnx2x_fw_stats_data, queue_stats) + 2532 num_queues_req * sizeof(struct per_queue_stats); 2533
2534 cur_query_entry = &bp->fw_stats_req-> 2535 query[first_queue_query_index + num_queues_req]; 2536
2537 for_each_vf(bp, i) { 2538 int j; 2539 struct bnx2x_virtf *vf = BP_VF(bp, i); 2540
2541 if (vf->state != VF_ENABLED) { 2542 DP(BNX2X_MSG_IOV, 2543 "vf %d not enabled so no stats for it\n", 2544 vf->abs_vfid); 2545 continue; 2546 } 2547
2548 DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid); 2549 for_each_vfq(vf, j) { 2550 struct bnx2x_vf_queue *rxq = vfq_get(vf, j); 2551
2552 dma_addr_t q_stats_addr = 2553 vf->fw_stat_map + j * vf->stats_stride; 2554
2555 /* collect stats from active queues only */ 2556 if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) == 2557 BNX2X_Q_LOGICAL_STATE_STOPPED) 2558 continue; 2559
2560 /* create stats query entry for this queue */ 2561 cur_query_entry->kind = STATS_TYPE_QUEUE; 2562 cur_query_entry->index = vfq_stat_id(vf, rxq); 2563 cur_query_entry->funcID = 2564 cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid)); 2565 cur_query_entry->address.hi = 2566 cpu_to_le32(U64_HI(q_stats_addr)); 2567 cur_query_entry->address.lo = 2568 cpu_to_le32(U64_LO(q_stats_addr)); 2569 DP(BNX2X_MSG_IOV, 2570 "added address %x %x for vf %d queue %d client %d\n", 2571 cur_query_entry->address.hi, 2572 cur_query_entry->address.lo, cur_query_entry->funcID, 2573 j, cur_query_entry->index); 2574 cur_query_entry++; 2575 cur_data_offset += sizeof(struct per_queue_stats); 2576 stats_count++; 2577
2578 /* all stats are coalesced to the leading queue */ 2579 if (vf->cfg_flags & VF_CFG_STATS_COALESCE) 2580 break; 2581 } 2582 } 2583 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; 2584 }
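/* The loop above appends one STATS_TYPE_QUEUE entry per active VF queue,
 * right after the PF's own queries. A sketch of the resulting layout for a
 * VF with two active rxqs (illustrative values only):
 *
 *	query[N]   -> vf->fw_stat_map + 0 * vf->stats_stride
 *	query[N+1] -> vf->fw_stat_map + 1 * vf->stats_stride
 *
 * where N = first_queue_query_index + num_queues_req. A VF with
 * VF_CFG_STATS_COALESCE set contributes a single entry for its leading
 * queue instead.
 */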
2585
2586 void bnx2x_iov_sp_task(struct bnx2x *bp) 2587 { 2588 int i; 2589
2590 if (!IS_SRIOV(bp)) 2591 return; 2592 /* Iterate over all VFs and invoke state transition for VFs with 2593 * 'in-progress' slow-path operations 2594 */ 2595 DP(BNX2X_MSG_IOV, "searching for pending vf operations\n"); 2596 for_each_vf(bp, i) { 2597 struct bnx2x_virtf *vf = BP_VF(bp, i); 2598
2599 if (!vf) { 2600 BNX2X_ERR("VF was null! skipping...\n"); 2601 continue; 2602 } 2603
2604 if (!list_empty(&vf->op_list_head) && 2605 atomic_read(&vf->op_in_progress)) { 2606 DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i); 2607 bnx2x_vfop_cur(bp, vf)->transition(bp, vf); 2608 } 2609 } 2610 } 2611
2612 static inline 2613 struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id) 2614 { 2615 int i; 2616 struct bnx2x_virtf *vf = NULL; 2617
2618 for_each_vf(bp, i) { 2619 vf = BP_VF(bp, i); 2620 if (stat_id >= vf->igu_base_id && 2621 stat_id < vf->igu_base_id + vf_sb_count(vf)) 2622 break; 2623 } 2624 return vf; 2625 } 2626
2627 /* VF API helpers */ 2628 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid, 2629 u8 enable) 2630 { 2631 u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4; 2632 u32 val = enable ? (abs_vfid | (1 << 6)) : 0; 2633
2634 REG_WR(bp, reg, val); 2635 } 2636
2637 static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf) 2638 { 2639 int i; 2640
2641 for_each_vfq(vf, i) 2642 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, 2643 vfq_qzone_id(vf, vfq_get(vf, i)), false); 2644 } 2645
2646 static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf) 2647 { 2648 u32 val; 2649
2650 /* clear the VF configuration - pretend */ 2651 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); 2652 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); 2653 val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN | 2654 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK); 2655 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); 2656 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 2657 } 2658
2659 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf) 2660 { 2661 return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF), 2662 BNX2X_VF_MAX_QUEUES); 2663 } 2664
2665 static 2666 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf, 2667 struct vf_pf_resc_request *req_resc) 2668 { 2669 u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); 2670 u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); 2671
2672 return ((req_resc->num_rxqs <= rxq_cnt) && 2673 (req_resc->num_txqs <= txq_cnt) && 2674 (req_resc->num_sbs <= vf_sb_count(vf)) && 2675 (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) && 2676 (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf))); 2677 }
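/* In a vf_pf_resc_request a count of zero means "maximum available", which
 * is why the queue counts above fall back to bnx2x_vf_max_queue_cnt(). A
 * minimal acquire request might look like (sketch, illustrative values):
 *
 *	struct vf_pf_resc_request req = { 0 };
 *
 *	req.num_sbs = 1;	(taken verbatim below)
 *	req.num_rxqs = 0;	(expands to bnx2x_vf_max_queue_cnt())
 *	req.num_txqs = 0;	(likewise)
 *	rc = bnx2x_vf_acquire(bp, vf, &req);
 */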
2678
2679 /* CORE VF API */ 2680 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, 2681 struct vf_pf_resc_request *resc) 2682 { 2683 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) * 2684 BNX2X_CIDS_PER_VF; 2685
2686 union cdu_context *base_cxt = (union cdu_context *) 2687 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + 2688 (base_vf_cid & (ILT_PAGE_CIDS-1)); 2689 int i; 2690
2691 /* if the state is 'acquired' the VF was neither released nor FLR'd; 2692 * in this case the returned resources match the already 2693 * acquired resources. Verify that the requested numbers do 2694 * not exceed the already acquired numbers. 2695 */ 2696 if (vf->state == VF_ACQUIRED) { 2697 DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n", 2698 vf->abs_vfid); 2699
2700 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { 2701 BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n", 2702 vf->abs_vfid); 2703 return -EINVAL; 2704 } 2705 return 0; 2706 } 2707
2708 /* Otherwise vf state must be 'free' or 'reset' */ 2709 if (vf->state != VF_FREE && vf->state != VF_RESET) { 2710 BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n", 2711 vf->abs_vfid, vf->state); 2712 return -EINVAL; 2713 } 2714
2715 /* static allocation: 2716 * the global maximum numbers are fixed per VF. Fail the request if 2717 * the requested numbers exceed these globals 2718 */ 2719 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { 2720 DP(BNX2X_MSG_IOV, 2721 "cannot fulfill vf resource request. Placing maximal available values in response\n"); 2722 /* set the max resource in the vf */ 2723 return -ENOMEM; 2724 } 2725
2726 /* Set resources counters - 0 request means max available */ 2727 vf_sb_count(vf) = resc->num_sbs; 2728 vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf); 2729 vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf); 2730 if (resc->num_mac_filters) 2731 vf_mac_rules_cnt(vf) = resc->num_mac_filters; 2732 if (resc->num_vlan_filters) 2733 vf_vlan_rules_cnt(vf) = resc->num_vlan_filters; 2734
2735 DP(BNX2X_MSG_IOV, 2736 "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n", 2737 vf_sb_count(vf), vf_rxq_count(vf), 2738 vf_txq_count(vf), vf_mac_rules_cnt(vf), 2739 vf_vlan_rules_cnt(vf)); 2740
2741 /* Initialize the queues */ 2742 if (!vf->vfqs) { 2743 DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n"); 2744 return -EINVAL; 2745 } 2746
2747 for_each_vfq(vf, i) { 2748 struct bnx2x_vf_queue *q = vfq_get(vf, i); 2749
2750 if (!q) { 2751 BNX2X_ERR("q number %d was not allocated\n", i); 2752 return -EINVAL; 2753 } 2754
2755 q->index = i; 2756 q->cxt = &((base_cxt + i)->eth); 2757 q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i; 2758
2759 DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n", 2760 vf->abs_vfid, i, q->index, q->cid, q->cxt); 2761
2762 /* init SP objects */ 2763 bnx2x_vfq_init(bp, vf, q); 2764 } 2765 vf->state = VF_ACQUIRED; 2766 return 0; 2767 } 2768
2769 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) 2770 { 2771 struct bnx2x_func_init_params func_init = {0}; 2772 u16 flags = 0; 2773 int i; 2774
2775 /* the sb resources are initialized at this point, do the 2776 * FW/HW initializations 2777 */ 2778 for_each_vf_sb(vf, i) 2779 bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true, 2780 vf_igu_sb(vf, i), vf_igu_sb(vf, i)); 2781
2782 /* Sanity checks */ 2783 if (vf->state != VF_ACQUIRED) { 2784 DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n", 2785 vf->abs_vfid, vf->state); 2786 return -EINVAL; 2787 } 2788
2789 /* let FLR complete ...
*/ 2790 msleep(100); 2791 2792 /* FLR cleanup epilogue */ 2793 if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid)) 2794 return -EBUSY; 2795 2796 /* reset IGU VF statistics: MSIX */ 2797 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0); 2798 2799 /* vf init */ 2800 if (vf->cfg_flags & VF_CFG_STATS) 2801 flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ); 2802 2803 if (vf->cfg_flags & VF_CFG_TPA) 2804 flags |= FUNC_FLG_TPA; 2805 2806 if (is_vf_multi(vf)) 2807 flags |= FUNC_FLG_RSS; 2808 2809 /* function setup */ 2810 func_init.func_flgs = flags; 2811 func_init.pf_id = BP_FUNC(bp); 2812 func_init.func_id = FW_VF_HANDLE(vf->abs_vfid); 2813 func_init.fw_stat_map = vf->fw_stat_map; 2814 func_init.spq_map = vf->spq_map; 2815 func_init.spq_prod = 0; 2816 bnx2x_func_init(bp, &func_init); 2817 2818 /* Enable the vf */ 2819 bnx2x_vf_enable_access(bp, vf->abs_vfid); 2820 bnx2x_vf_enable_traffic(bp, vf); 2821 2822 /* queue protection table */ 2823 for_each_vfq(vf, i) 2824 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, 2825 vfq_qzone_id(vf, vfq_get(vf, i)), true); 2826 2827 vf->state = VF_ENABLED; 2828 2829 /* update vf bulletin board */ 2830 bnx2x_post_vf_bulletin(bp, vf->index); 2831 2832 return 0; 2833 } 2834 2835 struct set_vf_state_cookie { 2836 struct bnx2x_virtf *vf; 2837 u8 state; 2838 }; 2839 2840 static void bnx2x_set_vf_state(void *cookie) 2841 { 2842 struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie; 2843 2844 p->vf->state = p->state; 2845 } 2846 2847 /* VFOP close (teardown the queues, delete mcasts and close HW) */ 2848 static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) 2849 { 2850 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 2851 struct bnx2x_vfop_args_qx *qx = &vfop->args.qx; 2852 enum bnx2x_vfop_close_state state = vfop->state; 2853 struct bnx2x_vfop_cmd cmd = { 2854 .done = bnx2x_vfop_close, 2855 .block = false, 2856 }; 2857 2858 if (vfop->rc < 0) 2859 goto op_err; 2860 2861 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); 2862 2863 switch (state) { 2864 case BNX2X_VFOP_CLOSE_QUEUES: 2865 2866 if (++(qx->qid) < vf_rxq_count(vf)) { 2867 vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid); 2868 if (vfop->rc) 2869 goto op_err; 2870 return; 2871 } 2872 vfop->state = BNX2X_VFOP_CLOSE_HW; 2873 vfop->rc = 0; 2874 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); 2875 2876 case BNX2X_VFOP_CLOSE_HW: 2877 2878 /* disable the interrupts */ 2879 DP(BNX2X_MSG_IOV, "disabling igu\n"); 2880 bnx2x_vf_igu_disable(bp, vf); 2881 2882 /* disable the VF */ 2883 DP(BNX2X_MSG_IOV, "clearing qtbl\n"); 2884 bnx2x_vf_clr_qtbl(bp, vf); 2885 2886 goto op_done; 2887 default: 2888 bnx2x_vfop_default(state); 2889 } 2890 op_err: 2891 BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc); 2892 op_done: 2893 2894 /* need to make sure there are no outstanding stats ramrods which may 2895 * cause the device to access the VF's stats buffer which it will free 2896 * as soon as we return from the close flow. 
2897 */ 2898 { 2899 struct set_vf_state_cookie cookie; 2900 2901 cookie.vf = vf; 2902 cookie.state = VF_ACQUIRED; 2903 bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); 2904 } 2905 2906 DP(BNX2X_MSG_IOV, "set state to acquired\n"); 2907 bnx2x_vfop_end(bp, vf, vfop); 2908 op_pending: 2909 /* Not supported at the moment; Exists for macros only */ 2910 return; 2911 } 2912 2913 int bnx2x_vfop_close_cmd(struct bnx2x *bp, 2914 struct bnx2x_virtf *vf, 2915 struct bnx2x_vfop_cmd *cmd) 2916 { 2917 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 2918 if (vfop) { 2919 vfop->args.qx.qid = -1; /* loop */ 2920 bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES, 2921 bnx2x_vfop_close, cmd->done); 2922 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close, 2923 cmd->block); 2924 } 2925 return -ENOMEM; 2926 } 2927 2928 /* VF release can be called either: 1. The VF was acquired but 2929 * not enabled 2. the vf was enabled or in the process of being 2930 * enabled 2931 */ 2932 static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf) 2933 { 2934 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 2935 struct bnx2x_vfop_cmd cmd = { 2936 .done = bnx2x_vfop_release, 2937 .block = false, 2938 }; 2939 2940 DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); 2941 2942 if (vfop->rc < 0) 2943 goto op_err; 2944 2945 DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid, 2946 vf->state == VF_FREE ? "Free" : 2947 vf->state == VF_ACQUIRED ? "Acquired" : 2948 vf->state == VF_ENABLED ? "Enabled" : 2949 vf->state == VF_RESET ? "Reset" : 2950 "Unknown"); 2951 2952 switch (vf->state) { 2953 case VF_ENABLED: 2954 vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd); 2955 if (vfop->rc) 2956 goto op_err; 2957 return; 2958 2959 case VF_ACQUIRED: 2960 DP(BNX2X_MSG_IOV, "about to free resources\n"); 2961 bnx2x_vf_free_resc(bp, vf); 2962 DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); 2963 goto op_done; 2964 2965 case VF_FREE: 2966 case VF_RESET: 2967 /* do nothing */ 2968 goto op_done; 2969 default: 2970 bnx2x_vfop_default(vf->state); 2971 } 2972 op_err: 2973 BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc); 2974 op_done: 2975 bnx2x_vfop_end(bp, vf, vfop); 2976 } 2977 2978 static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf) 2979 { 2980 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 2981 enum bnx2x_vfop_rss_state state; 2982 2983 if (!vfop) { 2984 BNX2X_ERR("vfop was null\n"); 2985 return; 2986 } 2987 2988 state = vfop->state; 2989 bnx2x_vfop_reset_wq(vf); 2990 2991 if (vfop->rc < 0) 2992 goto op_err; 2993 2994 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); 2995 2996 switch (state) { 2997 case BNX2X_VFOP_RSS_CONFIG: 2998 /* next state */ 2999 vfop->state = BNX2X_VFOP_RSS_DONE; 3000 bnx2x_config_rss(bp, &vfop->op_p->rss); 3001 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); 3002 op_err: 3003 BNX2X_ERR("RSS error: rc %d\n", vfop->rc); 3004 op_done: 3005 case BNX2X_VFOP_RSS_DONE: 3006 bnx2x_vfop_end(bp, vf, vfop); 3007 return; 3008 default: 3009 bnx2x_vfop_default(state); 3010 } 3011 op_pending: 3012 return; 3013 } 3014 3015 int bnx2x_vfop_release_cmd(struct bnx2x *bp, 3016 struct bnx2x_virtf *vf, 3017 struct bnx2x_vfop_cmd *cmd) 3018 { 3019 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 3020 if (vfop) { 3021 bnx2x_vfop_opset(-1, /* use vf->state */ 3022 bnx2x_vfop_release, cmd->done); 3023 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release, 3024 cmd->block); 3025 } 3026 return -ENOMEM; 3027 } 3028 3029 int bnx2x_vfop_rss_cmd(struct bnx2x *bp, 3030 struct bnx2x_virtf *vf, 3031 
struct bnx2x_vfop_cmd *cmd) 3032 { 3033 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 3034
3035 if (vfop) { 3036 bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss, 3037 cmd->done); 3038 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss, 3039 cmd->block); 3040 } 3041 return -ENOMEM; 3042 } 3043
3044 /* VF release ~ VF close + VF release-resources 3045 * Release is the ultimate SW shutdown and is called whenever an 3046 * irrecoverable error is encountered. 3047 */ 3048 void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block) 3049 { 3050 struct bnx2x_vfop_cmd cmd = { 3051 .done = NULL, 3052 .block = block, 3053 }; 3054 int rc; 3055
3056 DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid); 3057 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); 3058
3059 rc = bnx2x_vfop_release_cmd(bp, vf, &cmd); 3060 if (rc) 3061 WARN(rc, 3062 "VF[%d] Failed to allocate resources for release op - rc=%d\n", 3063 vf->abs_vfid, rc); 3064 } 3065
3066 static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp, 3067 struct bnx2x_virtf *vf, u32 *sbdf) 3068 { 3069 *sbdf = vf->devfn | (vf->bus << 8); 3070 } 3071
3072 static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf, 3073 struct bnx2x_vf_bar_info *bar_info) 3074 { 3075 int n; 3076
3077 bar_info->nr_bars = bp->vfdb->sriov.nres; 3078 for (n = 0; n < bar_info->nr_bars; n++) 3079 bar_info->bars[n] = vf->bars[n]; 3080 } 3081
3082 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 3083 enum channel_tlvs tlv) 3084 { 3085 /* we don't lock the channel for unsupported tlvs */ 3086 if (!bnx2x_tlv_supported(tlv)) { 3087 BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n"); 3088 return; 3089 } 3090
3091 /* lock the channel */ 3092 mutex_lock(&vf->op_mutex); 3093
3094 /* record the locking op */ 3095 vf->op_current = tlv; 3096
3097 /* log the lock */ 3098 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n", 3099 vf->abs_vfid, tlv); 3100 } 3101
3102 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 3103 enum channel_tlvs expected_tlv) 3104 { 3105 enum channel_tlvs current_tlv; 3106
3107 if (!vf) { 3108 BNX2X_ERR("VF was %p\n", vf); 3109 return; 3110 } 3111
3112 current_tlv = vf->op_current; 3113
3114 /* we don't unlock the channel for unsupported tlvs */ 3115 if (!bnx2x_tlv_supported(expected_tlv)) 3116 return; 3117
3118 WARN(expected_tlv != vf->op_current, 3119 "lock mismatch: expected %d found %d", expected_tlv, 3120 vf->op_current); 3121
3122 /* clear the locking op */ 3123 vf->op_current = CHANNEL_TLV_NONE; 3124
3125 /* unlock the channel */ 3126 mutex_unlock(&vf->op_mutex); 3127
3128 /* log the unlock; use the tlv sampled before it was cleared */ 3129 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n", 3130 vf->abs_vfid, current_tlv); 3131 } 3132
3133 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) 3134 { 3135 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); 3136
3137 if (!IS_SRIOV(bp)) { 3138 BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. 
Check dmesg for errors in probe stage\n"); 3139 return -EINVAL; 3140 } 3141 3142 DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", 3143 num_vfs_param, BNX2X_NR_VIRTFN(bp)); 3144 3145 /* HW channel is only operational when PF is up */ 3146 if (bp->state != BNX2X_STATE_OPEN) { 3147 BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n"); 3148 return -EINVAL; 3149 } 3150 3151 /* we are always bound by the total_vfs in the configuration space */ 3152 if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) { 3153 BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n", 3154 num_vfs_param, BNX2X_NR_VIRTFN(bp)); 3155 num_vfs_param = BNX2X_NR_VIRTFN(bp); 3156 } 3157 3158 bp->requested_nr_virtfn = num_vfs_param; 3159 if (num_vfs_param == 0) { 3160 pci_disable_sriov(dev); 3161 return 0; 3162 } else { 3163 return bnx2x_enable_sriov(bp); 3164 } 3165 } 3166 #define IGU_ENTRY_SIZE 4 3167 3168 int bnx2x_enable_sriov(struct bnx2x *bp) 3169 { 3170 int rc = 0, req_vfs = bp->requested_nr_virtfn; 3171 int vf_idx, sb_idx, vfq_idx, qcount, first_vf; 3172 u32 igu_entry, address; 3173 u16 num_vf_queues; 3174 3175 if (req_vfs == 0) 3176 return 0; 3177 3178 first_vf = bp->vfdb->sriov.first_vf_in_pf; 3179 3180 /* statically distribute vf sb pool between VFs */ 3181 num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES, 3182 BP_VFDB(bp)->vf_sbs_pool / req_vfs); 3183 3184 /* zero previous values learned from igu cam */ 3185 for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) { 3186 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); 3187 3188 vf->sb_count = 0; 3189 vf_sb_count(BP_VF(bp, vf_idx)) = 0; 3190 } 3191 bp->vfdb->vf_sbs_pool = 0; 3192 3193 /* prepare IGU cam */ 3194 sb_idx = BP_VFDB(bp)->first_vf_igu_entry; 3195 address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE; 3196 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { 3197 for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) { 3198 igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT | 3199 vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT | 3200 IGU_REG_MAPPING_MEMORY_VALID; 3201 DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n", 3202 sb_idx, vf_idx); 3203 REG_WR(bp, address, igu_entry); 3204 sb_idx++; 3205 address += IGU_ENTRY_SIZE; 3206 } 3207 } 3208 3209 /* Reinitialize vf database according to igu cam */ 3210 bnx2x_get_vf_igu_cam_info(bp); 3211 3212 DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n", 3213 BP_VFDB(bp)->vf_sbs_pool, num_vf_queues); 3214 3215 qcount = 0; 3216 for_each_vf(bp, vf_idx) { 3217 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); 3218 3219 /* set local queue arrays */ 3220 vf->vfqs = &bp->vfdb->vfqs[qcount]; 3221 qcount += vf_sb_count(vf); 3222 bnx2x_iov_static_resc(bp, vf); 3223 } 3224 3225 /* prepare msix vectors in VF configuration space - the value in the 3226 * PCI configuration space should be the index of the last entry, 3227 * namely one less than the actual size of the table 3228 */ 3229 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { 3230 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); 3231 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, 3232 num_vf_queues - 1); 3233 DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n", 3234 vf_idx, num_vf_queues - 1); 3235 } 3236 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 3237 3238 /* enable sriov. This will probe all the VFs, and consequentially cause 3239 * the "acquire" messages to appear on the VF PF channel. 
3240 */ 3241 DP(BNX2X_MSG_IOV, "about to call enable sriov\n"); 3242 bnx2x_disable_sriov(bp); 3243 rc = pci_enable_sriov(bp->pdev, req_vfs); 3244 if (rc) { 3245 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); 3246 return rc; 3247 } 3248 DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs); 3249 return req_vfs; 3250 } 3251 3252 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) 3253 { 3254 int vfidx; 3255 struct pf_vf_bulletin_content *bulletin; 3256 3257 DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n"); 3258 for_each_vf(bp, vfidx) { 3259 bulletin = BP_VF_BULLETIN(bp, vfidx); 3260 if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN) 3261 bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0); 3262 } 3263 } 3264 3265 void bnx2x_disable_sriov(struct bnx2x *bp) 3266 { 3267 pci_disable_sriov(bp->pdev); 3268 } 3269 3270 static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, 3271 struct bnx2x_virtf **vf, 3272 struct pf_vf_bulletin_content **bulletin) 3273 { 3274 if (bp->state != BNX2X_STATE_OPEN) { 3275 BNX2X_ERR("vf ndo called though PF is down\n"); 3276 return -EINVAL; 3277 } 3278 3279 if (!IS_SRIOV(bp)) { 3280 BNX2X_ERR("vf ndo called though sriov is disabled\n"); 3281 return -EINVAL; 3282 } 3283 3284 if (vfidx >= BNX2X_NR_VIRTFN(bp)) { 3285 BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n", 3286 vfidx, BNX2X_NR_VIRTFN(bp)); 3287 return -EINVAL; 3288 } 3289 3290 /* init members */ 3291 *vf = BP_VF(bp, vfidx); 3292 *bulletin = BP_VF_BULLETIN(bp, vfidx); 3293 3294 if (!*vf) { 3295 BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n", 3296 vfidx); 3297 return -EINVAL; 3298 } 3299 3300 if (!(*vf)->vfqs) { 3301 BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n", 3302 vfidx); 3303 return -EINVAL; 3304 } 3305 3306 if (!*bulletin) { 3307 BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n", 3308 vfidx); 3309 return -EINVAL; 3310 } 3311 3312 return 0; 3313 } 3314 3315 int bnx2x_get_vf_config(struct net_device *dev, int vfidx, 3316 struct ifla_vf_info *ivi) 3317 { 3318 struct bnx2x *bp = netdev_priv(dev); 3319 struct bnx2x_virtf *vf = NULL; 3320 struct pf_vf_bulletin_content *bulletin = NULL; 3321 struct bnx2x_vlan_mac_obj *mac_obj; 3322 struct bnx2x_vlan_mac_obj *vlan_obj; 3323 int rc; 3324 3325 /* sanity and init */ 3326 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 3327 if (rc) 3328 return rc; 3329 mac_obj = &bnx2x_leading_vfq(vf, mac_obj); 3330 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); 3331 if (!mac_obj || !vlan_obj) { 3332 BNX2X_ERR("VF partially initialized\n"); 3333 return -EINVAL; 3334 } 3335 3336 ivi->vf = vfidx; 3337 ivi->qos = 0; 3338 ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */ 3339 ivi->spoofchk = 1; /*always enabled */ 3340 if (vf->state == VF_ENABLED) { 3341 /* mac and vlan are in vlan_mac objects */ 3342 if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj))) 3343 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, 3344 0, ETH_ALEN); 3345 if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj))) 3346 vlan_obj->get_n_elements(bp, vlan_obj, 1, 3347 (u8 *)&ivi->vlan, 0, 3348 VLAN_HLEN); 3349 } else { 3350 /* mac */ 3351 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) 3352 /* mac configured by ndo so its in bulletin board */ 3353 memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); 3354 else 3355 /* function has not been loaded yet. 
Show mac as 0s */ 3356 memset(&ivi->mac, 0, ETH_ALEN); 3357 3358 /* vlan */ 3359 if (bulletin->valid_bitmap & (1 << VLAN_VALID)) 3360 /* vlan configured by ndo so its in bulletin board */ 3361 memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN); 3362 else 3363 /* function has not been loaded yet. Show vlans as 0s */ 3364 memset(&ivi->vlan, 0, VLAN_HLEN); 3365 } 3366 3367 return 0; 3368 } 3369 3370 /* New mac for VF. Consider these cases: 3371 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and 3372 * supply at acquire. 3373 * 2. VF has already been acquired but has not yet initialized - store in local 3374 * bulletin board. mac will be posted on VF bulletin board after VF init. VF 3375 * will configure this mac when it is ready. 3376 * 3. VF has already initialized but has not yet setup a queue - post the new 3377 * mac on VF's bulletin board right now. VF will configure this mac when it 3378 * is ready. 3379 * 4. VF has already set a queue - delete any macs already configured for this 3380 * queue and manually config the new mac. 3381 * In any event, once this function has been called refuse any attempts by the 3382 * VF to configure any mac for itself except for this mac. In case of a race 3383 * where the VF fails to see the new post on its bulletin board before sending a 3384 * mac configuration request, the PF will simply fail the request and VF can try 3385 * again after consulting its bulletin board. 3386 */ 3387 int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) 3388 { 3389 struct bnx2x *bp = netdev_priv(dev); 3390 int rc, q_logical_state; 3391 struct bnx2x_virtf *vf = NULL; 3392 struct pf_vf_bulletin_content *bulletin = NULL; 3393 3394 /* sanity and init */ 3395 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 3396 if (rc) 3397 return rc; 3398 if (!is_valid_ether_addr(mac)) { 3399 BNX2X_ERR("mac address invalid\n"); 3400 return -EINVAL; 3401 } 3402 3403 /* update PF's copy of the VF's bulletin. 
Will no longer accept mac 3404 * configuration requests from the vf unless they match this mac 3405 */ 3406 bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID; 3407 memcpy(bulletin->mac, mac, ETH_ALEN); 3408
3409 /* Post update on VF's bulletin board */ 3410 rc = bnx2x_post_vf_bulletin(bp, vfidx); 3411 if (rc) { 3412 BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx); 3413 return rc; 3414 } 3415
3416 q_logical_state = 3417 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); 3418 if (vf->state == VF_ENABLED && 3419 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { 3420 /* configure the mac in device on this vf's queue */ 3421 unsigned long ramrod_flags = 0; 3422 struct bnx2x_vlan_mac_obj *mac_obj = 3423 &bnx2x_leading_vfq(vf, mac_obj); 3424
3425 rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); 3426 if (rc) 3427 return rc; 3428
3429 /* must lock vfpf channel to protect against vf flows */ 3430 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 3431
3432 /* remove existing eth macs */ 3433 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); 3434 if (rc) { 3435 BNX2X_ERR("failed to delete eth macs\n"); 3436 rc = -EINVAL; 3437 goto out; 3438 } 3439
3440 /* remove existing uc list macs */ 3441 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true); 3442 if (rc) { 3443 BNX2X_ERR("failed to delete uc_list macs\n"); 3444 rc = -EINVAL; 3445 goto out; 3446 } 3447
3448 /* configure the new mac to device */ 3449 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 3450 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, 3451 BNX2X_ETH_MAC, &ramrod_flags); 3452
3453 out: 3454 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 3455 } 3456
3457 return 0; 3458 }
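/* bnx2x_set_vf_mac() backs the .ndo_set_vf_mac callback, so it is normally
 * reached through the host admin tools, e.g. (illustrative):
 *
 *	ip link set <pf-dev> vf <n> mac 00:11:22:33:44:55
 *
 * bnx2x_set_vf_vlan() below is the matching .ndo_set_vf_vlan handler and
 * uses the same bulletin board to reach the VF.
 */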
3459
3460 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) 3461 { 3462 struct bnx2x_queue_state_params q_params = {NULL}; 3463 struct bnx2x_vlan_mac_ramrod_params ramrod_param; 3464 struct bnx2x_queue_update_params *update_params; 3465 struct pf_vf_bulletin_content *bulletin = NULL; 3466 struct bnx2x_rx_mode_ramrod_params rx_ramrod; 3467 struct bnx2x *bp = netdev_priv(dev); 3468 struct bnx2x_vlan_mac_obj *vlan_obj; 3469 unsigned long vlan_mac_flags = 0; 3470 unsigned long ramrod_flags = 0; 3471 struct bnx2x_virtf *vf = NULL; 3472 unsigned long accept_flags; 3473 int rc; 3474
3475 /* sanity and init */ 3476 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 3477 if (rc) 3478 return rc; 3479
3480 if (vlan > 4095) { 3481 BNX2X_ERR("illegal vlan value %d\n", vlan); 3482 return -EINVAL; 3483 } 3484
3485 DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n", 3486 vfidx, vlan, qos); 3487
3488 /* update PF's copy of the VF's bulletin. No point in posting the vlan 3489 * to the VF since it doesn't have anything to do with it. But it is 3490 * useful to store it here in case the VF is not up yet and we can only 3491 * configure the vlan later when it is. Treat vlan id 0 as removal of 3492 * the host tag. 3493 */ 3494 if (vlan > 0) 3495 bulletin->valid_bitmap |= 1 << VLAN_VALID; 3496 else 3497 bulletin->valid_bitmap &= ~(1 << VLAN_VALID); 3498 bulletin->vlan = vlan; 3499
3500 /* is vf initialized and queue set up? */ 3501 if (vf->state != VF_ENABLED || 3502 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != 3503 BNX2X_Q_LOGICAL_STATE_ACTIVE) 3504 return rc; 3505
3506 /* configure the vlan in device on this vf's queue */ 3507 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); 3508 rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj)); 3509 if (rc) 3510 return rc; 3511
3512 /* must lock vfpf channel to protect against vf flows */ 3513 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3514
3515 /* remove existing vlans */ 3516 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 3517 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, 3518 &ramrod_flags); 3519 if (rc) { 3520 BNX2X_ERR("failed to delete vlans\n"); 3521 rc = -EINVAL; 3522 goto out; 3523 } 3524
3525 /* need to remove/add the VF's accept_any_vlan bit */ 3526 accept_flags = bnx2x_leading_vfq(vf, accept_flags); 3527 if (vlan) 3528 clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); 3529 else 3530 set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); 3531
3532 bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, 3533 accept_flags); 3534 bnx2x_leading_vfq(vf, accept_flags) = accept_flags; 3535 bnx2x_config_rx_mode(bp, &rx_ramrod); 3536
3537 /* configure the new vlan to device */ 3538 memset(&ramrod_param, 0, sizeof(ramrod_param)); 3539 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 3540 ramrod_param.vlan_mac_obj = vlan_obj; 3541 ramrod_param.ramrod_flags = ramrod_flags; 3542 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 3543 &ramrod_param.user_req.vlan_mac_flags); 3544 ramrod_param.user_req.u.vlan.vlan = vlan; 3545 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; 3546 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); 3547 if (rc) { 3548 BNX2X_ERR("failed to configure vlan\n"); 3549 rc = -EINVAL; 3550 goto out; 3551 } 3552
3553 /* send queue update ramrod to configure default vlan and silent 3554 * vlan removal 3555 */ 3556 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 3557 q_params.cmd = BNX2X_Q_CMD_UPDATE; 3558 q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); 3559 update_params = &q_params.params.update; 3560 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, 3561 &update_params->update_flags); 3562 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, 3563 &update_params->update_flags); 3564 if (vlan == 0) { 3565 /* if vlan is 0 then we want to leave the VF traffic 3566 * untagged, and leave the incoming traffic untouched 3567 * (i.e. do not remove any vlan tags). 3568 */ 3569 __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, 3570 &update_params->update_flags); 3571 __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 3572 &update_params->update_flags); 3573 } else { 3574 /* configure default vlan to vf queue and set silent 3575 * vlan removal (the vf remains unaware of this vlan). 3576 */ 3577 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, 3578 &update_params->update_flags); 3579 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 3580 &update_params->update_flags); 3581 update_params->def_vlan = vlan; 3582 update_params->silent_removal_value = 3583 vlan & VLAN_VID_MASK; 3584 update_params->silent_removal_mask = VLAN_VID_MASK; 3585 } 3586
3587 /* Update the Queue state */ 3588 rc = bnx2x_queue_state_change(bp, &q_params); 3589 if (rc) { 3590 BNX2X_ERR("Failed to configure default VLAN\n"); 3591 goto out; 3592 } 3593
3594
3595 /* clear the flag indicating that this VF needs its vlan 3596 * (will only be set if the HV configured the vlan before the vf was 3597 * up and we were called because the VF came up later) 3598 */ 3599 out: 3600 vf->cfg_flags &= ~VF_CFG_VLAN; 3601 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3602
3603 return rc; 3604 }
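/* A bulletin is trusted by the VF only once its crc checks out. A sketch of
 * the consumer side (this is what bnx2x_sample_bulletin() below implements):
 *
 *	bulletin = bp->pf2vf_bulletin->content;
 *	if (bulletin.crc != bnx2x_crc_vf_bulletin(bp, &bulletin))
 *		re-sample - the PF may have posted mid-read;
 */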
3605
3606 /* crc is the first field in the bulletin board. Compute the crc over the 3607 * entire bulletin board excluding the crc field itself. Use the length field 3608 * as the Bulletin Board was posted by a PF with possibly a different version 3609 * from the vf which will sample it. Therefore, the length is computed by the 3610 * PF and then used blindly by the VF. 3611 */ 3612 u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, 3613 struct pf_vf_bulletin_content *bulletin) 3614 { 3615 return crc32(BULLETIN_CRC_SEED, 3616 ((u8 *)bulletin) + sizeof(bulletin->crc), 3617 bulletin->length - sizeof(bulletin->crc)); 3618 } 3619
3620 /* Check for new posts on the bulletin board */ 3621 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) 3622 { 3623 struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content; 3624 int attempts; 3625
3626 /* bulletin board hasn't changed since last sample */ 3627 if (bp->old_bulletin.version == bulletin.version) 3628 return PFVF_BULLETIN_UNCHANGED; 3629
3630 /* validate crc of new bulletin board */ 3631 if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) { 3632 /* sampling the structure in mid-post may result in corrupted 3633 * data; validate the crc to ensure coherency. 3634 */ 3635 for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) { 3636 bulletin = bp->pf2vf_bulletin->content; 3637 if (bulletin.crc == bnx2x_crc_vf_bulletin(bp, 3638 &bulletin)) 3639 break; 3640 BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n", 3641 bulletin.crc, 3642 bnx2x_crc_vf_bulletin(bp, &bulletin)); 3643 } 3644 if (attempts >= BULLETIN_ATTEMPTS) { 3645 BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n", 3646 attempts); 3647 return PFVF_BULLETIN_CRC_ERR; 3648 } 3649 }
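	/* From this point the sampled copy is known to be internally
	 * consistent; the checks below diff it against bp->old_bulletin so
	 * that only fields which actually changed are acted upon.
	 */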
3650
3651 /* the mac address in bulletin board is valid and is new */ 3652 if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID && 3653 !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) { 3654 /* update new mac to net device */ 3655 memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN); 3656 } 3657
3658 /* the vlan in bulletin board is valid and is new */ 3659 if (bulletin.valid_bitmap & 1 << VLAN_VALID) 3660 memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN); 3661
3662 /* copy new bulletin board to bp */ 3663 bp->old_bulletin = bulletin; 3664
3665 return PFVF_BULLETIN_UPDATED; 3666 } 3667
3668 void bnx2x_timer_sriov(struct bnx2x *bp) 3669 { 3670 bnx2x_sample_bulletin(bp); 3671
3672 /* if the channel is down we need to self-destruct */ 3673 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { 3674 smp_mb__before_clear_bit(); 3675 set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, 3676 &bp->sp_rtnl_state); 3677 smp_mb__after_clear_bit(); 3678 schedule_delayed_work(&bp->sp_rtnl_task, 0); 3679 } 3680 } 3681
3682 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) 3683 { 3684 /* vf doorbells are embedded within the regview */ 3685 return bp->regview + PXP_VF_ADDR_DB_START; 3686 } 3687
3688 int bnx2x_vf_pci_alloc(struct bnx2x *bp) 3689 { 3690 mutex_init(&bp->vf2pf_mutex); 3691
3692 /* allocate vf2pf mailbox for vf to pf channel */ 3693 BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping, 3694 sizeof(struct bnx2x_vf_mbx_msg)); 3695
3696 /* allocate pf 2 vf bulletin board */ 3697 BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping, 3698 sizeof(union pf_vf_bulletin)); 3699
3700 return 0; 3701
3702 alloc_mem_err: 3703 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, 3704 sizeof(struct bnx2x_vf_mbx_msg)); 3705 BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping, 3706 sizeof(union pf_vf_bulletin)); 3707 return -ENOMEM; 3708 } 3709
3710 void bnx2x_iov_channel_down(struct bnx2x *bp) 3711 { 3712 int vf_idx; 3713 struct pf_vf_bulletin_content *bulletin; 3714
3715 if (!IS_SRIOV(bp)) 3716 return; 3717
3718 for_each_vf(bp, vf_idx) { 3719 /* locate this VF's bulletin board and update the channel down 3720 * bit 3721 */ 3722 bulletin = BP_VF_BULLETIN(bp, vf_idx); 3723 bulletin->valid_bitmap |= 1 << CHANNEL_DOWN; 3724
3725 /* update vf bulletin board */ 3726 bnx2x_post_vf_bulletin(bp, vf_idx); 3727 } 3728 } 3729
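/* Typical PF-side SR-IOV lifecycle, as wired up by the rest of the driver
 * (sketch; the call sites are expected in bnx2x_main.c):
 *
 *	probe:	bnx2x_iov_init_one()	- parse the SR-IOV capability,
 *					  allocate the vf database
 *	load:	bnx2x_iov_alloc_mem() / bnx2x_iov_nic_init()
 *	sysfs:	bnx2x_sriov_configure()	- pci_enable_sriov() /
 *					  pci_disable_sriov()
 *	unload:	bnx2x_iov_chip_cleanup() - release all VFs
 *	remove:	bnx2x_iov_remove_one()	- disable VF access, free the
 *					  vf database
 */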