/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>

/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}

static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}
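/* Ack a VF status block on the VF's behalf. Since the PF cannot reach the
 * VF's BAR directly, the producer-update command is pushed through the IGU
 * command registers over GRC: the data register is written first, then the
 * control word, whose FID field carries the VF id so the IGU attributes
 * the access to the VF rather than to the PF.
 */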
static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
				u8 update)
{
	/* acking a VF sb through the PF - use the GRC */
	u32 ctl;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
	mmiowb();
	barrier();

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();
}

/* VFOP - VF slow-path operation support */

#define BNX2X_VFOP_FILTER_ADD_CNT_MAX		0x10000
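/* Each VFOP below is a small state machine. Its handler is entered once per
 * state, typically posts an asynchronous ramrod, and is re-entered through
 * its 'done' callback when that ramrod completes. bnx2x_vfop_finalize()
 * (see bnx2x_sriov.h) decides whether to fall through to the next case
 * immediately, return and wait for a completion, or terminate the op - so
 * the missing 'break's in the switch statements below are intentional.
 */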
/* VFOP operations states */
enum bnx2x_vfop_qctor_state {
	BNX2X_VFOP_QCTOR_INIT,
	BNX2X_VFOP_QCTOR_SETUP,
	BNX2X_VFOP_QCTOR_INT_EN
};

enum bnx2x_vfop_qdtor_state {
	BNX2X_VFOP_QDTOR_HALT,
	BNX2X_VFOP_QDTOR_TERMINATE,
	BNX2X_VFOP_QDTOR_CFCDEL,
	BNX2X_VFOP_QDTOR_DONE
};

enum bnx2x_vfop_vlan_mac_state {
	BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
	BNX2X_VFOP_VLAN_MAC_CLEAR,
	BNX2X_VFOP_VLAN_MAC_CHK_DONE,
	BNX2X_VFOP_MAC_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST_0
};

enum bnx2x_vfop_qsetup_state {
	BNX2X_VFOP_QSETUP_CTOR,
	BNX2X_VFOP_QSETUP_VLAN0,
	BNX2X_VFOP_QSETUP_DONE
};

enum bnx2x_vfop_mcast_state {
	BNX2X_VFOP_MCAST_DEL,
	BNX2X_VFOP_MCAST_ADD,
	BNX2X_VFOP_MCAST_CHK_DONE
};

enum bnx2x_vfop_qflr_state {
	BNX2X_VFOP_QFLR_CLR_VLAN,
	BNX2X_VFOP_QFLR_CLR_MAC,
	BNX2X_VFOP_QFLR_TERMINATE,
	BNX2X_VFOP_QFLR_DONE
};

enum bnx2x_vfop_flr_state {
	BNX2X_VFOP_FLR_QUEUES,
	BNX2X_VFOP_FLR_HW
};

enum bnx2x_vfop_close_state {
	BNX2X_VFOP_CLOSE_QUEUES,
	BNX2X_VFOP_CLOSE_HW
};

enum bnx2x_vfop_rxmode_state {
	BNX2X_VFOP_RXMODE_CONFIG,
	BNX2X_VFOP_RXMODE_DONE
};

enum bnx2x_vfop_qteardown_state {
	BNX2X_VFOP_QTEARDOWN_RXMODE,
	BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
	BNX2X_VFOP_QTEARDOWN_CLR_MAC,
	BNX2X_VFOP_QTEARDOWN_CLR_MCAST,
	BNX2X_VFOP_QTEARDOWN_QDTOR,
	BNX2X_VFOP_QTEARDOWN_DONE
};

enum bnx2x_vfop_rss_state {
	BNX2X_VFOP_RSS_CONFIG,
	BNX2X_VFOP_RSS_DONE
};

#define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0)

void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	DP(BNX2X_MSG_IOV,
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->tx.sb_cq_index,
	   init_params->tx.hc_rate,
	   setup_params->flags,
	   setup_params->txq_params.traffic_type);
}

void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->rx.sb_cq_index,
	   init_params->rx.hc_rate,
	   setup_params->gen_params.mtu,
	   rxq_params->buf_sz,
	   rxq_params->sge_buf_sz,
	   rxq_params->max_sges_pkt,
	   rxq_params->tpa_agg_sz,
	   setup_params->flags,
	   rxq_params->drop_flags,
	   rxq_params->cache_line_log);
}

void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vfop_qctor_params *p,
			   unsigned long q_type)
{
	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

	/* INIT */

	/* Enable host coalescing in the transition to INIT state */
	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

	/* FW SB ID */
	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

	/* context */
	init_p->cxts[0] = q->cxt;

	/* SETUP */

	/* Setup-op general parameters */
	setup_p->gen_params.spcl_id = vf->sp_cl_id;
	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);

	/* Setup-op pause params:
	 * Nothing to do, the pause thresholds are set by default to 0 which
	 * effectively turns off the feature for this queue. We don't want
	 * one queue (VF) to interfere with another queue (another VF).
	 */
	if (vf->cfg_flags & VF_CFG_FW_FC)
		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
			  vf->abs_vfid);
	/* Setup-op flags:
	 * collect statistics, zero statistics, local-switching, security,
	 * OV for Flex10, RSS and MCAST for leading
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

	/* for VFs, enable tx switching, bd coherency, and mac address
	 * anti-spoofing
	 */
	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

	/* Setup-op rx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
	}

	/* Setup-op tx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	}
}

/* VFOP queue construction */
static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qctor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QCTOR_INIT:

		/* has this queue already been opened? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
			DP(BNX2X_MSG_IOV,
			   "Entered qctor but queue was already up. Aborting gracefully\n");
			goto op_done;
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_SETUP;

		q_params->cmd = BNX2X_Q_CMD_INIT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_SETUP:
		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_INT_EN;

		/* copy pre-prepared setup params to the queue-state params */
		vfop->op_p->qctor.qstate.params.setup =
			vfop->op_p->qctor.prep_qsetup;

		q_params->cmd = BNX2X_Q_CMD_SETUP;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_INT_EN:

		/* enable interrupts */
		bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
				    USTORM_ID, 0, IGU_INT_ENABLE, 0);
		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
op_done:
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qctor.qid = qid;
		vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);

		bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
				 bnx2x_vfop_qctor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue destruction */
static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qdtor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QDTOR_HALT:

		/* has this queue already been stopped? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_STOPPED) {
			DP(BNX2X_MSG_IOV,
			   "Entered qdtor but queue was already stopped. Aborting gracefully\n");
			/* next state */
			vfop->state = BNX2X_VFOP_QDTOR_DONE;

			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;

		q_params->cmd = BNX2X_Q_CMD_HALT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_TERMINATE:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;

		q_params->cmd = BNX2X_Q_CMD_TERMINATE;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_CFCDEL:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_DONE;

		q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
	BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
op_done:
	case BNX2X_VFOP_QDTOR_DONE:
		/* invalidate the context */
		if (qdtor->cxt) {
			qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
			qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
		}
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_queue_state_params *qstate =
			&vf->op_params.qctor.qstate;

		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qdtor.qid = qid;
		vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);

		bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
				 bnx2x_vfop_qdtor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
					     cmd->block);
	} else {
		BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid);
		return -ENOMEM;
	}
}

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
	if (vf) {
		/* the first igu entry belonging to VFs of this PF */
		if (!BP_VFDB(bp)->first_vf_igu_entry)
			BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;

		/* the first igu entry belonging to this VF */
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;

		++vf_sb_count(vf);
		++vf->sb_count;
	}
	BP_VFDB(bp)->vf_sbs_pool++;
}

/* VFOP MAC/VLAN helpers */
static inline void bnx2x_vfop_credit(struct bnx2x *bp,
				     struct bnx2x_vfop *vfop,
				     struct bnx2x_vlan_mac_obj *obj)
{
	struct bnx2x_vfop_args_filters *args = &vfop->args.filters;

	/* update credit only if there is no error
	 * and a valid credit counter
	 */
	if (!vfop->rc && args->credit) {
		struct list_head *pos;
		int read_lock;
		int cnt = 0;

		read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
		if (read_lock)
			DP(BNX2X_MSG_SP, "Failed to take vlan mac read lock; continuing anyway\n");

		list_for_each(pos, &obj->head)
			cnt++;

		if (!read_lock)
			bnx2x_vlan_mac_h_read_unlock(bp, obj);

		atomic_set(args->credit, cnt);
	}
}

static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
				   struct bnx2x_vfop_filter *pos,
				   struct bnx2x_vlan_mac_data *user_req)
{
	user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
				   BNX2X_VLAN_MAC_DEL;

	switch (pos->type) {
	case BNX2X_VFOP_FILTER_MAC:
		memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
		break;
	case BNX2X_VFOP_FILTER_VLAN:
		user_req->u.vlan.vlan = pos->vid;
		break;
	default:
		BNX2X_ERR("Invalid filter type, skipping\n");
		return 1;
	}
	return 0;
}
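/* Apply a list of MAC/VLAN filters to the hardware. Each filter that is
 * configured successfully is moved to a local rollback list; if a ramrod
 * fails, or more rules were added than filters->add_cnt allows, every
 * filter applied so far is re-issued with the opposite command to undo it.
 * On success add_cnt is updated to the net number of rules added.
 */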
static int bnx2x_vfop_config_list(struct bnx2x *bp,
				  struct bnx2x_vfop_filters *filters,
				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
{
	struct bnx2x_vfop_filter *pos, *tmp;
	struct list_head rollback_list, *filters_list = &filters->head;
	struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
	int rc = 0, cnt = 0;

	INIT_LIST_HEAD(&rollback_list);

	list_for_each_entry_safe(pos, tmp, filters_list, link) {
		if (bnx2x_vfop_set_user_req(bp, pos, user_req))
			continue;

		rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (rc >= 0) {
			cnt += pos->add ? 1 : -1;
			list_move(&pos->link, &rollback_list);
			rc = 0;
		} else if (rc == -EEXIST) {
			rc = 0;
		} else {
			BNX2X_ERR("Failed to add a new vlan_mac command\n");
			break;
		}
	}

	/* rollback if error or too many rules added */
	if (rc || cnt > filters->add_cnt) {
		BNX2X_ERR("error or too many rules added. Performing rollback\n");
		list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
			pos->add = !pos->add;	/* reverse op */
			bnx2x_vfop_set_user_req(bp, pos, user_req);
			bnx2x_config_vlan_mac(bp, vlan_mac);
			list_del(&pos->link);
		}
		cnt = 0;
		if (!rc)
			rc = -EINVAL;
	}
	filters->add_cnt = cnt;
	return rc;
}
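/* The VLAN/MAC op below serves several flavors, selected by the opset used
 * in the *_cmd wrappers that follow: a single add/del (CONFIG_SINGLE), a
 * delete-all (CLEAR), and list configuration (MAC/VLAN_CONFIG_LIST). All of
 * them funnel into CHK_DONE, which polls the object's pending bit before
 * the op completes and the credit counter is refreshed.
 */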
/* VFOP set VLAN/MAC */
static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
	struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
	struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;

	enum bnx2x_vfop_vlan_mac_state state = vfop->state;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	bnx2x_vfop_reset_wq(vf);

	switch (state) {
	case BNX2X_VFOP_VLAN_MAC_CLEAR:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do delete */
		vfop->rc = obj->delete_all(bp, obj,
					   &vlan_mac->user_req.vlan_mac_flags,
					   &vlan_mac->ramrod_flags);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do config */
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (vfop->rc == -EEXIST)
			vfop->rc = 0;

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
		vfop->rc = !!obj->raw.check_pending(&obj->raw);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MAC_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do list config */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (vfop->rc)
			goto op_err;

		set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do list config */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (!vfop->rc) {
			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		}
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
op_done:
	kfree(filters);
	bnx2x_vfop_credit(bp, vfop, obj);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

struct bnx2x_vfop_vlan_mac_flags {
	bool drv_only;
	bool dont_consume;
	bool single_cmd;
	bool add;
};

static void
bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
				struct bnx2x_vfop_vlan_mac_flags *flags)
{
	struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;

	memset(ramrod, 0, sizeof(*ramrod));

	/* ramrod flags */
	if (flags->drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
	if (flags->single_cmd)
		set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);

	/* mac_vlan flags */
	if (flags->dont_consume)
		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);

	/* cmd */
	ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
}

static inline void
bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
			   struct bnx2x_vfop_vlan_mac_flags *flags)
{
	bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
	set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
}

static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
				     struct bnx2x_virtf *vf,
				     struct bnx2x_vfop_cmd *cmd,
				     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single */
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false /* don't care */,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}
int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    struct bnx2x_vfop_filters *macs,
			    int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = macs,
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false, /* don't care since only the items in
				       * the filters list affect the sp
				       * operation, not the list itself
				       */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    int qid, u16 vid, bool add)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = false,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = add,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
		ramrod->user_req.u.vlan.vlan = vid;

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      struct bnx2x_vfop_cmd *cmd,
				      int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false,	/* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
			     struct bnx2x_virtf *vf,
			     struct bnx2x_vfop_cmd *cmd,
			     struct bnx2x_vfop_filters *vlans,
			     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = vlans,
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false,	/* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
			atomic_read(filters.credit);

		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}
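/* Queue setup composes two of the ops above: the queue constructor, then,
 * for the leading queue only, an 'add vlan 0' so untagged traffic is
 * accepted. Completion of each stage re-enters bnx2x_vfop_qsetup through
 * the command's 'done' callback.
 */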
/* VFOP queue setup (queue constructor + set vlan 0) */
static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qctor.qid;
	enum bnx2x_vfop_qsetup_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_qsetup,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QSETUP_CTOR:
		/* init the queue ctor command */
		vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
		vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QSETUP_VLAN0:
		/* skip if non-leading or FPGA/EMU */
		if (qid)
			goto op_done;

		/* init the queue set-vlan command (for vlan 0) */
		vfop->state = BNX2X_VFOP_QSETUP_DONE;
		vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QSETUP_DONE:
		vf->cfg_flags |= VF_CFG_VLAN;
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
			&bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}

int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qctor.qid = qid;

		bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
				 bnx2x_vfop_qsetup, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
					     cmd->block);
	}
	return -ENOMEM;
}
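/* During FLR the clear-all commands below are issued as driver-only
 * (drv_only == true, which sets RAMROD_DRV_CLR_ONLY): the FLR has already
 * reset the device, so only the driver's bookkeeping needs to be purged.
 */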
/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qflr_state state = vfop->state;
	struct bnx2x_queue_state_params *qstate;
	struct bnx2x_vfop_cmd cmd;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qflr;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QFLR_CLR_VLAN:
		/* vlan-clear-all: driver-only, don't consume credit */
		vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)))
			vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd,
							      qid, true);
		if (vfop->rc)
			goto op_err;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QFLR_CLR_MAC:
		/* mac-clear-all: driver-only, consume credit */
		vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)))
			vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd,
							     qid, true);
		DP(BNX2X_MSG_IOV,
		   "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d\n",
		   vf->abs_vfid, vfop->rc);
		if (vfop->rc)
			goto op_err;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QFLR_TERMINATE:
		qstate = &vfop->op_p->qctor.qstate;
		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
		vfop->state = BNX2X_VFOP_QFLR_DONE;

		DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
		   vf->abs_vfid, qstate->q_obj->state);

		if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
			qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
			qstate->cmd = BNX2X_Q_CMD_TERMINATE;
			vfop->rc = bnx2x_queue_state_change(bp, qstate);
			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
		} else {
			goto op_done;
		}

op_err:
	BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QFLR_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
			       struct bnx2x_virtf *vf,
			       struct bnx2x_vfop_cmd *cmd,
			       int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qx.qid = qid;
		bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
				 bnx2x_vfop_qflr, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
					     cmd->block);
	}
	return -ENOMEM;
}
/* VFOP multi-casts */
static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
	struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
	struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
	enum bnx2x_vfop_mcast_state state = vfop->state;
	int i;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_MCAST_DEL:
		/* clear existing mcasts */
		vfop->state = (args->mc_num) ? BNX2X_VFOP_MCAST_ADD
					     : BNX2X_VFOP_MCAST_CHK_DONE;
		mcast->mcast_list_len = vf->mcast_list_len;
		vf->mcast_list_len = args->mc_num;
		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_MCAST_ADD:
		if (raw->check_pending(raw))
			goto op_pending;

		/* update mcast list on the ramrod params */
		INIT_LIST_HEAD(&mcast->mcast_list);
		for (i = 0; i < args->mc_num; i++)
			list_add_tail(&(args->mc[i].link),
				      &mcast->mcast_list);
		mcast->mcast_list_len = args->mc_num;

		/* add new mcasts */
		vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
		vfop->rc = bnx2x_config_mcast(bp, mcast,
					      BNX2X_MCAST_CMD_ADD);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MCAST_CHK_DONE:
		vfop->rc = raw->check_pending(raw) ? 1 : 0;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
op_done:
	kfree(args->mc);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 bnx2x_mac_addr_t *mcasts,
			 int mcast_num, bool drv_only)
{
	struct bnx2x_vfop *vfop = NULL;
	size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
	struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
						   NULL;

	if (!mc_sz || mc) {
		vfop = bnx2x_vfop_add(bp, vf);
		if (vfop) {
			int i;
			struct bnx2x_mcast_ramrod_params *ramrod =
				&vf->op_params.mcast;

			/* set ramrod params */
			memset(ramrod, 0, sizeof(*ramrod));
			ramrod->mcast_obj = &vf->mcast_obj;
			if (drv_only)
				set_bit(RAMROD_DRV_CLR_ONLY,
					&ramrod->ramrod_flags);

			/* copy mcasts pointers */
			vfop->args.mc_list.mc_num = mcast_num;
			vfop->args.mc_list.mc = mc;
			for (i = 0; i < mcast_num; i++)
				mc[i].mac = mcasts[i];

			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
					 bnx2x_vfop_mcast, cmd->done);
			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
						     cmd->block);
		} else {
			kfree(mc);
		}
	}
	return -ENOMEM;
}
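/* The rx-mode op programs the accept flags for a VF queue. Note that the
 * same accept_flags are applied to both the Rx and the Tx (tx-switching)
 * paths, and that completion is tracked through the
 * BNX2X_FILTER_RX_MODE_PENDING bit in vf->filter_state.
 */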
/* VFOP rx-mode */
static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
	enum bnx2x_vfop_rxmode_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_RXMODE_CONFIG:
		/* next state */
		vfop->state = BNX2X_VFOP_RXMODE_DONE;

		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
	BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
op_done:
	case BNX2X_VFOP_RXMODE_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid, unsigned long accept_flags)
{
	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_rx_mode_ramrod_params *ramrod =
			&vf->op_params.rx_mode;

		memset(ramrod, 0, sizeof(*ramrod));

		/* Prepare ramrod parameters */
		ramrod->cid = vfq->cid;
		ramrod->cl_id = vfq_cl_id(vf, vfq);
		ramrod->rx_mode_obj = &bp->rx_mode_obj;
		ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);

		ramrod->rx_accept_flags = accept_flags;
		ramrod->tx_accept_flags = accept_flags;
		ramrod->pstate = &vf->filter_state;
		ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

		set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
		set_bit(RAMROD_RX, &ramrod->ramrod_flags);
		set_bit(RAMROD_TX, &ramrod->ramrod_flags);

		ramrod->rdata =
			bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
		ramrod->rdata_mapping =
			bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);

		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
				 bnx2x_vfop_rxmode, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
 * queue destructor)
 */
static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qteardown_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qdown;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QTEARDOWN_RXMODE:
		/* Drop all */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
		vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
		/* vlan-clear-all: don't consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
		/* mac-clear-all: consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MCAST;
		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_MCAST:
		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_QDTOR:
		/* run the queue destruction flow */
		DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
		vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
		DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
		vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
		DP(BNX2X_MSG_IOV, "returned from cmd\n");
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);

	case BNX2X_VFOP_QTEARDOWN_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}

int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	/* for non-leading queues skip directly to the qdtor state */
	if (vfop) {
		vfop->args.qx.qid = qid;
		bnx2x_vfop_opset(qid == LEADING_IDX ?
				 BNX2X_VFOP_QTEARDOWN_RXMODE :
				 BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
				 cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
					     cmd->block);
	}

	return -ENOMEM;
}
/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}

static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;
	u32 val;

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	if (vf->cfg_flags & VF_CFG_INT_SIMD)
		val |= IGU_VF_CONF_SINGLE_ISR_EN;
	val &= ~IGU_VF_CONF_PARENT_MASK;
	val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT;	/* parent PF */
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	DP(BNX2X_MSG_IOV,
	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
	   vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
				       false /* VF */);

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
				    IGU_INT_DISABLE, 1);
	}
}

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
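/* The pretend pattern used throughout the enable primitives above looks
 * roughly like this - GRC accesses issued between the two pretend calls are
 * executed in the VF's context rather than the PF's:
 *
 *	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
 *	REG_WR(bp, some_reg, val);		 (lands in the VF context)
 *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); (restore the PF)
 */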
static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		return false;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}

/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct vf_pf_resc_request *resc = &vf->alloc_resc;
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;

	/* divvy up vlan rules */
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
	resc->num_sbs = vf->sb_count;
}

/* FLR routines: */
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* reset the state variables */
	bnx2x_iov_static_resc(bp, vf);
	vf->state = VF_FREE;
}

static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	/* DQ usage counter */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
					"DQ VF usage counter timed out",
					poll_cnt);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* FW cleanup command - poll for the results */
	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
				   poll_cnt))
		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

	/* verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);
}
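/* Per-VF FLR state machine: qx->qid starts at -1 and is pre-incremented, so
 * the QUEUES state is re-entered once per rx queue to flush it; afterwards
 * the multicast configuration is removed and the HW cleanup (DQ drain,
 * final FW cleanup, TX flush) is dispatched before the VF's resources are
 * freed and its mailbox is re-opened.
 */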
static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
	enum bnx2x_vfop_flr_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_flr,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_FLR_QUEUES:
		/* the cleanup operations are valid if and only if the VF
		 * was first acquired.
		 */
		if (++(qx->qid) < vf_rxq_count(vf)) {
			vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
						       qx->qid);
			if (vfop->rc)
				goto op_err;
			return;
		}
		/* remove multicasts */
		vfop->state = BNX2X_VFOP_FLR_HW;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
						0, true);
		if (vfop->rc)
			goto op_err;
		return;
	case BNX2X_VFOP_FLR_HW:

		/* dispatch final cleanup and wait for HW queues to flush */
		bnx2x_vf_flr_clnup_hw(bp, vf);

		/* release VF resources */
		bnx2x_vf_free_resc(bp, vf);

		/* re-open the mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);

		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	vf->flr_clnup_stage = VF_FLR_ACK;
	bnx2x_vfop_end(bp, vf, vfop);
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
}

static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
			      struct bnx2x_virtf *vf,
			      vfop_handler_t done)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		vfop->args.qx.qid = -1;	/* loop */
		bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
				 bnx2x_vfop_flr, done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
	}
	return -ENOMEM;
}
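/* Walk the VFs marked for FLR cleanup one at a time: bnx2x_vfop_flr_cmd()
 * is given bnx2x_vf_flr_clnup itself as the 'done' callback, so when one
 * VF's FLR op completes this function is re-entered with that VF as
 * prev_vf and moves on to the next candidate. Only when none remain are
 * the handled VFs acknowledged to the MCP.
 */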
static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
{
	int i = prev_vf ? prev_vf->index + 1 : 0;
	struct bnx2x_virtf *vf;

	/* find next VF to cleanup */
next_vf_to_clean:
	for (;
	     i < BNX2X_NR_VIRTFN(bp) &&
	     (bnx2x_vf(bp, i, state) != VF_RESET ||
	      bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
	     i++)
		;

	DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
	   BNX2X_NR_VIRTFN(bp));

	if (i < BNX2X_NR_VIRTFN(bp)) {
		vf = BP_VF(bp, i);

		/* lock the vf pf channel */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

		/* invoke the VF FLR SM */
		if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
			BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
				  vf->abs_vfid);

			/* mark the VF to be ACKED and continue */
			vf->flr_clnup_stage = VF_FLR_ACK;
			goto next_vf_to_clean;
		}
		return;
	}

	/* we are done, update vf records */
	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);

		if (vf->flr_clnup_stage != VF_FLR_ACK)
			continue;

		vf->flr_clnup_stage = VF_FLR_EPILOG;
	}

	/* Acknowledge the handled VFs.
	 * We acknowledge all the vfs for which an flr was requested, even
	 * those we never opened, since the mcp will interrupt us immediately
	 * again if we only ack some of the bits, resulting in an endless
	 * loop. This can happen for example in KVM, where an 'all ones' flr
	 * request is sometimes given by the hypervisor.
	 */
	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
			  bp->vfdb->flrd_vfs[i]);

	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

	/* clear the acked bits - better yet if the MCP implemented
	 * write to clear semantics
	 */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}

void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
	int i;

	/* Read FLR'd VFs */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

	DP(BNX2X_MSG_MCP,
	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);
		u32 reset = 0;

		if (vf->abs_vfid < 32)
			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
		else
			reset = bp->vfdb->flrd_vfs[1] &
				(1 << (vf->abs_vfid - 32));

		if (reset) {
			/* set as reset and ready for cleanup */
			vf->state = VF_RESET;
			vf->flr_clnup_stage = VF_FLR_CLN;

			DP(BNX2X_MSG_IOV,
			   "Initiating Final cleanup for VF %d\n",
			   vf->abs_vfid);
		}
	}

	/* do the FLR cleanup for all marked VFs */
	bnx2x_vf_flr_clnup(bp, NULL);
}

/* IOV global initialization routines */
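/* The doorbell queue (DQ) setup below lays VF CIDs out right after the
 * PF's: VF CIDs start at BNX2X_FIRST_VF_CID and each VF owns a window of
 * 1 << BNX2X_VF_CID_WND CIDs, which lets the DQ derive the vfid from the
 * CID of an incoming doorbell.
 */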
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflects the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set VFs starting CID. If it's > 0, the preceding CIDs belong to
	 * the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size  0 - *B, 4 - 128B. We set it here to match
	 * the PF doorbell size, although the two are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the VF doorbell threshold */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}

void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}

static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}

static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		size /= iov->total;
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}

static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}
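/* Scan the IGU CAM to discover which status blocks belong to this PF's
 * VFs. The loop tracks the most recently seen PF entry ('current_pf'),
 * which appears to precede its VFs' entries in the CAM; a VF entry seen
 * while current_pf equals this PF's id is therefore one of ours and is
 * recorded via bnx2x_vf_set_igu_info().
 */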
static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid, current_pf = 0;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (fid & IGU_FID_ENCODE_IS_PF)
			current_pf = fid & IGU_FID_PF_NUM_MASK;
		else if (current_pf == BP_FUNC(bp))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));
		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ?
		    (fid & IGU_FID_PF_NUM_MASK) :
		    (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
	DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
}

static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}

static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}
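/* SRIOV can only be enabled when all of the following hold: this is a PF of
 * an E2-or-newer chip with the SRIOV capability present, num_vfs_param is
 * non-zero, interrupts are MSI-X (not forced MSI/INTx), ARI forwarding is
 * on in the bridge, the IGU is in normal mode, and the PF's L2 CIDs fit
 * below BNX2X_FIRST_VF_CID. Each check below fails softly (returns 0) so
 * the PF still comes up without SRIOV.
 */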
/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify this is a PF */
	if (IS_VF(bp))
		return 0;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
		return 0;
	}

	err = -EIO;
	/* verify ari is enabled */
	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
		return 0;
	}

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
		return 0;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information; however, the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hypervisor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs */
	if (iov->total == 0)
		goto failed;

	iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);

	DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
	   num_vfs_param, iov->nr_virtfn);

	/* allocate the vf array */
	bp->vfdb->vfs = kcalloc(BNX2X_NR_VIRTFN(bp),
				sizeof(struct bnx2x_virtf), GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	bnx2x_get_vf_igu_cam_info(bp);

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kcalloc(BNX2X_MAX_NUM_VF_QUEUES,
				 sizeof(struct bnx2x_vf_queue), GFP_KERNEL);

	DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);

	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}

void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	int vf_idx;

	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	DP(BNX2X_MSG_IOV, "about to call disable sriov\n");
	pci_disable_sriov(bp->pdev);
	DP(BNX2X_MSG_IOV, "sriov disabled\n");

	/* disable access to all VFs */
	for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
		bnx2x_pretend_func(bp,
				   HW_VF_HANDLE(bp,
						bp->vfdb->sriov.first_vf_in_pf +
						vf_idx));
		DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
		   bp->vfdb->sriov.first_vf_in_pf + vf_idx);
		bnx2x_vf_enable_internal(bp, 0);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
	}

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}

void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];
		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);

	BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
		       BP_VF_BULLETIN_DMA(bp)->mapping,
		       BP_VF_BULLETIN_DMA(bp)->size);
}

int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	size_t tot_size;
	int i, rc = 0;

	if (!IS_SRIOV(bp))
		return rc;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

		if (cxt->size) {
			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
		} else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
			tot_size);
	BP_VFDB(bp)->sp_dma.size = tot_size;

	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
			tot_size);
	BP_VF_MBX_DMA(bp)->size = tot_size;

	/* allocate local bulletin boards */
	tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
	BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr,
			&BP_VF_BULLETIN_DMA(bp)->mapping, tot_size);
	BP_VF_BULLETIN_DMA(bp)->size = tot_size;

	return 0;

alloc_mem_err:
	return -ENOMEM;
}

static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
	unsigned long q_type = 0;

	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

	/* Queue State object */
	bnx2x_init_queue_obj(bp, &q->sp_obj,
			     cl_id, &q->cid, 1, func_id,
			     bnx2x_vf_sp(bp, vf, q_data),
			     bnx2x_vf_sp_map(bp, vf, q_data),
			     q_type);

	DP(BNX2X_MSG_IOV,
	   "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
	   vf->abs_vfid, q->sp_obj.func_id, q->cid);
}

/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
	int vfid;

	if (!IS_SRIOV(bp)) {
		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
		return 0;
	}

	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);

	/* let FLR complete ... */
	msleep(100);

	/* initialize vf database */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
			BNX2X_CIDS_PER_VF;

		union cdu_context *base_cxt = (union cdu_context *)
			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
			(base_vf_cid & (ILT_PAGE_CIDS-1));

		DP(BNX2X_MSG_IOV,
		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);

		/* init statically provisioned resources */
		bnx2x_iov_static_resc(bp, vf);

		/* queues are initialized during VF-ACQUIRE */

		/* reserve the vf vlan credit */
		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));

		vf->filter_state = 0;
		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);

		/* init mcast object - This object will be re-initialized
		 * during VF-ACQUIRE with the proper cl_id and cid.
		 * It needs to be initialized here so that it can be safely
		 * handled by a subsequent FLR flow.
		 */
		vf->mcast_list_len = 0;
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
				     0xFF, 0xFF, 0xFF,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		/* set the mailbox message addresses */
		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
			 MBX_MSG_ALIGNED_SIZE);

		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
			vfid * MBX_MSG_ALIGNED_SIZE;

		/* Enable vf mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	}

	/* Final VF init */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		/* fill in the BDF and bars */
		vf->bus = bnx2x_vf_bus(bp, vfid);
		vf->devfn = bnx2x_vf_devfn(bp, vfid);
		bnx2x_vf_set_bars(bp, vf);

		DP(BNX2X_MSG_IOV,
		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
		   vf->abs_vfid, vf->bus, vf->devfn,
		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
	}

	return 0;
}

/* called by bnx2x_chip_cleanup */
int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return 0;

	/* release all the VFs */
	for_each_vf(bp, i)
		bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */

	return 0;
}

/* called by bnx2x_init_hw_func, returns the next ilt line */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
{
	int i;
	struct bnx2x_ilt *ilt = BP_ILT(bp);

	if (!IS_SRIOV(bp))
		return line;

	/* set vfs ilt lines */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);

		ilt->lines[line+i].page = hw_cxt->addr;
		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
	}
	return line + i;
}

static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
{
	return ((cid >= BNX2X_FIRST_VF_CID) &&
		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
}
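/* How VF CIDs are carved up (an editorial illustration, derived from the
 * handlers below rather than separate documentation): VF CIDs start at
 * BNX2X_FIRST_VF_CID, with BNX2X_CIDS_PER_VF consecutive CIDs per VF.
 * Within the VF window the low BNX2X_VF_CID_WND bits of a cid select the
 * queue and the next bits select the abs_vfid, which is why event
 * completions are decoded as
 *
 *	qidx     = cid & ((1 << BNX2X_VF_CID_WND) - 1);
 *	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS - 1);
 *
 * and why the scheme relies on there being at most 64 VFs per path.
 */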
static
void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
					struct bnx2x_vf_queue *vfq,
					union event_ring_elem *elem)
{
	unsigned long ramrod_flags = 0;
	int rc = 0;

	/* Always push next commands out, don't wait here */
	set_bit(RAMROD_CONT, &ramrod_flags);

	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
	case BNX2X_FILTER_MAC_PENDING:
		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
					   &ramrod_flags);
		break;
	case BNX2X_FILTER_VLAN_PENDING:
		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
					    &ramrod_flags);
		break;
	default:
		BNX2X_ERR("Unsupported classification command: %d\n",
			  elem->message.data.eth_event.echo);
		return;
	}
	if (rc < 0)
		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
	else if (rc > 0)
		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
}

static
void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
			       struct bnx2x_virtf *vf)
{
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc;

	rparam.mcast_obj = &vf->mcast_obj;
	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
		if (rc < 0)
			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
				  rc);
	}
}

static
void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
				 struct bnx2x_virtf *vf)
{
	smp_mb__before_clear_bit();
	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	smp_mb__after_clear_bit();
}

int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
{
	struct bnx2x_virtf *vf;
	int qidx = 0, abs_vfid;
	u8 opcode;
	u16 cid = 0xffff;

	if (!IS_SRIOV(bp))
		return 1;

	/* first get the cid - the only events we handle here are cfc-delete
	 * and set-mac completion
	 */
	opcode = elem->message.opcode;

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		cid = SW_CID((__force __le32)
			     elem->message.data.cfc_del_event.cid);
		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
	case EVENT_RING_OPCODE_MULTICAST_RULES:
	case EVENT_RING_OPCODE_FILTERS_RULES:
		cid = (elem->message.data.eth_event.echo &
		       BNX2X_SWCID_MASK);
		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		abs_vfid = elem->message.data.vf_flr_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
		   abs_vfid, elem->message.data.malicious_vf_event.err_id);
		goto get_vf;
	default:
		return 1;
	}

	/* check if the cid is within the VF range */
	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
		return 1;
	}

	/* extract vf and rxq index from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. The max number of VFs (per path) is 64
	 */
	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
get_vf:
	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf) {
		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
			  cid, abs_vfid);
		return 0;
	}

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
		   vf->abs_vfid, qidx);
		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
						       &vfq_get(vf,
								qidx)->sp_obj,
						       BNX2X_Q_CMD_CFC_DEL);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
		break;
	case EVENT_RING_OPCODE_MULTICAST_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_mcast_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_FILTERS_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_filters_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
		   vf->abs_vfid);
		/* Do nothing for now */
		break;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n",
		   abs_vfid, elem->message.data.malicious_vf_event.err_id);
		/* Do nothing for now */
		break;
	}
	/* SRIOV: reschedule any 'in_progress' operations */
	bnx2x_iov_sp_event(bp, cid, false);

	return 0;
}

static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
{
	/* extract the vf from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. The max number of VFs (per path) is 64
	 */
	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
}

void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
				struct bnx2x_queue_sp_obj **q_obj)
{
	struct bnx2x_virtf *vf;

	if (!IS_SRIOV(bp))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);

	if (vf) {
		/* extract queue index from vf_cid - relies on the following:
		 * 1. vfid on cid reflects the true abs_vfid
		 * 2. The max number of VFs (per path) is 64
		 */
		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
	} else {
		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
	}
}

void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
{
	struct bnx2x_virtf *vf;

	/* check if the cid is within the VF range */
	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);
	if (vf) {
		/* set in_progress flag */
		atomic_set(&vf->op_in_progress, 1);
		if (queue_work)
			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
	}
}

void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index, num_queues_req;
	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;
	u8 stats_count = 0;
	bool is_fcoe = false;

	if (!IS_SRIOV(bp))
		return;

	if (!NO_FCOE(bp))
		is_fcoe = true;

	/* fcoe adds one global request and one queue request */
	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
		(is_fcoe ? 0 : 1);

	DP(BNX2X_MSG_IOV,
	   "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
	   BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
	   first_queue_query_index + num_queues_req);

	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
		num_queues_req * sizeof(struct per_queue_stats);

	cur_query_entry = &bp->fw_stats_req->
		query[first_queue_query_index + num_queues_req];

	for_each_vf(bp, i) {
		int j;
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (vf->state != VF_ENABLED) {
			DP(BNX2X_MSG_IOV,
			   "vf %d not enabled so no stats for it\n",
			   vf->abs_vfid);
			continue;
		}

		DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
		for_each_vfq(vf, j) {
			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);

			dma_addr_t q_stats_addr =
				vf->fw_stat_map + j * vf->stats_stride;

			/* collect stats from active queues only */
			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
			    BNX2X_Q_LOGICAL_STATE_STOPPED)
				continue;

			/* create stats query entry for this queue */
			cur_query_entry->kind = STATS_TYPE_QUEUE;
			cur_query_entry->index = vfq_stat_id(vf, rxq);
			cur_query_entry->funcID =
				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
			cur_query_entry->address.hi =
				cpu_to_le32(U64_HI(q_stats_addr));
			cur_query_entry->address.lo =
				cpu_to_le32(U64_LO(q_stats_addr));
			DP(BNX2X_MSG_IOV,
			   "added address %x %x for vf %d queue %d client %d\n",
			   cur_query_entry->address.hi,
			   cur_query_entry->address.lo, cur_query_entry->funcID,
			   j, cur_query_entry->index);
			cur_query_entry++;
			cur_data_offset += sizeof(struct per_queue_stats);
			stats_count++;

			/* all stats are coalesced to the leading queue */
			if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
				break;
		}
	}
	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
}
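/* A layout sketch of the resulting stats request (editorial, derived from
 * the code above): the PF's fw_stats_req begins with the PF's own ETH
 * queue queries (plus one FCoE query when FCoE is active), and the loop
 * above appends one STATS_TYPE_QUEUE entry per active VF queue behind
 * them. Each appended entry points into the VF's own DMA buffer at
 * fw_stat_map + j * stats_stride, so firmware writes VF statistics
 * directly into VF memory; with VF_CFG_STATS_COALESCE only the leading
 * queue of each VF gets an entry.
 */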
void bnx2x_iov_sp_task(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;
	/* Iterate over all VFs and invoke state transition for VFs with
	 * 'in-progress' slow-path operations
	 */
	DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (!vf) {
			BNX2X_ERR("VF was null! skipping...\n");
			continue;
		}

		if (!list_empty(&vf->op_list_head) &&
		    atomic_read(&vf->op_in_progress)) {
			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
			bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
		}
	}
}

static inline
struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
{
	int i;
	struct bnx2x_virtf *vf = NULL;

	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);
		if (stat_id >= vf->igu_base_id &&
		    stat_id < vf->igu_base_id + vf_sb_count(vf))
			break;
	}
	return vf;
}

/* VF API helpers */
static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
				u8 enable)
{
	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;

	REG_WR(bp, reg, val);
}

static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;

	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), false);
}

static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 val;

	/* clear the VF configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
		 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
		     BNX2X_VF_MAX_QUEUES);
}

static
int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
			    struct vf_pf_resc_request *req_resc)
{
	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);

	return ((req_resc->num_rxqs <= rxq_cnt) &&
		(req_resc->num_txqs <= txq_cnt) &&
		(req_resc->num_sbs <= vf_sb_count(vf)) &&
		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
}
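/* A brief map of the VF lifecycle driven by the core API below (an
 * editorial summary of the flows in this file, not authoritative
 * documentation): a VF starts out VF_FREE, moves to VF_ACQUIRED once
 * bnx2x_vf_acquire() grants it resources, and to VF_ENABLED once
 * bnx2x_vf_init() completes the FW/HW setup. The close flow drops an
 * enabled VF back to VF_ACQUIRED, release frees its resources, and
 * VF_RESET appears to cover a VF that has gone through FLR.
 */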
/* CORE VF API */
int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
		     struct vf_pf_resc_request *resc)
{
	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
		BNX2X_CIDS_PER_VF;

	union cdu_context *base_cxt = (union cdu_context *)
		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
		(base_vf_cid & (ILT_PAGE_CIDS-1));
	int i;

	/* if state is 'acquired' the VF was not released or FLR'd, in
	 * this case the returned resources match the already acquired
	 * resources. Verify that the requested numbers do not exceed the
	 * already acquired numbers.
	 */
	if (vf->state == VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
		   vf->abs_vfid);

		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must not exceed the previously acquired numbers\n",
				  vf->abs_vfid);
			return -EINVAL;
		}
		return 0;
	}

	/* Otherwise vf state must be 'free' or 'reset' */
	if (vf->state != VF_FREE && vf->state != VF_RESET) {
		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
			  vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* static allocation:
	 * the global maximum numbers are fixed per VF. Fail the request if
	 * the requested numbers exceed these globals
	 */
	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
		DP(BNX2X_MSG_IOV,
		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
		/* set the max resource in the vf */
		return -ENOMEM;
	}

	/* Set resources counters - 0 request means max available */
	vf_sb_count(vf) = resc->num_sbs;
	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	if (resc->num_mac_filters)
		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
	if (resc->num_vlan_filters)
		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;

	DP(BNX2X_MSG_IOV,
	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
	   vf_sb_count(vf), vf_rxq_count(vf),
	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
	   vf_vlan_rules_cnt(vf));

	/* Initialize the queues */
	if (!vf->vfqs) {
		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
		return -EINVAL;
	}

	for_each_vfq(vf, i) {
		struct bnx2x_vf_queue *q = vfq_get(vf, i);

		if (!q) {
			BNX2X_ERR("q number %d was not allocated\n", i);
			return -EINVAL;
		}

		q->index = i;
		q->cxt = &((base_cxt + i)->eth);
		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;

		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
		   vf->abs_vfid, i, q->index, q->cid, q->cxt);

		/* init SP objects */
		bnx2x_vfq_init(bp, vf, q);
	}
	vf->state = VF_ACQUIRED;
	return 0;
}

int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
{
	struct bnx2x_func_init_params func_init = {0};
	u16 flags = 0;
	int i;

	/* the sb resources are initialized at this point, do the
	 * FW/HW initializations
	 */
	for_each_vf_sb(vf, i)
		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));

	/* Sanity checks */
	if (vf->state != VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
		   vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* let FLR complete ... */
	msleep(100);

	/* FLR cleanup epilogue */
	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
		return -EBUSY;

	/* reset IGU VF statistics: MSIX */
	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);

	/* vf init */
	if (vf->cfg_flags & VF_CFG_STATS)
		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);

	if (vf->cfg_flags & VF_CFG_TPA)
		flags |= FUNC_FLG_TPA;

	if (is_vf_multi(vf))
		flags |= FUNC_FLG_RSS;

	/* function setup */
	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
	func_init.fw_stat_map = vf->fw_stat_map;
	func_init.spq_map = vf->spq_map;
	func_init.spq_prod = 0;
	bnx2x_func_init(bp, &func_init);

	/* Enable the vf */
	bnx2x_vf_enable_access(bp, vf->abs_vfid);
	bnx2x_vf_enable_traffic(bp, vf);

	/* queue protection table */
	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), true);

	vf->state = VF_ENABLED;

	/* update vf bulletin board */
	bnx2x_post_vf_bulletin(bp, vf->index);

	return 0;
}

struct set_vf_state_cookie {
	struct bnx2x_virtf *vf;
	u8 state;
};

static void bnx2x_set_vf_state(void *cookie)
{
	struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;

	p->vf->state = p->state;
}

/* VFOP close (teardown the queues, delete mcasts and close HW) */
static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
	enum bnx2x_vfop_close_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_close,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_CLOSE_QUEUES:

		if (++(qx->qid) < vf_rxq_count(vf)) {
			vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
			if (vfop->rc)
				goto op_err;
			return;
		}
		vfop->state = BNX2X_VFOP_CLOSE_HW;
		vfop->rc = 0;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_CLOSE_HW:

		/* disable the interrupts */
		DP(BNX2X_MSG_IOV, "disabling igu\n");
		bnx2x_vf_igu_disable(bp, vf);

		/* disable the VF */
		DP(BNX2X_MSG_IOV, "clearing qtbl\n");
		bnx2x_vf_clr_qtbl(bp, vf);

		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:

	/* need to make sure there are no outstanding stats ramrods which may
	 * cause the device to access the VF's stats buffer which it will free
	 * as soon as we return from the close flow.
	 */
	{
		struct set_vf_state_cookie cookie;

		cookie.vf = vf;
		cookie.state = VF_ACQUIRED;
		bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
	}

	DP(BNX2X_MSG_IOV, "set state to acquired\n");
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	/* Not supported at the moment; Exists for macros only */
	return;
}

int bnx2x_vfop_close_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		vfop->args.qx.qid = -1; /* loop */
		bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
				 bnx2x_vfop_close, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VF release can be called either: 1. The VF was acquired but
 * not enabled 2. the vf was enabled or in the process of being
 * enabled
 */
static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_release,
		.block = false,
	};

	DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
	   vf->state == VF_FREE ? "Free" :
	   vf->state == VF_ACQUIRED ? "Acquired" :
	   vf->state == VF_ENABLED ? "Enabled" :
	   vf->state == VF_RESET ? "Reset" :
	   "Unknown");

	switch (vf->state) {
	case VF_ENABLED:
		vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
		if (vfop->rc)
			goto op_err;
		return;

	case VF_ACQUIRED:
		DP(BNX2X_MSG_IOV, "about to free resources\n");
		bnx2x_vf_free_resc(bp, vf);
		DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
		goto op_done;

	case VF_FREE:
	case VF_RESET:
		/* do nothing */
		goto op_done;
	default:
		bnx2x_vfop_default(vf->state);
	}
op_err:
	BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	bnx2x_vfop_end(bp, vf, vfop);
}

static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	enum bnx2x_vfop_rss_state state;

	if (!vfop) {
		BNX2X_ERR("vfop was null\n");
		return;
	}

	state = vfop->state;
	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_RSS_CONFIG:
		/* next state */
		vfop->state = BNX2X_VFOP_RSS_DONE;
		bnx2x_config_rss(bp, &vfop->op_p->rss);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
		BNX2X_ERR("RSS error: rc %d\n", vfop->rc);
op_done:
	case BNX2X_VFOP_RSS_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

int bnx2x_vfop_release_cmd(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		bnx2x_vfop_opset(-1, /* use vf->state */
				 bnx2x_vfop_release, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
		       struct bnx2x_virtf *vf,
		       struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss,
				 cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss,
					     cmd->block);
	}
	return -ENOMEM;
}
/* VF release ~ VF close + VF release-resources
 * Release is the ultimate SW shutdown and is called whenever an
 * irrecoverable error is encountered.
 */
void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
{
	struct bnx2x_vfop_cmd cmd = {
		.done = NULL,
		.block = block,
	};
	int rc;

	DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);

	rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
	if (rc)
		WARN(rc,
		     "VF[%d] Failed to allocate resources for release op- rc=%d\n",
		     vf->abs_vfid, rc);
}

static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
				     struct bnx2x_virtf *vf, u32 *sbdf)
{
	*sbdf = vf->devfn | (vf->bus << 8);
}

static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
				     struct bnx2x_vf_bar_info *bar_info)
{
	int n;

	bar_info->nr_bars = bp->vfdb->sriov.nres;
	for (n = 0; n < bar_info->nr_bars; n++)
		bar_info->bars[n] = vf->bars[n];
}

void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      enum channel_tlvs tlv)
{
	/* we don't lock the channel for unsupported tlvs */
	if (!bnx2x_tlv_supported(tlv)) {
		BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
		return;
	}

	/* lock the channel */
	mutex_lock(&vf->op_mutex);

	/* record the locking op */
	vf->op_current = tlv;

	/* log the lock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
	   vf->abs_vfid, tlv);
}

void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
				enum channel_tlvs expected_tlv)
{
	enum channel_tlvs current_tlv;

	if (!vf) {
		BNX2X_ERR("VF was %p\n", vf);
		return;
	}

	current_tlv = vf->op_current;

	/* we don't unlock the channel for unsupported tlvs */
	if (!bnx2x_tlv_supported(expected_tlv))
		return;

	WARN(expected_tlv != vf->op_current,
	     "lock mismatch: expected %d found %d", expected_tlv,
	     vf->op_current);

	/* clear the locking op */
	vf->op_current = CHANNEL_TLV_NONE;

	/* unlock the channel */
	mutex_unlock(&vf->op_mutex);

	/* log the unlock, using the tlv sampled before it was cleared */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
	   vf->abs_vfid, current_tlv);
}
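/* bnx2x_sriov_configure() below is wired up as the driver's PCI
 * .sriov_configure callback, so it runs when an administrator writes to
 * the device's sriov_numvfs sysfs attribute, e.g. (illustrative device
 * address):
 *
 *	echo 2 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *
 * A write of 0 disables SR-IOV; any other value is clamped to the
 * TotalVFs reported in the SR-IOV capability.
 */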
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));

	if (!IS_SRIOV(bp)) {
		BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
		return -EINVAL;
	}

	DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
	   num_vfs_param, BNX2X_NR_VIRTFN(bp));

	/* HW channel is only operational when PF is up */
	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
		return -EINVAL;
	}

	/* we are always bound by the total_vfs in the configuration space */
	if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
			  num_vfs_param, BNX2X_NR_VIRTFN(bp));
		num_vfs_param = BNX2X_NR_VIRTFN(bp);
	}

	bp->requested_nr_virtfn = num_vfs_param;
	if (num_vfs_param == 0) {
		pci_disable_sriov(dev);
		return 0;
	} else {
		return bnx2x_enable_sriov(bp);
	}
}

#define IGU_ENTRY_SIZE 4

int bnx2x_enable_sriov(struct bnx2x *bp)
{
	int rc = 0, req_vfs = bp->requested_nr_virtfn;
	int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
	u32 igu_entry, address;
	u16 num_vf_queues;

	if (req_vfs == 0)
		return 0;

	first_vf = bp->vfdb->sriov.first_vf_in_pf;

	/* statically distribute vf sb pool between VFs */
	num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
			      BP_VFDB(bp)->vf_sbs_pool / req_vfs);

	/* zero previous values learned from igu cam */
	for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

		vf->sb_count = 0;
		vf_sb_count(BP_VF(bp, vf_idx)) = 0;
	}
	bp->vfdb->vf_sbs_pool = 0;

	/* prepare IGU cam */
	sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
	address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
		for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
			igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
				vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
				IGU_REG_MAPPING_MEMORY_VALID;
			DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
			   sb_idx, vf_idx);
			REG_WR(bp, address, igu_entry);
			sb_idx++;
			address += IGU_ENTRY_SIZE;
		}
	}

	/* Reinitialize vf database according to igu cam */
	bnx2x_get_vf_igu_cam_info(bp);

	DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
	   BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);

	qcount = 0;
	for_each_vf(bp, vf_idx) {
		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

		/* set local queue arrays */
		vf->vfqs = &bp->vfdb->vfqs[qcount];
		qcount += vf_sb_count(vf);
		bnx2x_iov_static_resc(bp, vf);
	}

	/* prepare msix vectors in VF configuration space */
	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
		bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
		REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
		       num_vf_queues);
		DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
		   vf_idx, num_vf_queues);
	}
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* enable sriov. This will probe all the VFs, and consequently cause
	 * the "acquire" messages to appear on the VF PF channel.
	 */
	DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
	bnx2x_disable_sriov(bp);
	rc = pci_enable_sriov(bp->pdev, req_vfs);
	if (rc) {
		BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
		return rc;
	}
	DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
	return req_vfs;
}

void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
{
	int vfidx;
	struct pf_vf_bulletin_content *bulletin;

	DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
	for_each_vf(bp, vfidx) {
		bulletin = BP_VF_BULLETIN(bp, vfidx);
		if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
			bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
	}
}

void bnx2x_disable_sriov(struct bnx2x *bp)
{
	pci_disable_sriov(bp->pdev);
}

static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
			     struct bnx2x_virtf **vf,
			     struct pf_vf_bulletin_content **bulletin)
{
	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("vf ndo called though PF is down\n");
		return -EINVAL;
	}

	if (!IS_SRIOV(bp)) {
		BNX2X_ERR("vf ndo called though sriov is disabled\n");
		return -EINVAL;
	}

	if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
			  vfidx, BNX2X_NR_VIRTFN(bp));
		return -EINVAL;
	}

	/* init members */
	*vf = BP_VF(bp, vfidx);
	*bulletin = BP_VF_BULLETIN(bp, vfidx);

	if (!*vf) {
		BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n",
			  vfidx);
		return -EINVAL;
	}

	if (!(*vf)->vfqs) {
		BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n",
			  vfidx);
		return -EINVAL;
	}

	if (!*bulletin) {
		BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n",
			  vfidx);
		return -EINVAL;
	}

	return 0;
}

int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
			struct ifla_vf_info *ivi)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;
	struct bnx2x_vlan_mac_obj *mac_obj;
	struct bnx2x_vlan_mac_obj *vlan_obj;
	int rc;

	/* sanity and init */
	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
	if (rc)
		return rc;
	mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
	if (!mac_obj || !vlan_obj) {
		BNX2X_ERR("VF partially initialized\n");
		return -EINVAL;
	}

	ivi->vf = vfidx;
	ivi->qos = 0;
	ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
	ivi->spoofchk = 1; /* always enabled */
	if (vf->state == VF_ENABLED) {
		/* mac and vlan are in vlan_mac objects */
		if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)))
			mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
						0, ETH_ALEN);
		if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj)))
			vlan_obj->get_n_elements(bp, vlan_obj, 1,
						 (u8 *)&ivi->vlan, 0,
						 VLAN_HLEN);
	} else {
		/* mac */
		if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
			/* mac configured by ndo so it's in the bulletin board */
			memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
		else
			/* function has not been loaded yet. Show mac as 0s */
			memset(&ivi->mac, 0, ETH_ALEN);
		/* vlan */
		if (bulletin->valid_bitmap & (1 << VLAN_VALID))
			/* vlan configured by ndo so it's in the bulletin board */
			memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
		else
			/* function has not been loaded yet. Show vlans as 0s */
			memset(&ivi->vlan, 0, VLAN_HLEN);
	}

	return 0;
}
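/* The two ndo implementations below (bnx2x_set_vf_mac and
 * bnx2x_set_vf_vlan) are reached through rtnetlink, typically from
 * iproute2, e.g. (illustrative interface name):
 *
 *	ip link set dev eth0 vf 0 mac 02:00:00:00:00:01
 *	ip link set dev eth0 vf 0 vlan 100
 *
 * Both follow the same pattern: update the PF's copy of the VF's
 * bulletin board first, then, if the VF is already up with an active
 * leading queue, apply the configuration to the device immediately.
 */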
/* New mac for VF. Consider these cases:
 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
 *    supply at acquire.
 * 2. VF has already been acquired but has not yet initialized - store in local
 *    bulletin board. mac will be posted on VF bulletin board after VF init. VF
 *    will configure this mac when it is ready.
 * 3. VF has already initialized but has not yet setup a queue - post the new
 *    mac on VF's bulletin board right now. VF will configure this mac when it
 *    is ready.
 * 4. VF has already set a queue - delete any macs already configured for this
 *    queue and manually config the new mac.
 * In any event, once this function has been called refuse any attempts by the
 * VF to configure any mac for itself except for this mac. In case of a race
 * where the VF fails to see the new post on its bulletin board before sending a
 * mac configuration request, the PF will simply fail the request and VF can try
 * again after consulting its bulletin board.
 */
int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc, q_logical_state;
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;

	/* sanity and init */
	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
	if (rc)
		return rc;
	if (!is_valid_ether_addr(mac)) {
		BNX2X_ERR("mac address invalid\n");
		return -EINVAL;
	}

	/* update PF's copy of the VF's bulletin. Will no longer accept mac
	 * configuration requests from the vf unless they match this mac
	 */
	bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
	memcpy(bulletin->mac, mac, ETH_ALEN);

	/* Post update on VF's bulletin board */
	rc = bnx2x_post_vf_bulletin(bp, vfidx);
	if (rc) {
		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
		return rc;
	}

	q_logical_state =
		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
	if (vf->state == VF_ENABLED &&
	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		/* configure the mac in device on this vf's queue */
		unsigned long ramrod_flags = 0;
		struct bnx2x_vlan_mac_obj *mac_obj =
			&bnx2x_leading_vfq(vf, mac_obj);

		rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
		if (rc)
			return rc;

		/* must lock vfpf channel to protect against vf flows */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);

		/* remove existing eth macs */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete eth macs\n");
			rc = -EINVAL;
			goto out;
		}

		/* remove existing uc list macs */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete uc_list macs\n");
			rc = -EINVAL;
			goto out;
		}

		/* configure the new mac to device */
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
				  BNX2X_ETH_MAC, &ramrod_flags);

out:
		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
	}

	return 0;
}

int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc, q_logical_state;
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;

	/* sanity and init */
	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
	if (rc)
		return rc;

	if (vlan > 4095) {
		BNX2X_ERR("illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
	   vfidx, vlan, 0);

	/* update PF's copy of the VF's bulletin. No point in posting the vlan
	 * to the VF since it doesn't have anything to do with it. But it is
	 * useful to store it here in case the VF is not up yet and we can
	 * only configure the vlan later, when it comes up.
	 */
	bulletin->valid_bitmap |= 1 << VLAN_VALID;
	bulletin->vlan = vlan;

	/* is vf initialized and queue set up? */
	q_logical_state =
		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
	if (vf->state == VF_ENABLED &&
	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		/* configure the vlan in device on this vf's queue */
		unsigned long ramrod_flags = 0;
		unsigned long vlan_mac_flags = 0;
		struct bnx2x_vlan_mac_obj *vlan_obj =
			&bnx2x_leading_vfq(vf, vlan_obj);
		struct bnx2x_vlan_mac_ramrod_params ramrod_param;
		struct bnx2x_queue_state_params q_params = {NULL};
		struct bnx2x_queue_update_params *update_params;

		rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
		if (rc)
			return rc;
		memset(&ramrod_param, 0, sizeof(ramrod_param));

		/* must lock vfpf channel to protect against vf flows */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);

		/* remove existing vlans */
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
					  &ramrod_flags);
		if (rc) {
			BNX2X_ERR("failed to delete vlans\n");
			rc = -EINVAL;
			goto out;
		}

		/* send queue update ramrod to configure default vlan and silent
		 * vlan removal
		 */
		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
		q_params.cmd = BNX2X_Q_CMD_UPDATE;
		q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
		update_params = &q_params.params.update;
		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
			  &update_params->update_flags);
		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
			  &update_params->update_flags);

		if (vlan == 0) {
			/* if vlan is 0 then we want to leave the VF traffic
			 * untagged, and leave the incoming traffic untouched
			 * (i.e. do not remove any vlan tags).
			 */
			__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
				    &update_params->update_flags);
			__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
				    &update_params->update_flags);
		} else {
			/* configure the new vlan to device */
			__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
			ramrod_param.vlan_mac_obj = vlan_obj;
			ramrod_param.ramrod_flags = ramrod_flags;
			ramrod_param.user_req.u.vlan.vlan = vlan;
			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
			rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
			if (rc) {
				BNX2X_ERR("failed to configure vlan\n");
				rc = -EINVAL;
				goto out;
			}

			/* configure default vlan to vf queue and set silent
			 * vlan removal (the vf remains unaware of this vlan).
			 */
			update_params = &q_params.params.update;
			__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
				  &update_params->update_flags);
			__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
				  &update_params->update_flags);
			update_params->def_vlan = vlan;
		}

		/* Update the Queue state */
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to configure default VLAN\n");
			goto out;
		}

		/* clear the flag indicating that this VF needs its vlan
		 * (will only be set if the HV configured the Vlan before vf was
		 * up and we were called because the VF came up later)
		 */
out:
		vf->cfg_flags &= ~VF_CFG_VLAN;
		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
	}
	return rc;
}
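/* Illustration of the bulletin CRC convention implemented below
 * (an editorial sketch, not driver code): with the crc as the first
 * field of the board, a reader validates a sampled copy roughly as
 *
 *	u32 crc = crc32(BULLETIN_CRC_SEED,
 *			(u8 *)&bulletin + sizeof(bulletin.crc),
 *			bulletin.length - sizeof(bulletin.crc));
 *	if (crc == bulletin.crc)
 *		... the sampled copy is coherent ...
 *
 * which is exactly what bnx2x_sample_bulletin() retries up to
 * BULLETIN_ATTEMPTS times.
 */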
/* crc is the first field in the bulletin board. Compute the crc over the
 * entire bulletin board excluding the crc field itself. Use the length field
 * as the Bulletin Board was posted by a PF with possibly a different version
 * from the vf which will sample it. Therefore, the length is computed by the
 * PF and then used blindly by the VF.
 */
u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
			  struct pf_vf_bulletin_content *bulletin)
{
	return crc32(BULLETIN_CRC_SEED,
		     ((u8 *)bulletin) + sizeof(bulletin->crc),
		     bulletin->length - sizeof(bulletin->crc));
}

/* Check for new posts on the bulletin board */
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
{
	struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
	int attempts;

	/* bulletin board hasn't changed since last sample */
	if (bp->old_bulletin.version == bulletin.version)
		return PFVF_BULLETIN_UNCHANGED;

	/* validate crc of new bulletin board */
	if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
		/* sampling structure in mid post may result in corrupted data;
		 * validate crc to ensure coherency.
		 */
		for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
			bulletin = bp->pf2vf_bulletin->content;
			if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
								  &bulletin))
				break;
			BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
				  bulletin.crc,
				  bnx2x_crc_vf_bulletin(bp, &bulletin));
		}
		if (attempts >= BULLETIN_ATTEMPTS) {
			BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
				  attempts);
			return PFVF_BULLETIN_CRC_ERR;
		}
	}

	/* the mac address in bulletin board is valid and is new */
	if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
	    memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
		/* update new mac to net device */
		memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
	}

	/* the vlan in bulletin board is valid and is new */
	if (bulletin.valid_bitmap & 1 << VLAN_VALID)
		memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);

	/* copy new bulletin board to bp */
	bp->old_bulletin = bulletin;

	return PFVF_BULLETIN_UPDATED;
}

void bnx2x_timer_sriov(struct bnx2x *bp)
{
	bnx2x_sample_bulletin(bp);

	/* if channel is down we need to self destruct */
	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
			&bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
	}
}

void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
{
	/* vf doorbells are embedded within the regview */
	return bp->regview + PXP_VF_ADDR_DB_START;
}

int bnx2x_vf_pci_alloc(struct bnx2x *bp)
{
	mutex_init(&bp->vf2pf_mutex);

	/* allocate vf2pf mailbox for vf to pf channel */
	BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
			sizeof(struct bnx2x_vf_mbx_msg));

	/* allocate pf 2 vf bulletin board */
	BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
			sizeof(union pf_vf_bulletin));

	return 0;

alloc_mem_err:
	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
		       sizeof(struct bnx2x_vf_mbx_msg));
	BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
		       sizeof(union pf_vf_bulletin));
	return -ENOMEM;
}
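/* Channel-down handling ties the two sides of the bulletin board
 * together: the PF side (bnx2x_iov_channel_down below) raises
 * CHANNEL_DOWN in every VF's bulletin, while the VF side notices it in
 * bnx2x_timer_sriov() above and schedules its own teardown via
 * BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN.
 */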
void bnx2x_iov_channel_down(struct bnx2x *bp)
{
	int vf_idx;
	struct pf_vf_bulletin_content *bulletin;

	if (!IS_SRIOV(bp))
		return;

	for_each_vf(bp, vf_idx) {
		/* locate this VF's bulletin board and update the channel down
		 * bit
		 */
		bulletin = BP_VF_BULLETIN(bp, vf_idx);
		bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;

		/* update vf bulletin board */
		bnx2x_post_vf_bulletin(bp, vf_idx);
	}
}