/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>

/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}

static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
				u8 update)
{
	/* acking a VF sb through the PF - use the GRC */
	u32 ctl;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
	mmiowb();
	barrier();

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();
}
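
/* A worked example of the control word composed above, with illustrative
 * (not configuration-derived) values: for abs_vfid 5 and igu_sb_id 16 the
 * GRC command is built roughly as
 *
 *	addr_encode = IGU_CMD_E2_PROD_UPD_BASE + 16;
 *	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
 *	      5 << IGU_CTRL_REG_FID_SHIFT |
 *	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
 *
 * i.e. the data register receives the producer-update payload while the
 * control register carries the target address, the VF's function id and a
 * write opcode. The two writes must reach the chip in order, hence the
 * mmiowb()/barrier() pair after each.
 */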

/* VFOP - VF slow-path operation support */

#define BNX2X_VFOP_FILTER_ADD_CNT_MAX		0x10000

/* VFOP operations states */
enum bnx2x_vfop_qctor_state {
	BNX2X_VFOP_QCTOR_INIT,
	BNX2X_VFOP_QCTOR_SETUP,
	BNX2X_VFOP_QCTOR_INT_EN
};

enum bnx2x_vfop_qdtor_state {
	BNX2X_VFOP_QDTOR_HALT,
	BNX2X_VFOP_QDTOR_TERMINATE,
	BNX2X_VFOP_QDTOR_CFCDEL,
	BNX2X_VFOP_QDTOR_DONE
};

enum bnx2x_vfop_vlan_mac_state {
	BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
	BNX2X_VFOP_VLAN_MAC_CLEAR,
	BNX2X_VFOP_VLAN_MAC_CHK_DONE,
	BNX2X_VFOP_MAC_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST_0
};

enum bnx2x_vfop_qsetup_state {
	BNX2X_VFOP_QSETUP_CTOR,
	BNX2X_VFOP_QSETUP_VLAN0,
	BNX2X_VFOP_QSETUP_DONE
};

enum bnx2x_vfop_mcast_state {
	BNX2X_VFOP_MCAST_DEL,
	BNX2X_VFOP_MCAST_ADD,
	BNX2X_VFOP_MCAST_CHK_DONE
};

enum bnx2x_vfop_qflr_state {
	BNX2X_VFOP_QFLR_CLR_VLAN,
	BNX2X_VFOP_QFLR_CLR_MAC,
	BNX2X_VFOP_QFLR_TERMINATE,
	BNX2X_VFOP_QFLR_DONE
};

enum bnx2x_vfop_flr_state {
	BNX2X_VFOP_FLR_QUEUES,
	BNX2X_VFOP_FLR_HW
};

enum bnx2x_vfop_close_state {
	BNX2X_VFOP_CLOSE_QUEUES,
	BNX2X_VFOP_CLOSE_HW
};

enum bnx2x_vfop_rxmode_state {
	BNX2X_VFOP_RXMODE_CONFIG,
	BNX2X_VFOP_RXMODE_DONE
};

enum bnx2x_vfop_qteardown_state {
	BNX2X_VFOP_QTEARDOWN_RXMODE,
	BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
	BNX2X_VFOP_QTEARDOWN_CLR_MAC,
	BNX2X_VFOP_QTEARDOWN_QDTOR,
	BNX2X_VFOP_QTEARDOWN_DONE
};

enum bnx2x_vfop_rss_state {
	BNX2X_VFOP_RSS_CONFIG,
	BNX2X_VFOP_RSS_DONE
};

#define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0)

void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	DP(BNX2X_MSG_IOV,
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->tx.sb_cq_index,
	   init_params->tx.hc_rate,
	   setup_params->flags,
	   setup_params->txq_params.traffic_type);
}

void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->rx.sb_cq_index,
	   init_params->rx.hc_rate,
	   setup_params->gen_params.mtu,
	   rxq_params->buf_sz,
	   rxq_params->sge_buf_sz,
	   rxq_params->max_sges_pkt,
	   rxq_params->tpa_agg_sz,
	   setup_params->flags,
	   rxq_params->drop_flags,
	   rxq_params->cache_line_log);
}

void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vfop_qctor_params *p,
			   unsigned long q_type)
{
	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

	/* INIT */

	/* Enable host coalescing in the transition to INIT state */
	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

	/* FW SB ID */
	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

	/* context */
	init_p->cxts[0] = q->cxt;

	/* SETUP */

	/* Setup-op general parameters */
	setup_p->gen_params.spcl_id = vf->sp_cl_id;
	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);

	/* Setup-op pause params:
	 * Nothing to do, the pause thresholds are set by default to 0 which
	 * effectively turns off the feature for this queue. We don't want
	 * one queue (VF) to interfere with another queue (another VF)
	 */
	if (vf->cfg_flags & VF_CFG_FW_FC)
		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
			  vf->abs_vfid);
	/* Setup-op flags:
	 * collect statistics, zero statistics, local-switching, security,
	 * OV for Flex10, RSS and MCAST for leading
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

	/* for VFs, enable tx switching, bd coherency, and mac address
	 * anti-spoofing
	 */
	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

	/* Setup-op rx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
	}

	/* Setup-op tx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	}
}
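
/* All vfop state machines below follow the same shape: the current state is
 * read from vfop->state, each case programs the *next* state before issuing
 * its ramrod, and bnx2x_vfop_finalize() decides whether to fall through to
 * the next case, return while a completion is pending, or terminate. A
 * minimal sketch of the idiom (hypothetical two-step op):
 *
 *	switch (state) {
 *	case FIRST:
 *		vfop->state = SECOND;
 *		vfop->rc = <issue ramrod>;
 *		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
 *	case SECOND:
 *		<issue last ramrod>;
 *		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
 *	default:
 *		bnx2x_vfop_default(state);
 *	}
 *
 * The missing breaks are deliberate: when a ramrod completes synchronously,
 * execution continues straight into the next state's case.
 */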

/* VFOP queue construction */
static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qctor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QCTOR_INIT:

		/* has this queue already been opened? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
			DP(BNX2X_MSG_IOV,
			   "Entered qctor but queue was already up. Aborting gracefully\n");
			goto op_done;
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_SETUP;

		q_params->cmd = BNX2X_Q_CMD_INIT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_SETUP:
		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_INT_EN;

		/* copy pre-prepared setup params to the queue-state params */
		vfop->op_p->qctor.qstate.params.setup =
			vfop->op_p->qctor.prep_qsetup;

		q_params->cmd = BNX2X_Q_CMD_SETUP;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_INT_EN:

		/* enable interrupts */
		bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
				    USTORM_ID, 0, IGU_INT_ENABLE, 0);
		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
op_done:
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qctor.qid = qid;
		vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);

		bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
				 bnx2x_vfop_qctor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
					     cmd->block);
	}
	return -ENOMEM;
}
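
/* Every vfop has a thin *_cmd wrapper like the one above, all with the same
 * shape: allocate a vfop with bnx2x_vfop_add(), fill in the op arguments,
 * set the entry state and completion callback via bnx2x_vfop_opset(), then
 * kick the state machine with bnx2x_vfop_transition(). A sketch of a
 * typical caller, with a hypothetical my_done_handler as the callback:
 *
 *	struct bnx2x_vfop_cmd cmd = {
 *		.done	= my_done_handler,
 *		.block	= false,
 *	};
 *
 *	if (bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid))
 *		return -ENOMEM;		(no vfop was queued)
 *
 * With .block set, the transition waits for the op to complete instead of
 * returning through the done callback.
 */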

/* VFOP queue destruction */
static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qdtor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QDTOR_HALT:

		/* has this queue already been stopped? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_STOPPED) {
			DP(BNX2X_MSG_IOV,
			   "Entered qdtor but queue was already stopped. Aborting gracefully\n");

			/* next state */
			vfop->state = BNX2X_VFOP_QDTOR_DONE;

			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;

		q_params->cmd = BNX2X_Q_CMD_HALT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_TERMINATE:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;

		q_params->cmd = BNX2X_Q_CMD_TERMINATE;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_CFCDEL:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_DONE;

		q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
	BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
op_done:
	case BNX2X_VFOP_QDTOR_DONE:
		/* invalidate the context */
		if (qdtor->cxt) {
			qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
			qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
		}
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_queue_state_params *qstate =
			&vf->op_params.qctor.qstate;

		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qdtor.qid = qid;
		vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);

		bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
				 bnx2x_vfop_qdtor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
					     cmd->block);
	}
	/* vfop is NULL here, so don't dereference it in the debug print */
	DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop\n", vf->abs_vfid);
	return -ENOMEM;
}

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
	if (vf) {
		/* the first igu entry belonging to VFs of this PF */
		if (!BP_VFDB(bp)->first_vf_igu_entry)
			BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;

		/* the first igu entry belonging to this VF */
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;

		++vf_sb_count(vf);
		++vf->sb_count;
	}
	BP_VFDB(bp)->vf_sbs_pool++;
}

/* VFOP MAC/VLAN helpers */
static inline void bnx2x_vfop_credit(struct bnx2x *bp,
				     struct bnx2x_vfop *vfop,
				     struct bnx2x_vlan_mac_obj *obj)
{
	struct bnx2x_vfop_args_filters *args = &vfop->args.filters;

	/* update credit only if there is no error
	 * and a valid credit counter
	 */
	if (!vfop->rc && args->credit) {
		struct list_head *pos;
		int read_lock;
		int cnt = 0;

		read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
		if (read_lock)
			DP(BNX2X_MSG_SP, "Failed to take vlan mac read lock; continuing anyway\n");

		list_for_each(pos, &obj->head)
			cnt++;

		if (!read_lock)
			bnx2x_vlan_mac_h_read_unlock(bp, obj);

		atomic_set(args->credit, cnt);
	}
}
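
/* The credit counters (e.g. the per-queue vlan_count) are not adjusted
 * incrementally; once an op finishes, bnx2x_vfop_credit() simply recounts
 * the rules left on the object's list. Illustrative numbers: if a VF held
 * 3 vlan rules and a list op deleted 2 and added 1, the counter is
 * rewritten to 2. The walk under the read lock is the single source of
 * truth, which keeps the credit consistent even after partially executed
 * or rolled-back commands.
 */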

static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
				   struct bnx2x_vfop_filter *pos,
				   struct bnx2x_vlan_mac_data *user_req)
{
	user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
		BNX2X_VLAN_MAC_DEL;

	switch (pos->type) {
	case BNX2X_VFOP_FILTER_MAC:
		memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
		break;
	case BNX2X_VFOP_FILTER_VLAN:
		user_req->u.vlan.vlan = pos->vid;
		break;
	default:
		BNX2X_ERR("Invalid filter type, skipping\n");
		return 1;
	}
	return 0;
}

static int
bnx2x_vfop_config_vlan0(struct bnx2x *bp,
			struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
			bool add)
{
	int rc;

	vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
		BNX2X_VLAN_MAC_DEL;
	vlan_mac->user_req.u.vlan.vlan = 0;

	rc = bnx2x_config_vlan_mac(bp, vlan_mac);
	if (rc == -EEXIST)
		rc = 0;
	return rc;
}

static int bnx2x_vfop_config_list(struct bnx2x *bp,
				  struct bnx2x_vfop_filters *filters,
				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
{
	struct bnx2x_vfop_filter *pos, *tmp;
	struct list_head rollback_list, *filters_list = &filters->head;
	struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
	int rc = 0, cnt = 0;

	INIT_LIST_HEAD(&rollback_list);

	list_for_each_entry_safe(pos, tmp, filters_list, link) {
		if (bnx2x_vfop_set_user_req(bp, pos, user_req))
			continue;

		rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (rc >= 0) {
			cnt += pos->add ? 1 : -1;
			list_move(&pos->link, &rollback_list);
			rc = 0;
		} else if (rc == -EEXIST) {
			rc = 0;
		} else {
			BNX2X_ERR("Failed to add a new vlan_mac command\n");
			break;
		}
	}

	/* rollback if error or too many rules added */
	if (rc || cnt > filters->add_cnt) {
		BNX2X_ERR("error or too many rules added. Performing rollback\n");
		list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
			pos->add = !pos->add;	/* reverse op */
			bnx2x_vfop_set_user_req(bp, pos, user_req);
			bnx2x_config_vlan_mac(bp, vlan_mac);
			list_del(&pos->link);
		}
		cnt = 0;
		if (!rc)
			rc = -EINVAL;
	}
	filters->add_cnt = cnt;
	return rc;
}
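
/* Worked example of the rollback above (illustrative numbers): suppose a
 * list of four ADD filters is applied and the third bnx2x_config_vlan_mac()
 * call fails. The two filters already applied sit on rollback_list with
 * cnt == 2; the error path flips each one to a DEL (pos->add = !pos->add),
 * replays it, and reports add_cnt = 0 together with the original error (or
 * -EINVAL when only the budget was exceeded), so the caller never observes
 * a half-applied list.
 */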

/* VFOP set VLAN/MAC */
static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
	struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
	struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;

	enum bnx2x_vfop_vlan_mac_state state = vfop->state;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	bnx2x_vfop_reset_wq(vf);

	switch (state) {
	case BNX2X_VFOP_VLAN_MAC_CLEAR:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do delete */
		vfop->rc = obj->delete_all(bp, obj,
					   &vlan_mac->user_req.vlan_mac_flags,
					   &vlan_mac->ramrod_flags);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do config */
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (vfop->rc == -EEXIST)
			vfop->rc = 0;

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
		vfop->rc = !!obj->raw.check_pending(&obj->raw);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MAC_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do list config */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (vfop->rc)
			goto op_err;

		set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0;

		/* remove vlan0 - could be no-op */
		vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
		if (vfop->rc)
			goto op_err;

		/* Do vlan list config. If this operation fails we try to
		 * restore vlan0 to keep the queue in working order
		 */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (!vfop->rc) {
			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		}
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */

	case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		if (list_empty(&obj->head))
			/* add vlan0 */
			vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
op_done:
	kfree(filters);
	bnx2x_vfop_credit(bp, vfop, obj);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

struct bnx2x_vfop_vlan_mac_flags {
	bool drv_only;
	bool dont_consume;
	bool single_cmd;
	bool add;
};

static void
bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
				struct bnx2x_vfop_vlan_mac_flags *flags)
{
	struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;

	memset(ramrod, 0, sizeof(*ramrod));

	/* ramrod flags */
	if (flags->drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
	if (flags->single_cmd)
		set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);

	/* mac_vlan flags */
	if (flags->dont_consume)
		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);

	/* cmd */
	ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
}

static inline void
bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
			   struct bnx2x_vfop_vlan_mac_flags *flags)
{
	bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
	set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
}

static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
				     struct bnx2x_virtf *vf,
				     struct bnx2x_vfop_cmd *cmd,
				     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single */
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false /* don't care */,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    struct bnx2x_vfop_filters *macs,
			    int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = macs,
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false, /* don't care since only the items in
				       * the filters list affect the sp
				       * operation, not the list itself
				       */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    int qid, u16 vid, bool add)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = false,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = add,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
		ramrod->user_req.u.vlan.vlan = vid;

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      struct bnx2x_vfop_cmd *cmd,
				      int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false, /* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
			     struct bnx2x_virtf *vf,
			     struct bnx2x_vfop_cmd *cmd,
			     struct bnx2x_vfop_filters *vlans,
			     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	int rc;

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = vlans,
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false, /* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
		if (rc)
			return rc;
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
			atomic_read(filters.credit);

		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}
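
/* The add_cnt budget set above caps how many rules a list op may add on top
 * of what is already configured. Illustrative numbers: with
 * vf_vlan_rules_cnt(vf) == 10 and a current credit of 6 configured rules,
 * add_cnt = 10 - 6 = 4, so the op may grow the vlan list by at most four
 * entries before bnx2x_vfop_config_list() forces a rollback. The MAC list
 * path instead uses the effectively unlimited
 * BNX2X_VFOP_FILTER_ADD_CNT_MAX.
 */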

/* VFOP queue setup (queue constructor + set vlan 0) */
static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qctor.qid;
	enum bnx2x_vfop_qsetup_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_qsetup,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QSETUP_CTOR:
		/* init the queue ctor command */
		vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
		vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QSETUP_VLAN0:
		/* skip if non-leading or FPGA/EMU */
		if (qid)
			goto op_done;

		/* init the queue set-vlan command (for vlan 0) */
		vfop->state = BNX2X_VFOP_QSETUP_DONE;
		vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QSETUP_DONE:
		vf->cfg_flags |= VF_CFG_VLAN;
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
			&bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}

int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qctor.qid = qid;

		bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
				 bnx2x_vfop_qsetup, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qflr_state state = vfop->state;
	struct bnx2x_queue_state_params *qstate;
	struct bnx2x_vfop_cmd cmd;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qflr;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QFLR_CLR_VLAN:
		/* vlan-clear-all: driver-only, don't consume credit */
		vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)))
			vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid,
							      true);
		if (vfop->rc)
			goto op_err;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QFLR_CLR_MAC:
		/* mac-clear-all: driver only, consume credit */
		vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
		if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)))
			vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid,
							     true);
		DP(BNX2X_MSG_IOV,
		   "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d\n",
		   vf->abs_vfid, vfop->rc);
		if (vfop->rc)
			goto op_err;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QFLR_TERMINATE:
		qstate = &vfop->op_p->qctor.qstate;
		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
		vfop->state = BNX2X_VFOP_QFLR_DONE;

		DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
		   vf->abs_vfid, qstate->q_obj->state);

		if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
			qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
			qstate->cmd = BNX2X_Q_CMD_TERMINATE;
			vfop->rc = bnx2x_queue_state_change(bp, qstate);
			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
		} else {
			goto op_done;
		}

op_err:
	BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QFLR_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
			       struct bnx2x_virtf *vf,
			       struct bnx2x_vfop_cmd *cmd,
			       int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qx.qid = qid;
		bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
				 bnx2x_vfop_qflr, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP multi-casts */
static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
	struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
	struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
	enum bnx2x_vfop_mcast_state state = vfop->state;
	int i;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_MCAST_DEL:
		/* clear existing mcasts */
		vfop->state = BNX2X_VFOP_MCAST_ADD;
		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_MCAST_ADD:
		if (raw->check_pending(raw))
			goto op_pending;

		if (args->mc_num) {
			/* update mcast list on the ramrod params */
			INIT_LIST_HEAD(&mcast->mcast_list);
			for (i = 0; i < args->mc_num; i++)
				list_add_tail(&(args->mc[i].link),
					      &mcast->mcast_list);
			/* add new mcasts */
			vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
			vfop->rc = bnx2x_config_mcast(bp, mcast,
						      BNX2X_MCAST_CMD_ADD);
		}
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MCAST_CHK_DONE:
		vfop->rc = raw->check_pending(raw) ? 1 : 0;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
op_done:
	kfree(args->mc);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 bnx2x_mac_addr_t *mcasts,
			 int mcast_num, bool drv_only)
{
	struct bnx2x_vfop *vfop = NULL;
	size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
	struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
						   NULL;

	if (!mc_sz || mc) {
		vfop = bnx2x_vfop_add(bp, vf);
		if (vfop) {
			int i;
			struct bnx2x_mcast_ramrod_params *ramrod =
				&vf->op_params.mcast;

			/* set ramrod params */
			memset(ramrod, 0, sizeof(*ramrod));
			ramrod->mcast_obj = &vf->mcast_obj;
			if (drv_only)
				set_bit(RAMROD_DRV_CLR_ONLY,
					&ramrod->ramrod_flags);

			/* copy mcasts pointers */
			vfop->args.mc_list.mc_num = mcast_num;
			vfop->args.mc_list.mc = mc;
			for (i = 0; i < mcast_num; i++)
				mc[i].mac = mcasts[i];

			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
					 bnx2x_vfop_mcast, cmd->done);
			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
						     cmd->block);
		} else {
			kfree(mc);
		}
	}
	return -ENOMEM;
}
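
/* Multicast updates are two-phased: the current list is always deleted
 * first, then the new list (if any) is added, so a caller replaces rather
 * than edits the set. Passing an empty list is therefore the idiomatic
 * "flush" - e.g. the FLR path below issues, in effect:
 *
 *	bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, true);
 *
 * which runs only the BNX2X_VFOP_MCAST_DEL stage (driver-only, since the
 * device side is being cleaned up after reset).
 */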

/* VFOP rx-mode */
static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
	enum bnx2x_vfop_rxmode_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_RXMODE_CONFIG:
		/* next state */
		vfop->state = BNX2X_VFOP_RXMODE_DONE;

		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
	BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
op_done:
	case BNX2X_VFOP_RXMODE_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid, unsigned long accept_flags)
{
	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_rx_mode_ramrod_params *ramrod =
			&vf->op_params.rx_mode;

		memset(ramrod, 0, sizeof(*ramrod));

		/* Prepare ramrod parameters */
		ramrod->cid = vfq->cid;
		ramrod->cl_id = vfq_cl_id(vf, vfq);
		ramrod->rx_mode_obj = &bp->rx_mode_obj;
		ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);

		ramrod->rx_accept_flags = accept_flags;
		ramrod->tx_accept_flags = accept_flags;
		ramrod->pstate = &vf->filter_state;
		ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

		set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
		set_bit(RAMROD_RX, &ramrod->ramrod_flags);
		set_bit(RAMROD_TX, &ramrod->ramrod_flags);

		ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
		ramrod->rdata_mapping =
			bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);

		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
				 bnx2x_vfop_rxmode, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
 * queue destructor)
 */
static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qteardown_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qdown;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QTEARDOWN_RXMODE:
		/* Drop all */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
		vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
		/* vlan-clear-all: don't consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
		/* mac-clear-all: consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_QDTOR:
		/* run the queue destruction flow */
		DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
		vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
		DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
		vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
		DP(BNX2X_MSG_IOV, "returned from cmd\n");
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);

	case BNX2X_VFOP_QTEARDOWN_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}

int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	/* for non-leading queues skip directly to the qdtor state */
	if (vfop) {
		vfop->args.qx.qid = qid;
		bnx2x_vfop_opset(qid == LEADING_IDX ?
				 BNX2X_VFOP_QTEARDOWN_RXMODE :
				 BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
				 cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
					     cmd->block);
	}

	return -ENOMEM;
}
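
/* Note how the teardown above reuses the rx-mode machinery: accept_flags
 * is a bitmask of BNX2X_ACCEPT_* bits applied to both rx and tx, so
 * passing 0, as BNX2X_VFOP_QTEARDOWN_RXMODE does, installs a "drop all"
 * classification before any filters are removed. A normal runtime
 * configuration would instead pass something like (illustrative only):
 *
 *	unsigned long accept = 0;
 *
 *	__set_bit(BNX2X_ACCEPT_UNICAST, &accept);
 *	__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
 *	bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, LEADING_IDX, accept);
 */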

/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}

static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;
	u32 val;

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	if (vf->cfg_flags & VF_CFG_INT_SIMD)
		val |= IGU_VF_CONF_SINGLE_ISR_EN;
	val &= ~IGU_VF_CONF_PARENT_MASK;
	val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT;	/* parent PF */
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	DP(BNX2X_MSG_IOV,
	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
	   vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
				       false /* VF */);

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
				    IGU_INT_DISABLE, 1);
	}
}

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
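
/* The pretend mechanism used throughout the enable primitives above lets
 * the PF issue GRC accesses as if it were one of its VFs: program the
 * pretend register with the VF's HW handle, perform the accesses, then
 * always restore the PF's own function. The bracket looks like:
 *
 *	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
 *	REG_WR(bp, <some per-function register>, val);
 *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 *
 * Nothing re-arms the pretend automatically, so an early return between
 * the two calls would leave the PF impersonating the VF.
 */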

static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		return false;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}

/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct vf_pf_resc_request *resc = &vf->alloc_resc;
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;

	/* divvy up vlan rules */
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
	resc->num_sbs = vf->sb_count;
}
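
/* Worked example of the vlan divvy above (illustrative pool size): with a
 * 252-entry vlan pool, ilog2(252) == 7, so vlan_count is rounded down to
 * the power of two 128; with 8 VFs each one is then budgeted
 * 128 / 8 = 16 vlan filters. Rounding first keeps the per-VF share itself
 * a power of two whenever the VF count is.
 */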

/* FLR routines: */
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* reset the state variables */
	bnx2x_iov_static_resc(bp, vf);
	vf->state = VF_FREE;
}

static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	/* DQ usage counter */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
					"DQ VF usage counter timed out",
					poll_cnt);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* FW cleanup command - poll for the results */
	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
				   poll_cnt))
		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

	/* verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);
}

static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
	enum bnx2x_vfop_flr_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_flr,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_FLR_QUEUES:
		/* the cleanup operations are valid if and only if the VF
		 * was first acquired.
		 */
		if (++(qx->qid) < vf_rxq_count(vf)) {
			vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
						       qx->qid);
			if (vfop->rc)
				goto op_err;
			return;
		}
		/* remove multicasts */
		vfop->state = BNX2X_VFOP_FLR_HW;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
						0, true);
		if (vfop->rc)
			goto op_err;
		return;
	case BNX2X_VFOP_FLR_HW:

		/* dispatch final cleanup and wait for HW queues to flush */
		bnx2x_vf_flr_clnup_hw(bp, vf);

		/* release VF resources */
		bnx2x_vf_free_resc(bp, vf);

		/* re-open the mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);

		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	vf->flr_clnup_stage = VF_FLR_ACK;
	bnx2x_vfop_end(bp, vf, vfop);
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
}

static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
			      struct bnx2x_virtf *vf,
			      vfop_handler_t done)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		vfop->args.qx.qid = -1;	/* loop */
		bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
				 bnx2x_vfop_flr, done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
	}
	return -ENOMEM;
}

static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
{
	int i = prev_vf ? prev_vf->index + 1 : 0;
	struct bnx2x_virtf *vf;

	/* find next VF to cleanup */
next_vf_to_clean:
	for (;
	     i < BNX2X_NR_VIRTFN(bp) &&
	     (bnx2x_vf(bp, i, state) != VF_RESET ||
	      bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
	     i++)
		;

	DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
	   BNX2X_NR_VIRTFN(bp));

	if (i < BNX2X_NR_VIRTFN(bp)) {
		vf = BP_VF(bp, i);

		/* lock the vf pf channel */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

		/* invoke the VF FLR SM */
		if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
			BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
				  vf->abs_vfid);

			/* mark the VF to be ACKED and continue */
			vf->flr_clnup_stage = VF_FLR_ACK;
			goto next_vf_to_clean;
		}
		return;
	}

	/* we are done, update vf records */
	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);

		if (vf->flr_clnup_stage != VF_FLR_ACK)
			continue;

		vf->flr_clnup_stage = VF_FLR_EPILOG;
	}

	/* Acknowledge the handled VFs.
	 * We acknowledge all the VFs for which an FLR was requested, even
	 * those we never opened, since otherwise the MCP will interrupt us
	 * immediately again if we only ack some of the bits, resulting in an
	 * endless loop. This can happen, for example, in KVM where an
	 * 'all ones' FLR request is sometimes given by the hypervisor
	 */
	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
			  bp->vfdb->flrd_vfs[i]);

	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

	/* clear the acked bits - better yet if the MCP implemented
	 * write to clear semantics
	 */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}

void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
	int i;

	/* Read FLR'd VFs */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

	DP(BNX2X_MSG_MCP,
	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);
		u32 reset = 0;

		if (vf->abs_vfid < 32)
			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
		else
			reset = bp->vfdb->flrd_vfs[1] &
				(1 << (vf->abs_vfid - 32));

		if (reset) {
			/* set as reset and ready for cleanup */
			vf->state = VF_RESET;
			vf->flr_clnup_stage = VF_FLR_CLN;

			DP(BNX2X_MSG_IOV,
			   "Initiating Final cleanup for VF %d\n",
			   vf->abs_vfid);
		}
	}

	/* do the FLR cleanup for all marked VFs */
	bnx2x_vf_flr_clnup(bp, NULL);
}

/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflect the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set VFs starting CID. If it is > 0, the preceding CIDs belong to
	 * the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size  0 - *B, 4 - 128B. We set it here to match
	 * the PF doorbell size although the two are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the number of VF allowed doorbells to the full DQ range */
	REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);

	/* set the VF doorbell threshold */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}
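
/* Worked example of the DQ mapping above (illustrative values, assuming the
 * usual base/window interpretation): with DORQ_REG_VF_NORM_VF_BASE = 0 and
 * a window of BNX2X_VF_CID_WND bits, a doorbell from abs_vfid 3 on its
 * local queue 1 would land on CID
 *
 *	BNX2X_FIRST_VF_CID + (3 << BNX2X_VF_CID_WND) + 1
 *
 * i.e. each VF owns a contiguous 2^BNX2X_VF_CID_WND slice of the CID space
 * starting right after the PF's own L2 CIDs.
 */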

void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}

static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}

static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		size /= iov->total;
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}
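
/* The two helpers above implement the standard SR-IOV routing-ID
 * arithmetic: VF i lives at (PF devfn + offset + stride * i), with any
 * carry past 8 bits spilling into the bus number. Illustrative numbers:
 * a PF at bus 4, devfn 0 with offset 16 and stride 1 puts VF 5 at
 * devfn (0 + 16 + 5) & 0xff = 21 on bus 4 + (21 >> 8) = 4, i.e.
 * device 2, function 5 once the devfn is split 5:3.
 */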
Abort SRIOV\n", 1962 BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID); 1963 return 0; 1964 } 1965 1966 /* SRIOV can be enabled only with MSIX */ 1967 if (int_mode_param == BNX2X_INT_MODE_MSI || 1968 int_mode_param == BNX2X_INT_MODE_INTX) { 1969 BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n"); 1970 return 0; 1971 } 1972 1973 err = -EIO; 1974 /* verify ari is enabled */ 1975 if (!bnx2x_ari_enabled(bp->pdev)) { 1976 BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n"); 1977 return 0; 1978 } 1979 1980 /* verify igu is in normal mode */ 1981 if (CHIP_INT_MODE_IS_BC(bp)) { 1982 BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n"); 1983 return 0; 1984 } 1985 1986 /* allocate the vfs database */ 1987 bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL); 1988 if (!bp->vfdb) { 1989 BNX2X_ERR("failed to allocate vf database\n"); 1990 err = -ENOMEM; 1991 goto failed; 1992 } 1993 1994 /* get the sriov info - Linux already collected all the pertinent 1995 * information, however the sriov structure is for the private use 1996 * of the pci module. Also we want this information regardless 1997 * of the hyper-visor. 1998 */ 1999 iov = &(bp->vfdb->sriov); 2000 err = bnx2x_sriov_info(bp, iov); 2001 if (err) 2002 goto failed; 2003 2004 /* SR-IOV capability was enabled but there are no VFs*/ 2005 if (iov->total == 0) 2006 goto failed; 2007 2008 iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param); 2009 2010 DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n", 2011 num_vfs_param, iov->nr_virtfn); 2012 2013 /* allocate the vf array */ 2014 bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) * 2015 BNX2X_NR_VIRTFN(bp), GFP_KERNEL); 2016 if (!bp->vfdb->vfs) { 2017 BNX2X_ERR("failed to allocate vf array\n"); 2018 err = -ENOMEM; 2019 goto failed; 2020 } 2021 2022 /* Initial VF init - index and abs_vfid - nr_virtfn must be set */ 2023 for_each_vf(bp, i) { 2024 bnx2x_vf(bp, i, index) = i; 2025 bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; 2026 bnx2x_vf(bp, i, state) = VF_FREE; 2027 INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head)); 2028 mutex_init(&bnx2x_vf(bp, i, op_mutex)); 2029 bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; 2030 } 2031 2032 /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ 2033 bnx2x_get_vf_igu_cam_info(bp); 2034 2035 /* allocate the queue arrays for all VFs */ 2036 bp->vfdb->vfqs = kzalloc( 2037 BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue), 2038 GFP_KERNEL); 2039 2040 DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs); 2041 2042 if (!bp->vfdb->vfqs) { 2043 BNX2X_ERR("failed to allocate vf queue array\n"); 2044 err = -ENOMEM; 2045 goto failed; 2046 } 2047 2048 return 0; 2049 failed: 2050 DP(BNX2X_MSG_IOV, "Failed err=%d\n", err); 2051 __bnx2x_iov_free_vfdb(bp); 2052 return err; 2053 } 2054 2055 void bnx2x_iov_remove_one(struct bnx2x *bp) 2056 { 2057 /* if SRIOV is not enabled there's nothing to do */ 2058 if (!IS_SRIOV(bp)) 2059 return; 2060 2061 DP(BNX2X_MSG_IOV, "about to call disable sriov\n"); 2062 pci_disable_sriov(bp->pdev); 2063 DP(BNX2X_MSG_IOV, "sriov disabled\n"); 2064 2065 /* free vf database */ 2066 __bnx2x_iov_free_vfdb(bp); 2067 } 2068 2069 void bnx2x_iov_free_mem(struct bnx2x *bp) 2070 { 2071 int i; 2072 2073 if (!IS_SRIOV(bp)) 2074 return; 2075 2076 /* free vfs hw contexts */ 2077 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 2078 struct hw_dma *cxt = &bp->vfdb->context[i]; 2079 BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size); 2080 } 2081 2082 
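	/* The remaining frees below mirror the allocations made in
	 * bnx2x_iov_alloc_mem(): per-VF slow-path ramrod data (sp_dma),
	 * the VF<->PF mailboxes and the bulletin boards, in that order.
	 */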
BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr, 2083 BP_VFDB(bp)->sp_dma.mapping, 2084 BP_VFDB(bp)->sp_dma.size); 2085 2086 BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr, 2087 BP_VF_MBX_DMA(bp)->mapping, 2088 BP_VF_MBX_DMA(bp)->size); 2089 2090 BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr, 2091 BP_VF_BULLETIN_DMA(bp)->mapping, 2092 BP_VF_BULLETIN_DMA(bp)->size); 2093 } 2094 2095 int bnx2x_iov_alloc_mem(struct bnx2x *bp) 2096 { 2097 size_t tot_size; 2098 int i, rc = 0; 2099 2100 if (!IS_SRIOV(bp)) 2101 return rc; 2102 2103 /* allocate vfs hw contexts */ 2104 tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) * 2105 BNX2X_CIDS_PER_VF * sizeof(union cdu_context); 2106 2107 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 2108 struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i); 2109 cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ); 2110 2111 if (cxt->size) { 2112 BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size); 2113 } else { 2114 cxt->addr = NULL; 2115 cxt->mapping = 0; 2116 } 2117 tot_size -= cxt->size; 2118 } 2119 2120 /* allocate vfs ramrods dma memory - client_init and set_mac */ 2121 tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp); 2122 BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping, 2123 tot_size); 2124 BP_VFDB(bp)->sp_dma.size = tot_size; 2125 2126 /* allocate mailboxes */ 2127 tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; 2128 BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping, 2129 tot_size); 2130 BP_VF_MBX_DMA(bp)->size = tot_size; 2131 2132 /* allocate local bulletin boards */ 2133 tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE; 2134 BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr, 2135 &BP_VF_BULLETIN_DMA(bp)->mapping, tot_size); 2136 BP_VF_BULLETIN_DMA(bp)->size = tot_size; 2137 2138 return 0; 2139 2140 alloc_mem_err: 2141 return -ENOMEM; 2142 } 2143 2144 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, 2145 struct bnx2x_vf_queue *q) 2146 { 2147 u8 cl_id = vfq_cl_id(vf, q); 2148 u8 func_id = FW_VF_HANDLE(vf->abs_vfid); 2149 unsigned long q_type = 0; 2150 2151 set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); 2152 set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); 2153 2154 /* Queue State object */ 2155 bnx2x_init_queue_obj(bp, &q->sp_obj, 2156 cl_id, &q->cid, 1, func_id, 2157 bnx2x_vf_sp(bp, vf, q_data), 2158 bnx2x_vf_sp_map(bp, vf, q_data), 2159 q_type); 2160 2161 DP(BNX2X_MSG_IOV, 2162 "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n", 2163 vf->abs_vfid, q->sp_obj.func_id, q->cid); 2164 } 2165 2166 /* called by bnx2x_nic_load */ 2167 int bnx2x_iov_nic_init(struct bnx2x *bp) 2168 { 2169 int vfid; 2170 2171 if (!IS_SRIOV(bp)) { 2172 DP(BNX2X_MSG_IOV, "vfdb was not allocated\n"); 2173 return 0; 2174 } 2175 2176 DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn); 2177 2178 /* let FLR complete ... 
*/ 2179 msleep(100); 2180 2181 /* initialize vf database */ 2182 for_each_vf(bp, vfid) { 2183 struct bnx2x_virtf *vf = BP_VF(bp, vfid); 2184 2185 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) * 2186 BNX2X_CIDS_PER_VF; 2187 2188 union cdu_context *base_cxt = (union cdu_context *) 2189 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + 2190 (base_vf_cid & (ILT_PAGE_CIDS-1)); 2191 2192 DP(BNX2X_MSG_IOV, 2193 "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n", 2194 vf->abs_vfid, vf_sb_count(vf), base_vf_cid, 2195 BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt); 2196 2197 /* init statically provisioned resources */ 2198 bnx2x_iov_static_resc(bp, vf); 2199 2200 /* queues are initialized during VF-ACQUIRE */ 2201 2202 /* reserve the vf vlan credit */ 2203 bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf)); 2204 2205 vf->filter_state = 0; 2206 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); 2207 2208 /* init mcast object - This object will be re-initialized 2209 * during VF-ACQUIRE with the proper cl_id and cid. 2210 * It needs to be initialized here so that it can be safely 2211 * handled by a subsequent FLR flow. 2212 */ 2213 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF, 2214 0xFF, 0xFF, 0xFF, 2215 bnx2x_vf_sp(bp, vf, mcast_rdata), 2216 bnx2x_vf_sp_map(bp, vf, mcast_rdata), 2217 BNX2X_FILTER_MCAST_PENDING, 2218 &vf->filter_state, 2219 BNX2X_OBJ_TYPE_RX_TX); 2220 2221 /* set the mailbox message addresses */ 2222 BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *) 2223 (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid * 2224 MBX_MSG_ALIGNED_SIZE); 2225 2226 BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping + 2227 vfid * MBX_MSG_ALIGNED_SIZE; 2228 2229 /* Enable vf mailbox */ 2230 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); 2231 } 2232 2233 /* Final VF init */ 2234 for_each_vf(bp, vfid) { 2235 struct bnx2x_virtf *vf = BP_VF(bp, vfid); 2236 2237 /* fill in the BDF and bars */ 2238 vf->bus = bnx2x_vf_bus(bp, vfid); 2239 vf->devfn = bnx2x_vf_devfn(bp, vfid); 2240 bnx2x_vf_set_bars(bp, vf); 2241 2242 DP(BNX2X_MSG_IOV, 2243 "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n", 2244 vf->abs_vfid, vf->bus, vf->devfn, 2245 (unsigned)vf->bars[0].bar, vf->bars[0].size, 2246 (unsigned)vf->bars[1].bar, vf->bars[1].size, 2247 (unsigned)vf->bars[2].bar, vf->bars[2].size); 2248 } 2249 2250 return 0; 2251 } 2252 2253 /* called by bnx2x_chip_cleanup */ 2254 int bnx2x_iov_chip_cleanup(struct bnx2x *bp) 2255 { 2256 int i; 2257 2258 if (!IS_SRIOV(bp)) 2259 return 0; 2260 2261 /* release all the VFs */ 2262 for_each_vf(bp, i) 2263 bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */ 2264 2265 return 0; 2266 } 2267 2268 /* called by bnx2x_init_hw_func, returns the next ilt line */ 2269 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) 2270 { 2271 int i; 2272 struct bnx2x_ilt *ilt = BP_ILT(bp); 2273 2274 if (!IS_SRIOV(bp)) 2275 return line; 2276 2277 /* set vfs ilt lines */ 2278 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 2279 struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i); 2280 2281 ilt->lines[line+i].page = hw_cxt->addr; 2282 ilt->lines[line+i].page_mapping = hw_cxt->mapping; 2283 ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */ 2284 } 2285 return line + i; 2286 } 2287 2288 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid) 2289 { 2290 return ((cid >= BNX2X_FIRST_VF_CID) && 2291 ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS)); 2292 } 2293 2294 static 2295 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, 2296 
struct bnx2x_vf_queue *vfq, 2297 union event_ring_elem *elem) 2298 { 2299 unsigned long ramrod_flags = 0; 2300 int rc = 0; 2301 2302 /* Always push next commands out, don't wait here */ 2303 set_bit(RAMROD_CONT, &ramrod_flags); 2304 2305 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { 2306 case BNX2X_FILTER_MAC_PENDING: 2307 rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, 2308 &ramrod_flags); 2309 break; 2310 case BNX2X_FILTER_VLAN_PENDING: 2311 rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem, 2312 &ramrod_flags); 2313 break; 2314 default: 2315 BNX2X_ERR("Unsupported classification command: %d\n", 2316 elem->message.data.eth_event.echo); 2317 return; 2318 } 2319 if (rc < 0) 2320 BNX2X_ERR("Failed to schedule new commands: %d\n", rc); 2321 else if (rc > 0) 2322 DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n"); 2323 } 2324 2325 static 2326 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp, 2327 struct bnx2x_virtf *vf) 2328 { 2329 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 2330 int rc; 2331 2332 rparam.mcast_obj = &vf->mcast_obj; 2333 vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw); 2334 2335 /* If there are pending mcast commands - send them */ 2336 if (vf->mcast_obj.check_pending(&vf->mcast_obj)) { 2337 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 2338 if (rc < 0) 2339 BNX2X_ERR("Failed to send pending mcast commands: %d\n", 2340 rc); 2341 } 2342 } 2343 2344 static 2345 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, 2346 struct bnx2x_virtf *vf) 2347 { 2348 smp_mb__before_clear_bit(); 2349 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); 2350 smp_mb__after_clear_bit(); 2351 } 2352 2353 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) 2354 { 2355 struct bnx2x_virtf *vf; 2356 int qidx = 0, abs_vfid; 2357 u8 opcode; 2358 u16 cid = 0xffff; 2359 2360 if (!IS_SRIOV(bp)) 2361 return 1; 2362 2363 /* first get the cid - the only events we handle here are cfc-delete 2364 * and set-mac completion 2365 */ 2366 opcode = elem->message.opcode; 2367 2368 switch (opcode) { 2369 case EVENT_RING_OPCODE_CFC_DEL: 2370 cid = SW_CID((__force __le32) 2371 elem->message.data.cfc_del_event.cid); 2372 DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid); 2373 break; 2374 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 2375 case EVENT_RING_OPCODE_MULTICAST_RULES: 2376 case EVENT_RING_OPCODE_FILTERS_RULES: 2377 cid = (elem->message.data.eth_event.echo & 2378 BNX2X_SWCID_MASK); 2379 DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); 2380 break; 2381 case EVENT_RING_OPCODE_VF_FLR: 2382 abs_vfid = elem->message.data.vf_flr_event.vf_id; 2383 DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n", 2384 abs_vfid); 2385 goto get_vf; 2386 case EVENT_RING_OPCODE_MALICIOUS_VF: 2387 abs_vfid = elem->message.data.malicious_vf_event.vf_id; 2388 DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n", 2389 abs_vfid, elem->message.data.malicious_vf_event.err_id); 2390 goto get_vf; 2391 default: 2392 return 1; 2393 } 2394 2395 /* check if the cid is the VF range */ 2396 if (!bnx2x_iov_is_vf_cid(bp, cid)) { 2397 DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid); 2398 return 1; 2399 } 2400 2401 /* extract vf and rxq index from vf_cid - relies on the following: 2402 * 1. vfid on cid reflects the true abs_vfid 2403 * 2. 
The max number of VFs (per path) is 64 2404 */ 2405 qidx = cid & ((1 << BNX2X_VF_CID_WND)-1); 2406 abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 2407 get_vf: 2408 vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 2409 2410 if (!vf) { 2411 BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n", 2412 cid, abs_vfid); 2413 return 0; 2414 } 2415 2416 switch (opcode) { 2417 case EVENT_RING_OPCODE_CFC_DEL: 2418 DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n", 2419 vf->abs_vfid, qidx); 2420 vfq_get(vf, qidx)->sp_obj.complete_cmd(bp, 2421 &vfq_get(vf, 2422 qidx)->sp_obj, 2423 BNX2X_Q_CMD_CFC_DEL); 2424 break; 2425 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 2426 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n", 2427 vf->abs_vfid, qidx); 2428 bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem); 2429 break; 2430 case EVENT_RING_OPCODE_MULTICAST_RULES: 2431 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n", 2432 vf->abs_vfid, qidx); 2433 bnx2x_vf_handle_mcast_eqe(bp, vf); 2434 break; 2435 case EVENT_RING_OPCODE_FILTERS_RULES: 2436 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n", 2437 vf->abs_vfid, qidx); 2438 bnx2x_vf_handle_filters_eqe(bp, vf); 2439 break; 2440 case EVENT_RING_OPCODE_VF_FLR: 2441 DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n", 2442 vf->abs_vfid); 2443 /* Do nothing for now */ 2444 break; 2445 case EVENT_RING_OPCODE_MALICIOUS_VF: 2446 DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n", 2447 abs_vfid, elem->message.data.malicious_vf_event.err_id); 2448 /* Do nothing for now */ 2449 break; 2450 } 2451 /* SRIOV: reschedule any 'in_progress' operations */ 2452 bnx2x_iov_sp_event(bp, cid, false); 2453 2454 return 0; 2455 } 2456 2457 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) 2458 { 2459 /* extract the vf from vf_cid - relies on the following: 2460 * 1. vfid on cid reflects the true abs_vfid 2461 * 2. The max number of VFs (per path) is 64 2462 */ 2463 int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 2464 return bnx2x_vf_by_abs_fid(bp, abs_vfid); 2465 } 2466 2467 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, 2468 struct bnx2x_queue_sp_obj **q_obj) 2469 { 2470 struct bnx2x_virtf *vf; 2471 2472 if (!IS_SRIOV(bp)) 2473 return; 2474 2475 vf = bnx2x_vf_by_cid(bp, vf_cid); 2476 2477 if (vf) { 2478 /* extract queue index from vf_cid - relies on the following: 2479 * 1. vfid on cid reflects the true abs_vfid 2480 * 2. 
The max number of VFs (per path) is 64
		 */
		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
	} else {
		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
	}
}

void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
{
	struct bnx2x_virtf *vf;

	/* check if the cid is the VF range */
	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);
	if (vf) {
		/* set in_progress flag */
		atomic_set(&vf->op_in_progress, 1);
		if (queue_work)
			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
	}
}

void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index, num_queues_req;
	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;
	u8 stats_count = 0;
	bool is_fcoe = false;

	if (!IS_SRIOV(bp))
		return;

	if (!NO_FCOE(bp))
		is_fcoe = true;

	/* fcoe adds one global request and one queue request */
	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
		(is_fcoe ? 0 : 1);

	DP(BNX2X_MSG_IOV,
	   "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
	   BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
	   first_queue_query_index + num_queues_req);

	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
		num_queues_req * sizeof(struct per_queue_stats);

	cur_query_entry = &bp->fw_stats_req->
		query[first_queue_query_index + num_queues_req];

	for_each_vf(bp, i) {
		int j;
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (vf->state != VF_ENABLED) {
			DP(BNX2X_MSG_IOV,
			   "vf %d not enabled so no stats for it\n",
			   vf->abs_vfid);
			continue;
		}

		DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
		for_each_vfq(vf, j) {
			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);

			dma_addr_t q_stats_addr =
				vf->fw_stat_map + j * vf->stats_stride;

			/* collect stats from active queues only */
			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
			    BNX2X_Q_LOGICAL_STATE_STOPPED)
				continue;

			/* create stats query entry for this queue */
			cur_query_entry->kind = STATS_TYPE_QUEUE;
			cur_query_entry->index = vfq_stat_id(vf, rxq);
			cur_query_entry->funcID =
				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
			cur_query_entry->address.hi =
				cpu_to_le32(U64_HI(q_stats_addr));
			cur_query_entry->address.lo =
				cpu_to_le32(U64_LO(q_stats_addr));
			DP(BNX2X_MSG_IOV,
			   "added address %x %x for vf %d queue %d client %d\n",
			   cur_query_entry->address.hi,
			   cur_query_entry->address.lo, cur_query_entry->funcID,
			   j, cur_query_entry->index);
			cur_query_entry++;
			cur_data_offset += sizeof(struct per_queue_stats);
			stats_count++;

			/* all stats are coalesced to the leading queue */
			if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
				break;
		}
	}
	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
}
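/* Note on the surrounding functions: bnx2x_iov_sp_event() (earlier in this
 * file) and bnx2x_iov_sp_task() (below) form a producer/consumer pair.
 * The former only marks a VF operation as runnable; the latter later
 * advances the VFOP state machine from process context. An illustrative
 * (not compiled) sketch of that handshake, assuming a hypothetical caller
 * that has just seen an EQ completion for some VF cid 'vf_cid':
 */
#if 0
	/* producer - EQ/interrupt path: cheap, non-blocking */
	bnx2x_iov_sp_event(bp, vf_cid, true);	/* sets op_in_progress and
						 * schedules bp->sp_task
						 */

	/* consumer - slow-path task, process context */
	bnx2x_iov_sp_task(bp);	/* calls ->transition() for every VF with a
				 * pending op
				 */
#endif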
void bnx2x_iov_sp_task(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;
	/* Iterate over all VFs and invoke state transition for VFs with
	 * 'in-progress' slow-path operations
	 */
	DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (!vf) {
			BNX2X_ERR("VF was null! skipping...\n");
			continue;
		}

		if (!list_empty(&vf->op_list_head) &&
		    atomic_read(&vf->op_in_progress)) {
			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
			bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
		}
	}
}

static inline
struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
{
	int i;
	struct bnx2x_virtf *vf = NULL;

	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);
		if (stat_id >= vf->igu_base_id &&
		    stat_id < vf->igu_base_id + vf_sb_count(vf))
			break;
	}
	return vf;
}

/* VF API helpers */
static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
				u8 enable)
{
	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;

	REG_WR(bp, reg, val);
}

static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;

	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), false);
}

static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 val;

	/* clear the VF configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
		 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
		     BNX2X_VF_MAX_QUEUES);
}

static
int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
			    struct vf_pf_resc_request *req_resc)
{
	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);

	return ((req_resc->num_rxqs <= rxq_cnt) &&
		(req_resc->num_txqs <= txq_cnt) &&
		(req_resc->num_sbs <= vf_sb_count(vf)) &&
		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
}
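/* Illustrative (not compiled) use of the availability check above, in the
 * same way the acquire flow below applies it. The request numbers here are
 * made up; note that a queue count of 0 in a real request means "maximum",
 * which bnx2x_vf_acquire() resolves via bnx2x_vf_max_queue_cnt().
 */
#if 0
	struct vf_pf_resc_request req = {
		.num_rxqs	  = 2,
		.num_txqs	  = 2,
		.num_sbs	  = 2,
		.num_mac_filters  = 1,
		.num_vlan_filters = 1,
	};

	if (!bnx2x_vf_chk_avail_resc(bp, vf, &req))
		return -ENOMEM;	/* request exceeds this VF's maximums */
#endif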
/* CORE VF API */
int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
		     struct vf_pf_resc_request *resc)
{
	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
		BNX2X_CIDS_PER_VF;

	union cdu_context *base_cxt = (union cdu_context *)
		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
		(base_vf_cid & (ILT_PAGE_CIDS-1));
	int i;

	/* if state is 'acquired' the VF was not released or FLR'd, in
	 * this case the returned resources match the already acquired
	 * resources. Verify that the requested numbers do not exceed the
	 * already acquired numbers.
	 */
	if (vf->state == VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
		   vf->abs_vfid);

		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= previously acquired numbers\n",
				  vf->abs_vfid);
			return -EINVAL;
		}
		return 0;
	}

	/* Otherwise vf state must be 'free' or 'reset' */
	if (vf->state != VF_FREE && vf->state != VF_RESET) {
		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
			  vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* static allocation:
	 * the global maximum numbers are fixed per VF. Fail the request if
	 * the requested numbers exceed these globals
	 */
	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
		DP(BNX2X_MSG_IOV,
		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
		/* set the max resource in the vf */
		return -ENOMEM;
	}

	/* Set resources counters - 0 request means max available */
	vf_sb_count(vf) = resc->num_sbs;
	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	if (resc->num_mac_filters)
		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
	if (resc->num_vlan_filters)
		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;

	DP(BNX2X_MSG_IOV,
	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
	   vf_sb_count(vf), vf_rxq_count(vf),
	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
	   vf_vlan_rules_cnt(vf));

	/* Initialize the queues */
	if (!vf->vfqs) {
		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
		return -EINVAL;
	}

	for_each_vfq(vf, i) {
		struct bnx2x_vf_queue *q = vfq_get(vf, i);

		if (!q) {
			BNX2X_ERR("q number %d was not allocated\n", i);
			return -EINVAL;
		}

		q->index = i;
		q->cxt = &((base_cxt + i)->eth);
		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;

		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
		   vf->abs_vfid, i, q->index, q->cid, q->cxt);

		/* init SP objects */
		bnx2x_vfq_init(bp, vf, q);
	}
	vf->state = VF_ACQUIRED;
	return 0;
}

int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
{
	struct bnx2x_func_init_params func_init = {0};
	u16 flags = 0;
	int i;

	/* the sb resources are initialized at this point, do the
	 * FW/HW initializations
	 */
	for_each_vf_sb(vf, i)
		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));

	/* Sanity checks */
	if (vf->state != VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
		   vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* let FLR complete ... 
*/ 2791 msleep(100); 2792 2793 /* FLR cleanup epilogue */ 2794 if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid)) 2795 return -EBUSY; 2796 2797 /* reset IGU VF statistics: MSIX */ 2798 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0); 2799 2800 /* vf init */ 2801 if (vf->cfg_flags & VF_CFG_STATS) 2802 flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ); 2803 2804 if (vf->cfg_flags & VF_CFG_TPA) 2805 flags |= FUNC_FLG_TPA; 2806 2807 if (is_vf_multi(vf)) 2808 flags |= FUNC_FLG_RSS; 2809 2810 /* function setup */ 2811 func_init.func_flgs = flags; 2812 func_init.pf_id = BP_FUNC(bp); 2813 func_init.func_id = FW_VF_HANDLE(vf->abs_vfid); 2814 func_init.fw_stat_map = vf->fw_stat_map; 2815 func_init.spq_map = vf->spq_map; 2816 func_init.spq_prod = 0; 2817 bnx2x_func_init(bp, &func_init); 2818 2819 /* Enable the vf */ 2820 bnx2x_vf_enable_access(bp, vf->abs_vfid); 2821 bnx2x_vf_enable_traffic(bp, vf); 2822 2823 /* queue protection table */ 2824 for_each_vfq(vf, i) 2825 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, 2826 vfq_qzone_id(vf, vfq_get(vf, i)), true); 2827 2828 vf->state = VF_ENABLED; 2829 2830 /* update vf bulletin board */ 2831 bnx2x_post_vf_bulletin(bp, vf->index); 2832 2833 return 0; 2834 } 2835 2836 /* VFOP close (teardown the queues, delete mcasts and close HW) */ 2837 static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) 2838 { 2839 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 2840 struct bnx2x_vfop_args_qx *qx = &vfop->args.qx; 2841 enum bnx2x_vfop_close_state state = vfop->state; 2842 struct bnx2x_vfop_cmd cmd = { 2843 .done = bnx2x_vfop_close, 2844 .block = false, 2845 }; 2846 2847 if (vfop->rc < 0) 2848 goto op_err; 2849 2850 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); 2851 2852 switch (state) { 2853 case BNX2X_VFOP_CLOSE_QUEUES: 2854 2855 if (++(qx->qid) < vf_rxq_count(vf)) { 2856 vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid); 2857 if (vfop->rc) 2858 goto op_err; 2859 return; 2860 } 2861 2862 /* remove multicasts */ 2863 vfop->state = BNX2X_VFOP_CLOSE_HW; 2864 vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false); 2865 if (vfop->rc) 2866 goto op_err; 2867 return; 2868 2869 case BNX2X_VFOP_CLOSE_HW: 2870 2871 /* disable the interrupts */ 2872 DP(BNX2X_MSG_IOV, "disabling igu\n"); 2873 bnx2x_vf_igu_disable(bp, vf); 2874 2875 /* disable the VF */ 2876 DP(BNX2X_MSG_IOV, "clearing qtbl\n"); 2877 bnx2x_vf_clr_qtbl(bp, vf); 2878 2879 goto op_done; 2880 default: 2881 bnx2x_vfop_default(state); 2882 } 2883 op_err: 2884 BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc); 2885 op_done: 2886 vf->state = VF_ACQUIRED; 2887 DP(BNX2X_MSG_IOV, "set state to acquired\n"); 2888 bnx2x_vfop_end(bp, vf, vfop); 2889 } 2890 2891 int bnx2x_vfop_close_cmd(struct bnx2x *bp, 2892 struct bnx2x_virtf *vf, 2893 struct bnx2x_vfop_cmd *cmd) 2894 { 2895 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 2896 if (vfop) { 2897 vfop->args.qx.qid = -1; /* loop */ 2898 bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES, 2899 bnx2x_vfop_close, cmd->done); 2900 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close, 2901 cmd->block); 2902 } 2903 return -ENOMEM; 2904 } 2905 2906 /* VF release can be called either: 1. The VF was acquired but 2907 * not enabled 2. 
the vf was enabled or in the process of being 2908 * enabled 2909 */ 2910 static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf) 2911 { 2912 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 2913 struct bnx2x_vfop_cmd cmd = { 2914 .done = bnx2x_vfop_release, 2915 .block = false, 2916 }; 2917 2918 DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); 2919 2920 if (vfop->rc < 0) 2921 goto op_err; 2922 2923 DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid, 2924 vf->state == VF_FREE ? "Free" : 2925 vf->state == VF_ACQUIRED ? "Acquired" : 2926 vf->state == VF_ENABLED ? "Enabled" : 2927 vf->state == VF_RESET ? "Reset" : 2928 "Unknown"); 2929 2930 switch (vf->state) { 2931 case VF_ENABLED: 2932 vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd); 2933 if (vfop->rc) 2934 goto op_err; 2935 return; 2936 2937 case VF_ACQUIRED: 2938 DP(BNX2X_MSG_IOV, "about to free resources\n"); 2939 bnx2x_vf_free_resc(bp, vf); 2940 DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); 2941 goto op_done; 2942 2943 case VF_FREE: 2944 case VF_RESET: 2945 /* do nothing */ 2946 goto op_done; 2947 default: 2948 bnx2x_vfop_default(vf->state); 2949 } 2950 op_err: 2951 BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc); 2952 op_done: 2953 bnx2x_vfop_end(bp, vf, vfop); 2954 } 2955 2956 static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf) 2957 { 2958 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 2959 enum bnx2x_vfop_rss_state state; 2960 2961 if (!vfop) { 2962 BNX2X_ERR("vfop was null\n"); 2963 return; 2964 } 2965 2966 state = vfop->state; 2967 bnx2x_vfop_reset_wq(vf); 2968 2969 if (vfop->rc < 0) 2970 goto op_err; 2971 2972 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); 2973 2974 switch (state) { 2975 case BNX2X_VFOP_RSS_CONFIG: 2976 /* next state */ 2977 vfop->state = BNX2X_VFOP_RSS_DONE; 2978 bnx2x_config_rss(bp, &vfop->op_p->rss); 2979 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); 2980 op_err: 2981 BNX2X_ERR("RSS error: rc %d\n", vfop->rc); 2982 op_done: 2983 case BNX2X_VFOP_RSS_DONE: 2984 bnx2x_vfop_end(bp, vf, vfop); 2985 return; 2986 default: 2987 bnx2x_vfop_default(state); 2988 } 2989 op_pending: 2990 return; 2991 } 2992 2993 int bnx2x_vfop_release_cmd(struct bnx2x *bp, 2994 struct bnx2x_virtf *vf, 2995 struct bnx2x_vfop_cmd *cmd) 2996 { 2997 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 2998 if (vfop) { 2999 bnx2x_vfop_opset(-1, /* use vf->state */ 3000 bnx2x_vfop_release, cmd->done); 3001 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release, 3002 cmd->block); 3003 } 3004 return -ENOMEM; 3005 } 3006 3007 int bnx2x_vfop_rss_cmd(struct bnx2x *bp, 3008 struct bnx2x_virtf *vf, 3009 struct bnx2x_vfop_cmd *cmd) 3010 { 3011 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 3012 3013 if (vfop) { 3014 bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss, 3015 cmd->done); 3016 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss, 3017 cmd->block); 3018 } 3019 return -ENOMEM; 3020 } 3021 3022 /* VF release ~ VF close + VF release-resources 3023 * Release is the ultimate SW shutdown and is called whenever an 3024 * irrecoverable error is encountered. 
 */
void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
{
	struct bnx2x_vfop_cmd cmd = {
		.done = NULL,
		.block = block,
	};
	int rc;

	DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);

	rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
	if (rc)
		WARN(rc,
		     "VF[%d] Failed to allocate resources for release op - rc=%d\n",
		     vf->abs_vfid, rc);
}

static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
				     struct bnx2x_virtf *vf, u32 *sbdf)
{
	*sbdf = vf->devfn | (vf->bus << 8);
}

static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
				     struct bnx2x_vf_bar_info *bar_info)
{
	int n;

	bar_info->nr_bars = bp->vfdb->sriov.nres;
	for (n = 0; n < bar_info->nr_bars; n++)
		bar_info->bars[n] = vf->bars[n];
}

void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      enum channel_tlvs tlv)
{
	/* we don't lock the channel for unsupported tlvs */
	if (!bnx2x_tlv_supported(tlv)) {
		BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
		return;
	}

	/* lock the channel */
	mutex_lock(&vf->op_mutex);

	/* record the locking op */
	vf->op_current = tlv;

	/* log the lock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
	   vf->abs_vfid, tlv);
}

void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
				enum channel_tlvs expected_tlv)
{
	enum channel_tlvs current_tlv;

	if (!vf) {
		BNX2X_ERR("VF was %p\n", vf);
		return;
	}

	current_tlv = vf->op_current;

	/* we don't unlock the channel for unsupported tlvs */
	if (!bnx2x_tlv_supported(expected_tlv))
		return;

	WARN(expected_tlv != vf->op_current,
	     "lock mismatch: expected %d found %d", expected_tlv,
	     vf->op_current);

	/* clear the locking op */
	vf->op_current = CHANNEL_TLV_NONE;

	/* unlock the channel */
	mutex_unlock(&vf->op_mutex);

	/* log the unlock - use the tlv saved above, op_current is cleared */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
	   vf->abs_vfid, current_tlv);
}

int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));

	DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
	   num_vfs_param, BNX2X_NR_VIRTFN(bp));

	/* HW channel is only operational when PF is up */
	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
		return -EINVAL;
	}

	/* we are always bound by the total_vfs in the configuration space */
	if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
			  num_vfs_param, BNX2X_NR_VIRTFN(bp));
		num_vfs_param = BNX2X_NR_VIRTFN(bp);
	}

	bp->requested_nr_virtfn = num_vfs_param;
	if (num_vfs_param == 0) {
		pci_disable_sriov(dev);
		return 0;
	} else {
		return bnx2x_enable_sriov(bp);
	}
}

#define IGU_ENTRY_SIZE 4

int bnx2x_enable_sriov(struct bnx2x *bp)
{
	int rc = 0, req_vfs = bp->requested_nr_virtfn;
	int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
	u32 igu_entry, address;
	u16 
num_vf_queues; 3147 3148 if (req_vfs == 0) 3149 return 0; 3150 3151 first_vf = bp->vfdb->sriov.first_vf_in_pf; 3152 3153 /* statically distribute vf sb pool between VFs */ 3154 num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES, 3155 BP_VFDB(bp)->vf_sbs_pool / req_vfs); 3156 3157 /* zero previous values learned from igu cam */ 3158 for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) { 3159 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); 3160 3161 vf->sb_count = 0; 3162 vf_sb_count(BP_VF(bp, vf_idx)) = 0; 3163 } 3164 bp->vfdb->vf_sbs_pool = 0; 3165 3166 /* prepare IGU cam */ 3167 sb_idx = BP_VFDB(bp)->first_vf_igu_entry; 3168 address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE; 3169 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { 3170 for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) { 3171 igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT | 3172 vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT | 3173 IGU_REG_MAPPING_MEMORY_VALID; 3174 DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n", 3175 sb_idx, vf_idx); 3176 REG_WR(bp, address, igu_entry); 3177 sb_idx++; 3178 address += IGU_ENTRY_SIZE; 3179 } 3180 } 3181 3182 /* Reinitialize vf database according to igu cam */ 3183 bnx2x_get_vf_igu_cam_info(bp); 3184 3185 DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n", 3186 BP_VFDB(bp)->vf_sbs_pool, num_vf_queues); 3187 3188 qcount = 0; 3189 for_each_vf(bp, vf_idx) { 3190 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); 3191 3192 /* set local queue arrays */ 3193 vf->vfqs = &bp->vfdb->vfqs[qcount]; 3194 qcount += vf_sb_count(vf); 3195 } 3196 3197 /* prepare msix vectors in VF configuration space */ 3198 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { 3199 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); 3200 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, 3201 num_vf_queues); 3202 } 3203 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 3204 3205 /* enable sriov. This will probe all the VFs, and consequentially cause 3206 * the "acquire" messages to appear on the VF PF channel. 3207 */ 3208 DP(BNX2X_MSG_IOV, "about to call enable sriov\n"); 3209 pci_disable_sriov(bp->pdev); 3210 rc = pci_enable_sriov(bp->pdev, req_vfs); 3211 if (rc) { 3212 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); 3213 return rc; 3214 } 3215 DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs); 3216 return req_vfs; 3217 } 3218 3219 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) 3220 { 3221 int vfidx; 3222 struct pf_vf_bulletin_content *bulletin; 3223 3224 DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n"); 3225 for_each_vf(bp, vfidx) { 3226 bulletin = BP_VF_BULLETIN(bp, vfidx); 3227 if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN) 3228 bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0); 3229 } 3230 } 3231 3232 void bnx2x_disable_sriov(struct bnx2x *bp) 3233 { 3234 pci_disable_sriov(bp->pdev); 3235 } 3236 3237 int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf, 3238 struct pf_vf_bulletin_content **bulletin) 3239 { 3240 if (bp->state != BNX2X_STATE_OPEN) { 3241 BNX2X_ERR("vf ndo called though PF is down\n"); 3242 return -EINVAL; 3243 } 3244 3245 if (!IS_SRIOV(bp)) { 3246 BNX2X_ERR("vf ndo called though sriov is disabled\n"); 3247 return -EINVAL; 3248 } 3249 3250 if (vfidx >= BNX2X_NR_VIRTFN(bp)) { 3251 BNX2X_ERR("vf ndo called for uninitialized VF. 
vfidx was %d BNX2X_NR_VIRTFN was %d\n", 3252 vfidx, BNX2X_NR_VIRTFN(bp)); 3253 return -EINVAL; 3254 } 3255 3256 /* init members */ 3257 *vf = BP_VF(bp, vfidx); 3258 *bulletin = BP_VF_BULLETIN(bp, vfidx); 3259 3260 if (!*vf) { 3261 BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n", 3262 vfidx); 3263 return -EINVAL; 3264 } 3265 3266 if (!(*vf)->vfqs) { 3267 BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n", 3268 vfidx); 3269 return -EINVAL; 3270 } 3271 3272 if (!*bulletin) { 3273 BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n", 3274 vfidx); 3275 return -EINVAL; 3276 } 3277 3278 return 0; 3279 } 3280 3281 int bnx2x_get_vf_config(struct net_device *dev, int vfidx, 3282 struct ifla_vf_info *ivi) 3283 { 3284 struct bnx2x *bp = netdev_priv(dev); 3285 struct bnx2x_virtf *vf = NULL; 3286 struct pf_vf_bulletin_content *bulletin = NULL; 3287 struct bnx2x_vlan_mac_obj *mac_obj; 3288 struct bnx2x_vlan_mac_obj *vlan_obj; 3289 int rc; 3290 3291 /* sanity and init */ 3292 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 3293 if (rc) 3294 return rc; 3295 mac_obj = &bnx2x_leading_vfq(vf, mac_obj); 3296 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); 3297 if (!mac_obj || !vlan_obj) { 3298 BNX2X_ERR("VF partially initialized\n"); 3299 return -EINVAL; 3300 } 3301 3302 ivi->vf = vfidx; 3303 ivi->qos = 0; 3304 ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */ 3305 ivi->spoofchk = 1; /*always enabled */ 3306 if (vf->state == VF_ENABLED) { 3307 /* mac and vlan are in vlan_mac objects */ 3308 if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj))) 3309 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, 3310 0, ETH_ALEN); 3311 if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj))) 3312 vlan_obj->get_n_elements(bp, vlan_obj, 1, 3313 (u8 *)&ivi->vlan, 0, 3314 VLAN_HLEN); 3315 } else { 3316 /* mac */ 3317 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) 3318 /* mac configured by ndo so its in bulletin board */ 3319 memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); 3320 else 3321 /* function has not been loaded yet. Show mac as 0s */ 3322 memset(&ivi->mac, 0, ETH_ALEN); 3323 3324 /* vlan */ 3325 if (bulletin->valid_bitmap & (1 << VLAN_VALID)) 3326 /* vlan configured by ndo so its in bulletin board */ 3327 memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN); 3328 else 3329 /* function has not been loaded yet. Show vlans as 0s */ 3330 memset(&ivi->vlan, 0, VLAN_HLEN); 3331 } 3332 3333 return 0; 3334 } 3335 3336 /* New mac for VF. Consider these cases: 3337 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and 3338 * supply at acquire. 3339 * 2. VF has already been acquired but has not yet initialized - store in local 3340 * bulletin board. mac will be posted on VF bulletin board after VF init. VF 3341 * will configure this mac when it is ready. 3342 * 3. VF has already initialized but has not yet setup a queue - post the new 3343 * mac on VF's bulletin board right now. VF will configure this mac when it 3344 * is ready. 3345 * 4. VF has already set a queue - delete any macs already configured for this 3346 * queue and manually config the new mac. 3347 * In any event, once this function has been called refuse any attempts by the 3348 * VF to configure any mac for itself except for this mac. 
In case of a race
 * where the VF fails to see the new post on its bulletin board before sending a
 * mac configuration request, the PF will simply fail the request and the VF can
 * try again after consulting its bulletin board.
 */
int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc, q_logical_state;
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;

	/* sanity and init */
	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
	if (rc)
		return rc;
	if (!is_valid_ether_addr(mac)) {
		BNX2X_ERR("mac address invalid\n");
		return -EINVAL;
	}

	/* update PF's copy of the VF's bulletin. Will no longer accept mac
	 * configuration requests from the vf unless they match this mac
	 */
	bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
	memcpy(bulletin->mac, mac, ETH_ALEN);

	/* Post update on VF's bulletin board */
	rc = bnx2x_post_vf_bulletin(bp, vfidx);
	if (rc) {
		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
		return rc;
	}

	q_logical_state =
		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
	if (vf->state == VF_ENABLED &&
	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		/* configure the mac in device on this vf's queue */
		unsigned long ramrod_flags = 0;
		struct bnx2x_vlan_mac_obj *mac_obj =
			&bnx2x_leading_vfq(vf, mac_obj);

		rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
		if (rc)
			return rc;

		/* must lock vfpf channel to protect against vf flows */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);

		/* remove existing eth macs */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete eth macs\n");
			return -EINVAL;
		}

		/* remove existing uc list macs */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete uc_list macs\n");
			return -EINVAL;
		}

		/* configure the new mac to device */
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
				  BNX2X_ETH_MAC, &ramrod_flags);

		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
	}

	return 0;
}
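/* Both ndo implementations here are normally reached from userspace via
 * iproute2 on the PF's netdev; illustrative command lines (interface name
 * and addresses are examples only):
 *
 *   ip link set <pf-ifname> vf 0 mac 00:11:22:33:44:55
 *   ip link set <pf-ifname> vf 0 vlan 100
 *
 * The rtnetlink core turns these into ndo_set_vf_mac()/ndo_set_vf_vlan()
 * calls, which land in the functions above and below.
 */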
int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc, q_logical_state;
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;

	/* sanity and init */
	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
	if (rc)
		return rc;

	if (vlan > 4095) {
		BNX2X_ERR("illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
	   vfidx, vlan, 0);

	/* update PF's copy of the VF's bulletin. No point in posting the vlan
	 * to the VF since it doesn't have anything to do with it. But it is
	 * useful to store it here in case the VF is not up yet, so that we
	 * can configure the vlan later when it is.
	 */
	bulletin->valid_bitmap |= 1 << VLAN_VALID;
	bulletin->vlan = vlan;

	/* is vf initialized and queue set up? */
	q_logical_state =
		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
	if (vf->state == VF_ENABLED &&
	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		/* configure the vlan in device on this vf's queue */
		unsigned long ramrod_flags = 0;
		unsigned long vlan_mac_flags = 0;
		struct bnx2x_vlan_mac_obj *vlan_obj =
			&bnx2x_leading_vfq(vf, vlan_obj);
		struct bnx2x_vlan_mac_ramrod_params ramrod_param;
		struct bnx2x_queue_state_params q_params = {NULL};
		struct bnx2x_queue_update_params *update_params;

		rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
		if (rc)
			return rc;
		memset(&ramrod_param, 0, sizeof(ramrod_param));

		/* must lock vfpf channel to protect against vf flows */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);

		/* remove existing vlans */
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
					  &ramrod_flags);
		if (rc) {
			BNX2X_ERR("failed to delete vlans\n");
			return -EINVAL;
		}

		/* send queue update ramrod to configure default vlan and
		 * silent vlan removal
		 */
		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
		q_params.cmd = BNX2X_Q_CMD_UPDATE;
		q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
		update_params = &q_params.params.update;
		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
			  &update_params->update_flags);
		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
			  &update_params->update_flags);

		if (vlan == 0) {
			/* if vlan is 0 then we want to leave the VF traffic
			 * untagged, and leave the incoming traffic untouched
			 * (i.e. do not remove any vlan tags).
			 */
			__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
				    &update_params->update_flags);
			__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
				    &update_params->update_flags);
		} else {
			/* configure the new vlan to device */
			__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
			ramrod_param.vlan_mac_obj = vlan_obj;
			ramrod_param.ramrod_flags = ramrod_flags;
			ramrod_param.user_req.u.vlan.vlan = vlan;
			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
			rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
			if (rc) {
				BNX2X_ERR("failed to configure vlan\n");
				return -EINVAL;
			}

			/* configure default vlan to vf queue and set silent
			 * vlan removal (the vf remains unaware of this vlan).
			 */
			update_params = &q_params.params.update;
			__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
				  &update_params->update_flags);
			__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
				  &update_params->update_flags);
			update_params->def_vlan = vlan;
		}

		/* Update the Queue state */
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to configure default VLAN\n");
			return rc;
		}

		/* clear the flag indicating that this VF needs its vlan
		 * (will only be set if the HV configured the vlan before the
		 * vf was up, and we were called because the VF came up later)
		 */
		vf->cfg_flags &= ~VF_CFG_VLAN;

		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
	}
	return 0;
}
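/* The bulletin board handled below is the one-way PF-to-VF channel used by
 * the setters above. An illustrative (not compiled) sketch of the two
 * halves, assuming 'bulletin' points at the PF-side copy for some VF:
 */
#if 0
	/* PF side: seal and publish (bnx2x_post_vf_bulletin(), defined
	 * elsewhere in the driver, does this for real)
	 */
	bulletin->crc = bnx2x_crc_vf_bulletin(bp, bulletin);

	/* VF side: poll for a new, crc-valid post */
	if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED)
		; /* pick up the new mac/vlan from bp->old_bulletin */
#endif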
/* crc is the first field in the bulletin board. Compute the crc over the
 * entire bulletin board excluding the crc field itself. Use the length field
 * as the Bulletin Board was posted by a PF with possibly a different version
 * from the vf which will sample it. Therefore, the length is computed by the
 * PF and then used blindly by the VF.
 */
u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
			  struct pf_vf_bulletin_content *bulletin)
{
	return crc32(BULLETIN_CRC_SEED,
		     ((u8 *)bulletin) + sizeof(bulletin->crc),
		     bulletin->length - sizeof(bulletin->crc));
}

/* Check for new posts on the bulletin board */
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
{
	struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
	int attempts;

	/* bulletin board hasn't changed since last sample */
	if (bp->old_bulletin.version == bulletin.version)
		return PFVF_BULLETIN_UNCHANGED;

	/* validate crc of new bulletin board */
	if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
		/* sampling the structure mid-post may result in corrupted
		 * data; validate crc to ensure coherency.
		 */
		for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
			bulletin = bp->pf2vf_bulletin->content;
			if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
								  &bulletin))
				break;
			BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
				  bulletin.crc,
				  bnx2x_crc_vf_bulletin(bp, &bulletin));
		}
		if (attempts >= BULLETIN_ATTEMPTS) {
			BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
				  attempts);
			return PFVF_BULLETIN_CRC_ERR;
		}
	}

	/* the mac address in bulletin board is valid and is new */
	if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
	    memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
		/* update new mac to net device */
		memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
	}

	/* the vlan in bulletin board is valid and is new */
	if (bulletin.valid_bitmap & 1 << VLAN_VALID)
		memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);

	/* copy new bulletin board to bp */
	bp->old_bulletin = bulletin;

	return PFVF_BULLETIN_UPDATED;
}

void bnx2x_timer_sriov(struct bnx2x *bp)
{
	bnx2x_sample_bulletin(bp);

	/* if channel is down we need to self destruct */
	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
			&bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
	}
}

void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
{
	/* vf doorbells are embedded within the regview */
	return bp->regview + PXP_VF_ADDR_DB_START;
}

int bnx2x_vf_pci_alloc(struct bnx2x *bp)
{
	mutex_init(&bp->vf2pf_mutex);

	/* allocate vf2pf mailbox for vf to pf channel */
	BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
			sizeof(struct bnx2x_vf_mbx_msg));

	/* allocate pf 2 vf bulletin board */
	BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
			sizeof(union pf_vf_bulletin));

	return 0;

alloc_mem_err:
	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
		       sizeof(struct bnx2x_vf_mbx_msg));
	BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
		       sizeof(union pf_vf_bulletin));
	return -ENOMEM;
}
int bnx2x_open_epilog(struct bnx2x *bp)
{
	/* Enable sriov via delayed work. This must be done via delayed work
	 * because it causes the probe of the vf devices to be run, which
	 * invokes register_netdevice, which must have the rtnl lock taken.
	 * As we are holding the lock right now, that could only work if the
	 * probe would not take the lock. However, as the probe of the vf may
	 * be called from other contexts as well (such as when passthrough to
	 * a vm fails) it can't assume the lock is being held for it. Using
	 * delayed work here allows the probe code to simply take the lock
	 * (i.e. wait for it to be released if it is being held). We only want
	 * to do this if the number of VFs was set before the PF driver was
	 * loaded.
	 */
	if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
	}

	return 0;
}

void bnx2x_iov_channel_down(struct bnx2x *bp)
{
	int vf_idx;
	struct pf_vf_bulletin_content *bulletin;

	if (!IS_SRIOV(bp))
		return;

	for_each_vf(bp, vf_idx) {
		/* locate this VF's bulletin board and update the channel down
		 * bit
		 */
		bulletin = BP_VF_BULLETIN(bp, vf_idx);
		bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;

		/* update vf bulletin board */
		bnx2x_post_vf_bulletin(bp, vf_idx);
	}
}
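/* Closing notes:
 *
 * 1) The CHANNEL_DOWN bit published by bnx2x_iov_channel_down() above is
 *    consumed on the VF side by bnx2x_timer_sriov() earlier in this file:
 *    the VF samples the bulletin, sees the bit in old_bulletin.valid_bitmap
 *    and schedules BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN to tear itself down.
 *
 * 2) Worked example (hypothetical config space values) of the VF BDF
 *    arithmetic in bnx2x_vf_bus()/bnx2x_vf_devfn() near the top of this
 *    file: PF at bus 0x04 devfn 0x00, VF offset 0x80, stride 2. For vfid 3
 *    the routing id is devfn + offset + stride * vfid = 0x86, so
 *    bus = 0x04 + (0x86 >> 8) = 0x04 and devfn = 0x86 & 0xff = 0x86,
 *    i.e. device 0x10, function 6.
 */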