/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>

/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}
static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
				u8 update)
{
	/* acking a VF sb through the PF - use the GRC */
	u32 ctl;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT		|
	      func_encode << IGU_CTRL_REG_FID_SHIFT		|
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
	mmiowb();
	barrier();

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();
}

/* VFOP - VF slow-path operation support */

#define BNX2X_VFOP_FILTER_ADD_CNT_MAX		0x10000

/* VFOP operations states */
enum bnx2x_vfop_qctor_state {
	BNX2X_VFOP_QCTOR_INIT,
	BNX2X_VFOP_QCTOR_SETUP,
	BNX2X_VFOP_QCTOR_INT_EN
};

enum bnx2x_vfop_qdtor_state {
	BNX2X_VFOP_QDTOR_HALT,
	BNX2X_VFOP_QDTOR_TERMINATE,
	BNX2X_VFOP_QDTOR_CFCDEL,
	BNX2X_VFOP_QDTOR_DONE
};

enum bnx2x_vfop_vlan_mac_state {
	BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
	BNX2X_VFOP_VLAN_MAC_CLEAR,
	BNX2X_VFOP_VLAN_MAC_CHK_DONE,
	BNX2X_VFOP_MAC_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST_0
};

enum bnx2x_vfop_qsetup_state {
	BNX2X_VFOP_QSETUP_CTOR,
	BNX2X_VFOP_QSETUP_VLAN0,
	BNX2X_VFOP_QSETUP_DONE
};

enum bnx2x_vfop_mcast_state {
	BNX2X_VFOP_MCAST_DEL,
	BNX2X_VFOP_MCAST_ADD,
	BNX2X_VFOP_MCAST_CHK_DONE
};

enum bnx2x_vfop_qflr_state {
	BNX2X_VFOP_QFLR_CLR_VLAN,
	BNX2X_VFOP_QFLR_CLR_MAC,
	BNX2X_VFOP_QFLR_TERMINATE,
	BNX2X_VFOP_QFLR_DONE
};

enum bnx2x_vfop_flr_state {
	BNX2X_VFOP_FLR_QUEUES,
	BNX2X_VFOP_FLR_HW
};

enum bnx2x_vfop_close_state {
	BNX2X_VFOP_CLOSE_QUEUES,
	BNX2X_VFOP_CLOSE_HW
};

enum bnx2x_vfop_rxmode_state {
	BNX2X_VFOP_RXMODE_CONFIG,
	BNX2X_VFOP_RXMODE_DONE
};

enum bnx2x_vfop_qteardown_state {
	BNX2X_VFOP_QTEARDOWN_RXMODE,
	BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
	BNX2X_VFOP_QTEARDOWN_CLR_MAC,
	BNX2X_VFOP_QTEARDOWN_QDTOR,
	BNX2X_VFOP_QTEARDOWN_DONE
};

#define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0)
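
/* A note on the VFOP handlers below (a sketch of the pattern, based on how
 * the handlers in this file use it): each handler is re-entered every time
 * one of its sub-operations completes. bnx2x_vfop_finalize() is assumed to
 * be a macro from the SR-IOV header that, depending on 'rc' and the
 * requested mode (VFOP_CONT/VFOP_DONE/VFOP_VERIFY_PEND), either falls
 * through to the next switch case, returns to wait for a pending ramrod
 * completion, or jumps to the local op_err/op_done labels. This is why the
 * switch cases below intentionally have no 'break' statements.
 */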

void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	DP(BNX2X_MSG_IOV,
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->tx.sb_cq_index,
	   init_params->tx.hc_rate,
	   setup_params->flags,
	   setup_params->txq_params.traffic_type);
}

void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->rx.sb_cq_index,
	   init_params->rx.hc_rate,
	   setup_params->gen_params.mtu,
	   rxq_params->buf_sz,
	   rxq_params->sge_buf_sz,
	   rxq_params->max_sges_pkt,
	   rxq_params->tpa_agg_sz,
	   setup_params->flags,
	   rxq_params->drop_flags,
	   rxq_params->cache_line_log);
}

void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vfop_qctor_params *p,
			   unsigned long q_type)
{
	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

	/* INIT */

	/* Enable host coalescing in the transition to INIT state */
	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

	/* FW SB ID */
	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

	/* context */
	init_p->cxts[0] = q->cxt;

	/* SETUP */

	/* Setup-op general parameters */
	setup_p->gen_params.spcl_id = vf->sp_cl_id;
	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);

	/* Setup-op pause params:
	 * Nothing to do, the pause thresholds are set by default to 0 which
	 * effectively turns off the feature for this queue. We don't want
	 * one queue (VF) to interfere with another queue (another VF)
	 */
	if (vf->cfg_flags & VF_CFG_FW_FC)
		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
			  vf->abs_vfid);
	/* Setup-op flags:
	 * collect statistics, zero statistics, local-switching, security,
	 * OV for Flex10, RSS and MCAST for leading
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

	/* for VFs, enable tx switching, bd coherency, and mac address
	 * anti-spoofing
	 */
	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

	if (vfq_is_leading(q)) {
		__set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
		__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
	}

	/* Setup-op rx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
	}

	/* Setup-op tx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	}
}

/* VFOP queue construction */
static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qctor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QCTOR_INIT:

		/* has this queue already been opened? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
			DP(BNX2X_MSG_IOV,
			   "Entered qctor but queue was already up. Aborting gracefully\n");
			goto op_done;
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_SETUP;

		q_params->cmd = BNX2X_Q_CMD_INIT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_SETUP:
		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_INT_EN;

		/* copy pre-prepared setup params to the queue-state params */
		vfop->op_p->qctor.qstate.params.setup =
			vfop->op_p->qctor.prep_qsetup;

		q_params->cmd = BNX2X_Q_CMD_SETUP;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_INT_EN:

		/* enable interrupts */
		bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
				    USTORM_ID, 0, IGU_INT_ENABLE, 0);
		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
op_done:
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qctor.qid = qid;
		vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);

		bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
				 bnx2x_vfop_qctor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
					     cmd->block);
	}
	return -ENOMEM;
}
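
/* Typical usage sketch for the *_cmd wrappers above and below (mirrors how
 * bnx2x_vfop_qsetup() drives them later in this file; 'my_done_handler' is
 * a hypothetical vfop_handler_t continuation):
 *
 *	struct bnx2x_vfop_cmd cmd = {
 *		.done	= my_done_handler,	// re-entered on completion
 *		.block	= false,		// don't busy-wait for ramrods
 *	};
 *	if (bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid))
 *		goto error;			// -ENOMEM, no vfop was queued
 */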

/* VFOP queue destruction */
static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qdtor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QDTOR_HALT:

		/* has this queue already been stopped? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_STOPPED) {
			DP(BNX2X_MSG_IOV,
			   "Entered qdtor but queue was already stopped. Aborting gracefully\n");
			goto op_done;
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;

		q_params->cmd = BNX2X_Q_CMD_HALT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_TERMINATE:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;

		q_params->cmd = BNX2X_Q_CMD_TERMINATE;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_CFCDEL:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_DONE;

		q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
	BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
op_done:
	case BNX2X_VFOP_QDTOR_DONE:
		/* invalidate the context */
		qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
		qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_queue_state_params *qstate =
			&vf->op_params.qctor.qstate;

		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qdtor.qid = qid;
		vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);

		bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
				 bnx2x_vfop_qdtor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
					     cmd->block);
	}
	DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop\n", vf->abs_vfid);
	return -ENOMEM;
}

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
	if (vf) {
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;
		++vf_sb_count(vf);
	}
}

/* VFOP MAC/VLAN helpers */
static inline void bnx2x_vfop_credit(struct bnx2x *bp,
				     struct bnx2x_vfop *vfop,
				     struct bnx2x_vlan_mac_obj *obj)
{
	struct bnx2x_vfop_args_filters *args = &vfop->args.filters;

	/* update credit only if there is no error
	 * and a valid credit counter
	 */
	if (!vfop->rc && args->credit) {
		int cnt = 0;
		struct list_head *pos;

		list_for_each(pos, &obj->head)
			cnt++;

		atomic_set(args->credit, cnt);
	}
}
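
/* Note: rather than tracking each individual add/del, bnx2x_vfop_credit()
 * above simply recounts the rules currently held by the vlan_mac object
 * and publishes that as the new credit, so the counter self-corrects after
 * any sequence of list operations.
 */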

static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
				   struct bnx2x_vfop_filter *pos,
				   struct bnx2x_vlan_mac_data *user_req)
{
	user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
		BNX2X_VLAN_MAC_DEL;

	switch (pos->type) {
	case BNX2X_VFOP_FILTER_MAC:
		memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
		break;
	case BNX2X_VFOP_FILTER_VLAN:
		user_req->u.vlan.vlan = pos->vid;
		break;
	default:
		BNX2X_ERR("Invalid filter type, skipping\n");
		return 1;
	}
	return 0;
}

static int
bnx2x_vfop_config_list(struct bnx2x *bp,
		       struct bnx2x_vfop_filters *filters,
		       struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
{
	struct bnx2x_vfop_filter *pos, *tmp;
	struct list_head rollback_list, *filters_list = &filters->head;
	struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
	int rc = 0, cnt = 0;

	INIT_LIST_HEAD(&rollback_list);

	list_for_each_entry_safe(pos, tmp, filters_list, link) {
		if (bnx2x_vfop_set_user_req(bp, pos, user_req))
			continue;

		rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (rc >= 0) {
			cnt += pos->add ? 1 : -1;
			list_move(&pos->link, &rollback_list);
			rc = 0;
		} else if (rc == -EEXIST) {
			rc = 0;
		} else {
			BNX2X_ERR("Failed to add a new vlan_mac command\n");
			break;
		}
	}

	/* rollback if error or too many rules added */
	if (rc || cnt > filters->add_cnt) {
		BNX2X_ERR("error or too many rules added. Performing rollback\n");
		list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
			pos->add = !pos->add;	/* reverse op */
			bnx2x_vfop_set_user_req(bp, pos, user_req);
			bnx2x_config_vlan_mac(bp, vlan_mac);
			list_del(&pos->link);
		}
		cnt = 0;
		if (!rc)
			rc = -EINVAL;
	}
	filters->add_cnt = cnt;
	return rc;
}

/* VFOP set VLAN/MAC */
static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
	struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
	struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;

	enum bnx2x_vfop_vlan_mac_state state = vfop->state;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	bnx2x_vfop_reset_wq(vf);

	switch (state) {
	case BNX2X_VFOP_VLAN_MAC_CLEAR:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do delete */
		vfop->rc = obj->delete_all(bp, obj,
					   &vlan_mac->user_req.vlan_mac_flags,
					   &vlan_mac->ramrod_flags);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do config */
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (vfop->rc == -EEXIST)
			vfop->rc = 0;

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
		vfop->rc = !!obj->raw.check_pending(&obj->raw);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MAC_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do list config */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (vfop->rc)
			goto op_err;

		set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do list config */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (!vfop->rc) {
			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		}
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
op_done:
	kfree(filters);
	bnx2x_vfop_credit(bp, vfop, obj);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

struct bnx2x_vfop_vlan_mac_flags {
	bool drv_only;
	bool dont_consume;
	bool single_cmd;
	bool add;
};

static void
bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
				struct bnx2x_vfop_vlan_mac_flags *flags)
{
	struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;

	memset(ramrod, 0, sizeof(*ramrod));

	/* ramrod flags */
	if (flags->drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
	if (flags->single_cmd)
		set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);

	/* mac_vlan flags */
	if (flags->dont_consume)
		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);

	/* cmd */
	ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
}

static inline void
bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
			   struct bnx2x_vfop_vlan_mac_flags *flags)
{
	bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
	set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
}

static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
				     struct bnx2x_virtf *vf,
				     struct bnx2x_vfop_cmd *cmd,
				     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single */
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false /* don't care */,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    struct bnx2x_vfop_filters *macs,
			    int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = macs,
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false, /* don't care since only the items in
				       * the filters list affect the sp
				       * operation, not the list itself
				       */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    int qid, u16 vid, bool add)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL, /* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = false,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = add,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
		ramrod->user_req.u.vlan.vlan = vid;

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      struct bnx2x_vfop_cmd *cmd,
				      int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL, /* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false, /* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
			     struct bnx2x_virtf *vf,
			     struct bnx2x_vfop_cmd *cmd,
			     struct bnx2x_vfop_filters *vlans,
			     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = vlans,
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false, /* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
			atomic_read(filters.credit);

		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue setup (queue constructor + set vlan 0) */
static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qctor.qid;
	enum bnx2x_vfop_qsetup_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_qsetup,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QSETUP_CTOR:
		/* init the queue ctor command */
		vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
		vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QSETUP_VLAN0:
		/* skip if non-leading or FPGA/EMU */
		if (qid)
			goto op_done;

		/* init the queue set-vlan command (for vlan 0) */
		vfop->state = BNX2X_VFOP_QSETUP_DONE;
		vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QSETUP_DONE:
		vf->cfg_flags |= VF_CFG_VLAN;
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
			&bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}

int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qctor.qid = qid;

		bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
				 bnx2x_vfop_qsetup, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
					     cmd->block);
	}
	return -ENOMEM;
}
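
/* The FLR flow below invokes the clear-all commands with drv_only = true
 * (RAMROD_DRV_CLR_ONLY): the intent appears to be that after a
 * function-level reset the HW filter state is already gone, so only the
 * driver's bookkeeping (vlan_mac objects and credit pools) needs clearing,
 * without posting delete ramrods toward queues that no longer exist in HW.
 */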

/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qflr_state state = vfop->state;
	struct bnx2x_queue_state_params *qstate;
	struct bnx2x_vfop_cmd cmd;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qflr;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QFLR_CLR_VLAN:
		/* vlan-clear-all: driver-only, don't consume credit */
		vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QFLR_CLR_MAC:
		/* mac-clear-all: driver-only, consume credit */
		vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
		DP(BNX2X_MSG_IOV,
		   "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d\n",
		   vf->abs_vfid, vfop->rc);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QFLR_TERMINATE:
		qstate = &vfop->op_p->qctor.qstate;
		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
		vfop->state = BNX2X_VFOP_QFLR_DONE;

		DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
		   vf->abs_vfid, qstate->q_obj->state);

		if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
			qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
			qstate->cmd = BNX2X_Q_CMD_TERMINATE;
			vfop->rc = bnx2x_queue_state_change(bp, qstate);
			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
		} else {
			goto op_done;
		}

op_err:
	BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QFLR_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
			       struct bnx2x_virtf *vf,
			       struct bnx2x_vfop_cmd *cmd,
			       int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qx.qid = qid;
		bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
				 bnx2x_vfop_qflr, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP multi-casts */
static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
	struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
	struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
	enum bnx2x_vfop_mcast_state state = vfop->state;
	int i;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_MCAST_DEL:
		/* clear existing mcasts */
		vfop->state = BNX2X_VFOP_MCAST_ADD;
		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_MCAST_ADD:
		if (raw->check_pending(raw))
			goto op_pending;

		if (args->mc_num) {
			/* update mcast list on the ramrod params */
			INIT_LIST_HEAD(&mcast->mcast_list);
			for (i = 0; i < args->mc_num; i++)
				list_add_tail(&(args->mc[i].link),
					      &mcast->mcast_list);
			/* add new mcasts */
			vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
			vfop->rc = bnx2x_config_mcast(bp, mcast,
						      BNX2X_MCAST_CMD_ADD);
		}
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MCAST_CHK_DONE:
		vfop->rc = raw->check_pending(raw) ? 1 : 0;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
op_done:
	kfree(args->mc);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 bnx2x_mac_addr_t *mcasts,
			 int mcast_num, bool drv_only)
{
	struct bnx2x_vfop *vfop = NULL;
	size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
	struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
					   NULL;

	if (!mc_sz || mc) {
		vfop = bnx2x_vfop_add(bp, vf);
		if (vfop) {
			int i;
			struct bnx2x_mcast_ramrod_params *ramrod =
				&vf->op_params.mcast;

			/* set ramrod params */
			memset(ramrod, 0, sizeof(*ramrod));
			ramrod->mcast_obj = &vf->mcast_obj;
			if (drv_only)
				set_bit(RAMROD_DRV_CLR_ONLY,
					&ramrod->ramrod_flags);

			/* copy mcasts pointers */
			vfop->args.mc_list.mc_num = mcast_num;
			vfop->args.mc_list.mc = mc;
			for (i = 0; i < mcast_num; i++)
				mc[i].mac = mcasts[i];

			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
					 bnx2x_vfop_mcast, cmd->done);
			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
						     cmd->block);
		} else {
			kfree(mc);
		}
	}
	return -ENOMEM;
}

/* VFOP rx-mode */
static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
	enum bnx2x_vfop_rxmode_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_RXMODE_CONFIG:
		/* next state */
		vfop->state = BNX2X_VFOP_RXMODE_DONE;

		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
	BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
op_done:
	case BNX2X_VFOP_RXMODE_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid, unsigned long accept_flags)
{
	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_rx_mode_ramrod_params *ramrod =
			&vf->op_params.rx_mode;

		memset(ramrod, 0, sizeof(*ramrod));

		/* Prepare ramrod parameters */
		ramrod->cid = vfq->cid;
		ramrod->cl_id = vfq_cl_id(vf, vfq);
		ramrod->rx_mode_obj = &bp->rx_mode_obj;
		ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);

		ramrod->rx_accept_flags = accept_flags;
		ramrod->tx_accept_flags = accept_flags;
		ramrod->pstate = &vf->filter_state;
		ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

		set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
		set_bit(RAMROD_RX, &ramrod->ramrod_flags);
		set_bit(RAMROD_TX, &ramrod->ramrod_flags);

		ramrod->rdata =
			bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
		ramrod->rdata_mapping =
			bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);

		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
				 bnx2x_vfop_rxmode, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
					     cmd->block);
	}
	return -ENOMEM;
}
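
/* Note on accept_flags in bnx2x_vfop_rxmode_cmd() above: the same mask is
 * applied to both the rx and tx directions. Passing 0 yields an
 * accept-nothing filter - this is the 'drop all' mode that the queue
 * tear-down flow below relies on.
 */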

/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
 * queue destructor)
 */
static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qteardown_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qdown;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QTEARDOWN_RXMODE:
		/* Drop all */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
		vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
		/* vlan-clear-all: don't consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
		/* mac-clear-all: consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_QDTOR:
		/* run the queue destruction flow */
		DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
		vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
		DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
		vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
		DP(BNX2X_MSG_IOV, "returned from cmd\n");
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);

	case BNX2X_VFOP_QTEARDOWN_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}

int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qx.qid = qid;
		bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE,
				 bnx2x_vfop_qdown, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
					     cmd->block);
	}

	return -ENOMEM;
}

/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}

static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;
	u32 val;

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	if (vf->cfg_flags & VF_CFG_INT_SIMD)
		val |= IGU_VF_CONF_SINGLE_ISR_EN;
	val &= ~IGU_VF_CONF_PARENT_MASK;
	val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT;	/* parent PF */
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	DP(BNX2X_MSG_IOV,
	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
	   vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
				       false /* VF */);

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
				    IGU_INT_DISABLE, 1);
	}
}

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* Reset the vf in the IGU while interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
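
/* Sketch of the pretend pattern used throughout this file (semantics
 * assumed from bnx2x_pretend_func() in bnx2x_main.c; SOME_REG is a
 * placeholder): GRC accesses issued between the two calls execute on
 * behalf of the VF rather than the PF:
 *
 *	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
 *	REG_WR(bp, SOME_REG, val);		// lands in the VF's function
 *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));	// restore the PF view
 */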

static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		return false;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}

/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
{
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;

	/* divvy up vlan rules */
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
}

/* FLR routines: */
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* reset the state variables */
	bnx2x_iov_static_resc(bp, &vf->alloc_resc);
	vf->state = VF_FREE;
}

static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	/* DQ usage counter */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
					"DQ VF usage counter timed out",
					poll_cnt);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* FW cleanup command - poll for the results */
	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
				   poll_cnt))
		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

	/* verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);
}

static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
	enum bnx2x_vfop_flr_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_flr,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_FLR_QUEUES:
		/* the cleanup operations are valid if and only if the VF
		 * was first acquired.
		 */
		if (++(qx->qid) < vf_rxq_count(vf)) {
			vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
						       qx->qid);
			if (vfop->rc)
				goto op_err;
			return;
		}
		/* remove multicasts */
		vfop->state = BNX2X_VFOP_FLR_HW;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
						0, true);
		if (vfop->rc)
			goto op_err;
		return;
	case BNX2X_VFOP_FLR_HW:

		/* dispatch final cleanup and wait for HW queues to flush */
		bnx2x_vf_flr_clnup_hw(bp, vf);

		/* release VF resources */
		bnx2x_vf_free_resc(bp, vf);

		/* re-open the mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);

		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	vf->flr_clnup_stage = VF_FLR_ACK;
	bnx2x_vfop_end(bp, vf, vfop);
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
}

static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
			      struct bnx2x_virtf *vf,
			      vfop_handler_t done)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		vfop->args.qx.qid = -1; /* loop */
		bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
				 bnx2x_vfop_flr, done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
	}
	return -ENOMEM;
}

static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
{
	int i = prev_vf ? prev_vf->index + 1 : 0;
	struct bnx2x_virtf *vf;

	/* find next VF to cleanup */
next_vf_to_clean:
	for (;
	     i < BNX2X_NR_VIRTFN(bp) &&
	     (bnx2x_vf(bp, i, state) != VF_RESET ||
	      bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
	     i++)
		;

	DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
	   BNX2X_NR_VIRTFN(bp));

	if (i < BNX2X_NR_VIRTFN(bp)) {
		vf = BP_VF(bp, i);

		/* lock the vf pf channel */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

		/* invoke the VF FLR SM */
		if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
			BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
				  vf->abs_vfid);

			/* mark the VF to be ACKED and continue */
			vf->flr_clnup_stage = VF_FLR_ACK;
			goto next_vf_to_clean;
		}
		return;
	}

	/* we are done, update vf records */
	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);

		if (vf->flr_clnup_stage != VF_FLR_ACK)
			continue;

		vf->flr_clnup_stage = VF_FLR_EPILOG;
	}

	/* Acknowledge the handled VFs.
	 * We acknowledge all the VFs for which an FLR was requested, even
	 * those we never opened, since otherwise the MCP will interrupt us
	 * immediately again if we only ack some of the bits, resulting in
	 * an endless loop. This can happen for example in KVM where an
	 * 'all ones' flr request is sometimes given by the hypervisor.
	 */
	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
			  bp->vfdb->flrd_vfs[i]);

	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

	/* clear the acked bits - better yet if the MCP implemented
	 * write to clear semantics
	 */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}

void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
	int i;

	/* Read FLR'd VFs */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

	DP(BNX2X_MSG_MCP,
	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);
		u32 reset = 0;

		if (vf->abs_vfid < 32)
			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
		else
			reset = bp->vfdb->flrd_vfs[1] &
				(1 << (vf->abs_vfid - 32));

		if (reset) {
			/* set as reset and ready for cleanup */
			vf->state = VF_RESET;
			vf->flr_clnup_stage = VF_FLR_CLN;

			DP(BNX2X_MSG_IOV,
			   "Initiating Final cleanup for VF %d\n",
			   vf->abs_vfid);
		}
	}

	/* do the FLR cleanup for all marked VFs */
	bnx2x_vf_flr_clnup(bp, NULL);
}

/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflect the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set VFs starting CID. If it's > 0, the preceding CIDs belong to
	 * the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size (0 - *B, 4 - 128B). We set it here to match
	 * the PF doorbell size although the two are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
	       BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the number of VF allowed doorbells to the full DQ range */
	REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);

	/* set the VF doorbell threshold */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}

void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}

static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}
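
/* The two helpers above follow the PCIe SR-IOV routing-ID formula (per the
 * PCIe SR-IOV spec): a VF's 16-bit routing ID is
 *
 *	vf_rid = pf_rid + VF Offset + vfid * VF Stride
 *
 * where the low 8 bits are the devfn and any carry past bit 7 bumps the
 * bus number - hence the '>> 8' in bnx2x_vf_bus() and the '& 0xff' in
 * bnx2x_vf_devfn().
 */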

static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		size /= iov->total;
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}

static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}

static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (!(fid & IGU_FID_ENCODE_IS_PF))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));

		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ?
		    (fid & IGU_FID_PF_NUM_MASK) :
		    (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
}

static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}

static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}

static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
{
	int i;
	u8 queue_count = 0;

	if (IS_SRIOV(bp))
		for_each_vf(bp, i)
			queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);

	return queue_count;
}

/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i, qcount;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify this is a PF */
	if (IS_VF(bp))
		return 0;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
		return 0;
	}

	err = -EIO;
	/* verify ari is enabled */
	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
		return 0;
	}

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
		return 0;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hypervisor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs */
	if (iov->total == 0)
		goto failed;

	iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);

	DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
	   num_vfs_param, iov->nr_virtfn);

	/* allocate the vf array */
	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	bnx2x_get_vf_igu_cam_info(bp);

	/* get the total queue count and allocate the global queue arrays */
	qcount = bnx2x_iov_get_max_queue_count(bp);

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
				 GFP_KERNEL);
	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}

void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	DP(BNX2X_MSG_IOV, "about to call disable sriov\n");
	pci_disable_sriov(bp->pdev);
	DP(BNX2X_MSG_IOV, "sriov disabled\n");

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}
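
/* bnx2x_iov_alloc_mem() below sizes the VF context memory as one total of
 * (first_vf_in_pf + nr_virtfn) * BNX2X_CIDS_PER_VF CDU contexts and then
 * hands it out in CDU_ILT_PAGE_SZ-sized chunks, one per ILT page: each
 * loop iteration takes min(tot_size, CDU_ILT_PAGE_SZ), so tot_size shrinks
 * by a full page per iteration until the remainder fits in the last,
 * partially used page (and any further pages get size 0 / NULL).
 */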
void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];
		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);

	BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
		       BP_VF_BULLETIN_DMA(bp)->mapping,
		       BP_VF_BULLETIN_DMA(bp)->size);
}

int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	size_t tot_size;
	int i, rc = 0;

	if (!IS_SRIOV(bp))
		return rc;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

		if (cxt->size) {
			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
		} else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
			tot_size);
	BP_VFDB(bp)->sp_dma.size = tot_size;

	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
			tot_size);
	BP_VF_MBX_DMA(bp)->size = tot_size;

	/* allocate local bulletin boards */
	tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
	BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr,
			&BP_VF_BULLETIN_DMA(bp)->mapping, tot_size);
	BP_VF_BULLETIN_DMA(bp)->size = tot_size;

	return 0;

alloc_mem_err:
	return -ENOMEM;
}
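/* Worked example (illustrative sketch, not called by the driver): how
 * the context allocation above slices tot_size across ILT pages. With
 * an assumed CDU_ILT_PAGE_SZ of 4096 bytes and tot_size of 10000 bytes,
 * successive pages get 4096, 4096 and 1808 bytes, and any further page
 * computes a size of 0 and allocates nothing.
 */
static inline size_t bnx2x_example_cxt_page_size(size_t tot_size, int page)
{
	/* bytes left after 'page' full ILT pages, clamped to one page */
	size_t left = tot_size > (size_t)page * CDU_ILT_PAGE_SZ ?
		      tot_size - (size_t)page * CDU_ILT_PAGE_SZ : 0;

	return min_t(size_t, left, CDU_ILT_PAGE_SZ);
}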
static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
	unsigned long q_type = 0;

	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

	/* Queue State object */
	bnx2x_init_queue_obj(bp, &q->sp_obj,
			     cl_id, &q->cid, 1, func_id,
			     bnx2x_vf_sp(bp, vf, q_data),
			     bnx2x_vf_sp_map(bp, vf, q_data),
			     q_type);

	DP(BNX2X_MSG_IOV,
	   "initialized vf %d's queue object. func id set to %d\n",
	   vf->abs_vfid, q->sp_obj.func_id);

	/* mac/vlan objects are per queue, but only those
	 * that belong to the leading queue are initialized
	 */
	if (vfq_is_leading(q)) {
		/* mac */
		bnx2x_init_mac_obj(bp, &q->mac_obj,
				   cl_id, q->cid, func_id,
				   bnx2x_vf_sp(bp, vf, mac_rdata),
				   bnx2x_vf_sp_map(bp, vf, mac_rdata),
				   BNX2X_FILTER_MAC_PENDING,
				   &vf->filter_state,
				   BNX2X_OBJ_TYPE_RX_TX,
				   &bp->macs_pool);
		/* vlan */
		bnx2x_init_vlan_obj(bp, &q->vlan_obj,
				    cl_id, q->cid, func_id,
				    bnx2x_vf_sp(bp, vf, vlan_rdata),
				    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
				    BNX2X_FILTER_VLAN_PENDING,
				    &vf->filter_state,
				    BNX2X_OBJ_TYPE_RX_TX,
				    &bp->vlans_pool);

		/* mcast */
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
				     q->cid, func_id, func_id,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		vf->leading_rss = cl_id;
	}
}
/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
	int vfid, qcount, i;

	if (!IS_SRIOV(bp)) {
		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
		return 0;
	}

	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);

	/* let FLR complete ... */
	msleep(100);

	/* initialize vf database */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
			BNX2X_CIDS_PER_VF;

		union cdu_context *base_cxt = (union cdu_context *)
			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
			(base_vf_cid & (ILT_PAGE_CIDS-1));

		DP(BNX2X_MSG_IOV,
		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);

		/* init statically provisioned resources */
		bnx2x_iov_static_resc(bp, &vf->alloc_resc);

		/* queues are initialized during VF-ACQUIRE */

		/* reserve the vf vlan credit */
		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));

		vf->filter_state = 0;
		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);

		/* init mcast object - This object will be re-initialized
		 * during VF-ACQUIRE with the proper cl_id and cid.
		 * It needs to be initialized here so that it can be safely
		 * handled by a subsequent FLR flow.
		 */
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
				     0xFF, 0xFF, 0xFF,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		/* set the mailbox message addresses */
		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
			 MBX_MSG_ALIGNED_SIZE);

		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
			vfid * MBX_MSG_ALIGNED_SIZE;

		/* Enable vf mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	}

	/* Final VF init */
	qcount = 0;
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		/* fill in the BDF and bars */
		vf->bus = bnx2x_vf_bus(bp, i);
		vf->devfn = bnx2x_vf_devfn(bp, i);
		bnx2x_vf_set_bars(bp, vf);

		DP(BNX2X_MSG_IOV,
		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
		   vf->abs_vfid, vf->bus, vf->devfn,
		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
		   (unsigned)vf->bars[2].bar, vf->bars[2].size);

		/* set local queue arrays */
		vf->vfqs = &bp->vfdb->vfqs[qcount];
		qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
	}

	return 0;
}

/* called by bnx2x_chip_cleanup */
int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return 0;

	/* release all the VFs */
	for_each_vf(bp, i)
		bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */

	return 0;
}

/* called by bnx2x_init_hw_func, returns the next ilt line */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
{
	int i;
	struct bnx2x_ilt *ilt = BP_ILT(bp);

	if (!IS_SRIOV(bp))
		return line;

	/* set vfs ilt lines */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);

		ilt->lines[line+i].page = hw_cxt->addr;
		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
	}
	return line + i;
}

static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
{
	return ((cid >= BNX2X_FIRST_VF_CID) &&
		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
}

static
void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
					struct bnx2x_vf_queue *vfq,
					union event_ring_elem *elem)
{
	unsigned long ramrod_flags = 0;
	int rc = 0;

	/* Always push next commands out, don't wait here */
	set_bit(RAMROD_CONT, &ramrod_flags);

	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
	case BNX2X_FILTER_MAC_PENDING:
		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
					   &ramrod_flags);
		break;
	case BNX2X_FILTER_VLAN_PENDING:
		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
					    &ramrod_flags);
		break;
	default:
		BNX2X_ERR("Unsupported classification command: %d\n",
			  elem->message.data.eth_event.echo);
		return;
	}
	if (rc < 0)
		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
	else if (rc > 0)
		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
}
static
void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
			       struct bnx2x_virtf *vf)
{
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc;

	rparam.mcast_obj = &vf->mcast_obj;
	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
		if (rc < 0)
			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
				  rc);
	}
}

static
void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
				 struct bnx2x_virtf *vf)
{
	smp_mb__before_clear_bit();
	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	smp_mb__after_clear_bit();
}

int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
{
	struct bnx2x_virtf *vf;
	int qidx = 0, abs_vfid;
	u8 opcode;
	u16 cid = 0xffff;

	if (!IS_SRIOV(bp))
		return 1;

	/* first get the cid - the only events we handle here are cfc-delete
	 * and set-mac completion
	 */
	opcode = elem->message.opcode;

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		cid = SW_CID((__force __le32)
			     elem->message.data.cfc_del_event.cid);
		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
	case EVENT_RING_OPCODE_MULTICAST_RULES:
	case EVENT_RING_OPCODE_FILTERS_RULES:
		cid = (elem->message.data.eth_event.echo &
		       BNX2X_SWCID_MASK);
		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		abs_vfid = elem->message.data.vf_flr_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
		   abs_vfid, elem->message.data.malicious_vf_event.err_id);
		goto get_vf;
	default:
		return 1;
	}

	/* check if the cid is in the VF range */
	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
		return 1;
	}

	/* extract vf and rxq index from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. The max number of VFs (per path) is 64
	 */
	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
get_vf:
	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf) {
		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
			  cid, abs_vfid);
		return 0;
	}

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
		   vf->abs_vfid, qidx);
		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
						       &vfq_get(vf,
								qidx)->sp_obj,
						       BNX2X_Q_CMD_CFC_DEL);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
		break;
	case EVENT_RING_OPCODE_MULTICAST_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_mcast_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_FILTERS_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_filters_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
		   vf->abs_vfid);
		/* Do nothing for now */
		break;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n",
		   abs_vfid, elem->message.data.malicious_vf_event.err_id);
		/* Do nothing for now */
		break;
	}
	/* SRIOV: reschedule any 'in_progress' operations */
	bnx2x_iov_sp_event(bp, cid, false);

	return 0;
}
static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
{
	/* extract the vf from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. The max number of VFs (per path) is 64
	 */
	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
}

void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
				struct bnx2x_queue_sp_obj **q_obj)
{
	struct bnx2x_virtf *vf;

	if (!IS_SRIOV(bp))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);

	if (vf) {
		/* extract queue index from vf_cid - relies on the following:
		 * 1. vfid on cid reflects the true abs_vfid
		 * 2. The max number of VFs (per path) is 64
		 */
		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
	} else {
		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
	}
}
void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
{
	struct bnx2x_virtf *vf;

	/* check if the cid is in the VF range */
	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);
	if (vf) {
		/* set in_progress flag */
		atomic_set(&vf->op_in_progress, 1);
		if (queue_work)
			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
	}
}

void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index, num_queues_req;
	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;
	u8 stats_count = 0;
	bool is_fcoe = false;

	if (!IS_SRIOV(bp))
		return;

	if (!NO_FCOE(bp))
		is_fcoe = true;

	/* fcoe adds one global request and one queue request */
	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
		(is_fcoe ? 0 : 1);

	DP(BNX2X_MSG_IOV,
	   "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
	   BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
	   first_queue_query_index + num_queues_req);

	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
		num_queues_req * sizeof(struct per_queue_stats);

	cur_query_entry = &bp->fw_stats_req->
		query[first_queue_query_index + num_queues_req];

	for_each_vf(bp, i) {
		int j;
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (vf->state != VF_ENABLED) {
			DP(BNX2X_MSG_IOV,
			   "vf %d not enabled so no stats for it\n",
			   vf->abs_vfid);
			continue;
		}

		DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
		for_each_vfq(vf, j) {
			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);

			/* collect stats from active queues only */
			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
			    BNX2X_Q_LOGICAL_STATE_STOPPED)
				continue;

			/* create stats query entry for this queue */
			cur_query_entry->kind = STATS_TYPE_QUEUE;
			cur_query_entry->index = vfq_cl_id(vf, rxq);
			cur_query_entry->funcID =
				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
			cur_query_entry->address.hi =
				cpu_to_le32(U64_HI(vf->fw_stat_map));
			cur_query_entry->address.lo =
				cpu_to_le32(U64_LO(vf->fw_stat_map));
			DP(BNX2X_MSG_IOV,
			   "added address %x %x for vf %d queue %d client %d\n",
			   cur_query_entry->address.hi,
			   cur_query_entry->address.lo, cur_query_entry->funcID,
			   j, cur_query_entry->index);
			cur_query_entry++;
			cur_data_offset += sizeof(struct per_queue_stats);
			stats_count++;
		}
	}
	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
}

void bnx2x_iov_sp_task(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;
	/* Iterate over all VFs and invoke state transition for VFs with
	 * 'in-progress' slow-path operations
	 */
	DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (!list_empty(&vf->op_list_head) &&
		    atomic_read(&vf->op_in_progress)) {
			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
			bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
		}
	}
}
static inline
struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
{
	int i;
	struct bnx2x_virtf *vf = NULL;

	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);
		if (stat_id >= vf->igu_base_id &&
		    stat_id < vf->igu_base_id + vf_sb_count(vf))
			break;
	}
	return vf;
}

/* VF API helpers */
static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
				u8 enable)
{
	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;

	REG_WR(bp, reg, val);
}

static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;

	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), false);
}

static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 val;

	/* clear the VF configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
		 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
		     BNX2X_VF_MAX_QUEUES);
}

static
int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
			    struct vf_pf_resc_request *req_resc)
{
	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);

	return ((req_resc->num_rxqs <= rxq_cnt) &&
		(req_resc->num_txqs <= txq_cnt) &&
		(req_resc->num_sbs <= vf_sb_count(vf)) &&
		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
}
/* CORE VF API */
int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
		     struct vf_pf_resc_request *resc)
{
	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
		BNX2X_CIDS_PER_VF;

	union cdu_context *base_cxt = (union cdu_context *)
		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
		(base_vf_cid & (ILT_PAGE_CIDS-1));
	int i;

	/* if state is 'acquired' the VF was not released or FLR'd, in
	 * this case the returned resources match the already acquired
	 * resources. Verify that the requested numbers do not exceed the
	 * already acquired numbers.
	 */
	if (vf->state == VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
		   vf->abs_vfid);

		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
				  vf->abs_vfid);
			return -EINVAL;
		}
		return 0;
	}

	/* Otherwise vf state must be 'free' or 'reset' */
	if (vf->state != VF_FREE && vf->state != VF_RESET) {
		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
			  vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* static allocation:
	 * the global maximum numbers are fixed per VF. Fail the request if
	 * the requested numbers exceed these globals.
	 */
	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
		DP(BNX2X_MSG_IOV,
		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
		/* set the max resource in the vf */
		return -ENOMEM;
	}

	/* Set resource counters - 0 request means max available */
	vf_sb_count(vf) = resc->num_sbs;
	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	if (resc->num_mac_filters)
		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
	if (resc->num_vlan_filters)
		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;

	DP(BNX2X_MSG_IOV,
	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
	   vf_sb_count(vf), vf_rxq_count(vf),
	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
	   vf_vlan_rules_cnt(vf));

	/* Initialize the queues */
	if (!vf->vfqs) {
		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
		return -EINVAL;
	}

	for_each_vfq(vf, i) {
		struct bnx2x_vf_queue *q = vfq_get(vf, i);

		if (!q) {
			DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
			return -EINVAL;
		}

		q->index = i;
		q->cxt = &((base_cxt + i)->eth);
		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;

		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
		   vf->abs_vfid, i, q->index, q->cid, q->cxt);

		/* init SP objects */
		bnx2x_vfq_init(bp, vf, q);
	}
	vf->state = VF_ACQUIRED;
	return 0;
}
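/* Illustrative usage sketch (hypothetical caller, not part of the
 * driver): acquiring a VF with "maximum available" queue counts. This
 * leans on the convention above that a zero num_rxqs/num_txqs request
 * means the max, while num_sbs must not exceed the statically
 * provisioned count.
 */
static inline int bnx2x_example_acquire_max(struct bnx2x *bp,
					    struct bnx2x_virtf *vf)
{
	struct vf_pf_resc_request req = {
		.num_sbs = vf_sb_count(vf),	/* keep the current sb count */
		.num_rxqs = 0,			/* 0 => max available rxqs */
		.num_txqs = 0,			/* 0 => max available txqs */
		/* zero mac/vlan filter requests keep the static defaults */
	};

	return bnx2x_vf_acquire(bp, vf, &req);
}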
int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
{
	struct bnx2x_func_init_params func_init = {0};
	u16 flags = 0;
	int i;

	/* the sb resources are initialized at this point, do the
	 * FW/HW initializations
	 */
	for_each_vf_sb(vf, i)
		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));

	/* Sanity checks */
	if (vf->state != VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
		   vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* let FLR complete ... */
	msleep(100);

	/* FLR cleanup epilogue */
	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
		return -EBUSY;

	/* reset IGU VF statistics: MSIX */
	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);

	/* vf init */
	if (vf->cfg_flags & VF_CFG_STATS)
		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);

	if (vf->cfg_flags & VF_CFG_TPA)
		flags |= FUNC_FLG_TPA;

	if (is_vf_multi(vf))
		flags |= FUNC_FLG_RSS;

	/* function setup */
	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
	func_init.fw_stat_map = vf->fw_stat_map;
	func_init.spq_map = vf->spq_map;
	func_init.spq_prod = 0;
	bnx2x_func_init(bp, &func_init);

	/* Enable the vf */
	bnx2x_vf_enable_access(bp, vf->abs_vfid);
	bnx2x_vf_enable_traffic(bp, vf);

	/* queue protection table */
	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), true);

	vf->state = VF_ENABLED;

	/* update vf bulletin board */
	bnx2x_post_vf_bulletin(bp, vf->index);

	return 0;
}

struct set_vf_state_cookie {
	struct bnx2x_virtf *vf;
	u8 state;
};

void bnx2x_set_vf_state(void *cookie)
{
	struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;

	p->vf->state = p->state;
}

/* VFOP close (teardown the queues, delete mcasts and close HW) */
static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
	enum bnx2x_vfop_close_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_close,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_CLOSE_QUEUES:

		if (++(qx->qid) < vf_rxq_count(vf)) {
			vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
			if (vfop->rc)
				goto op_err;
			return;
		}

		/* remove multicasts */
		vfop->state = BNX2X_VFOP_CLOSE_HW;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_CLOSE_HW:

		/* disable the interrupts */
		DP(BNX2X_MSG_IOV, "disabling igu\n");
		bnx2x_vf_igu_disable(bp, vf);

		/* disable the VF */
		DP(BNX2X_MSG_IOV, "clearing qtbl\n");
		bnx2x_vf_clr_qtbl(bp, vf);

		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:

	/* need to make sure there are no outstanding stats ramrods which may
	 * cause the device to access the VF's stats buffer which it will free
	 * as soon as we return from the close flow.
	 */
	{
		struct set_vf_state_cookie cookie;

		cookie.vf = vf;
		cookie.state = VF_ACQUIRED;
		bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
	}

	DP(BNX2X_MSG_IOV, "set state to acquired\n");
	bnx2x_vfop_end(bp, vf, vfop);
}
int bnx2x_vfop_close_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		vfop->args.qx.qid = -1; /* loop */
		bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
				 bnx2x_vfop_close, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VF release can be called either when: 1. the VF was acquired but not
 * enabled, or 2. the VF was enabled or is in the process of being
 * enabled
 */
static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_release,
		.block = false,
	};

	DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
	   vf->state == VF_FREE ? "Free" :
	   vf->state == VF_ACQUIRED ? "Acquired" :
	   vf->state == VF_ENABLED ? "Enabled" :
	   vf->state == VF_RESET ? "Reset" :
	   "Unknown");

	switch (vf->state) {
	case VF_ENABLED:
		vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
		if (vfop->rc)
			goto op_err;
		return;

	case VF_ACQUIRED:
		DP(BNX2X_MSG_IOV, "about to free resources\n");
		bnx2x_vf_free_resc(bp, vf);
		DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
		goto op_done;

	case VF_FREE:
	case VF_RESET:
		/* do nothing */
		goto op_done;
	default:
		bnx2x_vfop_default(vf->state);
	}
op_err:
	BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	bnx2x_vfop_end(bp, vf, vfop);
}

int bnx2x_vfop_release_cmd(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		bnx2x_vfop_opset(-1, /* use vf->state */
				 bnx2x_vfop_release, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release,
					     cmd->block);
	}
	return -ENOMEM;
}
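/* Illustrative sketch (hypothetical caller, not part of the driver):
 * the two completion styles of a vfop command. A NULL .done with
 * .block = true makes the transition wait synchronously; supplying a
 * .done callback instead chains the next state-machine step and returns
 * immediately, as bnx2x_vfop_close/bnx2x_vfop_release do above.
 */
static inline int bnx2x_example_release_sync(struct bnx2x *bp,
					     struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop_cmd cmd = {
		.done = NULL,	/* nothing to chain after the release */
		.block = true,	/* wait here until the op completes */
	};

	return bnx2x_vfop_release_cmd(bp, vf, &cmd);
}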
/* VF release ~ VF close + VF release-resources
 * Release is the ultimate SW shutdown and is called whenever an
 * irrecoverable error is encountered.
 */
void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
{
	struct bnx2x_vfop_cmd cmd = {
		.done = NULL,
		.block = block,
	};
	int rc;
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);

	rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
	if (rc)
		WARN(rc,
		     "VF[%d] Failed to allocate resources for release op - rc=%d\n",
		     vf->abs_vfid, rc);
}

static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
				     struct bnx2x_virtf *vf, u32 *sbdf)
{
	*sbdf = vf->devfn | (vf->bus << 8);
}

static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
				     struct bnx2x_vf_bar_info *bar_info)
{
	int n;

	bar_info->nr_bars = bp->vfdb->sriov.nres;
	for (n = 0; n < bar_info->nr_bars; n++)
		bar_info->bars[n] = vf->bars[n];
}

void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      enum channel_tlvs tlv)
{
	/* lock the channel */
	mutex_lock(&vf->op_mutex);

	/* record the locking op */
	vf->op_current = tlv;

	/* log the lock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
	   vf->abs_vfid, tlv);
}

void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
				enum channel_tlvs expected_tlv)
{
	WARN(expected_tlv != vf->op_current,
	     "lock mismatch: expected %d found %d", expected_tlv,
	     vf->op_current);

	/* unlock the channel */
	mutex_unlock(&vf->op_mutex);

	/* log the unlock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
	   vf->abs_vfid, vf->op_current);

	/* clear the locking op */
	vf->op_current = CHANNEL_TLV_NONE;
}

int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));

	DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
	   num_vfs_param, BNX2X_NR_VIRTFN(bp));

	/* HW channel is only operational when PF is up */
	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
		return -EINVAL;
	}

	/* we are always bound by the total_vfs in the configuration space */
	if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
			  num_vfs_param, BNX2X_NR_VIRTFN(bp));
		num_vfs_param = BNX2X_NR_VIRTFN(bp);
	}

	bp->requested_nr_virtfn = num_vfs_param;
	if (num_vfs_param == 0) {
		pci_disable_sriov(dev);
		return 0;
	} else {
		return bnx2x_enable_sriov(bp);
	}
}

int bnx2x_enable_sriov(struct bnx2x *bp)
{
	int rc = 0, req_vfs = bp->requested_nr_virtfn;

	rc = pci_enable_sriov(bp->pdev, req_vfs);
	if (rc) {
		BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
		return rc;
	}
	DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
	return req_vfs;
}

void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
{
	int vfidx;
	struct pf_vf_bulletin_content *bulletin;

	DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
	for_each_vf(bp, vfidx) {
		bulletin = BP_VF_BULLETIN(bp, vfidx);
		if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
			bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
	}
}
void bnx2x_disable_sriov(struct bnx2x *bp)
{
	pci_disable_sriov(bp->pdev);
}

static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
			     struct bnx2x_virtf **vf,
			     struct pf_vf_bulletin_content **bulletin)
{
	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("vf ndo called even though PF is down\n");
		return -EINVAL;
	}

	if (!IS_SRIOV(bp)) {
		BNX2X_ERR("vf ndo called even though sriov is disabled\n");
		return -EINVAL;
	}

	if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
			  vfidx, BNX2X_NR_VIRTFN(bp));
		return -EINVAL;
	}

	/* init members */
	*vf = BP_VF(bp, vfidx);
	*bulletin = BP_VF_BULLETIN(bp, vfidx);

	if (!*vf) {
		BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
			  vfidx);
		return -EINVAL;
	}

	if (!*bulletin) {
		BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n",
			  vfidx);
		return -EINVAL;
	}

	return 0;
}

int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
			struct ifla_vf_info *ivi)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;
	struct bnx2x_vlan_mac_obj *mac_obj;
	struct bnx2x_vlan_mac_obj *vlan_obj;
	int rc;

	/* sanity and init */
	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
	if (rc)
		return rc;
	mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
	vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
	if (!mac_obj || !vlan_obj) {
		BNX2X_ERR("VF partially initialized\n");
		return -EINVAL;
	}

	ivi->vf = vfidx;
	ivi->qos = 0;
	ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
	ivi->spoofchk = 1; /* always enabled */
	if (vf->state == VF_ENABLED) {
		/* mac and vlan are in vlan_mac objects */
		mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
					0, ETH_ALEN);
		vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan,
					 0, VLAN_HLEN);
	} else {
		/* mac */
		if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
			/* mac configured by ndo so it's in the bulletin board */
			memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
		else
			/* function has not been loaded yet. Show mac as 0s */
			memset(&ivi->mac, 0, ETH_ALEN);

		/* vlan */
		if (bulletin->valid_bitmap & (1 << VLAN_VALID))
			/* vlan configured by ndo so it's in the bulletin board */
			memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
		else
			/* function has not been loaded yet. Show vlans as 0s */
			memset(&ivi->vlan, 0, VLAN_HLEN);
	}

	return 0;
}
/* New mac for VF. Consider these cases:
 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
 *    supply at acquire.
 * 2. VF has already been acquired but has not yet initialized - store in local
 *    bulletin board. mac will be posted on VF bulletin board after VF init. VF
 *    will configure this mac when it is ready.
 * 3. VF has already initialized but has not yet setup a queue - post the new
 *    mac on VF's bulletin board right now. VF will configure this mac when it
 *    is ready.
 * 4. VF has already set a queue - delete any macs already configured for this
 *    queue and manually config the new mac.
 * In any event, once this function has been called refuse any attempts by the
 * VF to configure any mac for itself except for this mac. In case of a race
 * where the VF fails to see the new post on its bulletin board before sending a
 * mac configuration request, the PF will simply fail the request and VF can try
 * again after consulting its bulletin board.
 */
int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc, q_logical_state;
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;

	/* sanity and init */
	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
	if (rc)
		return rc;
	if (!is_valid_ether_addr(mac)) {
		BNX2X_ERR("mac address invalid\n");
		return -EINVAL;
	}

	/* update PF's copy of the VF's bulletin. Will no longer accept mac
	 * configuration requests from the vf unless they match this mac
	 */
	bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
	memcpy(bulletin->mac, mac, ETH_ALEN);

	/* Post update on VF's bulletin board */
	rc = bnx2x_post_vf_bulletin(bp, vfidx);
	if (rc) {
		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
		return rc;
	}

	/* is vf initialized and queue set up? */
	q_logical_state =
		bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
	if (vf->state == VF_ENABLED &&
	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		/* configure the mac in device on this vf's queue */
		unsigned long ramrod_flags = 0;
		struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);

		/* must lock vfpf channel to protect against vf flows */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);

		/* remove existing eth macs */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete eth macs\n");
			return -EINVAL;
		}

		/* remove existing uc list macs */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete uc_list macs\n");
			return -EINVAL;
		}

		/* configure the new mac to device */
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
				  BNX2X_ETH_MAC, &ramrod_flags);

		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
	}

	return 0;
}
int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc, q_logical_state;
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;

	/* sanity and init */
	rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
	if (rc)
		return rc;

	if (vlan > 4095) {
		BNX2X_ERR("illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
	   vfidx, vlan, 0);

	/* update PF's copy of the VF's bulletin. No point in posting the vlan
	 * to the VF since it doesn't have anything to do with it. But it is
	 * useful to store it here in case the VF is not up yet and we can
	 * only configure the vlan later when it does come up.
	 */
	bulletin->valid_bitmap |= 1 << VLAN_VALID;
	bulletin->vlan = vlan;

	/* is vf initialized and queue set up? */
	q_logical_state =
		bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
	if (vf->state == VF_ENABLED &&
	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		/* configure the vlan in device on this vf's queue */
		unsigned long ramrod_flags = 0;
		unsigned long vlan_mac_flags = 0;
		struct bnx2x_vlan_mac_obj *vlan_obj =
			&bnx2x_vfq(vf, 0, vlan_obj);
		struct bnx2x_vlan_mac_ramrod_params ramrod_param;
		struct bnx2x_queue_state_params q_params = {NULL};
		struct bnx2x_queue_update_params *update_params;

		memset(&ramrod_param, 0, sizeof(ramrod_param));

		/* must lock vfpf channel to protect against vf flows */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);

		/* remove existing vlans */
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
					  &ramrod_flags);
		if (rc) {
			BNX2X_ERR("failed to delete vlans\n");
			return -EINVAL;
		}

		/* send queue update ramrod to configure default vlan and silent
		 * vlan removal
		 */
		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
		q_params.cmd = BNX2X_Q_CMD_UPDATE;
		q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj);
		update_params = &q_params.params.update;
		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
			  &update_params->update_flags);
		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
			  &update_params->update_flags);

		if (vlan == 0) {
			/* if vlan is 0 then we want to leave the VF traffic
			 * untagged, and leave the incoming traffic untouched
			 * (i.e. do not remove any vlan tags).
			 */
			__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
				    &update_params->update_flags);
			__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
				    &update_params->update_flags);
		} else {
			/* configure the new vlan to device */
			__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
			ramrod_param.vlan_mac_obj = vlan_obj;
			ramrod_param.ramrod_flags = ramrod_flags;
			ramrod_param.user_req.u.vlan.vlan = vlan;
			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
			rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
			if (rc) {
				BNX2X_ERR("failed to configure vlan\n");
				return -EINVAL;
			}

			/* configure default vlan to vf queue and set silent
			 * vlan removal (the vf remains unaware of this vlan).
			 */
			update_params = &q_params.params.update;
			__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
				  &update_params->update_flags);
			__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
				  &update_params->update_flags);
			update_params->def_vlan = vlan;
		}

		/* Update the Queue state */
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to configure default VLAN\n");
			return rc;
		}

		/* clear the flag indicating that this VF needs its vlan
		 * (will only be set if the HV configured the vlan before the
		 * vf was up and we were called because the VF came up later)
		 */
		vf->cfg_flags &= ~VF_CFG_VLAN;

		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
	}
	return 0;
}
/* crc is the first field in the bulletin board. Compute the crc over the
 * entire bulletin board excluding the crc field itself. Use the length field
 * as the Bulletin Board was posted by a PF with possibly a different version
 * from the vf which will sample it. Therefore, the length is computed by the
 * PF and then used blindly by the VF.
 */
u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
			  struct pf_vf_bulletin_content *bulletin)
{
	return crc32(BULLETIN_CRC_SEED,
		     ((u8 *)bulletin) + sizeof(bulletin->crc),
		     bulletin->length - sizeof(bulletin->crc));
}

/* Check for new posts on the bulletin board */
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
{
	struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
	int attempts;

	/* bulletin board hasn't changed since last sample */
	if (bp->old_bulletin.version == bulletin.version)
		return PFVF_BULLETIN_UNCHANGED;

	/* validate crc of new bulletin board */
	if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
		/* sampling the structure mid-post may result in corrupted
		 * data; validate the crc to ensure coherency.
		 */
		for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
			bulletin = bp->pf2vf_bulletin->content;
			if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
								  &bulletin))
				break;
			BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
				  bulletin.crc,
				  bnx2x_crc_vf_bulletin(bp, &bulletin));
		}
		if (attempts >= BULLETIN_ATTEMPTS) {
			BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
				  attempts);
			return PFVF_BULLETIN_CRC_ERR;
		}
	}

	/* the mac address in bulletin board is valid and is new */
	if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
	    memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
		/* update new mac to net device */
		memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
	}

	/* the vlan in bulletin board is valid and is new */
	if (bulletin.valid_bitmap & 1 << VLAN_VALID)
		memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);

	/* copy new bulletin board to bp */
	bp->old_bulletin = bulletin;

	return PFVF_BULLETIN_UPDATED;
}

void bnx2x_timer_sriov(struct bnx2x *bp)
{
	bnx2x_sample_bulletin(bp);

	/* if channel is down we need to self destruct */
	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
			&bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
	}
}

void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
{
	/* vf doorbells are embedded within the regview */
	return bp->regview + PXP_VF_ADDR_DB_START;
}

int bnx2x_vf_pci_alloc(struct bnx2x *bp)
{
	mutex_init(&bp->vf2pf_mutex);

	/* allocate vf2pf mailbox for vf to pf channel */
	BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
			sizeof(struct bnx2x_vf_mbx_msg));

	/* allocate pf 2 vf bulletin board */
	BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
			sizeof(union pf_vf_bulletin));

	return 0;

alloc_mem_err:
	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
		       sizeof(struct bnx2x_vf_mbx_msg));
	BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
		       sizeof(union pf_vf_bulletin));
	return -ENOMEM;
}
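/* Illustrative teardown sketch (hypothetical helper, not part of the
 * driver): releasing the two channel buffers allocated above. This
 * relies on BNX2X_PCI_FREE skipping NULL addresses, just as the
 * alloc_mem_err path above does.
 */
static inline void bnx2x_example_vf_pci_free(struct bnx2x *bp)
{
	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
		       sizeof(struct bnx2x_vf_mbx_msg));
	BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
		       sizeof(union pf_vf_bulletin));
}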
int bnx2x_open_epilog(struct bnx2x *bp)
{
	/* Enable sriov via delayed work. This must be done via delayed work
	 * because it causes the probe of the vf devices to be run, which
	 * invokes register_netdevice which must have rtnl lock taken. As we
	 * are holding the lock right now, that could only work if the probe
	 * would not take the lock. However, as the probe of the vf may be
	 * called from other contexts as well (such as when passthrough to a
	 * vm fails) it can't assume the lock is being held for it. Using
	 * delayed work here allows the probe code to simply take the lock
	 * (i.e. wait for it to be released if it is being held). We only
	 * want to do this if the number of VFs was set before PF driver was
	 * loaded.
	 */
	if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
	}

	return 0;
}

void bnx2x_iov_channel_down(struct bnx2x *bp)
{
	int vf_idx;
	struct pf_vf_bulletin_content *bulletin;

	if (!IS_SRIOV(bp))
		return;

	for_each_vf(bp, vf_idx) {
		/* locate this VF's bulletin board and update the channel down
		 * bit
		 */
		bulletin = BP_VF_BULLETIN(bp, vf_idx);
		bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;

		/* update vf bulletin board */
		bnx2x_post_vf_bulletin(bp, vf_idx);
	}
}
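/* Illustrative sketch (hypothetical helper, not part of the driver):
 * the vf-pf channel lock pairing used by the PF-side ndo flows above.
 * Any PF-side configuration that can race with VF mailbox traffic
 * brackets its work with the same TLV on lock and unlock, shown here
 * with CHANNEL_TLV_PF_SET_MAC.
 */
static inline void bnx2x_example_channel_locked_op(struct bnx2x *bp,
						   struct bnx2x_virtf *vf)
{
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
	/* ... perform the PF-side configuration for this VF here ... */
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
}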