/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>

/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}

static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
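	/* bnx2x_vf_idx_by_abs_fid() runs off the end of the VF array and
	 * returns BNX2X_NR_VIRTFN(bp) when no VF matches, so the range
	 * check here turns an unknown abs_vfid into a NULL result.
	 */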
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
				u8 update)
{
	/* acking a VF sb through the PF - use the GRC */
	u32 ctl;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
	mmiowb();
	barrier();

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();
}

/* VFOP - VF slow-path operation support */

#define BNX2X_VFOP_FILTER_ADD_CNT_MAX		0x10000

/* VFOP operations states */
enum bnx2x_vfop_qctor_state {
	BNX2X_VFOP_QCTOR_INIT,
	BNX2X_VFOP_QCTOR_SETUP,
	BNX2X_VFOP_QCTOR_INT_EN
};

enum bnx2x_vfop_qdtor_state {
	BNX2X_VFOP_QDTOR_HALT,
	BNX2X_VFOP_QDTOR_TERMINATE,
	BNX2X_VFOP_QDTOR_CFCDEL,
	BNX2X_VFOP_QDTOR_DONE
};

enum bnx2x_vfop_vlan_mac_state {
	BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
	BNX2X_VFOP_VLAN_MAC_CLEAR,
	BNX2X_VFOP_VLAN_MAC_CHK_DONE,
	BNX2X_VFOP_MAC_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST_0
};

enum bnx2x_vfop_qsetup_state {
	BNX2X_VFOP_QSETUP_CTOR,
	BNX2X_VFOP_QSETUP_VLAN0,
	BNX2X_VFOP_QSETUP_DONE
};

enum bnx2x_vfop_mcast_state {
	BNX2X_VFOP_MCAST_DEL,
	BNX2X_VFOP_MCAST_ADD,
	BNX2X_VFOP_MCAST_CHK_DONE
};

enum bnx2x_vfop_qflr_state {
	BNX2X_VFOP_QFLR_CLR_VLAN,
	BNX2X_VFOP_QFLR_CLR_MAC,
	BNX2X_VFOP_QFLR_TERMINATE,
	BNX2X_VFOP_QFLR_DONE
};

enum bnx2x_vfop_flr_state {
	BNX2X_VFOP_FLR_QUEUES,
	BNX2X_VFOP_FLR_HW
};

enum bnx2x_vfop_close_state {
	BNX2X_VFOP_CLOSE_QUEUES,
	BNX2X_VFOP_CLOSE_HW
};

enum bnx2x_vfop_rxmode_state {
	BNX2X_VFOP_RXMODE_CONFIG,
	BNX2X_VFOP_RXMODE_DONE
};

enum bnx2x_vfop_qteardown_state {
	BNX2X_VFOP_QTEARDOWN_RXMODE,
	BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
	BNX2X_VFOP_QTEARDOWN_CLR_MAC,
	BNX2X_VFOP_QTEARDOWN_QDTOR,
	BNX2X_VFOP_QTEARDOWN_DONE
};

#define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0)

void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	DP(BNX2X_MSG_IOV,
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->tx.sb_cq_index,
	   init_params->tx.hc_rate,
	   setup_params->flags,
	   setup_params->txq_params.traffic_type);
}
void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->rx.sb_cq_index,
	   init_params->rx.hc_rate,
	   setup_params->gen_params.mtu,
	   rxq_params->buf_sz,
	   rxq_params->sge_buf_sz,
	   rxq_params->max_sges_pkt,
	   rxq_params->tpa_agg_sz,
	   setup_params->flags,
	   rxq_params->drop_flags,
	   rxq_params->cache_line_log);
}

void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vfop_qctor_params *p,
			   unsigned long q_type)
{
	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

	/* INIT */

	/* Enable host coalescing in the transition to INIT state */
	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

	/* FW SB ID */
	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

	/* context */
	init_p->cxts[0] = q->cxt;

	/* SETUP */

	/* Setup-op general parameters */
	setup_p->gen_params.spcl_id = vf->sp_cl_id;
	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);

	/* Setup-op pause params:
	 * Nothing to do, the pause thresholds are set by default to 0 which
	 * effectively turns off the feature for this queue. We don't want
	 * one queue (VF) to interfere with another queue (another VF)
	 */
	if (vf->cfg_flags & VF_CFG_FW_FC)
		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
			  vf->abs_vfid);
	/* Setup-op flags:
	 * collect statistics, zero statistics, local-switching, security,
	 * OV for Flex10, RSS and MCAST for leading
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

	/* for VFs, enable tx switching, bd coherency, and mac address
	 * anti-spoofing
	 */
	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

	if (vfq_is_leading(q)) {
		__set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
		__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
	}

	/* Setup-op rx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
	}

	/* Setup-op tx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	}
}
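/* VFOP state machines: each bnx2x_vfop_*() handler below is both the entry
 * point of its operation and the completion callback that is re-invoked as
 * the underlying ramrods complete. A handler dispatches on vfop->state,
 * programs the next state, issues the ramrod for the current one, and ends
 * the case with bnx2x_vfop_finalize(), which either returns (completion
 * pending), re-schedules the handler for the next state, or jumps to the
 * op_err/op_done labels; this is why the cases carry no break statements.
 */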
/* VFOP queue construction */
static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qctor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QCTOR_INIT:

		/* has this queue already been opened? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
			DP(BNX2X_MSG_IOV,
			   "Entered qctor but queue was already up. Aborting gracefully\n");
			goto op_done;
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_SETUP;

		q_params->cmd = BNX2X_Q_CMD_INIT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_SETUP:
		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_INT_EN;

		/* copy pre-prepared setup params to the queue-state params */
		vfop->op_p->qctor.qstate.params.setup =
			vfop->op_p->qctor.prep_qsetup;

		q_params->cmd = BNX2X_Q_CMD_SETUP;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_INT_EN:

		/* enable interrupts */
		bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
				    USTORM_ID, 0, IGU_INT_ENABLE, 0);
		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
op_done:
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qctor.qid = qid;
		vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);

		bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
				 bnx2x_vfop_qctor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue destruction */
static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qdtor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QDTOR_HALT:

		/* has this queue already been stopped? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_STOPPED) {
			DP(BNX2X_MSG_IOV,
			   "Entered qdtor but queue was already stopped. Aborting gracefully\n");
			goto op_done;
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;

		q_params->cmd = BNX2X_Q_CMD_HALT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_TERMINATE:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;

		q_params->cmd = BNX2X_Q_CMD_TERMINATE;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_CFCDEL:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_DONE;

		q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
	BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
op_done:
	case BNX2X_VFOP_QDTOR_DONE:
		/* invalidate the context */
		qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
		qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_queue_state_params *qstate =
			&vf->op_params.qctor.qstate;

		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qdtor.qid = qid;
		vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);

		bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
				 bnx2x_vfop_qdtor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
					     cmd->block);
	}
	DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop.\n", vf->abs_vfid);
	return -ENOMEM;
}

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
	if (vf) {
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;
		++vf_sb_count(vf);
	}
}

/* VFOP MAC/VLAN helpers */
static inline void bnx2x_vfop_credit(struct bnx2x *bp,
				     struct bnx2x_vfop *vfop,
				     struct bnx2x_vlan_mac_obj *obj)
{
	struct bnx2x_vfop_args_filters *args = &vfop->args.filters;

	/* update credit only if there is no error
	 * and a valid credit counter
	 */
	if (!vfop->rc && args->credit) {
		int cnt = 0;
		struct list_head *pos;

		list_for_each(pos, &obj->head)
			cnt++;

		atomic_set(args->credit, cnt);
	}
}

static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
				   struct bnx2x_vfop_filter *pos,
				   struct bnx2x_vlan_mac_data *user_req)
{
	user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
		BNX2X_VLAN_MAC_DEL;

	switch (pos->type) {
	case BNX2X_VFOP_FILTER_MAC:
		memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
		break;
	case BNX2X_VFOP_FILTER_VLAN:
		user_req->u.vlan.vlan = pos->vid;
		break;
	default:
		BNX2X_ERR("Invalid filter type, skipping\n");
		return 1;
	}
	return 0;
}
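/* VF queues are created with VLAN 0 configured (see BNX2X_VFOP_QSETUP_VLAN0
 * below), and the VLAN-list flow removes/restores it around list updates,
 * so this helper treats -EEXIST on the add as success.
 */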
static int
bnx2x_vfop_config_vlan0(struct bnx2x *bp,
			struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
			bool add)
{
	int rc;

	vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
				       BNX2X_VLAN_MAC_DEL;
	vlan_mac->user_req.u.vlan.vlan = 0;

	rc = bnx2x_config_vlan_mac(bp, vlan_mac);
	if (rc == -EEXIST)
		rc = 0;
	return rc;
}

static int bnx2x_vfop_config_list(struct bnx2x *bp,
				  struct bnx2x_vfop_filters *filters,
				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
{
	struct bnx2x_vfop_filter *pos, *tmp;
	struct list_head rollback_list, *filters_list = &filters->head;
	struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
	int rc = 0, cnt = 0;

	INIT_LIST_HEAD(&rollback_list);

	list_for_each_entry_safe(pos, tmp, filters_list, link) {
		if (bnx2x_vfop_set_user_req(bp, pos, user_req))
			continue;

		rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (rc >= 0) {
			cnt += pos->add ? 1 : -1;
			list_move(&pos->link, &rollback_list);
			rc = 0;
		} else if (rc == -EEXIST) {
			rc = 0;
		} else {
			BNX2X_ERR("Failed to add a new vlan_mac command\n");
			break;
		}
	}

	/* rollback if error or too many rules added */
	if (rc || cnt > filters->add_cnt) {
		BNX2X_ERR("error or too many rules added. Performing rollback\n");
		list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
			pos->add = !pos->add;	/* reverse op */
			bnx2x_vfop_set_user_req(bp, pos, user_req);
			bnx2x_config_vlan_mac(bp, vlan_mac);
			list_del(&pos->link);
		}
		cnt = 0;
		if (!rc)
			rc = -EINVAL;
	}
	filters->add_cnt = cnt;
	return rc;
}

/* VFOP set VLAN/MAC */
static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
	struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
	struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;

	enum bnx2x_vfop_vlan_mac_state state = vfop->state;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	bnx2x_vfop_reset_wq(vf);

	switch (state) {
	case BNX2X_VFOP_VLAN_MAC_CLEAR:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do delete */
		vfop->rc = obj->delete_all(bp, obj,
					   &vlan_mac->user_req.vlan_mac_flags,
					   &vlan_mac->ramrod_flags);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do config */
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (vfop->rc == -EEXIST)
			vfop->rc = 0;

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
		vfop->rc = !!obj->raw.check_pending(&obj->raw);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MAC_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do list config */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (vfop->rc)
			goto op_err;

		set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0;

		/* remove vlan0 - could be no-op */
		vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
		if (vfop->rc)
			goto op_err;
		/* Do vlan list config. if this operation fails we try to
		 * restore vlan0 to keep the queue in working order
		 */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (!vfop->rc) {
			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		}
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */

	case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		if (list_empty(&obj->head))
			/* add vlan0 */
			vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
op_done:
	kfree(filters);
	bnx2x_vfop_credit(bp, vfop, obj);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

struct bnx2x_vfop_vlan_mac_flags {
	bool drv_only;
	bool dont_consume;
	bool single_cmd;
	bool add;
};

static void
bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
				struct bnx2x_vfop_vlan_mac_flags *flags)
{
	struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;

	memset(ramrod, 0, sizeof(*ramrod));

	/* ramrod flags */
	if (flags->drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
	if (flags->single_cmd)
		set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);

	/* mac_vlan flags */
	if (flags->dont_consume)
		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);

	/* cmd */
	ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
}

static inline void
bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
			   struct bnx2x_vfop_vlan_mac_flags *flags)
{
	bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
	set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
}

static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
				     struct bnx2x_virtf *vf,
				     struct bnx2x_vfop_cmd *cmd,
				     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single */
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false /* don't care */,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}
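/* Credit convention for the *_cmd wrappers below: a NULL .credit means the
 * operation consumes CAM credit as usual, while a non-NULL counter (the
 * per-queue vlan_count) sets BNX2X_DONT_CONSUME_CAM_CREDIT and has
 * bnx2x_vfop_credit() recount the configured rules into that counter once
 * the operation completes.
 */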
int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    struct bnx2x_vfop_filters *macs,
			    int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = macs,
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false, /* don't care since only the items in
				       * the filters list affect the sp
				       * operation, not the list itself
				       */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    int qid, u16 vid, bool add)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL, /* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = false,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = add,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
		ramrod->user_req.u.vlan.vlan = vid;

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      struct bnx2x_vfop_cmd *cmd,
				      int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL, /* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false, /* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
			     struct bnx2x_virtf *vf,
			     struct bnx2x_vfop_cmd *cmd,
			     struct bnx2x_vfop_filters *vlans,
			     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = vlans,
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false, /* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
			atomic_read(filters.credit);

		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue setup (queue constructor + set vlan 0) */
static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qctor.qid;
	enum bnx2x_vfop_qsetup_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_qsetup,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QSETUP_CTOR:
		/* init the queue ctor command */
		vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
		vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QSETUP_VLAN0:
		/* skip if non-leading or FPGA/EMU */
		if (qid)
			goto op_done;

		/* init the queue set-vlan command (for vlan 0) */
		vfop->state = BNX2X_VFOP_QSETUP_DONE;
		vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QSETUP_DONE:
		vf->cfg_flags |= VF_CFG_VLAN;
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
			&bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}

int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qctor.qid = qid;

		bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
				 bnx2x_vfop_qsetup, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qflr_state state = vfop->state;
	struct bnx2x_queue_state_params *qstate;
	struct bnx2x_vfop_cmd cmd;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qflr;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QFLR_CLR_VLAN:
		/* vlan-clear-all: driver-only, don't consume credit */
		vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QFLR_CLR_MAC:
		/* mac-clear-all: driver only, consume credit */
		vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
		DP(BNX2X_MSG_IOV,
"VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d", 1027 vf->abs_vfid, vfop->rc); 1028 if (vfop->rc) 1029 goto op_err; 1030 return; 1031 1032 case BNX2X_VFOP_QFLR_TERMINATE: 1033 qstate = &vfop->op_p->qctor.qstate; 1034 memset(qstate , 0, sizeof(*qstate)); 1035 qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj); 1036 vfop->state = BNX2X_VFOP_QFLR_DONE; 1037 1038 DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n", 1039 vf->abs_vfid, qstate->q_obj->state); 1040 1041 if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) { 1042 qstate->q_obj->state = BNX2X_Q_STATE_STOPPED; 1043 qstate->cmd = BNX2X_Q_CMD_TERMINATE; 1044 vfop->rc = bnx2x_queue_state_change(bp, qstate); 1045 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND); 1046 } else { 1047 goto op_done; 1048 } 1049 1050 op_err: 1051 BNX2X_ERR("QFLR[%d:%d] error: rc %d\n", 1052 vf->abs_vfid, qid, vfop->rc); 1053 op_done: 1054 case BNX2X_VFOP_QFLR_DONE: 1055 bnx2x_vfop_end(bp, vf, vfop); 1056 return; 1057 default: 1058 bnx2x_vfop_default(state); 1059 } 1060 op_pending: 1061 return; 1062 } 1063 1064 static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp, 1065 struct bnx2x_virtf *vf, 1066 struct bnx2x_vfop_cmd *cmd, 1067 int qid) 1068 { 1069 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 1070 1071 if (vfop) { 1072 vfop->args.qx.qid = qid; 1073 bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN, 1074 bnx2x_vfop_qflr, cmd->done); 1075 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr, 1076 cmd->block); 1077 } 1078 return -ENOMEM; 1079 } 1080 1081 /* VFOP multi-casts */ 1082 static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf) 1083 { 1084 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 1085 struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast; 1086 struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw; 1087 struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list; 1088 enum bnx2x_vfop_mcast_state state = vfop->state; 1089 int i; 1090 1091 bnx2x_vfop_reset_wq(vf); 1092 1093 if (vfop->rc < 0) 1094 goto op_err; 1095 1096 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); 1097 1098 switch (state) { 1099 case BNX2X_VFOP_MCAST_DEL: 1100 /* clear existing mcasts */ 1101 vfop->state = BNX2X_VFOP_MCAST_ADD; 1102 vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL); 1103 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); 1104 1105 case BNX2X_VFOP_MCAST_ADD: 1106 if (raw->check_pending(raw)) 1107 goto op_pending; 1108 1109 if (args->mc_num) { 1110 /* update mcast list on the ramrod params */ 1111 INIT_LIST_HEAD(&mcast->mcast_list); 1112 for (i = 0; i < args->mc_num; i++) 1113 list_add_tail(&(args->mc[i].link), 1114 &mcast->mcast_list); 1115 /* add new mcasts */ 1116 vfop->state = BNX2X_VFOP_MCAST_CHK_DONE; 1117 vfop->rc = bnx2x_config_mcast(bp, mcast, 1118 BNX2X_MCAST_CMD_ADD); 1119 } 1120 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); 1121 1122 case BNX2X_VFOP_MCAST_CHK_DONE: 1123 vfop->rc = raw->check_pending(raw) ? 
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
op_done:
	kfree(args->mc);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 bnx2x_mac_addr_t *mcasts,
			 int mcast_num, bool drv_only)
{
	struct bnx2x_vfop *vfop = NULL;
	size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
	struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
					   NULL;

	if (!mc_sz || mc) {
		vfop = bnx2x_vfop_add(bp, vf);
		if (vfop) {
			int i;
			struct bnx2x_mcast_ramrod_params *ramrod =
				&vf->op_params.mcast;

			/* set ramrod params */
			memset(ramrod, 0, sizeof(*ramrod));
			ramrod->mcast_obj = &vf->mcast_obj;
			if (drv_only)
				set_bit(RAMROD_DRV_CLR_ONLY,
					&ramrod->ramrod_flags);

			/* copy mcasts pointers */
			vfop->args.mc_list.mc_num = mcast_num;
			vfop->args.mc_list.mc = mc;
			for (i = 0; i < mcast_num; i++)
				mc[i].mac = mcasts[i];

			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
					 bnx2x_vfop_mcast, cmd->done);
			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
						     cmd->block);
		} else {
			kfree(mc);
		}
	}
	return -ENOMEM;
}

/* VFOP rx-mode */
static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
	enum bnx2x_vfop_rxmode_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_RXMODE_CONFIG:
		/* next state */
		vfop->state = BNX2X_VFOP_RXMODE_DONE;

		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
	BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
op_done:
	case BNX2X_VFOP_RXMODE_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}
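/* Note: the same accept_flags are applied to both the RX and the TX
 * (tx-switching) paths, and accept_flags == 0 drops everything - this is
 * how bnx2x_vfop_qdown() below quiesces a queue before tearing it down.
 */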
int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid, unsigned long accept_flags)
{
	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_rx_mode_ramrod_params *ramrod =
			&vf->op_params.rx_mode;

		memset(ramrod, 0, sizeof(*ramrod));

		/* Prepare ramrod parameters */
		ramrod->cid = vfq->cid;
		ramrod->cl_id = vfq_cl_id(vf, vfq);
		ramrod->rx_mode_obj = &bp->rx_mode_obj;
		ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);

		ramrod->rx_accept_flags = accept_flags;
		ramrod->tx_accept_flags = accept_flags;
		ramrod->pstate = &vf->filter_state;
		ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

		set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
		set_bit(RAMROD_RX, &ramrod->ramrod_flags);
		set_bit(RAMROD_TX, &ramrod->ramrod_flags);

		ramrod->rdata =
			bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
		ramrod->rdata_mapping =
			bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);

		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
				 bnx2x_vfop_rxmode, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
 * queue destructor)
 */
static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qteardown_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qdown;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QTEARDOWN_RXMODE:
		/* Drop all */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
		vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
		/* vlan-clear-all: don't consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
		/* mac-clear-all: consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_QDTOR:
		/* run the queue destruction flow */
		DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
		vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
		DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
		vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
		DP(BNX2X_MSG_IOV, "returned from cmd\n");
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);

	case BNX2X_VFOP_QTEARDOWN_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}

int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qx.qid = qid;
		bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE,
				 bnx2x_vfop_qdown, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
					     cmd->block);
	}

	return -ENOMEM;
}

/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */
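/* Typical pretend bracketing, as used by bnx2x_vf_enable_access() below:
 *
 *	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
 *	bnx2x_vf_enable_internal(bp, true);
 *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));	<- un-pretend
 */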
/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}

static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;
	u32 val;

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	if (vf->cfg_flags & VF_CFG_INT_SIMD)
		val |= IGU_VF_CONF_SINGLE_ISR_EN;
	val &= ~IGU_VF_CONF_PARENT_MASK;
	val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT;	/* parent PF */
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	DP(BNX2X_MSG_IOV,
	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
	   vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
				       false /* VF */);

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
				    IGU_INT_DISABLE, 1);
	}
}

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
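/* FLR epilog helper: reports whether the VF still has PCIe transactions in
 * flight. An unknown abs_vfid (or a missing pci_dev) reads as "not pending"
 * so the FLR flow can proceed.
 */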
static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		return false;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}

/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
{
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;

	/* divvy up vlan rules */
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
}

/* FLR routines: */
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* reset the state variables */
	bnx2x_iov_static_resc(bp, &vf->alloc_resc);
	vf->state = VF_FREE;
}

static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	/* DQ usage counter */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
					"DQ VF usage counter timed out",
					poll_cnt);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* FW cleanup command - poll for the results */
	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
				   poll_cnt))
		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

	/* verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);
}
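/* Per-VF FLR state machine: flush each queue in turn (BNX2X_VFOP_FLR_QUEUES,
 * iterating qids through args.qx.qid), drop the multicast configuration,
 * then run the HW/FW cleanup and release the VF's resources
 * (BNX2X_VFOP_FLR_HW).
 */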
static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
	enum bnx2x_vfop_flr_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_flr,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_FLR_QUEUES:
		/* the cleanup operations are valid if and only if the VF
		 * was first acquired.
		 */
		if (++(qx->qid) < vf_rxq_count(vf)) {
			vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
						       qx->qid);
			if (vfop->rc)
				goto op_err;
			return;
		}
		/* remove multicasts */
		vfop->state = BNX2X_VFOP_FLR_HW;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
						0, true);
		if (vfop->rc)
			goto op_err;
		return;
	case BNX2X_VFOP_FLR_HW:

		/* dispatch final cleanup and wait for HW queues to flush */
		bnx2x_vf_flr_clnup_hw(bp, vf);

		/* release VF resources */
		bnx2x_vf_free_resc(bp, vf);

		/* re-open the mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);

		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	vf->flr_clnup_stage = VF_FLR_ACK;
	bnx2x_vfop_end(bp, vf, vfop);
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
}

static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
			      struct bnx2x_virtf *vf,
			      vfop_handler_t done)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		vfop->args.qx.qid = -1; /* loop */
		bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
				 bnx2x_vfop_flr, done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
	}
	return -ENOMEM;
}

static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
{
	int i = prev_vf ? prev_vf->index + 1 : 0;
	struct bnx2x_virtf *vf;

	/* find next VF to cleanup */
next_vf_to_clean:
	for (;
	     i < BNX2X_NR_VIRTFN(bp) &&
	     (bnx2x_vf(bp, i, state) != VF_RESET ||
	      bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
	     i++)
		;

	DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
	   BNX2X_NR_VIRTFN(bp));

	if (i < BNX2X_NR_VIRTFN(bp)) {
		vf = BP_VF(bp, i);

		/* lock the vf pf channel */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

		/* invoke the VF FLR SM */
		if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
			BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
				  vf->abs_vfid);

			/* mark the VF to be ACKED and continue */
			vf->flr_clnup_stage = VF_FLR_ACK;
			goto next_vf_to_clean;
		}
		return;
	}

	/* we are done, update vf records */
	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);

		if (vf->flr_clnup_stage != VF_FLR_ACK)
			continue;

		vf->flr_clnup_stage = VF_FLR_EPILOG;
	}

	/* Acknowledge the handled VFs.
	 * We acknowledge all the VFs for which an FLR was requested, even
	 * those we never opened, since otherwise the mcp will interrupt us
	 * immediately again if we only ack some of the bits, resulting in an
	 * endless loop.
	 * This can happen for example in KVM where an 'all ones' FLR request
	 * is sometimes given by the hypervisor.
	 */
	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
			  bp->vfdb->flrd_vfs[i]);

	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

	/* clear the acked bits - better yet if the MCP implemented
	 * write to clear semantics
	 */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}

void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
	int i;

	/* Read FLR'd VFs */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

	DP(BNX2X_MSG_MCP,
	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);
		u32 reset = 0;

		if (vf->abs_vfid < 32)
			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
		else
			reset = bp->vfdb->flrd_vfs[1] &
				(1 << (vf->abs_vfid - 32));

		if (reset) {
			/* set as reset and ready for cleanup */
			vf->state = VF_RESET;
			vf->flr_clnup_stage = VF_FLR_CLN;

			DP(BNX2X_MSG_IOV,
			   "Initiating Final cleanup for VF %d\n",
			   vf->abs_vfid);
		}
	}

	/* do the FLR cleanup for all marked VFs */
	bnx2x_vf_flr_clnup(bp, NULL);
}

/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflect the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set VFs starting CID. If it's > 0 the preceding CIDs belong to
	 * the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size  0 - *B, 4 - 128B. We set it here to match
	 * the PF doorbell size although the 2 are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
	       BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the number of VF allowed doorbells to the full DQ range */
	REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);

	/* set the VF doorbell threshold */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}
"ON" : "OFF"); 1751 if (!IS_SRIOV(bp)) 1752 return; 1753 1754 REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); 1755 } 1756 1757 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) 1758 { 1759 struct pci_dev *dev = bp->pdev; 1760 struct bnx2x_sriov *iov = &bp->vfdb->sriov; 1761 1762 return dev->bus->number + ((dev->devfn + iov->offset + 1763 iov->stride * vfid) >> 8); 1764 } 1765 1766 static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid) 1767 { 1768 struct pci_dev *dev = bp->pdev; 1769 struct bnx2x_sriov *iov = &bp->vfdb->sriov; 1770 1771 return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff; 1772 } 1773 1774 static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf) 1775 { 1776 int i, n; 1777 struct pci_dev *dev = bp->pdev; 1778 struct bnx2x_sriov *iov = &bp->vfdb->sriov; 1779 1780 for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) { 1781 u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i); 1782 u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i); 1783 1784 size /= iov->total; 1785 vf->bars[n].bar = start + size * vf->abs_vfid; 1786 vf->bars[n].size = size; 1787 } 1788 } 1789 1790 static int bnx2x_ari_enabled(struct pci_dev *dev) 1791 { 1792 return dev->bus->self && dev->bus->self->ari_enabled; 1793 } 1794 1795 static void 1796 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) 1797 { 1798 int sb_id; 1799 u32 val; 1800 u8 fid; 1801 1802 /* IGU in normal mode - read CAM */ 1803 for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) { 1804 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4); 1805 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) 1806 continue; 1807 fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); 1808 if (!(fid & IGU_FID_ENCODE_IS_PF)) 1809 bnx2x_vf_set_igu_info(bp, sb_id, 1810 (fid & IGU_FID_VF_NUM_MASK)); 1811 1812 DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", 1813 ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"), 1814 ((fid & IGU_FID_ENCODE_IS_PF) ? 
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK)
		    : (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
}

static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}

static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}

static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
{
	int i;
	u8 queue_count = 0;

	if (IS_SRIOV(bp))
		for_each_vf(bp, i)
			queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);

	return queue_count;
}

/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i, qcount;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify is pf */
	if (IS_VF(bp))
		return 0;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
Abort SRIOV\n", 1924 BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID); 1925 return 0; 1926 } 1927 1928 /* SRIOV can be enabled only with MSIX */ 1929 if (int_mode_param == BNX2X_INT_MODE_MSI || 1930 int_mode_param == BNX2X_INT_MODE_INTX) { 1931 BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n"); 1932 return 0; 1933 } 1934 1935 err = -EIO; 1936 /* verify ari is enabled */ 1937 if (!bnx2x_ari_enabled(bp->pdev)) { 1938 BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n"); 1939 return 0; 1940 } 1941 1942 /* verify igu is in normal mode */ 1943 if (CHIP_INT_MODE_IS_BC(bp)) { 1944 BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n"); 1945 return 0; 1946 } 1947 1948 /* allocate the vfs database */ 1949 bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL); 1950 if (!bp->vfdb) { 1951 BNX2X_ERR("failed to allocate vf database\n"); 1952 err = -ENOMEM; 1953 goto failed; 1954 } 1955 1956 /* get the sriov info - Linux already collected all the pertinent 1957 * information, however the sriov structure is for the private use 1958 * of the pci module. Also we want this information regardless 1959 * of the hyper-visor. 1960 */ 1961 iov = &(bp->vfdb->sriov); 1962 err = bnx2x_sriov_info(bp, iov); 1963 if (err) 1964 goto failed; 1965 1966 /* SR-IOV capability was enabled but there are no VFs*/ 1967 if (iov->total == 0) 1968 goto failed; 1969 1970 iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param); 1971 1972 DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n", 1973 num_vfs_param, iov->nr_virtfn); 1974 1975 /* allocate the vf array */ 1976 bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) * 1977 BNX2X_NR_VIRTFN(bp), GFP_KERNEL); 1978 if (!bp->vfdb->vfs) { 1979 BNX2X_ERR("failed to allocate vf array\n"); 1980 err = -ENOMEM; 1981 goto failed; 1982 } 1983 1984 /* Initial VF init - index and abs_vfid - nr_virtfn must be set */ 1985 for_each_vf(bp, i) { 1986 bnx2x_vf(bp, i, index) = i; 1987 bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; 1988 bnx2x_vf(bp, i, state) = VF_FREE; 1989 INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head)); 1990 mutex_init(&bnx2x_vf(bp, i, op_mutex)); 1991 bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; 1992 } 1993 1994 /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ 1995 bnx2x_get_vf_igu_cam_info(bp); 1996 1997 /* get the total queue count and allocate the global queue arrays */ 1998 qcount = bnx2x_iov_get_max_queue_count(bp); 1999 2000 /* allocate the queue arrays for all VFs */ 2001 bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue), 2002 GFP_KERNEL); 2003 if (!bp->vfdb->vfqs) { 2004 BNX2X_ERR("failed to allocate vf queue array\n"); 2005 err = -ENOMEM; 2006 goto failed; 2007 } 2008 2009 return 0; 2010 failed: 2011 DP(BNX2X_MSG_IOV, "Failed err=%d\n", err); 2012 __bnx2x_iov_free_vfdb(bp); 2013 return err; 2014 } 2015 2016 void bnx2x_iov_remove_one(struct bnx2x *bp) 2017 { 2018 /* if SRIOV is not enabled there's nothing to do */ 2019 if (!IS_SRIOV(bp)) 2020 return; 2021 2022 DP(BNX2X_MSG_IOV, "about to call disable sriov\n"); 2023 pci_disable_sriov(bp->pdev); 2024 DP(BNX2X_MSG_IOV, "sriov disabled\n"); 2025 2026 /* free vf database */ 2027 __bnx2x_iov_free_vfdb(bp); 2028 } 2029 2030 void bnx2x_iov_free_mem(struct bnx2x *bp) 2031 { 2032 int i; 2033 2034 if (!IS_SRIOV(bp)) 2035 return; 2036 2037 /* free vfs hw contexts */ 2038 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 2039 struct hw_dma *cxt = &bp->vfdb->context[i]; 2040 BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size); 2041 } 2042 
2043 BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr, 2044 BP_VFDB(bp)->sp_dma.mapping, 2045 BP_VFDB(bp)->sp_dma.size); 2046 2047 BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr, 2048 BP_VF_MBX_DMA(bp)->mapping, 2049 BP_VF_MBX_DMA(bp)->size); 2050 2051 BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr, 2052 BP_VF_BULLETIN_DMA(bp)->mapping, 2053 BP_VF_BULLETIN_DMA(bp)->size); 2054 } 2055 2056 int bnx2x_iov_alloc_mem(struct bnx2x *bp) 2057 { 2058 size_t tot_size; 2059 int i, rc = 0; 2060 2061 if (!IS_SRIOV(bp)) 2062 return rc; 2063 2064 /* allocate vfs hw contexts */ 2065 tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) * 2066 BNX2X_CIDS_PER_VF * sizeof(union cdu_context); 2067 2068 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 2069 struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i); 2070 cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ); 2071 2072 if (cxt->size) { 2073 BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size); 2074 } else { 2075 cxt->addr = NULL; 2076 cxt->mapping = 0; 2077 } 2078 tot_size -= cxt->size; 2079 } 2080 2081 /* allocate vfs ramrods dma memory - client_init and set_mac */ 2082 tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp); 2083 BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping, 2084 tot_size); 2085 BP_VFDB(bp)->sp_dma.size = tot_size; 2086 2087 /* allocate mailboxes */ 2088 tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; 2089 BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping, 2090 tot_size); 2091 BP_VF_MBX_DMA(bp)->size = tot_size; 2092 2093 /* allocate local bulletin boards */ 2094 tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE; 2095 BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr, 2096 &BP_VF_BULLETIN_DMA(bp)->mapping, tot_size); 2097 BP_VF_BULLETIN_DMA(bp)->size = tot_size; 2098 2099 return 0; 2100 2101 alloc_mem_err: 2102 return -ENOMEM; 2103 } 2104 2105 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, 2106 struct bnx2x_vf_queue *q) 2107 { 2108 u8 cl_id = vfq_cl_id(vf, q); 2109 u8 func_id = FW_VF_HANDLE(vf->abs_vfid); 2110 unsigned long q_type = 0; 2111 2112 set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); 2113 set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); 2114 2115 /* Queue State object */ 2116 bnx2x_init_queue_obj(bp, &q->sp_obj, 2117 cl_id, &q->cid, 1, func_id, 2118 bnx2x_vf_sp(bp, vf, q_data), 2119 bnx2x_vf_sp_map(bp, vf, q_data), 2120 q_type); 2121 2122 DP(BNX2X_MSG_IOV, 2123 "initialized vf %d's queue object. 
func id set to %d\n", 2124 vf->abs_vfid, q->sp_obj.func_id); 2125 2126 /* mac/vlan objects are per queue, but only those 2127 * that belong to the leading queue are initialized 2128 */ 2129 if (vfq_is_leading(q)) { 2130 /* mac */ 2131 bnx2x_init_mac_obj(bp, &q->mac_obj, 2132 cl_id, q->cid, func_id, 2133 bnx2x_vf_sp(bp, vf, mac_rdata), 2134 bnx2x_vf_sp_map(bp, vf, mac_rdata), 2135 BNX2X_FILTER_MAC_PENDING, 2136 &vf->filter_state, 2137 BNX2X_OBJ_TYPE_RX_TX, 2138 &bp->macs_pool); 2139 /* vlan */ 2140 bnx2x_init_vlan_obj(bp, &q->vlan_obj, 2141 cl_id, q->cid, func_id, 2142 bnx2x_vf_sp(bp, vf, vlan_rdata), 2143 bnx2x_vf_sp_map(bp, vf, vlan_rdata), 2144 BNX2X_FILTER_VLAN_PENDING, 2145 &vf->filter_state, 2146 BNX2X_OBJ_TYPE_RX_TX, 2147 &bp->vlans_pool); 2148 2149 /* mcast */ 2150 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id, 2151 q->cid, func_id, func_id, 2152 bnx2x_vf_sp(bp, vf, mcast_rdata), 2153 bnx2x_vf_sp_map(bp, vf, mcast_rdata), 2154 BNX2X_FILTER_MCAST_PENDING, 2155 &vf->filter_state, 2156 BNX2X_OBJ_TYPE_RX_TX); 2157 2158 vf->leading_rss = cl_id; 2159 } 2160 } 2161 2162 /* called by bnx2x_nic_load */ 2163 int bnx2x_iov_nic_init(struct bnx2x *bp) 2164 { 2165 int vfid, qcount, i; 2166 2167 if (!IS_SRIOV(bp)) { 2168 DP(BNX2X_MSG_IOV, "vfdb was not allocated\n"); 2169 return 0; 2170 } 2171 2172 DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn); 2173 2174 /* let FLR complete ... */ 2175 msleep(100); 2176 2177 /* initialize vf database */ 2178 for_each_vf(bp, vfid) { 2179 struct bnx2x_virtf *vf = BP_VF(bp, vfid); 2180 2181 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) * 2182 BNX2X_CIDS_PER_VF; 2183 2184 union cdu_context *base_cxt = (union cdu_context *) 2185 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + 2186 (base_vf_cid & (ILT_PAGE_CIDS-1)); 2187 2188 DP(BNX2X_MSG_IOV, 2189 "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n", 2190 vf->abs_vfid, vf_sb_count(vf), base_vf_cid, 2191 BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt); 2192 2193 /* init statically provisioned resources */ 2194 bnx2x_iov_static_resc(bp, &vf->alloc_resc); 2195 2196 /* queues are initialized during VF-ACQUIRE */ 2197 2198 /* reserve the vf vlan credit */ 2199 bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf)); 2200 2201 vf->filter_state = 0; 2202 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); 2203 2204 /* init mcast object - This object will be re-initialized 2205 * during VF-ACQUIRE with the proper cl_id and cid. 2206 * It needs to be initialized here so that it can be safely 2207 * handled by a subsequent FLR flow. 
2208 */ 2209 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF, 2210 0xFF, 0xFF, 0xFF, 2211 bnx2x_vf_sp(bp, vf, mcast_rdata), 2212 bnx2x_vf_sp_map(bp, vf, mcast_rdata), 2213 BNX2X_FILTER_MCAST_PENDING, 2214 &vf->filter_state, 2215 BNX2X_OBJ_TYPE_RX_TX); 2216 2217 /* set the mailbox message addresses */ 2218 BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *) 2219 (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid * 2220 MBX_MSG_ALIGNED_SIZE); 2221 2222 BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping + 2223 vfid * MBX_MSG_ALIGNED_SIZE; 2224 2225 /* Enable vf mailbox */ 2226 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); 2227 } 2228 2229 /* Final VF init */ 2230 qcount = 0; 2231 for_each_vf(bp, i) { 2232 struct bnx2x_virtf *vf = BP_VF(bp, i); 2233 2234 /* fill in the BDF and bars */ 2235 vf->bus = bnx2x_vf_bus(bp, i); 2236 vf->devfn = bnx2x_vf_devfn(bp, i); 2237 bnx2x_vf_set_bars(bp, vf); 2238 2239 DP(BNX2X_MSG_IOV, 2240 "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n", 2241 vf->abs_vfid, vf->bus, vf->devfn, 2242 (unsigned)vf->bars[0].bar, vf->bars[0].size, 2243 (unsigned)vf->bars[1].bar, vf->bars[1].size, 2244 (unsigned)vf->bars[2].bar, vf->bars[2].size); 2245 2246 /* set local queue arrays */ 2247 vf->vfqs = &bp->vfdb->vfqs[qcount]; 2248 qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs); 2249 } 2250 2251 return 0; 2252 } 2253 2254 /* called by bnx2x_chip_cleanup */ 2255 int bnx2x_iov_chip_cleanup(struct bnx2x *bp) 2256 { 2257 int i; 2258 2259 if (!IS_SRIOV(bp)) 2260 return 0; 2261 2262 /* release all the VFs */ 2263 for_each_vf(bp, i) 2264 bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */ 2265 2266 return 0; 2267 } 2268 2269 /* called by bnx2x_init_hw_func, returns the next ilt line */ 2270 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) 2271 { 2272 int i; 2273 struct bnx2x_ilt *ilt = BP_ILT(bp); 2274 2275 if (!IS_SRIOV(bp)) 2276 return line; 2277 2278 /* set vfs ilt lines */ 2279 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 2280 struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i); 2281 2282 ilt->lines[line+i].page = hw_cxt->addr; 2283 ilt->lines[line+i].page_mapping = hw_cxt->mapping; 2284 ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */ 2285 } 2286 return line + i; 2287 } 2288 2289 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid) 2290 { 2291 return ((cid >= BNX2X_FIRST_VF_CID) && 2292 ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS)); 2293 } 2294 2295 static 2296 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, 2297 struct bnx2x_vf_queue *vfq, 2298 union event_ring_elem *elem) 2299 { 2300 unsigned long ramrod_flags = 0; 2301 int rc = 0; 2302 2303 /* Always push next commands out, don't wait here */ 2304 set_bit(RAMROD_CONT, &ramrod_flags); 2305 2306 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { 2307 case BNX2X_FILTER_MAC_PENDING: 2308 rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, 2309 &ramrod_flags); 2310 break; 2311 case BNX2X_FILTER_VLAN_PENDING: 2312 rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem, 2313 &ramrod_flags); 2314 break; 2315 default: 2316 BNX2X_ERR("Unsupported classification command: %d\n", 2317 elem->message.data.eth_event.echo); 2318 return; 2319 } 2320 if (rc < 0) 2321 BNX2X_ERR("Failed to schedule new commands: %d\n", rc); 2322 else if (rc > 0) 2323 DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n"); 2324 } 2325 2326 static 2327 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp, 2328 struct bnx2x_virtf *vf) 2329 { 2330 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 
2331 int rc;
2332
2333 rparam.mcast_obj = &vf->mcast_obj;
2334 vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
2335
2336 /* If there are pending mcast commands - send them */
2337 if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
2338 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2339 if (rc < 0)
2340 BNX2X_ERR("Failed to send pending mcast commands: %d\n",
2341 rc);
2342 }
2343 }
2344
2345 static
2346 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
2347 struct bnx2x_virtf *vf)
2348 {
2349 smp_mb__before_clear_bit();
2350 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
2351 smp_mb__after_clear_bit();
2352 }
2353
2354 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
2355 {
2356 struct bnx2x_virtf *vf;
2357 int qidx = 0, abs_vfid;
2358 u8 opcode;
2359 u16 cid = 0xffff;
2360
2361 if (!IS_SRIOV(bp))
2362 return 1;
2363
2364 /* first get the cid - the only events we handle here are cfc-delete
2365 * and set-mac completion
2366 */
2367 opcode = elem->message.opcode;
2368
2369 switch (opcode) {
2370 case EVENT_RING_OPCODE_CFC_DEL:
2371 cid = SW_CID((__force __le32)
2372 elem->message.data.cfc_del_event.cid);
2373 DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
2374 break;
2375 case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
2376 case EVENT_RING_OPCODE_MULTICAST_RULES:
2377 case EVENT_RING_OPCODE_FILTERS_RULES:
2378 cid = (elem->message.data.eth_event.echo &
2379 BNX2X_SWCID_MASK);
2380 DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
2381 break;
2382 case EVENT_RING_OPCODE_VF_FLR:
2383 abs_vfid = elem->message.data.vf_flr_event.vf_id;
2384 DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
2385 abs_vfid);
2386 goto get_vf;
2387 case EVENT_RING_OPCODE_MALICIOUS_VF:
2388 abs_vfid = elem->message.data.malicious_vf_event.vf_id;
2389 DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
2390 abs_vfid, elem->message.data.malicious_vf_event.err_id);
2391 goto get_vf;
2392 default:
2393 return 1;
2394 }
2395
2396 /* check if the cid is in the VF range */
2397 if (!bnx2x_iov_is_vf_cid(bp, cid)) {
2398 DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
2399 return 1;
2400 }
2401
2402 /* extract vf and rxq index from vf_cid - relies on the following:
2403 * 1. vfid on cid reflects the true abs_vfid
2404 * 2.
The max number of VFs (per path) is 64 2405 */ 2406 qidx = cid & ((1 << BNX2X_VF_CID_WND)-1); 2407 abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 2408 get_vf: 2409 vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 2410 2411 if (!vf) { 2412 BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n", 2413 cid, abs_vfid); 2414 return 0; 2415 } 2416 2417 switch (opcode) { 2418 case EVENT_RING_OPCODE_CFC_DEL: 2419 DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n", 2420 vf->abs_vfid, qidx); 2421 vfq_get(vf, qidx)->sp_obj.complete_cmd(bp, 2422 &vfq_get(vf, 2423 qidx)->sp_obj, 2424 BNX2X_Q_CMD_CFC_DEL); 2425 break; 2426 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 2427 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n", 2428 vf->abs_vfid, qidx); 2429 bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem); 2430 break; 2431 case EVENT_RING_OPCODE_MULTICAST_RULES: 2432 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n", 2433 vf->abs_vfid, qidx); 2434 bnx2x_vf_handle_mcast_eqe(bp, vf); 2435 break; 2436 case EVENT_RING_OPCODE_FILTERS_RULES: 2437 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n", 2438 vf->abs_vfid, qidx); 2439 bnx2x_vf_handle_filters_eqe(bp, vf); 2440 break; 2441 case EVENT_RING_OPCODE_VF_FLR: 2442 DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n", 2443 vf->abs_vfid); 2444 /* Do nothing for now */ 2445 break; 2446 case EVENT_RING_OPCODE_MALICIOUS_VF: 2447 DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n", 2448 abs_vfid, elem->message.data.malicious_vf_event.err_id); 2449 /* Do nothing for now */ 2450 break; 2451 } 2452 /* SRIOV: reschedule any 'in_progress' operations */ 2453 bnx2x_iov_sp_event(bp, cid, false); 2454 2455 return 0; 2456 } 2457 2458 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) 2459 { 2460 /* extract the vf from vf_cid - relies on the following: 2461 * 1. vfid on cid reflects the true abs_vfid 2462 * 2. The max number of VFs (per path) is 64 2463 */ 2464 int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 2465 return bnx2x_vf_by_abs_fid(bp, abs_vfid); 2466 } 2467 2468 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, 2469 struct bnx2x_queue_sp_obj **q_obj) 2470 { 2471 struct bnx2x_virtf *vf; 2472 2473 if (!IS_SRIOV(bp)) 2474 return; 2475 2476 vf = bnx2x_vf_by_cid(bp, vf_cid); 2477 2478 if (vf) { 2479 /* extract queue index from vf_cid - relies on the following: 2480 * 1. vfid on cid reflects the true abs_vfid 2481 * 2. 
The max number of VFs (per path) is 64
2482 */
2483 int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
2484 *q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
2485 } else {
2486 BNX2X_ERR("No vf matching cid %d\n", vf_cid);
2487 }
2488 }
2489
2490 void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
2491 {
2492 struct bnx2x_virtf *vf;
2493
2494 /* check if the cid is in the VF range */
2495 if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
2496 return;
2497
2498 vf = bnx2x_vf_by_cid(bp, vf_cid);
2499 if (vf) {
2500 /* set in_progress flag */
2501 atomic_set(&vf->op_in_progress, 1);
2502 if (queue_work)
2503 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2504 }
2505 }
2506
2507 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
2508 {
2509 int i;
2510 int first_queue_query_index, num_queues_req;
2511 dma_addr_t cur_data_offset;
2512 struct stats_query_entry *cur_query_entry;
2513 u8 stats_count = 0;
2514 bool is_fcoe = false;
2515
2516 if (!IS_SRIOV(bp))
2517 return;
2518
2519 if (!NO_FCOE(bp))
2520 is_fcoe = true;
2521
2522 /* fcoe adds one global request and one queue request */
2523 num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
2524 first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
2525 (is_fcoe ? 0 : 1);
2526
2527 DP(BNX2X_MSG_IOV,
2528 "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
2529 BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
2530 first_queue_query_index + num_queues_req);
2531
2532 cur_data_offset = bp->fw_stats_data_mapping +
2533 offsetof(struct bnx2x_fw_stats_data, queue_stats) +
2534 num_queues_req * sizeof(struct per_queue_stats);
2535
2536 cur_query_entry = &bp->fw_stats_req->
2537 query[first_queue_query_index + num_queues_req];
2538
2539 for_each_vf(bp, i) {
2540 int j;
2541 struct bnx2x_virtf *vf = BP_VF(bp, i);
2542
2543 if (vf->state != VF_ENABLED) {
2544 DP(BNX2X_MSG_IOV,
2545 "vf %d not enabled so no stats for it\n",
2546 vf->abs_vfid);
2547 continue;
2548 }
2549
2550 DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
2551 for_each_vfq(vf, j) {
2552 struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
2553
2554 /* collect stats from active queues only */
2555 if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
2556 BNX2X_Q_LOGICAL_STATE_STOPPED)
2557 continue;
2558
2559 /* create stats query entry for this queue */
2560 cur_query_entry->kind = STATS_TYPE_QUEUE;
2561 cur_query_entry->index = vfq_cl_id(vf, rxq);
2562 cur_query_entry->funcID =
2563 cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
2564 cur_query_entry->address.hi =
2565 cpu_to_le32(U64_HI(vf->fw_stat_map));
2566 cur_query_entry->address.lo =
2567 cpu_to_le32(U64_LO(vf->fw_stat_map));
2568 DP(BNX2X_MSG_IOV,
2569 "added address %x %x for vf %d queue %d client %d\n",
2570 cur_query_entry->address.hi,
2571 cur_query_entry->address.lo, cur_query_entry->funcID,
2572 j, cur_query_entry->index);
2573 cur_query_entry++;
2574 cur_data_offset += sizeof(struct per_queue_stats);
2575 stats_count++;
2576 }
2577 }
2578 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
2579 }
2580
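/* Worked example for the indexing above (illustrative values only): with
 * 4 ETH queues and FCoE enabled, is_fcoe == 1, so num_queues_req == 5 and
 * first_queue_query_index == BNX2X_FIRST_QUEUE_QUERY_IDX. The first VF
 * entry is then written at query[first_queue_query_index + 5], directly
 * after the last non-virtual queue query.
 */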
2581 void bnx2x_iov_sp_task(struct bnx2x *bp)
2582 {
2583 int i;
2584
2585 if (!IS_SRIOV(bp))
2586 return;
2587 /* Iterate over all VFs and invoke state transition for VFs with
2588 * 'in-progress' slow-path operations
2589 */
2590 DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
2591 for_each_vf(bp, i) {
2592 struct bnx2x_virtf *vf = BP_VF(bp, i);
2593
2594 if (!list_empty(&vf->op_list_head) &&
2595 atomic_read(&vf->op_in_progress)) {
2596 DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
2597 bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
2598 }
2599 }
2600 }
2601
2602 static inline
2603 struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
2604 {
2605 int i;
2606 struct bnx2x_virtf *vf = NULL;
2607
2608 for_each_vf(bp, i) {
2609 vf = BP_VF(bp, i);
2610 if (stat_id >= vf->igu_base_id &&
2611 stat_id < vf->igu_base_id + vf_sb_count(vf))
2612 break;
2613 }
2614 return vf;
2615 }
2616
2617 /* VF API helpers */
2618 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
2619 u8 enable)
2620 {
2621 u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
2622 u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
2623
2624 REG_WR(bp, reg, val);
2625 }
2626
2627 static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
2628 {
2629 int i;
2630
2631 for_each_vfq(vf, i)
2632 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2633 vfq_qzone_id(vf, vfq_get(vf, i)), false);
2634 }
2635
2636 static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
2637 {
2638 u32 val;
2639
2640 /* clear the VF configuration - pretend */
2641 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
2642 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
2643 val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
2644 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
2645 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
2646 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
2647 }
2648
2649 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
2650 {
2651 return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
2652 BNX2X_VF_MAX_QUEUES);
2653 }
2654
2655 static
2656 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
2657 struct vf_pf_resc_request *req_resc)
2658 {
2659 u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2660 u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2661
2662 return ((req_resc->num_rxqs <= rxq_cnt) &&
2663 (req_resc->num_txqs <= txq_cnt) &&
2664 (req_resc->num_sbs <= vf_sb_count(vf)) &&
2665 (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
2666 (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
2667 }
2668
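/* Illustrative sketch (not driver code): a request that would pass
 * bnx2x_vf_chk_avail_resc() on a freshly reset VF; all numbers here are
 * assumptions:
 *
 *	struct vf_pf_resc_request req = {
 *		.num_rxqs = 2, .num_txqs = 2, .num_sbs = 2,
 *		.num_mac_filters = 1, .num_vlan_filters = 1,
 *	};
 *	if (bnx2x_vf_chk_avail_resc(bp, vf, &req))
 *		;	(request can be fulfilled)
 */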
2685 */ 2686 if (vf->state == VF_ACQUIRED) { 2687 DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n", 2688 vf->abs_vfid); 2689 2690 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { 2691 BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= then previously acquired numbers\n", 2692 vf->abs_vfid); 2693 return -EINVAL; 2694 } 2695 return 0; 2696 } 2697 2698 /* Otherwise vf state must be 'free' or 'reset' */ 2699 if (vf->state != VF_FREE && vf->state != VF_RESET) { 2700 BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n", 2701 vf->abs_vfid, vf->state); 2702 return -EINVAL; 2703 } 2704 2705 /* static allocation: 2706 * the global maximum number are fixed per VF. Fail the request if 2707 * requested number exceed these globals 2708 */ 2709 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { 2710 DP(BNX2X_MSG_IOV, 2711 "cannot fulfill vf resource request. Placing maximal available values in response\n"); 2712 /* set the max resource in the vf */ 2713 return -ENOMEM; 2714 } 2715 2716 /* Set resources counters - 0 request means max available */ 2717 vf_sb_count(vf) = resc->num_sbs; 2718 vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf); 2719 vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf); 2720 if (resc->num_mac_filters) 2721 vf_mac_rules_cnt(vf) = resc->num_mac_filters; 2722 if (resc->num_vlan_filters) 2723 vf_vlan_rules_cnt(vf) = resc->num_vlan_filters; 2724 2725 DP(BNX2X_MSG_IOV, 2726 "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n", 2727 vf_sb_count(vf), vf_rxq_count(vf), 2728 vf_txq_count(vf), vf_mac_rules_cnt(vf), 2729 vf_vlan_rules_cnt(vf)); 2730 2731 /* Initialize the queues */ 2732 if (!vf->vfqs) { 2733 DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n"); 2734 return -EINVAL; 2735 } 2736 2737 for_each_vfq(vf, i) { 2738 struct bnx2x_vf_queue *q = vfq_get(vf, i); 2739 2740 if (!q) { 2741 DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i); 2742 return -EINVAL; 2743 } 2744 2745 q->index = i; 2746 q->cxt = &((base_cxt + i)->eth); 2747 q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i; 2748 2749 DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n", 2750 vf->abs_vfid, i, q->index, q->cid, q->cxt); 2751 2752 /* init SP objects */ 2753 bnx2x_vfq_init(bp, vf, q); 2754 } 2755 vf->state = VF_ACQUIRED; 2756 return 0; 2757 } 2758 2759 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) 2760 { 2761 struct bnx2x_func_init_params func_init = {0}; 2762 u16 flags = 0; 2763 int i; 2764 2765 /* the sb resources are initialized at this point, do the 2766 * FW/HW initializations 2767 */ 2768 for_each_vf_sb(vf, i) 2769 bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true, 2770 vf_igu_sb(vf, i), vf_igu_sb(vf, i)); 2771 2772 /* Sanity checks */ 2773 if (vf->state != VF_ACQUIRED) { 2774 DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n", 2775 vf->abs_vfid, vf->state); 2776 return -EINVAL; 2777 } 2778 2779 /* let FLR complete ... 
2759 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
2760 {
2761 struct bnx2x_func_init_params func_init = {0};
2762 u16 flags = 0;
2763 int i;
2764
2765 /* the sb resources are initialized at this point, do the
2766 * FW/HW initializations
2767 */
2768 for_each_vf_sb(vf, i)
2769 bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
2770 vf_igu_sb(vf, i), vf_igu_sb(vf, i));
2771
2772 /* Sanity checks */
2773 if (vf->state != VF_ACQUIRED) {
2774 DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
2775 vf->abs_vfid, vf->state);
2776 return -EINVAL;
2777 }
2778
2779 /* let FLR complete ... */
2780 msleep(100);
2781
2782 /* FLR cleanup epilogue */
2783 if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
2784 return -EBUSY;
2785
2786 /* reset IGU VF statistics: MSIX */
2787 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
2788
2789 /* vf init */
2790 if (vf->cfg_flags & VF_CFG_STATS)
2791 flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
2792
2793 if (vf->cfg_flags & VF_CFG_TPA)
2794 flags |= FUNC_FLG_TPA;
2795
2796 if (is_vf_multi(vf))
2797 flags |= FUNC_FLG_RSS;
2798
2799 /* function setup */
2800 func_init.func_flgs = flags;
2801 func_init.pf_id = BP_FUNC(bp);
2802 func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
2803 func_init.fw_stat_map = vf->fw_stat_map;
2804 func_init.spq_map = vf->spq_map;
2805 func_init.spq_prod = 0;
2806 bnx2x_func_init(bp, &func_init);
2807
2808 /* Enable the vf */
2809 bnx2x_vf_enable_access(bp, vf->abs_vfid);
2810 bnx2x_vf_enable_traffic(bp, vf);
2811
2812 /* queue protection table */
2813 for_each_vfq(vf, i)
2814 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2815 vfq_qzone_id(vf, vfq_get(vf, i)), true);
2816
2817 vf->state = VF_ENABLED;
2818
2819 /* update vf bulletin board */
2820 bnx2x_post_vf_bulletin(bp, vf->index);
2821
2822 return 0;
2823 }
2824
2825 /* VFOP close (teardown the queues, delete mcasts and close HW) */
2826 static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
2827 {
2828 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
2829 struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
2830 enum bnx2x_vfop_close_state state = vfop->state;
2831 struct bnx2x_vfop_cmd cmd = {
2832 .done = bnx2x_vfop_close,
2833 .block = false,
2834 };
2835
2836 if (vfop->rc < 0)
2837 goto op_err;
2838
2839 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
2840
2841 switch (state) {
2842 case BNX2X_VFOP_CLOSE_QUEUES:
2843
2844 if (++(qx->qid) < vf_rxq_count(vf)) {
2845 vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
2846 if (vfop->rc)
2847 goto op_err;
2848 return;
2849 }
2850
2851 /* remove multicasts */
2852 vfop->state = BNX2X_VFOP_CLOSE_HW;
2853 vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
2854 if (vfop->rc)
2855 goto op_err;
2856 return;
2857
2858 case BNX2X_VFOP_CLOSE_HW:
2859
2860 /* disable the interrupts */
2861 DP(BNX2X_MSG_IOV, "disabling igu\n");
2862 bnx2x_vf_igu_disable(bp, vf);
2863
2864 /* disable the VF */
2865 DP(BNX2X_MSG_IOV, "clearing qtbl\n");
2866 bnx2x_vf_clr_qtbl(bp, vf);
2867
2868 goto op_done;
2869 default:
2870 bnx2x_vfop_default(state);
2871 }
2872 op_err:
2873 BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
2874 op_done:
2875 vf->state = VF_ACQUIRED;
2876 DP(BNX2X_MSG_IOV, "set state to acquired\n");
2877 bnx2x_vfop_end(bp, vf, vfop);
2878 }
2879
2880 int bnx2x_vfop_close_cmd(struct bnx2x *bp,
2881 struct bnx2x_virtf *vf,
2882 struct bnx2x_vfop_cmd *cmd)
2883 {
2884 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
2885 if (vfop) {
2886 vfop->args.qx.qid = -1; /* loop */
2887 bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
2888 bnx2x_vfop_close, cmd->done);
2889 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
2890 cmd->block);
2891 }
2892 return -ENOMEM;
2893 }
2894
2895 /* VF release can be called when: 1. The VF was acquired but
2896 * not enabled, or 2.
the vf was enabled or in the process of being 2897 * enabled 2898 */ 2899 static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf) 2900 { 2901 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 2902 struct bnx2x_vfop_cmd cmd = { 2903 .done = bnx2x_vfop_release, 2904 .block = false, 2905 }; 2906 2907 DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); 2908 2909 if (vfop->rc < 0) 2910 goto op_err; 2911 2912 DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid, 2913 vf->state == VF_FREE ? "Free" : 2914 vf->state == VF_ACQUIRED ? "Acquired" : 2915 vf->state == VF_ENABLED ? "Enabled" : 2916 vf->state == VF_RESET ? "Reset" : 2917 "Unknown"); 2918 2919 switch (vf->state) { 2920 case VF_ENABLED: 2921 vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd); 2922 if (vfop->rc) 2923 goto op_err; 2924 return; 2925 2926 case VF_ACQUIRED: 2927 DP(BNX2X_MSG_IOV, "about to free resources\n"); 2928 bnx2x_vf_free_resc(bp, vf); 2929 DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); 2930 goto op_done; 2931 2932 case VF_FREE: 2933 case VF_RESET: 2934 /* do nothing */ 2935 goto op_done; 2936 default: 2937 bnx2x_vfop_default(vf->state); 2938 } 2939 op_err: 2940 BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc); 2941 op_done: 2942 bnx2x_vfop_end(bp, vf, vfop); 2943 } 2944 2945 int bnx2x_vfop_release_cmd(struct bnx2x *bp, 2946 struct bnx2x_virtf *vf, 2947 struct bnx2x_vfop_cmd *cmd) 2948 { 2949 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 2950 if (vfop) { 2951 bnx2x_vfop_opset(-1, /* use vf->state */ 2952 bnx2x_vfop_release, cmd->done); 2953 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release, 2954 cmd->block); 2955 } 2956 return -ENOMEM; 2957 } 2958 2959 /* VF release ~ VF close + VF release-resources 2960 * Release is the ultimate SW shutdown and is called whenever an 2961 * irrecoverable error is encountered. 
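 * It is also invoked, blocking, for every VF by bnx2x_iov_chip_cleanup()
 * above during chip teardown.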
2962 */
2963 void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
2964 {
2965 struct bnx2x_vfop_cmd cmd = {
2966 .done = NULL,
2967 .block = block,
2968 };
2969 int rc;
2970 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2971
2972 rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
2973 if (rc)
2974 WARN(rc,
2975 "VF[%d] Failed to allocate resources for release op- rc=%d\n",
2976 vf->abs_vfid, rc);
2977 }
2978
2979 static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
2980 struct bnx2x_virtf *vf, u32 *sbdf)
2981 {
2982 *sbdf = vf->devfn | (vf->bus << 8);
2983 }
2984
2985 static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
2986 struct bnx2x_vf_bar_info *bar_info)
2987 {
2988 int n;
2989
2990 bar_info->nr_bars = bp->vfdb->sriov.nres;
2991 for (n = 0; n < bar_info->nr_bars; n++)
2992 bar_info->bars[n] = vf->bars[n];
2993 }
2994
2995 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2996 enum channel_tlvs tlv)
2997 {
2998 /* lock the channel */
2999 mutex_lock(&vf->op_mutex);
3000
3001 /* record the locking op */
3002 vf->op_current = tlv;
3003
3004 /* log the lock */
3005 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
3006 vf->abs_vfid, tlv);
3007 }
3008
3009 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
3010 enum channel_tlvs expected_tlv)
3011 {
3012 WARN(expected_tlv != vf->op_current,
3013 "lock mismatch: expected %d found %d", expected_tlv,
3014 vf->op_current);
3015
3016 /* unlock the channel */
3017 mutex_unlock(&vf->op_mutex);
3018
3019 /* log the unlock */
3020 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
3021 vf->abs_vfid, vf->op_current);
3022
3023 /* clear the locking op */
3024 vf->op_current = CHANNEL_TLV_NONE;
3025 }
3026
3027 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
3028 {
3029 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
3030
3031 DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
3032 num_vfs_param, BNX2X_NR_VIRTFN(bp));
3033
3034 /* HW channel is only operational when PF is up */
3035 if (bp->state != BNX2X_STATE_OPEN) {
3036 BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
3037 return -EINVAL;
3038 }
3039
3040 /* we are always bound by the total_vfs in the configuration space */
3041 if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
3042 BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
3043 num_vfs_param, BNX2X_NR_VIRTFN(bp));
3044 num_vfs_param = BNX2X_NR_VIRTFN(bp);
3045 }
3046
3047 bp->requested_nr_virtfn = num_vfs_param;
3048 if (num_vfs_param == 0) {
3049 pci_disable_sriov(dev);
3050 return 0;
3051 } else {
3052 return bnx2x_enable_sriov(bp);
3053 }
3054 }
3055
3056 int bnx2x_enable_sriov(struct bnx2x *bp)
3057 {
3058 int rc = 0, req_vfs = bp->requested_nr_virtfn;
3059
3060 rc = pci_enable_sriov(bp->pdev, req_vfs);
3061 if (rc) {
3062 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
3063 return rc;
3064 }
3065 DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
3066 return req_vfs;
3067 }
3068
3069 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
3070 {
3071 int vfidx;
3072 struct pf_vf_bulletin_content *bulletin;
3073
3074 DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
3075 for_each_vf(bp, vfidx) {
3076 bulletin = BP_VF_BULLETIN(bp, vfidx);
3077 if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
3078 bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
3079 }
3080 }
3081
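/* Usage note: bnx2x_sriov_configure() above is the driver's sriov_configure
 * hook, so VFs are normally toggled from user space through the standard
 * PCI sysfs attribute, e.g. (device address is an assumed example):
 *
 *	echo 2 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *	echo 0 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *
 * A non-zero count funnels into bnx2x_enable_sriov(), zero into
 * pci_disable_sriov().
 */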
3082 void bnx2x_disable_sriov(struct bnx2x *bp)
3083 {
3084 pci_disable_sriov(bp->pdev);
3085 }
3086
3087 static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
3088 struct bnx2x_virtf *vf)
3089 {
3090 if (bp->state != BNX2X_STATE_OPEN) {
3091 BNX2X_ERR("vf ndo called although PF is down\n");
3092 return -EINVAL;
3093 }
3094
3095 if (!IS_SRIOV(bp)) {
3096 BNX2X_ERR("vf ndo called although sriov is disabled\n");
3097 return -EINVAL;
3098 }
3099
3100 if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
3101 BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
3102 vfidx, BNX2X_NR_VIRTFN(bp));
3103 return -EINVAL;
3104 }
3105
3106 if (!vf) {
3107 BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
3108 vfidx);
3109 return -EINVAL;
3110 }
3111
3112 return 0;
3113 }
3114
3115 int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
3116 struct ifla_vf_info *ivi)
3117 {
3118 struct bnx2x *bp = netdev_priv(dev);
3119 struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
3120 struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
3121 struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
3122 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
3123 int rc;
3124
3125 /* sanity */
3126 rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
3127 if (rc)
3128 return rc;
3129 if (!mac_obj || !vlan_obj || !bulletin) {
3130 BNX2X_ERR("VF partially initialized\n");
3131 return -EINVAL;
3132 }
3133
3134 ivi->vf = vfidx;
3135 ivi->qos = 0;
3136 ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
3137 ivi->spoofchk = 1; /* always enabled */
3138 if (vf->state == VF_ENABLED) {
3139 /* mac and vlan are in vlan_mac objects */
3140 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
3141 0, ETH_ALEN);
3142 vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan,
3143 0, VLAN_HLEN);
3144 } else {
3145 /* mac */
3146 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
3147 /* mac configured by ndo so it's in the bulletin board */
3148 memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
3149 else
3150 /* function has not been loaded yet. Show mac as 0s */
3151 memset(&ivi->mac, 0, ETH_ALEN);
3152
3153 /* vlan */
3154 if (bulletin->valid_bitmap & (1 << VLAN_VALID))
3155 /* vlan configured by ndo so it's in the bulletin board */
3156 memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
3157 else
3158 /* function has not been loaded yet. Show vlans as 0s */
3159 memset(&ivi->vlan, 0, VLAN_HLEN);
3160 }
3161
3162 return 0;
3163 }
3164
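/* Usage note: bnx2x_get_vf_config() above backs the ndo_get_vf_config
 * callback, so its output is what e.g. "ip link show" prints for each VF
 * of this PF (mac, vlan, rate and spoof-check state).
 */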
3165 /* New mac for VF. Consider these cases:
3166 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
3167 * supply at acquire.
3168 * 2. VF has already been acquired but has not yet initialized - store in local
3169 * bulletin board. mac will be posted on VF bulletin board after VF init. VF
3170 * will configure this mac when it is ready.
3171 * 3. VF has already initialized but has not yet setup a queue - post the new
3172 * mac on VF's bulletin board right now. VF will configure this mac when it
3173 * is ready.
3174 * 4. VF has already set a queue - delete any macs already configured for this
3175 * queue and manually config the new mac.
3176 * In any event, once this function has been called, refuse any attempts by the
3177 * VF to configure any mac for itself except for this mac. In case of a race
3178 * where the VF fails to see the new post on its bulletin board before sending a
3179 * mac configuration request, the PF will simply fail the request and VF can try
3180 * again after consulting its bulletin board.
3181 */
3182 int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
3183 {
3184 struct bnx2x *bp = netdev_priv(dev);
3185 int rc, q_logical_state;
3186 struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
3187 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
3188
3189 /* sanity */
3190 rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
3191 if (rc)
3192 return rc;
3193 if (!is_valid_ether_addr(mac)) {
3194 BNX2X_ERR("mac address invalid\n");
3195 return -EINVAL;
3196 }
3197
3198 /* update PF's copy of the VF's bulletin. Will no longer accept mac
3199 * configuration requests from the vf unless they match this mac
3200 */
3201 bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
3202 memcpy(bulletin->mac, mac, ETH_ALEN);
3203
3204 /* Post update on VF's bulletin board */
3205 rc = bnx2x_post_vf_bulletin(bp, vfidx);
3206 if (rc) {
3207 BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
3208 return rc;
3209 }
3210
3211 /* is vf initialized and queue set up? */
3212 q_logical_state =
3213 bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
3214 if (vf->state == VF_ENABLED &&
3215 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
3216 /* configure the mac in device on this vf's queue */
3217 unsigned long ramrod_flags = 0;
3218 struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
3219
3220 /* must lock vfpf channel to protect against vf flows */
3221 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3222
3223 /* remove existing eth macs */
3224 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
3225 if (rc) {
3226 BNX2X_ERR("failed to delete eth macs\n");
3227 return -EINVAL;
3228 }
3229
3230 /* remove existing uc list macs */
3231 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
3232 if (rc) {
3233 BNX2X_ERR("failed to delete uc_list macs\n");
3234 return -EINVAL;
3235 }
3236
3237 /* configure the new mac to device */
3238 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3239 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
3240 BNX2X_ETH_MAC, &ramrod_flags);
3241
3242 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3243 }
3244
3245 return 0;
3246 }
3247
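/* Usage note (illustrative command, device name is an assumption):
 *
 *	ip link set dev eth0 vf 0 mac 02:15:4e:00:00:01
 *
 * lands in bnx2x_set_vf_mac() above via ndo_set_vf_mac; case 4 of the
 * comment above applies only if VF 0 already has an active queue.
 */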
3248 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3249 {
3250 struct bnx2x *bp = netdev_priv(dev);
3251 int rc, q_logical_state;
3252 struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
3253 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
3254
3255 /* sanity */
3256 rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
3257 if (rc)
3258 return rc;
3259
3260 if (vlan > 4095) {
3261 BNX2X_ERR("illegal vlan value %d\n", vlan);
3262 return -EINVAL;
3263 }
3264
3265 DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
3266 vfidx, vlan, 0);
3267
3268 /* update PF's copy of the VF's bulletin. No point in posting the vlan
3269 * to the VF since it doesn't have anything to do with it. But it is
3270 * useful to store it here in case the VF is not up yet and we can
3271 * only configure the vlan later when it comes up.
3272 */
3273 bulletin->valid_bitmap |= 1 << VLAN_VALID;
3274 bulletin->vlan = vlan;
3275
3276 /* is vf initialized and queue set up? */
3277 q_logical_state =
3278 bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
3279 if (vf->state == VF_ENABLED &&
3280 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
3281 /* configure the vlan in device on this vf's queue */
3282 unsigned long ramrod_flags = 0;
3283 unsigned long vlan_mac_flags = 0;
3284 struct bnx2x_vlan_mac_obj *vlan_obj =
3285 &bnx2x_vfq(vf, 0, vlan_obj);
3286 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
3287 struct bnx2x_queue_state_params q_params = {NULL};
3288 struct bnx2x_queue_update_params *update_params;
3289
3290 memset(&ramrod_param, 0, sizeof(ramrod_param));
3291
3292 /* must lock vfpf channel to protect against vf flows */
3293 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3294
3295 /* remove existing vlans */
3296 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3297 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
3298 &ramrod_flags);
3299 if (rc) {
3300 BNX2X_ERR("failed to delete vlans\n");
3301 return -EINVAL;
3302 }
3303
3304 /* send queue update ramrod to configure default vlan and silent
3305 * vlan removal
3306 */
3307 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3308 q_params.cmd = BNX2X_Q_CMD_UPDATE;
3309 q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj);
3310 update_params = &q_params.params.update;
3311 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
3312 &update_params->update_flags);
3313 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
3314 &update_params->update_flags);
3315
3316 if (vlan == 0) {
3317 /* if vlan is 0 then we want to leave the VF traffic
3318 * untagged, and leave the incoming traffic untouched
3319 * (i.e. do not remove any vlan tags).
3320 */
3321 __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3322 &update_params->update_flags);
3323 __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3324 &update_params->update_flags);
3325 } else {
3326 /* configure the new vlan to device */
3327 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3328 ramrod_param.vlan_mac_obj = vlan_obj;
3329 ramrod_param.ramrod_flags = ramrod_flags;
3330 ramrod_param.user_req.u.vlan.vlan = vlan;
3331 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
3332 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
3333 if (rc) {
3334 BNX2X_ERR("failed to configure vlan\n");
3335 return -EINVAL;
3336 }
3337
3338 /* configure default vlan to vf queue and set silent
3339 * vlan removal (the vf remains unaware of this vlan).
3340 */
3341 update_params = &q_params.params.update;
3342 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3343 &update_params->update_flags);
3344 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3345 &update_params->update_flags);
3346 update_params->def_vlan = vlan;
3347 }
3348
3349 /* Update the Queue state */
3350 rc = bnx2x_queue_state_change(bp, &q_params);
3351 if (rc) {
3352 BNX2X_ERR("Failed to configure default VLAN\n");
3353 return rc;
3354 }
3355
3356 /* clear the flag indicating that this VF needs its vlan
3357 * (it will only be set if the HV configured the vlan before the
3358 * vf was up, and we were called because the VF came up later)
3359 */
3360 vf->cfg_flags &= ~VF_CFG_VLAN;
3361
3362 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3363 }
3364 return 0;
3365 }
3366
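/* Usage note (illustrative command, device name is an assumption):
 *
 *	ip link set dev eth0 vf 0 vlan 100
 *
 * makes the device tag all VF 0 transmit traffic with vlan 100 and
 * silently strip that tag on receive, while "... vf 0 vlan 0" removes
 * both behaviors, as implemented in bnx2x_set_vf_vlan() above.
 */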
3367 /* crc is the first field in the bulletin board. Compute the crc over the
3368 * entire bulletin board excluding the crc field itself. Use the length field
3369 * as the Bulletin Board was posted by a PF with possibly a different version
3370 * from the vf which will sample it. Therefore, the length is computed by the
3371 * PF and then used blindly by the VF.
3372 */
3373 u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
3374 struct pf_vf_bulletin_content *bulletin)
3375 {
3376 return crc32(BULLETIN_CRC_SEED,
3377 ((u8 *)bulletin) + sizeof(bulletin->crc),
3378 bulletin->length - sizeof(bulletin->crc));
3379 }
3380
3381 /* Check for new posts on the bulletin board */
3382 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
3383 {
3384 struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
3385 int attempts;
3386
3387 /* bulletin board hasn't changed since last sample */
3388 if (bp->old_bulletin.version == bulletin.version)
3389 return PFVF_BULLETIN_UNCHANGED;
3390
3391 /* validate crc of new bulletin board */
3392 if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
3393 /* sampling the structure in mid-post may result in corrupted
3394 * data; validate crc to ensure coherency.
3395 */
3396 for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
3397 bulletin = bp->pf2vf_bulletin->content;
3398 if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
3399 &bulletin))
3400 break;
3401 BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
3402 bulletin.crc,
3403 bnx2x_crc_vf_bulletin(bp, &bulletin));
3404 }
3405 if (attempts >= BULLETIN_ATTEMPTS) {
3406 BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
3407 attempts);
3408 return PFVF_BULLETIN_CRC_ERR;
3409 }
3410 }
3411
3412 /* the mac address in bulletin board is valid and is new */
3413 if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
3414 memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
3415 /* update new mac to net device */
3416 memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
3417 }
3418
3419 /* the vlan in bulletin board is valid and is new */
3420 if (bulletin.valid_bitmap & 1 << VLAN_VALID)
3421 memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);
3422
3423 /* copy new bulletin board to bp */
3424 bp->old_bulletin = bulletin;
3425
3426 return PFVF_BULLETIN_UPDATED;
3427 }
3428
3429 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
3430 {
3431 /* vf doorbells are embedded within the regview */
3432 return bp->regview + PXP_VF_ADDR_DB_START;
3433 }
3434
3435 int bnx2x_vf_pci_alloc(struct bnx2x *bp)
3436 {
3437 mutex_init(&bp->vf2pf_mutex);
3438
3439 /* allocate vf2pf mailbox for vf to pf channel */
3440 BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
3441 sizeof(struct bnx2x_vf_mbx_msg));
3442
3443 /* allocate pf 2 vf bulletin board */
3444 BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
3445 sizeof(union pf_vf_bulletin));
3446
3447 return 0;
3448
3449 alloc_mem_err:
3450 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
3451 sizeof(struct bnx2x_vf_mbx_msg));
3452 BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
3453 sizeof(union pf_vf_bulletin));
3454 return -ENOMEM;
3455 }
3456
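/* Illustrative sketch (not driver code) of the VF-side polling contract
 * for bnx2x_sample_bulletin() above; the calling context is an assumption:
 *
 *	switch (bnx2x_sample_bulletin(bp)) {
 *	case PFVF_BULLETIN_UPDATED:
 *		(the PF posted news; bp->dev->dev_addr may have changed)
 *		break;
 *	case PFVF_BULLETIN_UNCHANGED:
 *		break;
 *	case PFVF_BULLETIN_CRC_ERR:
 *		(treat the sampled content as stale)
 *		break;
 *	}
 */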
3457 int bnx2x_open_epilog(struct bnx2x *bp)
3458 {
3459 /* Enable sriov via delayed work. This must be done via delayed work
3460 * because it causes the probe of the vf devices to be run, which invokes
3461 * register_netdevice which must have rtnl lock taken. As we are holding
3462 * the lock right now, that could only work if the probe would not take
3463 * the lock. However, as the probe of the vf may be called from other
3464 * contexts as well (such as when passthrough to a vm fails) it can't
3465 * assume the lock is being held for it. Using delayed work here allows
3466 * the probe code to simply take the lock (i.e. wait for it to be released
3467 * if it is being held). We only want to do this if the number of VFs
3468 * was set before the PF driver was loaded.
3469 */
3470 if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
3471 smp_mb__before_clear_bit();
3472 set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
3473 smp_mb__after_clear_bit();
3474 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3475 }
3476
3477 return 0;
3478 }
3479
3480 void bnx2x_iov_channel_down(struct bnx2x *bp)
3481 {
3482 int vf_idx;
3483 struct pf_vf_bulletin_content *bulletin;
3484
3485 if (!IS_SRIOV(bp))
3486 return;
3487
3488 for_each_vf(bp, vf_idx) {
3489 /* locate this VF's bulletin board and update the channel down
3490 * bit
3491 */
3492 bulletin = BP_VF_BULLETIN(bp, vf_idx);
3493 bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;
3494
3495 /* update vf bulletin board */
3496 bnx2x_post_vf_bulletin(bp, vf_idx);
3497 }
3498 }
3499
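/* Summary of the PF-side VF lifecycle implemented in this file (for
 * orientation only):
 *
 *	bnx2x_iov_init_one()   - probe: read the SRIOV capability, build vfdb
 *	bnx2x_iov_alloc_mem()  - allocate context, mailbox and bulletin DMA
 *	bnx2x_iov_nic_init()   - PF load: init per-VF objects, enable mailboxes
 *	bnx2x_vf_acquire() / bnx2x_vf_init() - resource negotiation, VF start
 *	bnx2x_vf_release()     - teardown; also used by bnx2x_iov_chip_cleanup()
 *	bnx2x_iov_remove_one() - disable SRIOV and free the vf database
 */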