/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>

/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}
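/* Note: when no VF matches, the loop above terminates with
 * idx == BNX2X_NR_VIRTFN(bp); bnx2x_vf_by_abs_fid() below relies on this
 * to map "not found" to NULL. Illustrative check (a sketch, not a code
 * path in this file):
 *
 *	if (bnx2x_vf_idx_by_abs_fid(bp, abs_vfid) == BNX2X_NR_VIRTFN(bp))
 *		(no such VF on this PF)
 */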
static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
				u8 update)
{
	/* acking a VF sb through the PF - use the GRC */
	u32 ctl;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
	mmiowb();
	barrier();

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();
}

/* VFOP - VF slow-path operation support */

#define BNX2X_VFOP_FILTER_ADD_CNT_MAX	0x10000

/* VFOP operations states */
enum bnx2x_vfop_qctor_state {
	BNX2X_VFOP_QCTOR_INIT,
	BNX2X_VFOP_QCTOR_SETUP,
	BNX2X_VFOP_QCTOR_INT_EN
};

enum bnx2x_vfop_qdtor_state {
	BNX2X_VFOP_QDTOR_HALT,
	BNX2X_VFOP_QDTOR_TERMINATE,
	BNX2X_VFOP_QDTOR_CFCDEL,
	BNX2X_VFOP_QDTOR_DONE
};

enum bnx2x_vfop_vlan_mac_state {
	BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
	BNX2X_VFOP_VLAN_MAC_CLEAR,
	BNX2X_VFOP_VLAN_MAC_CHK_DONE,
	BNX2X_VFOP_MAC_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST_0
};

enum bnx2x_vfop_qsetup_state {
	BNX2X_VFOP_QSETUP_CTOR,
	BNX2X_VFOP_QSETUP_VLAN0,
	BNX2X_VFOP_QSETUP_DONE
};

enum bnx2x_vfop_mcast_state {
	BNX2X_VFOP_MCAST_DEL,
	BNX2X_VFOP_MCAST_ADD,
	BNX2X_VFOP_MCAST_CHK_DONE
};

enum bnx2x_vfop_qflr_state {
	BNX2X_VFOP_QFLR_CLR_VLAN,
	BNX2X_VFOP_QFLR_CLR_MAC,
	BNX2X_VFOP_QFLR_TERMINATE,
	BNX2X_VFOP_QFLR_DONE
};

enum bnx2x_vfop_flr_state {
	BNX2X_VFOP_FLR_QUEUES,
	BNX2X_VFOP_FLR_HW
};

enum bnx2x_vfop_close_state {
	BNX2X_VFOP_CLOSE_QUEUES,
	BNX2X_VFOP_CLOSE_HW
};

enum bnx2x_vfop_rxmode_state {
	BNX2X_VFOP_RXMODE_CONFIG,
	BNX2X_VFOP_RXMODE_DONE
};

enum bnx2x_vfop_qteardown_state {
	BNX2X_VFOP_QTEARDOWN_RXMODE,
	BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
	BNX2X_VFOP_QTEARDOWN_CLR_MAC,
	BNX2X_VFOP_QTEARDOWN_QDTOR,
	BNX2X_VFOP_QTEARDOWN_DONE
};

#define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0)
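/* The VFOP handlers below share one set of conventions, worth spelling out
 * once (descriptive note, not a function in this file): each handler is
 * driven once per state; bnx2x_vfop_finalize() routes on the ramrod result -
 * op_err on failure, op_pending while a ramrod is outstanding, op_done when
 * the operation is complete - and otherwise lets execution advance to the
 * case that matches the state just written into vfop->state. Hence the
 * deliberate absence of 'break' between cases:
 *
 *	switch (state) {
 *	case SOME_STATE:
 *		vfop->state = NEXT_STATE;
 *		vfop->rc = (issue slow-path command);
 *		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
 *	case NEXT_STATE:
 *		...
 *	}
 */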
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	DP(BNX2X_MSG_IOV,
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->tx.sb_cq_index,
	   init_params->tx.hc_rate,
	   setup_params->flags,
	   setup_params->txq_params.traffic_type);
}

void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->rx.sb_cq_index,
	   init_params->rx.hc_rate,
	   setup_params->gen_params.mtu,
	   rxq_params->buf_sz,
	   rxq_params->sge_buf_sz,
	   rxq_params->max_sges_pkt,
	   rxq_params->tpa_agg_sz,
	   setup_params->flags,
	   rxq_params->drop_flags,
	   rxq_params->cache_line_log);
}

void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vfop_qctor_params *p,
			   unsigned long q_type)
{
	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

	/* INIT */

	/* Enable host coalescing in the transition to INIT state */
	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

	/* FW SB ID */
	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

	/* context */
	init_p->cxts[0] = q->cxt;

	/* SETUP */

	/* Setup-op general parameters */
	setup_p->gen_params.spcl_id = vf->sp_cl_id;
	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);

	/* Setup-op pause params:
	 * Nothing to do, the pause thresholds are set by default to 0 which
	 * effectively turns off the feature for this queue. We don't want
	 * one queue (VF) to interfere with another queue (another VF)
	 */
	if (vf->cfg_flags & VF_CFG_FW_FC)
		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
			  vf->abs_vfid);
	/* Setup-op flags:
	 * collect statistics, zero statistics, local-switching, security,
	 * OV for Flex10, RSS and MCAST for leading
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

	/* for VFs, enable tx switching, bd coherency, and mac address
	 * anti-spoofing
	 */
	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

	if (vfq_is_leading(q)) {
		__set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
		__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
	}

	/* Setup-op rx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
	}

	/* Setup-op tx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	}
}
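/* Illustrative caller-side seeding for bnx2x_vfop_qctor_prep() (a sketch
 * under assumptions; the real caller is the VF-PF mailbox setup-queue flow
 * outside this file). The caller fills the generic init/setup params from
 * the VF's request, then lets the prep routine apply the VF overrides above:
 *
 *	struct bnx2x_vfop_qctor_params *p = &vf->op_params.qctor;
 *
 *	(fill p->qstate.params.init and p->prep_qsetup from the request,
 *	 e.g. set BNX2X_Q_FLG_HC in the init rx/tx flags to get coalescing)
 *	bnx2x_vfop_qctor_prep(bp, vf, q, p, q_type);
 */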
/* VFOP queue construction */
static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qctor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QCTOR_INIT:

		/* has this queue already been opened? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
			DP(BNX2X_MSG_IOV,
			   "Entered qctor but queue was already up. Aborting gracefully\n");
			goto op_done;
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_SETUP;

		q_params->cmd = BNX2X_Q_CMD_INIT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_SETUP:
		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_INT_EN;

		/* copy pre-prepared setup params to the queue-state params */
		vfop->op_p->qctor.qstate.params.setup =
			vfop->op_p->qctor.prep_qsetup;

		q_params->cmd = BNX2X_Q_CMD_SETUP;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_INT_EN:

		/* enable interrupts */
		bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
				    USTORM_ID, 0, IGU_INT_ENABLE, 0);
		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
op_done:
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qctor.qid = qid;
		vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);

		bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
				 bnx2x_vfop_qctor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
					     cmd->block);
	}
	return -ENOMEM;
}
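/* Illustrative invocation of the constructor command (a sketch; the actual
 * callers chain it from other VFOPs, e.g. bnx2x_vfop_qsetup() below):
 *
 *	struct bnx2x_vfop_cmd cmd = {
 *		.done = NULL,		(no continuation)
 *		.block = true,		(wait for each ramrod)
 *	};
 *	if (bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid))
 *		(out of memory - no vfop could be queued)
 */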
/* VFOP queue destruction */
static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qdtor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QDTOR_HALT:

		/* has this queue already been stopped? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_STOPPED) {
			DP(BNX2X_MSG_IOV,
			   "Entered qdtor but queue was already stopped. Aborting gracefully\n");
			goto op_done;
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;

		q_params->cmd = BNX2X_Q_CMD_HALT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_TERMINATE:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;

		q_params->cmd = BNX2X_Q_CMD_TERMINATE;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_CFCDEL:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_DONE;

		q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
	BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
op_done:
	case BNX2X_VFOP_QDTOR_DONE:
		/* invalidate the context */
		qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
		qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_queue_state_params *qstate =
			&vf->op_params.qctor.qstate;

		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qdtor.qid = qid;
		vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);

		bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
				 bnx2x_vfop_qdtor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
					     cmd->block);
	}
	DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop.\n", vf->abs_vfid);
	return -ENOMEM;
}

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
	if (vf) {
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;
		++vf_sb_count(vf);
	}
}

/* VFOP MAC/VLAN helpers */
static inline void bnx2x_vfop_credit(struct bnx2x *bp,
				     struct bnx2x_vfop *vfop,
				     struct bnx2x_vlan_mac_obj *obj)
{
	struct bnx2x_vfop_args_filters *args = &vfop->args.filters;

	/* update credit only if there is no error
	 * and a valid credit counter
	 */
	if (!vfop->rc && args->credit) {
		int cnt = 0;
		struct list_head *pos;

		list_for_each(pos, &obj->head)
			cnt++;

		atomic_set(args->credit, cnt);
	}
}
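/* Note on the credit counter above: rather than applying add/del deltas,
 * the helper re-counts the rules currently held by the vlan_mac object and
 * snapshots that into the caller-supplied atomic, so the counter always
 * reflects the object's true occupancy after the op - e.g. after a
 * successful clear-all it simply becomes the (now empty) list length.
 * (Descriptive note, no extra driver logic.)
 */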
static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
				   struct bnx2x_vfop_filter *pos,
				   struct bnx2x_vlan_mac_data *user_req)
{
	user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
		BNX2X_VLAN_MAC_DEL;

	switch (pos->type) {
	case BNX2X_VFOP_FILTER_MAC:
		memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
		break;
	case BNX2X_VFOP_FILTER_VLAN:
		user_req->u.vlan.vlan = pos->vid;
		break;
	default:
		BNX2X_ERR("Invalid filter type, skipping\n");
		return 1;
	}
	return 0;
}

static int
bnx2x_vfop_config_vlan0(struct bnx2x *bp,
			struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
			bool add)
{
	int rc;

	vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
		BNX2X_VLAN_MAC_DEL;
	vlan_mac->user_req.u.vlan.vlan = 0;

	rc = bnx2x_config_vlan_mac(bp, vlan_mac);
	if (rc == -EEXIST)
		rc = 0;
	return rc;
}

static int bnx2x_vfop_config_list(struct bnx2x *bp,
				  struct bnx2x_vfop_filters *filters,
				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
{
	struct bnx2x_vfop_filter *pos, *tmp;
	struct list_head rollback_list, *filters_list = &filters->head;
	struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
	int rc = 0, cnt = 0;

	INIT_LIST_HEAD(&rollback_list);

	list_for_each_entry_safe(pos, tmp, filters_list, link) {
		if (bnx2x_vfop_set_user_req(bp, pos, user_req))
			continue;

		rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (rc >= 0) {
			cnt += pos->add ? 1 : -1;
			list_del(&pos->link);
			list_add(&pos->link, &rollback_list);
			rc = 0;
		} else if (rc == -EEXIST) {
			rc = 0;
		} else {
			BNX2X_ERR("Failed to add a new vlan_mac command\n");
			break;
		}
	}

	/* rollback if error or too many rules added */
	if (rc || cnt > filters->add_cnt) {
		BNX2X_ERR("error or too many rules added. Performing rollback\n");
		list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
			pos->add = !pos->add;	/* reverse op */
			bnx2x_vfop_set_user_req(bp, pos, user_req);
			bnx2x_config_vlan_mac(bp, vlan_mac);
			list_del(&pos->link);
		}
		cnt = 0;
		if (!rc)
			rc = -EINVAL;
	}
	filters->add_cnt = cnt;
	return rc;
}
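/* Illustrative shape of the input to bnx2x_vfop_config_list() (a sketch;
 * the list is normally built by the VF-PF mailbox code from a VF request.
 * Only the fields consumed above are shown; allocation is elided):
 *
 *	struct bnx2x_vfop_filter *f = (one entry of the filters buffer);
 *
 *	f->type = BNX2X_VFOP_FILTER_VLAN;	(or BNX2X_VFOP_FILTER_MAC)
 *	f->add = true;
 *	f->vid = vid;				(f->mac for a MAC filter)
 *	list_add_tail(&f->link, &filters->head);
 *	filters->add_cnt = (max rules the VF may add);
 */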
/* VFOP set VLAN/MAC */
static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
	struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
	struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;

	enum bnx2x_vfop_vlan_mac_state state = vfop->state;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	bnx2x_vfop_reset_wq(vf);

	switch (state) {
	case BNX2X_VFOP_VLAN_MAC_CLEAR:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do delete */
		vfop->rc = obj->delete_all(bp, obj,
					   &vlan_mac->user_req.vlan_mac_flags,
					   &vlan_mac->ramrod_flags);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do config */
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (vfop->rc == -EEXIST)
			vfop->rc = 0;

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
		vfop->rc = !!obj->raw.check_pending(&obj->raw);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MAC_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do list config */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (vfop->rc)
			goto op_err;

		set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0;

		/* remove vlan0 - could be no-op */
		vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
		if (vfop->rc)
			goto op_err;

		/* Do vlan list config. If this operation fails we try to
		 * restore vlan0 to keep the queue in working order
		 */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (!vfop->rc) {
			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		}
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */

	case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		if (list_empty(&obj->head))
			/* add vlan0 */
			vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
op_done:
	kfree(filters);
	bnx2x_vfop_credit(bp, vfop, obj);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

struct bnx2x_vfop_vlan_mac_flags {
	bool drv_only;
	bool dont_consume;
	bool single_cmd;
	bool add;
};

static void
bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
				struct bnx2x_vfop_vlan_mac_flags *flags)
{
	struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;

	memset(ramrod, 0, sizeof(*ramrod));

	/* ramrod flags */
	if (flags->drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
	if (flags->single_cmd)
		set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);

	/* mac_vlan flags */
	if (flags->dont_consume)
		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);

	/* cmd */
	ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
}

static inline void
bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
			   struct bnx2x_vfop_vlan_mac_flags *flags)
{
	bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
	set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
}

static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
				     struct bnx2x_virtf *vf,
				     struct bnx2x_vfop_cmd *cmd,
				     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single */
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false /* don't care */,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}
int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    struct bnx2x_vfop_filters *macs,
			    int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = macs,
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false, /* don't care since only the items in the
				       * filters list affect the sp operation,
				       * not the list itself
				       */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    int qid, u16 vid, bool add)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = false,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = add,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
		ramrod->user_req.u.vlan.vlan = vid;

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      struct bnx2x_vfop_cmd *cmd,
				      int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false,	/* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}
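/* Illustrative single-vlan add (a sketch; bnx2x_vfop_qsetup() below makes
 * exactly this call to program vlan 0 on the leading queue):
 *
 *	rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, vid, true);
 */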
int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
			     struct bnx2x_virtf *vf,
			     struct bnx2x_vfop_cmd *cmd,
			     struct bnx2x_vfop_filters *vlans,
			     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = vlans,
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false,	/* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
			atomic_read(filters.credit);

		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue setup (queue constructor + set vlan 0) */
static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qctor.qid;
	enum bnx2x_vfop_qsetup_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_qsetup,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QSETUP_CTOR:
		/* init the queue ctor command */
		vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
		vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QSETUP_VLAN0:
		/* skip if non-leading or FPGA/EMU */
		if (qid)
			goto op_done;

		/* init the queue set-vlan command (for vlan 0) */
		vfop->state = BNX2X_VFOP_QSETUP_DONE;
		vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QSETUP_DONE:
		vf->cfg_flags |= VF_CFG_VLAN;
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
			&bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}

int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qctor.qid = qid;

		bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
				 bnx2x_vfop_qsetup, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
					     cmd->block);
	}
	return -ENOMEM;
}
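/* Illustrative queue bring-up from the PF side (a sketch; the real trigger
 * is the VF's SETUP_Q mailbox request, handled outside this file):
 *
 *	struct bnx2x_vfop_cmd cmd = {
 *		.done = (mailbox response continuation),
 *		.block = false,
 *	};
 *	rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, qid);
 *	(rc != 0 means the op could not even be queued, i.e. -ENOMEM)
 */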
"VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d", 1028 vf->abs_vfid, vfop->rc); 1029 if (vfop->rc) 1030 goto op_err; 1031 return; 1032 1033 case BNX2X_VFOP_QFLR_TERMINATE: 1034 qstate = &vfop->op_p->qctor.qstate; 1035 memset(qstate , 0, sizeof(*qstate)); 1036 qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj); 1037 vfop->state = BNX2X_VFOP_QFLR_DONE; 1038 1039 DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n", 1040 vf->abs_vfid, qstate->q_obj->state); 1041 1042 if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) { 1043 qstate->q_obj->state = BNX2X_Q_STATE_STOPPED; 1044 qstate->cmd = BNX2X_Q_CMD_TERMINATE; 1045 vfop->rc = bnx2x_queue_state_change(bp, qstate); 1046 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND); 1047 } else { 1048 goto op_done; 1049 } 1050 1051 op_err: 1052 BNX2X_ERR("QFLR[%d:%d] error: rc %d\n", 1053 vf->abs_vfid, qid, vfop->rc); 1054 op_done: 1055 case BNX2X_VFOP_QFLR_DONE: 1056 bnx2x_vfop_end(bp, vf, vfop); 1057 return; 1058 default: 1059 bnx2x_vfop_default(state); 1060 } 1061 op_pending: 1062 return; 1063 } 1064 1065 static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp, 1066 struct bnx2x_virtf *vf, 1067 struct bnx2x_vfop_cmd *cmd, 1068 int qid) 1069 { 1070 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 1071 1072 if (vfop) { 1073 vfop->args.qx.qid = qid; 1074 bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN, 1075 bnx2x_vfop_qflr, cmd->done); 1076 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr, 1077 cmd->block); 1078 } 1079 return -ENOMEM; 1080 } 1081 1082 /* VFOP multi-casts */ 1083 static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf) 1084 { 1085 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 1086 struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast; 1087 struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw; 1088 struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list; 1089 enum bnx2x_vfop_mcast_state state = vfop->state; 1090 int i; 1091 1092 bnx2x_vfop_reset_wq(vf); 1093 1094 if (vfop->rc < 0) 1095 goto op_err; 1096 1097 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); 1098 1099 switch (state) { 1100 case BNX2X_VFOP_MCAST_DEL: 1101 /* clear existing mcasts */ 1102 vfop->state = BNX2X_VFOP_MCAST_ADD; 1103 vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL); 1104 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); 1105 1106 case BNX2X_VFOP_MCAST_ADD: 1107 if (raw->check_pending(raw)) 1108 goto op_pending; 1109 1110 if (args->mc_num) { 1111 /* update mcast list on the ramrod params */ 1112 INIT_LIST_HEAD(&mcast->mcast_list); 1113 for (i = 0; i < args->mc_num; i++) 1114 list_add_tail(&(args->mc[i].link), 1115 &mcast->mcast_list); 1116 /* add new mcasts */ 1117 vfop->state = BNX2X_VFOP_MCAST_CHK_DONE; 1118 vfop->rc = bnx2x_config_mcast(bp, mcast, 1119 BNX2X_MCAST_CMD_ADD); 1120 } 1121 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); 1122 1123 case BNX2X_VFOP_MCAST_CHK_DONE: 1124 vfop->rc = raw->check_pending(raw) ? 
/* VFOP multi-casts */
static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
	struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
	struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
	enum bnx2x_vfop_mcast_state state = vfop->state;
	int i;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_MCAST_DEL:
		/* clear existing mcasts */
		vfop->state = BNX2X_VFOP_MCAST_ADD;
		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_MCAST_ADD:
		if (raw->check_pending(raw))
			goto op_pending;

		if (args->mc_num) {
			/* update mcast list on the ramrod params */
			INIT_LIST_HEAD(&mcast->mcast_list);
			for (i = 0; i < args->mc_num; i++)
				list_add_tail(&(args->mc[i].link),
					      &mcast->mcast_list);
			/* add new mcasts */
			vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
			vfop->rc = bnx2x_config_mcast(bp, mcast,
						      BNX2X_MCAST_CMD_ADD);
		}
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MCAST_CHK_DONE:
		vfop->rc = raw->check_pending(raw) ? 1 : 0;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
op_done:
	kfree(args->mc);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 bnx2x_mac_addr_t *mcasts,
			 int mcast_num, bool drv_only)
{
	struct bnx2x_vfop *vfop = NULL;
	size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
	struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
					   NULL;

	if (!mc_sz || mc) {
		vfop = bnx2x_vfop_add(bp, vf);
		if (vfop) {
			int i;
			struct bnx2x_mcast_ramrod_params *ramrod =
				&vf->op_params.mcast;

			/* set ramrod params */
			memset(ramrod, 0, sizeof(*ramrod));
			ramrod->mcast_obj = &vf->mcast_obj;
			if (drv_only)
				set_bit(RAMROD_DRV_CLR_ONLY,
					&ramrod->ramrod_flags);

			/* copy mcasts pointers */
			vfop->args.mc_list.mc_num = mcast_num;
			vfop->args.mc_list.mc = mc;
			for (i = 0; i < mcast_num; i++)
				mc[i].mac = mcasts[i];

			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
					 bnx2x_vfop_mcast, cmd->done);
			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
						     cmd->block);
		} else {
			kfree(mc);
		}
	}
	return -ENOMEM;
}
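/* Illustrative mcast-list replacement (a sketch; note that mcast_num == 0
 * with drv_only == true, as used by the FLR flow below, just clears the
 * list):
 *
 *	bnx2x_mac_addr_t mcasts[2] = { (two multicast addresses) };
 *
 *	rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, mcasts, 2, false);
 */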
/* VFOP rx-mode */
static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
	enum bnx2x_vfop_rxmode_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_RXMODE_CONFIG:
		/* next state */
		vfop->state = BNX2X_VFOP_RXMODE_DONE;

		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
		BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
op_done:
	case BNX2X_VFOP_RXMODE_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid, unsigned long accept_flags)
{
	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_rx_mode_ramrod_params *ramrod =
			&vf->op_params.rx_mode;

		memset(ramrod, 0, sizeof(*ramrod));

		/* Prepare ramrod parameters */
		ramrod->cid = vfq->cid;
		ramrod->cl_id = vfq_cl_id(vf, vfq);
		ramrod->rx_mode_obj = &bp->rx_mode_obj;
		ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);

		ramrod->rx_accept_flags = accept_flags;
		ramrod->tx_accept_flags = accept_flags;
		ramrod->pstate = &vf->filter_state;
		ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

		set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
		set_bit(RAMROD_RX, &ramrod->ramrod_flags);
		set_bit(RAMROD_TX, &ramrod->ramrod_flags);

		ramrod->rdata =
			bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
		ramrod->rdata_mapping =
			bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);

		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
				 bnx2x_vfop_rxmode, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
 * queue destructor)
 */
static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qteardown_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qdown;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QTEARDOWN_RXMODE:
		/* Drop all */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
		vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
		/* vlan-clear-all: don't consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
		/* mac-clear-all: consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_QDTOR:
		/* run the queue destruction flow */
		DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
		vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
		DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
		vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
		DP(BNX2X_MSG_IOV, "returned from cmd\n");
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);

	case BNX2X_VFOP_QTEARDOWN_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}

int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qx.qid = qid;
		bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE,
				 bnx2x_vfop_qdown, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
					     cmd->block);
	}

	return -ENOMEM;
}
/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}

static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;
	u32 val;

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	if (vf->cfg_flags & VF_CFG_INT_SIMD)
		val |= IGU_VF_CONF_SINGLE_ISR_EN;
	val &= ~IGU_VF_CONF_PARENT_MASK;
	val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT;	/* parent PF */
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	DP(BNX2X_MSG_IOV,
	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
	   vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
				       false /* VF */);

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
				    IGU_INT_DISABLE, 1);
	}
}

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
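/* The "pretend" pattern used throughout the enable primitives above,
 * spelled out (descriptive sketch): while a pretend is in effect, GRC
 * accesses are attributed to the pretended function, so VF-scoped
 * registers can be programmed from the PF. The PF identity must always
 * be restored afterwards:
 *
 *	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
 *	REG_WR(bp, (register in the VF's scope), val);
 *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 */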
static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		goto unknown_dev;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);

unknown_dev:
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Wait 100ms */
	msleep(100);

	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}

/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
{
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;

	/* divvy up vlan rules */
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
}

/* FLR routines: */
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* reset the state variables */
	bnx2x_iov_static_resc(bp, &vf->alloc_resc);
	vf->state = VF_FREE;
}

static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	/* DQ usage counter */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
					"DQ VF usage counter timed out",
					poll_cnt);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* FW cleanup command - poll for the results */
	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
				   poll_cnt))
		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

	/* verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);
}
static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
	enum bnx2x_vfop_flr_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_flr,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_FLR_QUEUES:
		/* the cleanup operations are valid if and only if the VF
		 * was first acquired.
		 */
		if (++(qx->qid) < vf_rxq_count(vf)) {
			vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
						       qx->qid);
			if (vfop->rc)
				goto op_err;
			return;
		}
		/* remove multicasts */
		vfop->state = BNX2X_VFOP_FLR_HW;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
						0, true);
		if (vfop->rc)
			goto op_err;
		return;
	case BNX2X_VFOP_FLR_HW:

		/* dispatch final cleanup and wait for HW queues to flush */
		bnx2x_vf_flr_clnup_hw(bp, vf);

		/* release VF resources */
		bnx2x_vf_free_resc(bp, vf);

		/* re-open the mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);

		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	vf->flr_clnup_stage = VF_FLR_ACK;
	bnx2x_vfop_end(bp, vf, vfop);
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
}

static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
			      struct bnx2x_virtf *vf,
			      vfop_handler_t done)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		vfop->args.qx.qid = -1;	/* loop */
		bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
				 bnx2x_vfop_flr, done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
	}
	return -ENOMEM;
}
static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
{
	int i = prev_vf ? prev_vf->index + 1 : 0;
	struct bnx2x_virtf *vf;

	/* find next VF to cleanup */
next_vf_to_clean:
	for (;
	     i < BNX2X_NR_VIRTFN(bp) &&
	     (bnx2x_vf(bp, i, state) != VF_RESET ||
	      bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
	     i++)
		;

	DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. num of vfs: %d\n", i,
	   BNX2X_NR_VIRTFN(bp));

	if (i < BNX2X_NR_VIRTFN(bp)) {
		vf = BP_VF(bp, i);

		/* lock the vf pf channel */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

		/* invoke the VF FLR SM */
		if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
			BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
				  vf->abs_vfid);

			/* mark the VF to be ACKED and continue */
			vf->flr_clnup_stage = VF_FLR_ACK;
			goto next_vf_to_clean;
		}
		return;
	}

	/* we are done, update vf records */
	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);

		if (vf->flr_clnup_stage != VF_FLR_ACK)
			continue;

		vf->flr_clnup_stage = VF_FLR_EPILOG;
	}

	/* Acknowledge the handled VFs.
	 * We acknowledge all the VFs for which an FLR was requested, even
	 * those we never opened, since the mcp will interrupt us immediately
	 * again if we only ack some of the bits, resulting in an endless
	 * loop. This can happen for example in KVM where an 'all ones' flr
	 * request is sometimes given by the hypervisor
	 */
	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
			  bp->vfdb->flrd_vfs[i]);

	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

	/* clear the acked bits - better yet if the MCP implemented
	 * write to clear semantics
	 */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}

void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
	int i;

	/* Read FLR'd VFs */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

	DP(BNX2X_MSG_MCP,
	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);
		u32 reset = 0;

		if (vf->abs_vfid < 32)
			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
		else
			reset = bp->vfdb->flrd_vfs[1] &
				(1 << (vf->abs_vfid - 32));

		if (reset) {
			/* set as reset and ready for cleanup */
			vf->state = VF_RESET;
			vf->flr_clnup_stage = VF_FLR_CLN;

			DP(BNX2X_MSG_IOV,
			   "Initiating Final cleanup for VF %d\n",
			   vf->abs_vfid);
		}
	}

	/* do the FLR cleanup for all marked VFs */
	bnx2x_vf_flr_clnup(bp, NULL);
}
"ON" : "OFF"); 1757 if (!IS_SRIOV(bp)) 1758 return; 1759 1760 REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); 1761 } 1762 1763 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) 1764 { 1765 struct pci_dev *dev = bp->pdev; 1766 struct bnx2x_sriov *iov = &bp->vfdb->sriov; 1767 1768 return dev->bus->number + ((dev->devfn + iov->offset + 1769 iov->stride * vfid) >> 8); 1770 } 1771 1772 static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid) 1773 { 1774 struct pci_dev *dev = bp->pdev; 1775 struct bnx2x_sriov *iov = &bp->vfdb->sriov; 1776 1777 return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff; 1778 } 1779 1780 static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf) 1781 { 1782 int i, n; 1783 struct pci_dev *dev = bp->pdev; 1784 struct bnx2x_sriov *iov = &bp->vfdb->sriov; 1785 1786 for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) { 1787 u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i); 1788 u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i); 1789 1790 size /= iov->total; 1791 vf->bars[n].bar = start + size * vf->abs_vfid; 1792 vf->bars[n].size = size; 1793 } 1794 } 1795 1796 static int bnx2x_ari_enabled(struct pci_dev *dev) 1797 { 1798 return dev->bus->self && dev->bus->self->ari_enabled; 1799 } 1800 1801 static void 1802 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) 1803 { 1804 int sb_id; 1805 u32 val; 1806 u8 fid; 1807 1808 /* IGU in normal mode - read CAM */ 1809 for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) { 1810 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4); 1811 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) 1812 continue; 1813 fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); 1814 if (!(fid & IGU_FID_ENCODE_IS_PF)) 1815 bnx2x_vf_set_igu_info(bp, sb_id, 1816 (fid & IGU_FID_VF_NUM_MASK)); 1817 1818 DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", 1819 ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"), 1820 ((fid & IGU_FID_ENCODE_IS_PF) ? 
(fid & IGU_FID_PF_NUM_MASK) : 1821 (fid & IGU_FID_VF_NUM_MASK)), sb_id, 1822 GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)); 1823 } 1824 } 1825 1826 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp) 1827 { 1828 if (bp->vfdb) { 1829 kfree(bp->vfdb->vfqs); 1830 kfree(bp->vfdb->vfs); 1831 kfree(bp->vfdb); 1832 } 1833 bp->vfdb = NULL; 1834 } 1835 1836 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov) 1837 { 1838 int pos; 1839 struct pci_dev *dev = bp->pdev; 1840 1841 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); 1842 if (!pos) { 1843 BNX2X_ERR("failed to find SRIOV capability in device\n"); 1844 return -ENODEV; 1845 } 1846 1847 iov->pos = pos; 1848 DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos); 1849 pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl); 1850 pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total); 1851 pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial); 1852 pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset); 1853 pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride); 1854 pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); 1855 pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap); 1856 pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); 1857 1858 return 0; 1859 } 1860 1861 static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov) 1862 { 1863 u32 val; 1864 1865 /* read the SRIOV capability structure 1866 * The fields can be read via configuration read or 1867 * directly from the device (starting at offset PCICFG_OFFSET) 1868 */ 1869 if (bnx2x_sriov_pci_cfg_info(bp, iov)) 1870 return -ENODEV; 1871 1872 /* get the number of SRIOV bars */ 1873 iov->nres = 0; 1874 1875 /* read the first_vfid */ 1876 val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF); 1877 iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK) 1878 * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp)); 1879 1880 DP(BNX2X_MSG_IOV, 1881 "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n", 1882 BP_FUNC(bp), 1883 iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total, 1884 iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz); 1885 1886 return 0; 1887 } 1888 1889 static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp) 1890 { 1891 int i; 1892 u8 queue_count = 0; 1893 1894 if (IS_SRIOV(bp)) 1895 for_each_vf(bp, i) 1896 queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs); 1897 1898 return queue_count; 1899 } 1900 1901 /* must be called after PF bars are mapped */ 1902 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, 1903 int num_vfs_param) 1904 { 1905 int err, i, qcount; 1906 struct bnx2x_sriov *iov; 1907 struct pci_dev *dev = bp->pdev; 1908 1909 bp->vfdb = NULL; 1910 1911 /* verify is pf */ 1912 if (IS_VF(bp)) 1913 return 0; 1914 1915 /* verify sriov capability is present in configuration space */ 1916 if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV)) 1917 return 0; 1918 1919 /* verify chip revision */ 1920 if (CHIP_IS_E1x(bp)) 1921 return 0; 1922 1923 /* check if SRIOV support is turned off */ 1924 if (!num_vfs_param) 1925 return 0; 1926 1927 /* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */ 1928 if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) { 1929 BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). 
Abort SRIOV\n", 1930 BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID); 1931 return 0; 1932 } 1933 1934 /* SRIOV can be enabled only with MSIX */ 1935 if (int_mode_param == BNX2X_INT_MODE_MSI || 1936 int_mode_param == BNX2X_INT_MODE_INTX) 1937 BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n"); 1938 1939 err = -EIO; 1940 /* verify ari is enabled */ 1941 if (!bnx2x_ari_enabled(bp->pdev)) { 1942 BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n"); 1943 return err; 1944 } 1945 1946 /* verify igu is in normal mode */ 1947 if (CHIP_INT_MODE_IS_BC(bp)) { 1948 BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n"); 1949 return err; 1950 } 1951 1952 /* allocate the vfs database */ 1953 bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL); 1954 if (!bp->vfdb) { 1955 BNX2X_ERR("failed to allocate vf database\n"); 1956 err = -ENOMEM; 1957 goto failed; 1958 } 1959 1960 /* get the sriov info - Linux already collected all the pertinent 1961 * information; however, the sriov structure is for the private use 1962 * of the pci module. Also, we want this information regardless 1963 * of the hypervisor. 1964 */ 1965 iov = &(bp->vfdb->sriov); 1966 err = bnx2x_sriov_info(bp, iov); 1967 if (err) 1968 goto failed; 1969 1970 /* SR-IOV capability was enabled but there are no VFs */ 1971 if (iov->total == 0) 1972 goto failed; 1973 1974 iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param); 1975 1976 DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n", 1977 num_vfs_param, iov->nr_virtfn); 1978 1979 /* allocate the vf array */ 1980 bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) * 1981 BNX2X_NR_VIRTFN(bp), GFP_KERNEL); 1982 if (!bp->vfdb->vfs) { 1983 BNX2X_ERR("failed to allocate vf array\n"); 1984 err = -ENOMEM; 1985 goto failed; 1986 } 1987 1988 /* Initial VF init - index and abs_vfid - nr_virtfn must be set */ 1989 for_each_vf(bp, i) { 1990 bnx2x_vf(bp, i, index) = i; 1991 bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; 1992 bnx2x_vf(bp, i, state) = VF_FREE; 1993 INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head)); 1994 mutex_init(&bnx2x_vf(bp, i, op_mutex)); 1995 bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; 1996 } 1997 1998 /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ 1999 bnx2x_get_vf_igu_cam_info(bp); 2000 2001 /* get the total queue count and allocate the global queue arrays */ 2002 qcount = bnx2x_iov_get_max_queue_count(bp); 2003 2004 /* allocate the queue arrays for all VFs */ 2005 bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue), 2006 GFP_KERNEL); 2007 if (!bp->vfdb->vfqs) { 2008 BNX2X_ERR("failed to allocate vf queue array\n"); 2009 err = -ENOMEM; 2010 goto failed; 2011 } 2012 2013 return 0; 2014 failed: 2015 DP(BNX2X_MSG_IOV, "Failed err=%d\n", err); 2016 __bnx2x_iov_free_vfdb(bp); 2017 return err; 2018 } 2019 2020 void bnx2x_iov_remove_one(struct bnx2x *bp) 2021 { 2022 /* if SRIOV is not enabled there's nothing to do */ 2023 if (!IS_SRIOV(bp)) 2024 return; 2025 2026 DP(BNX2X_MSG_IOV, "about to call disable sriov\n"); 2027 pci_disable_sriov(bp->pdev); 2028 DP(BNX2X_MSG_IOV, "sriov disabled\n"); 2029 2030 /* free vf database */ 2031 __bnx2x_iov_free_vfdb(bp); 2032 } 2033 2034 void bnx2x_iov_free_mem(struct bnx2x *bp) 2035 { 2036 int i; 2037 2038 if (!IS_SRIOV(bp)) 2039 return; 2040 2041 /* free vfs hw contexts */ 2042 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 2043 struct hw_dma *cxt = &bp->vfdb->context[i]; 2044 BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size); 2045 } 2046 2047 BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr, 2048
BP_VFDB(bp)->sp_dma.mapping, 2049 BP_VFDB(bp)->sp_dma.size); 2050 2051 BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr, 2052 BP_VF_MBX_DMA(bp)->mapping, 2053 BP_VF_MBX_DMA(bp)->size); 2054 2055 BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr, 2056 BP_VF_BULLETIN_DMA(bp)->mapping, 2057 BP_VF_BULLETIN_DMA(bp)->size); 2058 } 2059 2060 int bnx2x_iov_alloc_mem(struct bnx2x *bp) 2061 { 2062 size_t tot_size; 2063 int i, rc = 0; 2064 2065 if (!IS_SRIOV(bp)) 2066 return rc; 2067 2068 /* allocate vfs hw contexts */ 2069 tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) * 2070 BNX2X_CIDS_PER_VF * sizeof(union cdu_context); 2071 2072 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 2073 struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i); 2074 cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ); 2075 2076 if (cxt->size) { 2077 BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size); 2078 } else { 2079 cxt->addr = NULL; 2080 cxt->mapping = 0; 2081 } 2082 tot_size -= cxt->size; 2083 } 2084 2085 /* allocate vfs ramrods dma memory - client_init and set_mac */ 2086 tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp); 2087 BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping, 2088 tot_size); 2089 BP_VFDB(bp)->sp_dma.size = tot_size; 2090 2091 /* allocate mailboxes */ 2092 tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; 2093 BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping, 2094 tot_size); 2095 BP_VF_MBX_DMA(bp)->size = tot_size; 2096 2097 /* allocate local bulletin boards */ 2098 tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE; 2099 BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr, 2100 &BP_VF_BULLETIN_DMA(bp)->mapping, tot_size); 2101 BP_VF_BULLETIN_DMA(bp)->size = tot_size; 2102 2103 return 0; 2104 2105 alloc_mem_err: 2106 return -ENOMEM; 2107 } 2108 2109 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, 2110 struct bnx2x_vf_queue *q) 2111 { 2112 u8 cl_id = vfq_cl_id(vf, q); 2113 u8 func_id = FW_VF_HANDLE(vf->abs_vfid); 2114 unsigned long q_type = 0; 2115 2116 set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); 2117 set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); 2118 2119 /* Queue State object */ 2120 bnx2x_init_queue_obj(bp, &q->sp_obj, 2121 cl_id, &q->cid, 1, func_id, 2122 bnx2x_vf_sp(bp, vf, q_data), 2123 bnx2x_vf_sp_map(bp, vf, q_data), 2124 q_type); 2125 2126 DP(BNX2X_MSG_IOV, 2127 "initialized vf %d's queue object. 
func id set to %d\n", 2128 vf->abs_vfid, q->sp_obj.func_id); 2129 2130 /* mac/vlan objects are per queue, but only those 2131 * that belong to the leading queue are initialized 2132 */ 2133 if (vfq_is_leading(q)) { 2134 /* mac */ 2135 bnx2x_init_mac_obj(bp, &q->mac_obj, 2136 cl_id, q->cid, func_id, 2137 bnx2x_vf_sp(bp, vf, mac_rdata), 2138 bnx2x_vf_sp_map(bp, vf, mac_rdata), 2139 BNX2X_FILTER_MAC_PENDING, 2140 &vf->filter_state, 2141 BNX2X_OBJ_TYPE_RX_TX, 2142 &bp->macs_pool); 2143 /* vlan */ 2144 bnx2x_init_vlan_obj(bp, &q->vlan_obj, 2145 cl_id, q->cid, func_id, 2146 bnx2x_vf_sp(bp, vf, vlan_rdata), 2147 bnx2x_vf_sp_map(bp, vf, vlan_rdata), 2148 BNX2X_FILTER_VLAN_PENDING, 2149 &vf->filter_state, 2150 BNX2X_OBJ_TYPE_RX_TX, 2151 &bp->vlans_pool); 2152 2153 /* mcast */ 2154 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id, 2155 q->cid, func_id, func_id, 2156 bnx2x_vf_sp(bp, vf, mcast_rdata), 2157 bnx2x_vf_sp_map(bp, vf, mcast_rdata), 2158 BNX2X_FILTER_MCAST_PENDING, 2159 &vf->filter_state, 2160 BNX2X_OBJ_TYPE_RX_TX); 2161 2162 vf->leading_rss = cl_id; 2163 } 2164 } 2165 2166 /* called by bnx2x_nic_load */ 2167 int bnx2x_iov_nic_init(struct bnx2x *bp) 2168 { 2169 int vfid, qcount, i; 2170 2171 if (!IS_SRIOV(bp)) { 2172 DP(BNX2X_MSG_IOV, "vfdb was not allocated\n"); 2173 return 0; 2174 } 2175 2176 DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn); 2177 2178 /* initialize vf database */ 2179 for_each_vf(bp, vfid) { 2180 struct bnx2x_virtf *vf = BP_VF(bp, vfid); 2181 2182 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) * 2183 BNX2X_CIDS_PER_VF; 2184 2185 union cdu_context *base_cxt = (union cdu_context *) 2186 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + 2187 (base_vf_cid & (ILT_PAGE_CIDS-1)); 2188 2189 DP(BNX2X_MSG_IOV, 2190 "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n", 2191 vf->abs_vfid, vf_sb_count(vf), base_vf_cid, 2192 BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt); 2193 2194 /* init statically provisioned resources */ 2195 bnx2x_iov_static_resc(bp, &vf->alloc_resc); 2196 2197 /* queues are initialized during VF-ACQUIRE */ 2198 2199 /* reserve the vf vlan credit */ 2200 bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf)); 2201 2202 vf->filter_state = 0; 2203 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); 2204 2205 /* init mcast object - This object will be re-initialized 2206 * during VF-ACQUIRE with the proper cl_id and cid. 2207 * It needs to be initialized here so that it can be safely 2208 * handled by a subsequent FLR flow. 
2209 */ 2210 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF, 2211 0xFF, 0xFF, 0xFF, 2212 bnx2x_vf_sp(bp, vf, mcast_rdata), 2213 bnx2x_vf_sp_map(bp, vf, mcast_rdata), 2214 BNX2X_FILTER_MCAST_PENDING, 2215 &vf->filter_state, 2216 BNX2X_OBJ_TYPE_RX_TX); 2217 2218 /* set the mailbox message addresses */ 2219 BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *) 2220 (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid * 2221 MBX_MSG_ALIGNED_SIZE); 2222 2223 BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping + 2224 vfid * MBX_MSG_ALIGNED_SIZE; 2225 2226 /* Enable vf mailbox */ 2227 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); 2228 } 2229 2230 /* Final VF init */ 2231 qcount = 0; 2232 for_each_vf(bp, i) { 2233 struct bnx2x_virtf *vf = BP_VF(bp, i); 2234 2235 /* fill in the BDF and bars */ 2236 vf->bus = bnx2x_vf_bus(bp, i); 2237 vf->devfn = bnx2x_vf_devfn(bp, i); 2238 bnx2x_vf_set_bars(bp, vf); 2239 2240 DP(BNX2X_MSG_IOV, 2241 "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n", 2242 vf->abs_vfid, vf->bus, vf->devfn, 2243 (unsigned)vf->bars[0].bar, vf->bars[0].size, 2244 (unsigned)vf->bars[1].bar, vf->bars[1].size, 2245 (unsigned)vf->bars[2].bar, vf->bars[2].size); 2246 2247 /* set local queue arrays */ 2248 vf->vfqs = &bp->vfdb->vfqs[qcount]; 2249 qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs); 2250 } 2251 2252 return 0; 2253 } 2254 2255 /* called by bnx2x_chip_cleanup */ 2256 int bnx2x_iov_chip_cleanup(struct bnx2x *bp) 2257 { 2258 int i; 2259 2260 if (!IS_SRIOV(bp)) 2261 return 0; 2262 2263 /* release all the VFs */ 2264 for_each_vf(bp, i) 2265 bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */ 2266 2267 return 0; 2268 } 2269 2270 /* called by bnx2x_init_hw_func, returns the next ilt line */ 2271 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) 2272 { 2273 int i; 2274 struct bnx2x_ilt *ilt = BP_ILT(bp); 2275 2276 if (!IS_SRIOV(bp)) 2277 return line; 2278 2279 /* set vfs ilt lines */ 2280 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 2281 struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i); 2282 2283 ilt->lines[line+i].page = hw_cxt->addr; 2284 ilt->lines[line+i].page_mapping = hw_cxt->mapping; 2285 ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */ 2286 } 2287 return line + i; 2288 } 2289 2290 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid) 2291 { 2292 return ((cid >= BNX2X_FIRST_VF_CID) && 2293 ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS)); 2294 } 2295 2296 static 2297 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, 2298 struct bnx2x_vf_queue *vfq, 2299 union event_ring_elem *elem) 2300 { 2301 unsigned long ramrod_flags = 0; 2302 int rc = 0; 2303 2304 /* Always push next commands out, don't wait here */ 2305 set_bit(RAMROD_CONT, &ramrod_flags); 2306 2307 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { 2308 case BNX2X_FILTER_MAC_PENDING: 2309 rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, 2310 &ramrod_flags); 2311 break; 2312 case BNX2X_FILTER_VLAN_PENDING: 2313 rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem, 2314 &ramrod_flags); 2315 break; 2316 default: 2317 BNX2X_ERR("Unsupported classification command: %d\n", 2318 elem->message.data.eth_event.echo); 2319 return; 2320 } 2321 if (rc < 0) 2322 BNX2X_ERR("Failed to schedule new commands: %d\n", rc); 2323 else if (rc > 0) 2324 DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n"); 2325 } 2326 2327 static 2328 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp, 2329 struct bnx2x_virtf *vf) 2330 { 2331 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 
int rc; 2333 2334 rparam.mcast_obj = &vf->mcast_obj; 2335 vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw); 2336 2337 /* If there are pending mcast commands - send them */ 2338 if (vf->mcast_obj.check_pending(&vf->mcast_obj)) { 2339 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 2340 if (rc < 0) 2341 BNX2X_ERR("Failed to send pending mcast commands: %d\n", 2342 rc); 2343 } 2344 } 2345 2346 static 2347 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, 2348 struct bnx2x_virtf *vf) 2349 { 2350 smp_mb__before_clear_bit(); 2351 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); 2352 smp_mb__after_clear_bit(); 2353 } 2354 2355 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) 2356 { 2357 struct bnx2x_virtf *vf; 2358 int qidx = 0, abs_vfid; 2359 u8 opcode; 2360 u16 cid = 0xffff; 2361 2362 if (!IS_SRIOV(bp)) 2363 return 1; 2364 2365 /* first get the cid - the only events we handle here are cfc-delete 2366 * and set-mac completion 2367 */ 2368 opcode = elem->message.opcode; 2369 2370 switch (opcode) { 2371 case EVENT_RING_OPCODE_CFC_DEL: 2372 cid = SW_CID((__force __le32) 2373 elem->message.data.cfc_del_event.cid); 2374 DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid); 2375 break; 2376 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 2377 case EVENT_RING_OPCODE_MULTICAST_RULES: 2378 case EVENT_RING_OPCODE_FILTERS_RULES: 2379 cid = (elem->message.data.eth_event.echo & 2380 BNX2X_SWCID_MASK); 2381 DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); 2382 break; 2383 case EVENT_RING_OPCODE_VF_FLR: 2384 abs_vfid = elem->message.data.vf_flr_event.vf_id; 2385 DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n", 2386 abs_vfid); 2387 goto get_vf; 2388 case EVENT_RING_OPCODE_MALICIOUS_VF: 2389 abs_vfid = elem->message.data.malicious_vf_event.vf_id; 2390 DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n", 2391 abs_vfid); 2392 goto get_vf; 2393 default: 2394 return 1; 2395 } 2396 2397 /* check if the cid is in the VF range */ 2398 if (!bnx2x_iov_is_vf_cid(bp, cid)) { 2399 DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid); 2400 return 1; 2401 } 2402 2403 /* extract vf and rxq index from vf_cid - relies on the following: 2404 * 1. vfid on cid reflects the true abs_vfid 2405 * 2.
the max number of VFs (per path) is 64 2406 */ 2407 qidx = cid & ((1 << BNX2X_VF_CID_WND)-1); 2408 abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 2409 get_vf: 2410 vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 2411 2412 if (!vf) { 2413 BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n", 2414 cid, abs_vfid); 2415 return 0; 2416 } 2417 2418 switch (opcode) { 2419 case EVENT_RING_OPCODE_CFC_DEL: 2420 DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n", 2421 vf->abs_vfid, qidx); 2422 vfq_get(vf, qidx)->sp_obj.complete_cmd(bp, 2423 &vfq_get(vf, 2424 qidx)->sp_obj, 2425 BNX2X_Q_CMD_CFC_DEL); 2426 break; 2427 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 2428 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n", 2429 vf->abs_vfid, qidx); 2430 bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem); 2431 break; 2432 case EVENT_RING_OPCODE_MULTICAST_RULES: 2433 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n", 2434 vf->abs_vfid, qidx); 2435 bnx2x_vf_handle_mcast_eqe(bp, vf); 2436 break; 2437 case EVENT_RING_OPCODE_FILTERS_RULES: 2438 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n", 2439 vf->abs_vfid, qidx); 2440 bnx2x_vf_handle_filters_eqe(bp, vf); 2441 break; 2442 case EVENT_RING_OPCODE_VF_FLR: 2443 DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n", 2444 vf->abs_vfid); 2445 /* Do nothing for now */ 2446 break; 2447 case EVENT_RING_OPCODE_MALICIOUS_VF: 2448 DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n", 2449 vf->abs_vfid); 2450 /* Do nothing for now */ 2451 break; 2452 } 2453 /* SRIOV: reschedule any 'in_progress' operations */ 2454 bnx2x_iov_sp_event(bp, cid, false); 2455 2456 return 0; 2457 } 2458 2459 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) 2460 { 2461 /* extract the vf from vf_cid - relies on the following: 2462 * 1. vfid on cid reflects the true abs_vfid 2463 * 2. the max number of VFs (per path) is 64 2464 */ 2465 int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 2466 return bnx2x_vf_by_abs_fid(bp, abs_vfid); 2467 } 2468 2469 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, 2470 struct bnx2x_queue_sp_obj **q_obj) 2471 { 2472 struct bnx2x_virtf *vf; 2473 2474 if (!IS_SRIOV(bp)) 2475 return; 2476 2477 vf = bnx2x_vf_by_cid(bp, vf_cid); 2478 2479 if (vf) { 2480 /* extract queue index from vf_cid - relies on the following: 2481 * 1. vfid on cid reflects the true abs_vfid 2482 * 2. 
the max number of VFs (per path) is 64 2483 */ 2484 int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1); 2485 *q_obj = &bnx2x_vfq(vf, q_index, sp_obj); 2486 } else { 2487 BNX2X_ERR("No vf matching cid %d\n", vf_cid); 2488 } 2489 } 2490 2491 void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work) 2492 { 2493 struct bnx2x_virtf *vf; 2494 2495 /* check if the cid is in the VF range */ 2496 if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid)) 2497 return; 2498 2499 vf = bnx2x_vf_by_cid(bp, vf_cid); 2500 if (vf) { 2501 /* set in_progress flag */ 2502 atomic_set(&vf->op_in_progress, 1); 2503 if (queue_work) 2504 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); 2505 } 2506 } 2507 2508 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) 2509 { 2510 int i; 2511 int first_queue_query_index, num_queues_req; 2512 dma_addr_t cur_data_offset; 2513 struct stats_query_entry *cur_query_entry; 2514 u8 stats_count = 0; 2515 bool is_fcoe = false; 2516 2517 if (!IS_SRIOV(bp)) 2518 return; 2519 2520 if (!NO_FCOE(bp)) 2521 is_fcoe = true; 2522 2523 /* fcoe adds one global request and one queue request */ 2524 num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe; 2525 first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 2526 (is_fcoe ? 0 : 1); 2527 2528 DP(BNX2X_MSG_IOV, 2529 "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non-virtual statistics query index is %d. Will add queries on top of that\n", 2530 BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, 2531 first_queue_query_index + num_queues_req); 2532 2533 cur_data_offset = bp->fw_stats_data_mapping + 2534 offsetof(struct bnx2x_fw_stats_data, queue_stats) + 2535 num_queues_req * sizeof(struct per_queue_stats); 2536 2537 cur_query_entry = &bp->fw_stats_req-> 2538 query[first_queue_query_index + num_queues_req]; 2539 2540 for_each_vf(bp, i) { 2541 int j; 2542 struct bnx2x_virtf *vf = BP_VF(bp, i); 2543 2544 if (vf->state != VF_ENABLED) { 2545 DP(BNX2X_MSG_IOV, 2546 "vf %d not enabled so no stats for it\n", 2547 vf->abs_vfid); 2548 continue; 2549 } 2550 2551 DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid); 2552 for_each_vfq(vf, j) { 2553 struct bnx2x_vf_queue *rxq = vfq_get(vf, j); 2554 2555 /* collect stats from active queues only */ 2556 if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) == 2557 BNX2X_Q_LOGICAL_STATE_STOPPED) 2558 continue; 2559 2560 /* create stats query entry for this queue */ 2561 cur_query_entry->kind = STATS_TYPE_QUEUE; 2562 cur_query_entry->index = vfq_cl_id(vf, rxq); 2563 cur_query_entry->funcID = 2564 cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid)); 2565 cur_query_entry->address.hi = 2566 cpu_to_le32(U64_HI(vf->fw_stat_map)); 2567 cur_query_entry->address.lo = 2568 cpu_to_le32(U64_LO(vf->fw_stat_map)); 2569 DP(BNX2X_MSG_IOV, 2570 "added address %x %x for vf %d queue %d client %d\n", 2571 cur_query_entry->address.hi, 2572 cur_query_entry->address.lo, cur_query_entry->funcID, 2573 j, cur_query_entry->index); 2574 cur_query_entry++; 2575 cur_data_offset += sizeof(struct per_queue_stats); 2576 stats_count++; 2577 } 2578 } 2579 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; 2580 } 2581 2582 void bnx2x_iov_sp_task(struct bnx2x *bp) 2583 { 2584 int i; 2585 2586 if (!IS_SRIOV(bp)) 2587 return; 2588 /* Iterate over all VFs and invoke state transition for VFs with 2589 * 'in-progress' slow-path operations 2590 */ 2591 DP(BNX2X_MSG_IOV, "searching for pending vf operations\n"); 2592 for_each_vf(bp, i) { 2593 struct bnx2x_virtf *vf = BP_VF(bp, i); 2594
2595 if (!list_empty(&vf->op_list_head) && 2596 atomic_read(&vf->op_in_progress)) { 2597 DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i); 2598 bnx2x_vfop_cur(bp, vf)->transition(bp, vf); 2599 } 2600 } 2601 } 2602 2603 static inline 2604 struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id) 2605 { 2606 int i; 2607 struct bnx2x_virtf *vf = NULL; 2608 2609 for_each_vf(bp, i) { 2610 vf = BP_VF(bp, i); 2611 if (stat_id >= vf->igu_base_id && 2612 stat_id < vf->igu_base_id + vf_sb_count(vf)) 2613 break; 2614 } 2615 return vf; 2616 } 2617 2618 /* VF API helpers */ 2619 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid, 2620 u8 enable) 2621 { 2622 u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4; 2623 u32 val = enable ? (abs_vfid | (1 << 6)) : 0; 2624 2625 REG_WR(bp, reg, val); 2626 } 2627 2628 static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf) 2629 { 2630 int i; 2631 2632 for_each_vfq(vf, i) 2633 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, 2634 vfq_qzone_id(vf, vfq_get(vf, i)), false); 2635 } 2636 2637 static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf) 2638 { 2639 u32 val; 2640 2641 /* clear the VF configuration - pretend */ 2642 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); 2643 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); 2644 val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN | 2645 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK); 2646 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); 2647 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 2648 } 2649 2650 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf) 2651 { 2652 return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF), 2653 BNX2X_VF_MAX_QUEUES); 2654 } 2655 2656 static 2657 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf, 2658 struct vf_pf_resc_request *req_resc) 2659 { 2660 u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); 2661 u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); 2662 2663 return ((req_resc->num_rxqs <= rxq_cnt) && 2664 (req_resc->num_txqs <= txq_cnt) && 2665 (req_resc->num_sbs <= vf_sb_count(vf)) && 2666 (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) && 2667 (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf))); 2668 } 2669 2670 /* CORE VF API */ 2671 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, 2672 struct vf_pf_resc_request *resc) 2673 { 2674 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) * 2675 BNX2X_CIDS_PER_VF; 2676 2677 union cdu_context *base_cxt = (union cdu_context *) 2678 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + 2679 (base_vf_cid & (ILT_PAGE_CIDS-1)); 2680 int i; 2681
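/* Editor's note: the "x ? : y" form used with the resource counters here
 * and in bnx2x_vf_chk_avail_resc() above is the GNU C conditional with an
 * omitted middle operand -- it evaluates to x when x is non-zero and to y
 * otherwise -- so, for example,
 *
 *	vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf)
 *
 * maps a zero (i.e. unset) rx queue count to "maximum available".
 */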
2686 */ 2687 if (vf->state == VF_ACQUIRED) { 2688 DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n", 2689 vf->abs_vfid); 2690 2691 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { 2692 BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= then previously acquired numbers\n", 2693 vf->abs_vfid); 2694 return -EINVAL; 2695 } 2696 return 0; 2697 } 2698 2699 /* Otherwise vf state must be 'free' or 'reset' */ 2700 if (vf->state != VF_FREE && vf->state != VF_RESET) { 2701 BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n", 2702 vf->abs_vfid, vf->state); 2703 return -EINVAL; 2704 } 2705 2706 /* static allocation: 2707 * the global maximum number are fixed per VF. fail the request if 2708 * requested number exceed these globals 2709 */ 2710 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { 2711 DP(BNX2X_MSG_IOV, 2712 "cannot fulfill vf resource request. Placing maximal available values in response\n"); 2713 /* set the max resource in the vf */ 2714 return -ENOMEM; 2715 } 2716 2717 /* Set resources counters - 0 request means max available */ 2718 vf_sb_count(vf) = resc->num_sbs; 2719 vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf); 2720 vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf); 2721 if (resc->num_mac_filters) 2722 vf_mac_rules_cnt(vf) = resc->num_mac_filters; 2723 if (resc->num_vlan_filters) 2724 vf_vlan_rules_cnt(vf) = resc->num_vlan_filters; 2725 2726 DP(BNX2X_MSG_IOV, 2727 "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n", 2728 vf_sb_count(vf), vf_rxq_count(vf), 2729 vf_txq_count(vf), vf_mac_rules_cnt(vf), 2730 vf_vlan_rules_cnt(vf)); 2731 2732 /* Initialize the queues */ 2733 if (!vf->vfqs) { 2734 DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n"); 2735 return -EINVAL; 2736 } 2737 2738 for_each_vfq(vf, i) { 2739 struct bnx2x_vf_queue *q = vfq_get(vf, i); 2740 2741 if (!q) { 2742 DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i); 2743 return -EINVAL; 2744 } 2745 2746 q->index = i; 2747 q->cxt = &((base_cxt + i)->eth); 2748 q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i; 2749 2750 DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n", 2751 vf->abs_vfid, i, q->index, q->cid, q->cxt); 2752 2753 /* init SP objects */ 2754 bnx2x_vfq_init(bp, vf, q); 2755 } 2756 vf->state = VF_ACQUIRED; 2757 return 0; 2758 } 2759 2760 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) 2761 { 2762 struct bnx2x_func_init_params func_init = {0}; 2763 u16 flags = 0; 2764 int i; 2765 2766 /* the sb resources are initialized at this point, do the 2767 * FW/HW initializations 2768 */ 2769 for_each_vf_sb(vf, i) 2770 bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true, 2771 vf_igu_sb(vf, i), vf_igu_sb(vf, i)); 2772 2773 /* Sanity checks */ 2774 if (vf->state != VF_ACQUIRED) { 2775 DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n", 2776 vf->abs_vfid, vf->state); 2777 return -EINVAL; 2778 } 2779 /* FLR cleanup epilogue */ 2780 if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid)) 2781 return -EBUSY; 2782 2783 /* reset IGU VF statistics: MSIX */ 2784 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0); 2785 2786 /* vf init */ 2787 if (vf->cfg_flags & VF_CFG_STATS) 2788 flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ); 2789 2790 if (vf->cfg_flags & VF_CFG_TPA) 2791 flags |= FUNC_FLG_TPA; 2792 2793 if (is_vf_multi(vf)) 2794 flags |= FUNC_FLG_RSS; 2795 2796 /* function setup */ 2797 func_init.func_flgs = 
2798 func_init.pf_id = BP_FUNC(bp); 2799 func_init.func_id = FW_VF_HANDLE(vf->abs_vfid); 2800 func_init.fw_stat_map = vf->fw_stat_map; 2801 func_init.spq_map = vf->spq_map; 2802 func_init.spq_prod = 0; 2803 bnx2x_func_init(bp, &func_init); 2804 2805 /* Enable the vf */ 2806 bnx2x_vf_enable_access(bp, vf->abs_vfid); 2807 bnx2x_vf_enable_traffic(bp, vf); 2808 2809 /* queue protection table */ 2810 for_each_vfq(vf, i) 2811 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, 2812 vfq_qzone_id(vf, vfq_get(vf, i)), true); 2813 2814 vf->state = VF_ENABLED; 2815 2816 /* update vf bulletin board */ 2817 bnx2x_post_vf_bulletin(bp, vf->index); 2818 2819 return 0; 2820 } 2821 2822 /* VFOP close (teardown the queues, delete mcasts and close HW) */ 2823 static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) 2824 { 2825 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 2826 struct bnx2x_vfop_args_qx *qx = &vfop->args.qx; 2827 enum bnx2x_vfop_close_state state = vfop->state; 2828 struct bnx2x_vfop_cmd cmd = { 2829 .done = bnx2x_vfop_close, 2830 .block = false, 2831 }; 2832 2833 if (vfop->rc < 0) 2834 goto op_err; 2835 2836 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); 2837 2838 switch (state) { 2839 case BNX2X_VFOP_CLOSE_QUEUES: 2840 2841 if (++(qx->qid) < vf_rxq_count(vf)) { 2842 vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid); 2843 if (vfop->rc) 2844 goto op_err; 2845 return; 2846 } 2847 2848 /* remove multicasts */ 2849 vfop->state = BNX2X_VFOP_CLOSE_HW; 2850 vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false); 2851 if (vfop->rc) 2852 goto op_err; 2853 return; 2854 2855 case BNX2X_VFOP_CLOSE_HW: 2856 2857 /* disable the interrupts */ 2858 DP(BNX2X_MSG_IOV, "disabling igu\n"); 2859 bnx2x_vf_igu_disable(bp, vf); 2860 2861 /* disable the VF */ 2862 DP(BNX2X_MSG_IOV, "clearing qtbl\n"); 2863 bnx2x_vf_clr_qtbl(bp, vf); 2864 2865 goto op_done; 2866 default: 2867 bnx2x_vfop_default(state); 2868 } 2869 op_err: 2870 BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc); 2871 op_done: 2872 vf->state = VF_ACQUIRED; 2873 DP(BNX2X_MSG_IOV, "set state to acquired\n"); 2874 bnx2x_vfop_end(bp, vf, vfop); 2875 } 2876 2877 int bnx2x_vfop_close_cmd(struct bnx2x *bp, 2878 struct bnx2x_virtf *vf, 2879 struct bnx2x_vfop_cmd *cmd) 2880 { 2881 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 2882 if (vfop) { 2883 vfop->args.qx.qid = -1; /* loop */ 2884 bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES, 2885 bnx2x_vfop_close, cmd->done); 2886 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close, 2887 cmd->block); 2888 } 2889 return -ENOMEM; 2890 } 2891 2892 /* VF release can be called when either: 1. the VF was acquired but 2893 * not enabled, or 2. the VF was enabled or in the process of being 2894 * enabled 2895 */ 2896 static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf) 2897 { 2898 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 2899 struct bnx2x_vfop_cmd cmd = { 2900 .done = bnx2x_vfop_release, 2901 .block = false, 2902 }; 2903 2904 DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); 2905 2906 if (vfop->rc < 0) 2907 goto op_err; 2908 2909 DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid, 2910 vf->state == VF_FREE ? "Free" : 2911 vf->state == VF_ACQUIRED ? "Acquired" : 2912 vf->state == VF_ENABLED ? "Enabled" : 2913 vf->state == VF_RESET ?
"Reset" : 2914 "Unknown"); 2915 2916 switch (vf->state) { 2917 case VF_ENABLED: 2918 vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd); 2919 if (vfop->rc) 2920 goto op_err; 2921 return; 2922 2923 case VF_ACQUIRED: 2924 DP(BNX2X_MSG_IOV, "about to free resources\n"); 2925 bnx2x_vf_free_resc(bp, vf); 2926 DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); 2927 goto op_done; 2928 2929 case VF_FREE: 2930 case VF_RESET: 2931 /* do nothing */ 2932 goto op_done; 2933 default: 2934 bnx2x_vfop_default(vf->state); 2935 } 2936 op_err: 2937 BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc); 2938 op_done: 2939 bnx2x_vfop_end(bp, vf, vfop); 2940 } 2941 2942 int bnx2x_vfop_release_cmd(struct bnx2x *bp, 2943 struct bnx2x_virtf *vf, 2944 struct bnx2x_vfop_cmd *cmd) 2945 { 2946 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 2947 if (vfop) { 2948 bnx2x_vfop_opset(-1, /* use vf->state */ 2949 bnx2x_vfop_release, cmd->done); 2950 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release, 2951 cmd->block); 2952 } 2953 return -ENOMEM; 2954 } 2955 2956 /* VF release ~ VF close + VF release-resources 2957 * Release is the ultimate SW shutdown and is called whenever an 2958 * irrecoverable error is encountered. 2959 */ 2960 void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block) 2961 { 2962 struct bnx2x_vfop_cmd cmd = { 2963 .done = NULL, 2964 .block = block, 2965 }; 2966 int rc; 2967 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); 2968 2969 rc = bnx2x_vfop_release_cmd(bp, vf, &cmd); 2970 if (rc) 2971 WARN(rc, 2972 "VF[%d] Failed to allocate resources for release op- rc=%d\n", 2973 vf->abs_vfid, rc); 2974 } 2975 2976 static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp, 2977 struct bnx2x_virtf *vf, u32 *sbdf) 2978 { 2979 *sbdf = vf->devfn | (vf->bus << 8); 2980 } 2981 2982 static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf, 2983 struct bnx2x_vf_bar_info *bar_info) 2984 { 2985 int n; 2986 2987 bar_info->nr_bars = bp->vfdb->sriov.nres; 2988 for (n = 0; n < bar_info->nr_bars; n++) 2989 bar_info->bars[n] = vf->bars[n]; 2990 } 2991 2992 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 2993 enum channel_tlvs tlv) 2994 { 2995 /* lock the channel */ 2996 mutex_lock(&vf->op_mutex); 2997 2998 /* record the locking op */ 2999 vf->op_current = tlv; 3000 3001 /* log the lock */ 3002 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n", 3003 vf->abs_vfid, tlv); 3004 } 3005 3006 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 3007 enum channel_tlvs expected_tlv) 3008 { 3009 WARN(expected_tlv != vf->op_current, 3010 "lock mismatch: expected %d found %d", expected_tlv, 3011 vf->op_current); 3012 3013 /* lock the channel */ 3014 mutex_unlock(&vf->op_mutex); 3015 3016 /* log the unlock */ 3017 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n", 3018 vf->abs_vfid, vf->op_current); 3019 3020 /* record the locking op */ 3021 vf->op_current = CHANNEL_TLV_NONE; 3022 } 3023 3024 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) 3025 { 3026 3027 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); 3028 3029 DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", 3030 num_vfs_param, BNX2X_NR_VIRTFN(bp)); 3031 3032 /* HW channel is only operational when PF is up */ 3033 if (bp->state != BNX2X_STATE_OPEN) { 3034 BNX2X_ERR("VF num configurtion via sysfs not supported while PF is down"); 3035 return -EINVAL; 3036 } 3037 3038 /* we are always bound by the total_vfs in 
3038 /* we are always bound by the total_vfs in the configuration space */ 3039 if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) { 3040 BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n", 3041 num_vfs_param, BNX2X_NR_VIRTFN(bp)); 3042 num_vfs_param = BNX2X_NR_VIRTFN(bp); 3043 } 3044 3045 bp->requested_nr_virtfn = num_vfs_param; 3046 if (num_vfs_param == 0) { 3047 pci_disable_sriov(dev); 3048 return 0; 3049 } else { 3050 return bnx2x_enable_sriov(bp); 3051 } 3052 } 3053 3054 int bnx2x_enable_sriov(struct bnx2x *bp) 3055 { 3056 int rc = 0, req_vfs = bp->requested_nr_virtfn; 3057 3058 rc = pci_enable_sriov(bp->pdev, req_vfs); 3059 if (rc) { 3060 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); 3061 return rc; 3062 } 3063 DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs); 3064 return req_vfs; 3065 } 3066 3067 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) 3068 { 3069 int vfidx; 3070 struct pf_vf_bulletin_content *bulletin; 3071 3072 DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n"); 3073 for_each_vf(bp, vfidx) { 3074 bulletin = BP_VF_BULLETIN(bp, vfidx); 3075 if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN) 3076 bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0); 3077 } 3078 } 3079 3080 void bnx2x_disable_sriov(struct bnx2x *bp) 3081 { 3082 pci_disable_sriov(bp->pdev); 3083 } 3084 3085 static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx, 3086 struct bnx2x_virtf *vf) 3087 { 3088 if (!IS_SRIOV(bp)) { 3089 BNX2X_ERR("vf ndo called even though sriov is disabled\n"); 3090 return -EINVAL; 3091 } 3092 3093 if (vfidx >= BNX2X_NR_VIRTFN(bp)) { 3094 BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n", 3095 vfidx, BNX2X_NR_VIRTFN(bp)); 3096 return -EINVAL; 3097 } 3098 3099 if (!vf) { 3100 BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n", 3101 vfidx); 3102 return -EINVAL; 3103 } 3104 3105 return 0; 3106 } 3107 3108 int bnx2x_get_vf_config(struct net_device *dev, int vfidx, 3109 struct ifla_vf_info *ivi) 3110 { 3111 struct bnx2x *bp = netdev_priv(dev); 3112 struct bnx2x_virtf *vf = BP_VF(bp, vfidx); 3113 struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj); 3114 struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj); 3115 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); 3116 int rc; 3117 3118 /* sanity */ 3119 rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); 3120 if (rc) 3121 return rc; 3122 if (!mac_obj || !vlan_obj || !bulletin) { 3123 BNX2X_ERR("VF partially initialized\n"); 3124 return -EINVAL; 3125 } 3126 3127 ivi->vf = vfidx; 3128 ivi->qos = 0; 3129 ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */ 3130 ivi->spoofchk = 1; /* always enabled */ 3131 if (vf->state == VF_ENABLED) { 3132 /* mac and vlan are in vlan_mac objects */ 3133 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, 3134 0, ETH_ALEN); 3135 vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan, 3136 0, VLAN_HLEN); 3137 } else { 3138 /* mac */ 3139 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) 3140 /* mac configured by ndo so it's in the bulletin board */ 3141 memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); 3142 else 3143 /* function has not been loaded yet. Show mac as 0s */ 3144 memset(&ivi->mac, 0, ETH_ALEN); 3145 3146 /* vlan */ 3147 if (bulletin->valid_bitmap & (1 << VLAN_VALID)) 3148 /* vlan configured by ndo so it's in the bulletin board */ 3149 memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN); 3150 else 3151 /* function has not been loaded yet. Show vlans as 0s */ 3152 memset(&ivi->vlan, 0, VLAN_HLEN); 3153 } 3154 3155 return 0; 3156 }
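/* Usage sketch (illustrative; device name and values are examples): the
 * ndo entry points in this area back the standard iproute2 per-VF
 * controls, e.g.
 *
 *	ip link set dev eth0 vf 0 mac aa:bb:cc:dd:ee:01   -> bnx2x_set_vf_mac()
 *	ip link set dev eth0 vf 0 vlan 100                -> bnx2x_set_vf_vlan()
 *	ip link show dev eth0                             -> bnx2x_get_vf_config()
 */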
3157 3158 /* New mac for VF. Consider these cases: 3159 * 1. VF hasn't been acquired yet - save the mac in the local bulletin board and 3160 * supply it at acquire. 3161 * 2. VF has already been acquired but has not yet been initialized - store the mac in the local 3162 * bulletin board. The mac will be posted on the VF bulletin board after VF init. The VF 3163 * will configure this mac when it is ready. 3164 * 3. VF has already been initialized but has not yet set up a queue - post the new 3165 * mac on the VF's bulletin board right now. The VF will configure this mac when it 3166 * is ready. 3167 * 4. VF has already set up a queue - delete any macs already configured for this 3168 * queue and manually configure the new mac. 3169 * In any event, once this function has been called, refuse any attempts by the 3170 * VF to configure any mac for itself except for this mac. In case of a race 3171 * where the VF fails to see the new post on its bulletin board before sending a 3172 * mac configuration request, the PF will simply fail the request and the VF can try 3173 * again after consulting its bulletin board. 3174 */ 3175 int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) 3176 { 3177 struct bnx2x *bp = netdev_priv(dev); 3178 int rc, q_logical_state; 3179 struct bnx2x_virtf *vf = BP_VF(bp, vfidx); 3180 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); 3181 3182 /* sanity */ 3183 rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); 3184 if (rc) 3185 return rc; 3186 if (!is_valid_ether_addr(mac)) { 3187 BNX2X_ERR("mac address invalid\n"); 3188 return -EINVAL; 3189 } 3190 3191 /* update the PF's copy of the VF's bulletin. The PF will no longer accept mac 3192 * configuration requests from the VF unless they match this mac 3193 */ 3194 bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID; 3195 memcpy(bulletin->mac, mac, ETH_ALEN); 3196 3197 /* Post update on VF's bulletin board */ 3198 rc = bnx2x_post_vf_bulletin(bp, vfidx); 3199 if (rc) { 3200 BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx); 3201 return rc; 3202 } 3203 3204 /* is vf initialized and queue set up? */
3205 q_logical_state = 3206 bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj)); 3207 if (vf->state == VF_ENABLED && 3208 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { 3209 /* configure the mac in device on this vf's queue */ 3210 unsigned long ramrod_flags = 0; 3211 struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj); 3212 3213 /* must lock vfpf channel to protect against vf flows */ 3214 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 3215 3216 /* remove existing eth macs */ 3217 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); 3218 if (rc) { 3219 BNX2X_ERR("failed to delete eth macs\n"); /* don't leak the channel lock on the error path */ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 3220 return -EINVAL; 3221 } 3222 3223 /* remove existing uc list macs */ 3224 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true); 3225 if (rc) { 3226 BNX2X_ERR("failed to delete uc_list macs\n"); bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 3227 return -EINVAL; 3228 } 3229 3230 /* configure the new mac to device */ 3231 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 3232 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, 3233 BNX2X_ETH_MAC, &ramrod_flags); 3234 3235 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 3236 } 3237 3238 return 0; 3239 } 3240 3241 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) 3242 { 3243 struct bnx2x *bp = netdev_priv(dev); 3244 int rc, q_logical_state; 3245 struct bnx2x_virtf *vf = BP_VF(bp, vfidx); 3246 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); 3247 3248 /* sanity */ 3249 rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); 3250 if (rc) 3251 return rc; 3252 3253 if (vlan > 4095) { 3254 BNX2X_ERR("illegal vlan value %d\n", vlan); 3255 return -EINVAL; 3256 } 3257 3258 DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n", 3259 vfidx, vlan, 0); 3260 3261 /* update the PF's copy of the VF's bulletin. No point in posting the vlan 3262 * to the VF since it doesn't have anything to do with it. But it is useful 3263 * to store it here in case the VF is not up yet and we can only 3264 * configure the vlan later when it comes up. 3265 */ 3266 bulletin->valid_bitmap |= 1 << VLAN_VALID; 3267 bulletin->vlan = vlan; 3268 3269 /* is vf initialized and queue set up? */
3270 q_logical_state = 3271 bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj)); 3272 if (vf->state == VF_ENABLED && 3273 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { 3274 /* configure the vlan in device on this vf's queue */ 3275 unsigned long ramrod_flags = 0; 3276 unsigned long vlan_mac_flags = 0; 3277 struct bnx2x_vlan_mac_obj *vlan_obj = 3278 &bnx2x_vfq(vf, 0, vlan_obj); 3279 struct bnx2x_vlan_mac_ramrod_params ramrod_param; 3280 struct bnx2x_queue_state_params q_params = {NULL}; 3281 struct bnx2x_queue_update_params *update_params; 3282 3283 memset(&ramrod_param, 0, sizeof(ramrod_param)); 3284 3285 /* must lock vfpf channel to protect against vf flows */ 3286 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3287 3288 /* remove existing vlans */ 3289 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 3290 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, 3291 &ramrod_flags); 3292 if (rc) { 3293 BNX2X_ERR("failed to delete vlans\n"); /* don't leak the channel lock on the error path */ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3294 return -EINVAL; 3295 } 3296 3297 /* send queue update ramrod to configure default vlan and silent 3298 * vlan removal 3299 */ 3300 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 3301 q_params.cmd = BNX2X_Q_CMD_UPDATE; 3302 q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj); 3303 update_params = &q_params.params.update; 3304 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, 3305 &update_params->update_flags); 3306 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, 3307 &update_params->update_flags); 3308 3309 if (vlan == 0) { 3310 /* if vlan is 0 then we want to leave the VF traffic 3311 * untagged, and leave the incoming traffic untouched 3312 * (i.e. do not remove any vlan tags). 3313 */ 3314 __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, 3315 &update_params->update_flags); 3316 __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 3317 &update_params->update_flags); 3318 } else { 3319 /* configure the new vlan to device */ 3320 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 3321 ramrod_param.vlan_mac_obj = vlan_obj; 3322 ramrod_param.ramrod_flags = ramrod_flags; 3323 ramrod_param.user_req.u.vlan.vlan = vlan; 3324 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; 3325 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); 3326 if (rc) { 3327 BNX2X_ERR("failed to configure vlan\n"); bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3328 return -EINVAL; 3329 } 3330 3331 /* configure default vlan to vf queue and set silent 3332 * vlan removal (the vf remains unaware of this vlan). 3333 */ 3334 update_params = &q_params.params.update; 3335 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, 3336 &update_params->update_flags); 3337 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 3338 &update_params->update_flags); 3339 update_params->def_vlan = vlan; 3340 } 3341 3342 /* Update the Queue state */ 3343 rc = bnx2x_queue_state_change(bp, &q_params); 3344 if (rc) { 3345 BNX2X_ERR("Failed to configure default VLAN\n"); bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3346 return rc; 3347 } 3348 3349 /* clear the flag indicating that this VF needs its vlan 3350 * (will only be set if the HV configured the vlan before the VF was up 3351 * and we were called because the VF came up later) 3352 */ 3353 vf->cfg_flags &= ~VF_CFG_VLAN; 3354 3355 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3356 } 3357 return 0; 3358 } 3359
3360 /* crc is the first field in the bulletin board. Compute the crc over the 3361 * entire bulletin board excluding the crc field itself 3362 */ 3363 u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, 3364 struct pf_vf_bulletin_content *bulletin) 3365 { 3366 return crc32(BULLETIN_CRC_SEED, 3367 ((u8 *)bulletin) + sizeof(bulletin->crc), 3368 bulletin->length - sizeof(bulletin->crc)); 3369 } 3370 3371 /* Check for new posts on the bulletin board */ 3372 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) 3373 { 3374 struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content; 3375 int attempts; 3376 3377 /* bulletin board hasn't changed since last sample */ 3378 if (bp->old_bulletin.version == bulletin.version) 3379 return PFVF_BULLETIN_UNCHANGED; 3380 3381 /* validate crc of new bulletin board */ 3382 if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) { 3383 /* sampling the structure in mid post may result in corrupted data; 3384 * validate the crc to ensure coherency. 3385 */ 3386 for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) { 3387 bulletin = bp->pf2vf_bulletin->content; 3388 if (bulletin.crc == bnx2x_crc_vf_bulletin(bp, 3389 &bulletin)) 3390 break; 3391 BNX2X_ERR("bad crc on bulletin board. contained %x computed %x\n", 3392 bulletin.crc, 3393 bnx2x_crc_vf_bulletin(bp, &bulletin)); 3394 } 3395 if (attempts >= BULLETIN_ATTEMPTS) { 3396 BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n", 3397 attempts); 3398 return PFVF_BULLETIN_CRC_ERR; 3399 } 3400 } 3401 3402 /* the mac address in bulletin board is valid and is new */ 3403 if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID && 3404 memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) { 3405 /* update new mac to net device */ 3406 memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN); 3407 } 3408 3409 /* the vlan in bulletin board is valid and is new */ 3410 if (bulletin.valid_bitmap & 1 << VLAN_VALID) 3411 memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN); 3412 3413 /* copy new bulletin board to bp */ 3414 bp->old_bulletin = bulletin; 3415 3416 return PFVF_BULLETIN_UPDATED; 3417 } 3418 3419 void bnx2x_vf_map_doorbells(struct bnx2x *bp) 3420 { 3421 /* vf doorbells are embedded within the regview */ 3422 bp->doorbells = bp->regview + PXP_VF_ADDR_DB_START; 3423 } 3424 3425 int bnx2x_vf_pci_alloc(struct bnx2x *bp) 3426 { 3427 /* allocate vf2pf mailbox for vf to pf channel */ 3428 BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping, 3429 sizeof(struct bnx2x_vf_mbx_msg)); 3430 3431 /* allocate pf 2 vf bulletin board */ 3432 BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping, 3433 sizeof(union pf_vf_bulletin)); 3434 3435 return 0; 3436 3437 alloc_mem_err: 3438 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, 3439 sizeof(struct bnx2x_vf_mbx_msg)); 3440 BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping, /* free the bulletin board, not the mailbox twice */ 3441 sizeof(union pf_vf_bulletin)); 3442 return -ENOMEM; 3443 } 3444 3445 int bnx2x_open_epilog(struct bnx2x *bp) 3446 { 3447 /* Enable sriov via delayed work. This must be done via delayed work 3448 * because it causes the probe of the vf devices to be run, which invokes 3449 * register_netdevice, which must have the rtnl lock taken. As we are holding 3450 * the lock right now, that could only work if the probe would not take 3451 * the lock. However, as the probe of the vf may be called from other 3452 * contexts as well (such as when passthrough to a vm fails) it can't assume 3453 * the lock is being held for it. Using delayed work here allows the
3454 * probe code to simply take the lock (i.e. wait for it to be released 3455 * if it is being held). We only want to do this if the number of VFs 3456 * was set before the PF driver was loaded. 3457 */ 3458 if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) { 3459 smp_mb__before_clear_bit(); 3460 set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state); 3461 smp_mb__after_clear_bit(); 3462 schedule_delayed_work(&bp->sp_rtnl_task, 0); 3463 } 3464 3465 return 0; 3466 } 3467
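/* Editor's note -- an illustrative summary of the PF->VF bulletin protocol
 * implemented above, not additional driver code: the PF posts a new
 * pf_vf_bulletin_content with an incremented 'version' and a crc32 computed
 * by bnx2x_crc_vf_bulletin() over everything past the leading 'crc' field.
 * A reader samples the board lock-free and therefore must re-read until the
 * crc is coherent, roughly:
 *
 *	struct pf_vf_bulletin_content sample = bp->pf2vf_bulletin->content;
 *
 *	while (sample.crc != bnx2x_crc_vf_bulletin(bp, &sample))
 *		sample = bp->pf2vf_bulletin->content;
 *
 * (bnx2x_sample_bulletin() above bounds this loop by BULLETIN_ATTEMPTS.)
 * Only once a sample is coherent is its 'version' compared against the
 * locally cached bp->old_bulletin to decide whether new mac/vlan values
 * need to be applied.
 */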