/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *             Ariel Elior <ariele@broadcom.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>

/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
                                  u16 pf_id)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
                                 u8 enable)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
}

int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
        int idx;

        for_each_vf(bp, idx)
                if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
                        break;
        return idx;
}

static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
        u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
        return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                u8 igu_sb_id, u8 segment, u16 index, u8 op,
                                u8 update)
{
        /* acking a VF sb through the PF - use the GRC */
        u32 ctl;
        u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
        u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
        u32 func_encode = vf->abs_vfid;
        u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
        struct igu_regular cmd_data = {0};

        cmd_data.sb_id_and_flags =
                ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
                 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
                 (update << IGU_REGULAR_BUPDATE_SHIFT) |
                 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

        ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
              func_encode << IGU_CTRL_REG_FID_SHIFT |
              IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

        DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
           cmd_data.sb_id_and_flags, igu_addr_data);
        REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
        mmiowb();
        barrier();

        DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
           ctl, igu_addr_ctl);
        REG_WR(bp, igu_addr_ctl, ctl);
        mmiowb();
        barrier();
}
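
/* Note: bnx2x_vf_igu_ack_sb() above posts the same producer-update
 * command a VF would normally write through its own IGU BAR, but issues
 * it from the PF via the GRC command registers, encoding the VF in the
 * control word.  As an illustration (this just restates the code):
 *
 *      ctl = (IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id) << ADDRESS_SHIFT |
 *            vf->abs_vfid << FID_SHIFT |
 *            IGU_CTRL_CMD_TYPE_WR << TYPE_SHIFT;
 *
 * so no pretend-function access is needed for this path.
 */
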
/* VFOP - VF slow-path operation support */

#define BNX2X_VFOP_FILTER_ADD_CNT_MAX   0x10000

/* VFOP operations states */
enum bnx2x_vfop_qctor_state {
        BNX2X_VFOP_QCTOR_INIT,
        BNX2X_VFOP_QCTOR_SETUP,
        BNX2X_VFOP_QCTOR_INT_EN
};

enum bnx2x_vfop_qdtor_state {
        BNX2X_VFOP_QDTOR_HALT,
        BNX2X_VFOP_QDTOR_TERMINATE,
        BNX2X_VFOP_QDTOR_CFCDEL,
        BNX2X_VFOP_QDTOR_DONE
};

enum bnx2x_vfop_vlan_mac_state {
        BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
        BNX2X_VFOP_VLAN_MAC_CLEAR,
        BNX2X_VFOP_VLAN_MAC_CHK_DONE,
        BNX2X_VFOP_MAC_CONFIG_LIST,
        BNX2X_VFOP_VLAN_CONFIG_LIST,
        BNX2X_VFOP_VLAN_CONFIG_LIST_0
};

enum bnx2x_vfop_qsetup_state {
        BNX2X_VFOP_QSETUP_CTOR,
        BNX2X_VFOP_QSETUP_VLAN0,
        BNX2X_VFOP_QSETUP_DONE
};

enum bnx2x_vfop_mcast_state {
        BNX2X_VFOP_MCAST_DEL,
        BNX2X_VFOP_MCAST_ADD,
        BNX2X_VFOP_MCAST_CHK_DONE
};

enum bnx2x_vfop_qflr_state {
        BNX2X_VFOP_QFLR_CLR_VLAN,
        BNX2X_VFOP_QFLR_CLR_MAC,
        BNX2X_VFOP_QFLR_TERMINATE,
        BNX2X_VFOP_QFLR_DONE
};

enum bnx2x_vfop_flr_state {
        BNX2X_VFOP_FLR_QUEUES,
        BNX2X_VFOP_FLR_HW
};

enum bnx2x_vfop_close_state {
        BNX2X_VFOP_CLOSE_QUEUES,
        BNX2X_VFOP_CLOSE_HW
};

enum bnx2x_vfop_rxmode_state {
        BNX2X_VFOP_RXMODE_CONFIG,
        BNX2X_VFOP_RXMODE_DONE
};

enum bnx2x_vfop_qteardown_state {
        BNX2X_VFOP_QTEARDOWN_RXMODE,
        BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
        BNX2X_VFOP_QTEARDOWN_CLR_MAC,
        BNX2X_VFOP_QTEARDOWN_QDTOR,
        BNX2X_VFOP_QTEARDOWN_DONE
};

#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
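
/* Every vfop handler below is a small cooperative state machine: it is
 * re-entered on ramrod completion, switches on vfop->state, and either
 * schedules the next transition or exits through its op_done/op_err
 * labels.  A minimal sketch of the convention (inferred from the
 * handlers in this file, not a standalone API):
 *
 *      case SOME_STATE:
 *              vfop->state = NEXT_STATE;
 *              vfop->rc = bnx2x_queue_state_change(bp, q_params);
 *              bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
 *              // falls through to NEXT_STATE if the command completed
 *              // synchronously, returns through op_pending otherwise
 */
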
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
                              struct bnx2x_queue_init_params *init_params,
                              struct bnx2x_queue_setup_params *setup_params,
                              u16 q_idx, u16 sb_idx)
{
        DP(BNX2X_MSG_IOV,
           "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d\n",
           vf->abs_vfid,
           q_idx,
           sb_idx,
           init_params->tx.sb_cq_index,
           init_params->tx.hc_rate,
           setup_params->flags,
           setup_params->txq_params.traffic_type);
}

void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
                              struct bnx2x_queue_init_params *init_params,
                              struct bnx2x_queue_setup_params *setup_params,
                              u16 q_idx, u16 sb_idx)
{
        struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

        DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
           "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
           vf->abs_vfid,
           q_idx,
           sb_idx,
           init_params->rx.sb_cq_index,
           init_params->rx.hc_rate,
           setup_params->gen_params.mtu,
           rxq_params->buf_sz,
           rxq_params->sge_buf_sz,
           rxq_params->max_sges_pkt,
           rxq_params->tpa_agg_sz,
           setup_params->flags,
           rxq_params->drop_flags,
           rxq_params->cache_line_log);
}

void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
                           struct bnx2x_virtf *vf,
                           struct bnx2x_vf_queue *q,
                           struct bnx2x_vfop_qctor_params *p,
                           unsigned long q_type)
{
        struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
        struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

        /* INIT */

        /* Enable host coalescing in the transition to INIT state */
        if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
                __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

        if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
                __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

        /* FW SB ID */
        init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
        init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

        /* context */
        init_p->cxts[0] = q->cxt;

        /* SETUP */

        /* Setup-op general parameters */
        setup_p->gen_params.spcl_id = vf->sp_cl_id;
        setup_p->gen_params.stat_id = vfq_stat_id(vf, q);

        /* Setup-op pause params:
         * Nothing to do, the pause thresholds are set by default to 0 which
         * effectively turns off the feature for this queue. We don't want
         * one queue (VF) to interfere with another queue (another VF)
         */
        if (vf->cfg_flags & VF_CFG_FW_FC)
                BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
                          vf->abs_vfid);
        /* Setup-op flags:
         * collect statistics, zero statistics, local-switching, security,
         * OV for Flex10, RSS and MCAST for leading
         */
        if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
                __set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

        /* for VFs, enable tx switching, bd coherency, and mac address
         * anti-spoofing
         */
        __set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
        __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
        __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

        if (vfq_is_leading(q)) {
                __set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
                __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
        }

        /* Setup-op rx parameters */
        if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
                struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

                rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
                rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
                rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

                if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
                        rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
        }

        /* Setup-op tx parameters */
        if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
                setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
                setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
        }
}
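
/* The init/setup parameters prepared above are consumed verbatim by the
 * queue constructor below: INIT posts BNX2X_Q_CMD_INIT, SETUP copies
 * prep_qsetup into the queue-state params and posts BNX2X_Q_CMD_SETUP,
 * and INT_EN finally unmasks the queue's status block in the IGU.
 */
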
/* VFOP queue construction */
static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
        struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
        struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
        enum bnx2x_vfop_qctor_state state = vfop->state;

        bnx2x_vfop_reset_wq(vf);

        if (vfop->rc < 0)
                goto op_err;

        DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

        switch (state) {
        case BNX2X_VFOP_QCTOR_INIT:

                /* has this queue already been opened? */
                if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
                    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
                        DP(BNX2X_MSG_IOV,
                           "Entered qctor but queue was already up. Aborting gracefully\n");
                        goto op_done;
                }

                /* next state */
                vfop->state = BNX2X_VFOP_QCTOR_SETUP;

                q_params->cmd = BNX2X_Q_CMD_INIT;
                vfop->rc = bnx2x_queue_state_change(bp, q_params);

                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

        case BNX2X_VFOP_QCTOR_SETUP:
                /* next state */
                vfop->state = BNX2X_VFOP_QCTOR_INT_EN;

                /* copy pre-prepared setup params to the queue-state params */
                vfop->op_p->qctor.qstate.params.setup =
                        vfop->op_p->qctor.prep_qsetup;

                q_params->cmd = BNX2X_Q_CMD_SETUP;
                vfop->rc = bnx2x_queue_state_change(bp, q_params);

                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

        case BNX2X_VFOP_QCTOR_INT_EN:

                /* enable interrupts */
                bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
                                    USTORM_ID, 0, IGU_INT_ENABLE, 0);
                goto op_done;
        default:
                bnx2x_vfop_default(state);
        }
op_err:
        BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
                  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
op_done:
        bnx2x_vfop_end(bp, vf, vfop);
op_pending:
        return;
}
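
/* The *_cmd() wrappers below allocate a vfop, bind it to a state
 * machine and kick the first transition.  Callers chain child ops by
 * passing a 'done' callback; an illustrative (not prescriptive) use:
 *
 *      struct bnx2x_vfop_cmd cmd = {
 *              .done = bnx2x_vfop_qsetup,      // parent op to re-enter
 *              .block = false,                 // don't busy-wait
 *      };
 *      rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
 */
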
static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
                                struct bnx2x_virtf *vf,
                                struct bnx2x_vfop_cmd *cmd,
                                int qid)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

        if (vfop) {
                vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);

                vfop->args.qctor.qid = qid;
                vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);

                bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
                                 bnx2x_vfop_qctor, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
                                             cmd->block);
        }
        return -ENOMEM;
}

/* VFOP queue destruction */
static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
        struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
        struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
        enum bnx2x_vfop_qdtor_state state = vfop->state;

        bnx2x_vfop_reset_wq(vf);

        if (vfop->rc < 0)
                goto op_err;

        DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

        switch (state) {
        case BNX2X_VFOP_QDTOR_HALT:

                /* has this queue already been stopped? */
                if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
                    BNX2X_Q_LOGICAL_STATE_STOPPED) {
                        DP(BNX2X_MSG_IOV,
                           "Entered qdtor but queue was already stopped. Aborting gracefully\n");
                        goto op_done;
                }

                /* next state */
                vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;

                q_params->cmd = BNX2X_Q_CMD_HALT;
                vfop->rc = bnx2x_queue_state_change(bp, q_params);

                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

        case BNX2X_VFOP_QDTOR_TERMINATE:
                /* next state */
                vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;

                q_params->cmd = BNX2X_Q_CMD_TERMINATE;
                vfop->rc = bnx2x_queue_state_change(bp, q_params);

                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

        case BNX2X_VFOP_QDTOR_CFCDEL:
                /* next state */
                vfop->state = BNX2X_VFOP_QDTOR_DONE;

                q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
                vfop->rc = bnx2x_queue_state_change(bp, q_params);

                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
        BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
                  vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
op_done:
        case BNX2X_VFOP_QDTOR_DONE:
                /* invalidate the context */
                qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
                qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
                bnx2x_vfop_end(bp, vf, vfop);
                return;
        default:
                bnx2x_vfop_default(state);
        }
op_pending:
        return;
}

static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
                                struct bnx2x_virtf *vf,
                                struct bnx2x_vfop_cmd *cmd,
                                int qid)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

        if (vfop) {
                struct bnx2x_queue_state_params *qstate =
                        &vf->op_params.qctor.qstate;

                memset(qstate, 0, sizeof(*qstate));
                qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);

                vfop->args.qdtor.qid = qid;
                vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);

                bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
                                 bnx2x_vfop_qdtor, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
                                             cmd->block);
        }
        DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop\n", vf->abs_vfid);
        return -ENOMEM;
}

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
        struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
        if (vf) {
                if (!vf_sb_count(vf))
                        vf->igu_base_id = igu_sb_id;
                ++vf_sb_count(vf);
        }
}

/* VFOP MAC/VLAN helpers */
static inline void bnx2x_vfop_credit(struct bnx2x *bp,
                                     struct bnx2x_vfop *vfop,
                                     struct bnx2x_vlan_mac_obj *obj)
{
        struct bnx2x_vfop_args_filters *args = &vfop->args.filters;

        /* update credit only if there is no error
         * and a valid credit counter
         */
        if (!vfop->rc && args->credit) {
                struct list_head *pos;
                int read_lock;
                int cnt = 0;

                read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
                if (read_lock)
                        DP(BNX2X_MSG_SP, "Failed to take vlan mac read lock; continuing anyway\n");

                list_for_each(pos, &obj->head)
                        cnt++;

                if (!read_lock)
                        bnx2x_vlan_mac_h_read_unlock(bp, obj);

                atomic_set(args->credit, cnt);
        }
}

static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
                                   struct bnx2x_vfop_filter *pos,
                                   struct bnx2x_vlan_mac_data *user_req)
{
        user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
                BNX2X_VLAN_MAC_DEL;

        switch (pos->type) {
        case BNX2X_VFOP_FILTER_MAC:
                memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
                break;
        case BNX2X_VFOP_FILTER_VLAN:
                user_req->u.vlan.vlan = pos->vid;
                break;
        default:
                BNX2X_ERR("Invalid filter type, skipping\n");
                return 1;
        }
        return 0;
}

static int
bnx2x_vfop_config_vlan0(struct bnx2x *bp,
                        struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
                        bool add)
{
        int rc;

        vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
                BNX2X_VLAN_MAC_DEL;
        vlan_mac->user_req.u.vlan.vlan = 0;

        rc = bnx2x_config_vlan_mac(bp, vlan_mac);
        if (rc == -EEXIST)
                rc = 0;
        return rc;
}

static int bnx2x_vfop_config_list(struct bnx2x *bp,
                                  struct bnx2x_vfop_filters *filters,
                                  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
{
        struct bnx2x_vfop_filter *pos, *tmp;
        struct list_head rollback_list, *filters_list = &filters->head;
        struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
        int rc = 0, cnt = 0;

        INIT_LIST_HEAD(&rollback_list);

        list_for_each_entry_safe(pos, tmp, filters_list, link) {
                if (bnx2x_vfop_set_user_req(bp, pos, user_req))
                        continue;

                rc = bnx2x_config_vlan_mac(bp, vlan_mac);
                if (rc >= 0) {
                        cnt += pos->add ? 1 : -1;
                        list_move(&pos->link, &rollback_list);
                        rc = 0;
                } else if (rc == -EEXIST) {
                        rc = 0;
                } else {
                        BNX2X_ERR("Failed to add a new vlan_mac command\n");
                        break;
                }
        }

        /* rollback if error or too many rules added */
        if (rc || cnt > filters->add_cnt) {
                BNX2X_ERR("error or too many rules added. Performing rollback\n");
                list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
                        pos->add = !pos->add;   /* reverse op */
                        bnx2x_vfop_set_user_req(bp, pos, user_req);
                        bnx2x_config_vlan_mac(bp, vlan_mac);
                        list_del(&pos->link);
                }
                cnt = 0;
                if (!rc)
                        rc = -EINVAL;
        }
        filters->add_cnt = cnt;
        return rc;
}

/* VFOP set VLAN/MAC */
static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
        struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
        struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
        struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;

        enum bnx2x_vfop_vlan_mac_state state = vfop->state;

        if (vfop->rc < 0)
                goto op_err;

        DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

        bnx2x_vfop_reset_wq(vf);

        switch (state) {
        case BNX2X_VFOP_VLAN_MAC_CLEAR:
                /* next state */
                vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

                /* do delete */
                vfop->rc = obj->delete_all(bp, obj,
                                           &vlan_mac->user_req.vlan_mac_flags,
                                           &vlan_mac->ramrod_flags);

                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

        case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
                /* next state */
                vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

                /* do config */
                vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
                if (vfop->rc == -EEXIST)
                        vfop->rc = 0;

                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

        case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
                vfop->rc = !!obj->raw.check_pending(&obj->raw);
                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

        case BNX2X_VFOP_MAC_CONFIG_LIST:
                /* next state */
                vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

                /* do list config */
                vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
                if (vfop->rc)
                        goto op_err;

                set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
                vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

        case BNX2X_VFOP_VLAN_CONFIG_LIST:
                /* next state */
                vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0;

                /* remove vlan0 - could be no-op */
                vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
                if (vfop->rc)
                        goto op_err;

                /* Do vlan list config. if this operation fails we try to
                 * restore vlan0 to keep the queue in working order
                 */
                vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
                if (!vfop->rc) {
                        set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
                        vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
                }
                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */

        case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
                /* next state */
                vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

                if (list_empty(&obj->head))
                        /* add vlan0 */
                        vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

        default:
                bnx2x_vfop_default(state);
        }
op_err:
        BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
op_done:
        kfree(filters);
        bnx2x_vfop_credit(bp, vfop, obj);
        bnx2x_vfop_end(bp, vf, vfop);
op_pending:
        return;
}
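
/* The helper flags below map onto the ramrod/vlan_mac flags consumed by
 * bnx2x_config_vlan_mac():
 *   drv_only     -> RAMROD_DRV_CLR_ONLY (driver-side only, presumably no
 *                   FW ramrod - see the FLR usage below)
 *   single_cmd   -> RAMROD_EXEC
 *   dont_consume -> BNX2X_DONT_CONSUME_CAM_CREDIT
 *   add          -> BNX2X_VLAN_MAC_ADD vs. BNX2X_VLAN_MAC_DEL
 */
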
struct bnx2x_vfop_vlan_mac_flags {
        bool drv_only;
        bool dont_consume;
        bool single_cmd;
        bool add;
};

static void
bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
                                struct bnx2x_vfop_vlan_mac_flags *flags)
{
        struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;

        memset(ramrod, 0, sizeof(*ramrod));

        /* ramrod flags */
        if (flags->drv_only)
                set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
        if (flags->single_cmd)
                set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);

        /* mac_vlan flags */
        if (flags->dont_consume)
                set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);

        /* cmd */
        ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
}

static inline void
bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
                           struct bnx2x_vfop_vlan_mac_flags *flags)
{
        bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
        set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
}

static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
                                     struct bnx2x_virtf *vf,
                                     struct bnx2x_vfop_cmd *cmd,
                                     int qid, bool drv_only)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

        if (vfop) {
                struct bnx2x_vfop_args_filters filters = {
                        .multi_filter = NULL,   /* single */
                        .credit = NULL,         /* consume credit */
                };
                struct bnx2x_vfop_vlan_mac_flags flags = {
                        .drv_only = drv_only,
                        .dont_consume = (filters.credit != NULL),
                        .single_cmd = true,
                        .add = false /* don't care */,
                };
                struct bnx2x_vlan_mac_ramrod_params *ramrod =
                        &vf->op_params.vlan_mac;

                /* set ramrod params */
                bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

                /* set object */
                ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

                /* set extra args */
                vfop->args.filters = filters;

                bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
                                 bnx2x_vfop_vlan_mac, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
                                             cmd->block);
        }
        return -ENOMEM;
}

int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
                            struct bnx2x_virtf *vf,
                            struct bnx2x_vfop_cmd *cmd,
                            struct bnx2x_vfop_filters *macs,
                            int qid, bool drv_only)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

        if (vfop) {
                struct bnx2x_vfop_args_filters filters = {
                        .multi_filter = macs,
                        .credit = NULL,         /* consume credit */
                };
                struct bnx2x_vfop_vlan_mac_flags flags = {
                        .drv_only = drv_only,
                        .dont_consume = (filters.credit != NULL),
                        .single_cmd = false,
                        .add = false, /* don't care since only the items in
                                       * the filters list affect the sp
                                       * operation, not the list itself
                                       */
                };
                struct bnx2x_vlan_mac_ramrod_params *ramrod =
                        &vf->op_params.vlan_mac;

                /* set ramrod params */
                bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

                /* set object */
                ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

                /* set extra args */
                filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
                vfop->args.filters = filters;

                bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
                                 bnx2x_vfop_vlan_mac, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
                                             cmd->block);
        }
        return -ENOMEM;
}

int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
                            struct bnx2x_virtf *vf,
                            struct bnx2x_vfop_cmd *cmd,
                            int qid, u16 vid, bool add)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

        if (vfop) {
                struct bnx2x_vfop_args_filters filters = {
                        .multi_filter = NULL,   /* single command */
                        .credit = &bnx2x_vfq(vf, qid, vlan_count),
                };
                struct bnx2x_vfop_vlan_mac_flags flags = {
                        .drv_only = false,
                        .dont_consume = (filters.credit != NULL),
                        .single_cmd = true,
                        .add = add,
                };
                struct bnx2x_vlan_mac_ramrod_params *ramrod =
                        &vf->op_params.vlan_mac;

                /* set ramrod params */
                bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
                ramrod->user_req.u.vlan.vlan = vid;

                /* set object */
                ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

                /* set extra args */
                vfop->args.filters = filters;

                bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
                                 bnx2x_vfop_vlan_mac, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
                                             cmd->block);
        }
        return -ENOMEM;
}

static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
                                      struct bnx2x_virtf *vf,
                                      struct bnx2x_vfop_cmd *cmd,
                                      int qid, bool drv_only)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

        if (vfop) {
                struct bnx2x_vfop_args_filters filters = {
                        .multi_filter = NULL,   /* single command */
                        .credit = &bnx2x_vfq(vf, qid, vlan_count),
                };
                struct bnx2x_vfop_vlan_mac_flags flags = {
                        .drv_only = drv_only,
                        .dont_consume = (filters.credit != NULL),
                        .single_cmd = true,
                        .add = false, /* don't care */
                };
                struct bnx2x_vlan_mac_ramrod_params *ramrod =
                        &vf->op_params.vlan_mac;

                /* set ramrod params */
                bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

                /* set object */
                ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

                /* set extra args */
                vfop->args.filters = filters;

                bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
                                 bnx2x_vfop_vlan_mac, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
                                             cmd->block);
        }
        return -ENOMEM;
}

int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
                             struct bnx2x_virtf *vf,
                             struct bnx2x_vfop_cmd *cmd,
                             struct bnx2x_vfop_filters *vlans,
                             int qid, bool drv_only)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

        if (vfop) {
                struct bnx2x_vfop_args_filters filters = {
                        .multi_filter = vlans,
                        .credit = &bnx2x_vfq(vf, qid, vlan_count),
                };
                struct bnx2x_vfop_vlan_mac_flags flags = {
                        .drv_only = drv_only,
                        .dont_consume = (filters.credit != NULL),
                        .single_cmd = false,
                        .add = false, /* don't care */
                };
                struct bnx2x_vlan_mac_ramrod_params *ramrod =
                        &vf->op_params.vlan_mac;

                /* set ramrod params */
                bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

                /* set object */
                ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

                /* set extra args */
                filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
                        atomic_read(filters.credit);

                vfop->args.filters = filters;

                bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
                                 bnx2x_vfop_vlan_mac, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
                                             cmd->block);
        }
        return -ENOMEM;
}
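
/* A queue setup is the composition of two chained vfops: the queue
 * constructor and, for the leading queue (qid 0) only, a "set vlan 0"
 * op so untagged traffic is accepted.  Each stage re-enters
 * bnx2x_vfop_qsetup() through cmd.done when its child op completes.
 */
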
BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc); 968 op_done: 969 case BNX2X_VFOP_QSETUP_DONE: 970 vf->cfg_flags |= VF_CFG_VLAN; 971 smp_mb__before_clear_bit(); 972 set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN, 973 &bp->sp_rtnl_state); 974 smp_mb__after_clear_bit(); 975 schedule_delayed_work(&bp->sp_rtnl_task, 0); 976 bnx2x_vfop_end(bp, vf, vfop); 977 return; 978 default: 979 bnx2x_vfop_default(state); 980 } 981 } 982 983 int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp, 984 struct bnx2x_virtf *vf, 985 struct bnx2x_vfop_cmd *cmd, 986 int qid) 987 { 988 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 989 990 if (vfop) { 991 vfop->args.qctor.qid = qid; 992 993 bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR, 994 bnx2x_vfop_qsetup, cmd->done); 995 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup, 996 cmd->block); 997 } 998 return -ENOMEM; 999 } 1000 1001 /* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */ 1002 static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf) 1003 { 1004 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 1005 int qid = vfop->args.qx.qid; 1006 enum bnx2x_vfop_qflr_state state = vfop->state; 1007 struct bnx2x_queue_state_params *qstate; 1008 struct bnx2x_vfop_cmd cmd; 1009 1010 bnx2x_vfop_reset_wq(vf); 1011 1012 if (vfop->rc < 0) 1013 goto op_err; 1014 1015 DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state); 1016 1017 cmd.done = bnx2x_vfop_qflr; 1018 cmd.block = false; 1019 1020 switch (state) { 1021 case BNX2X_VFOP_QFLR_CLR_VLAN: 1022 /* vlan-clear-all: driver-only, don't consume credit */ 1023 vfop->state = BNX2X_VFOP_QFLR_CLR_MAC; 1024 vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true); 1025 if (vfop->rc) 1026 goto op_err; 1027 return; 1028 1029 case BNX2X_VFOP_QFLR_CLR_MAC: 1030 /* mac-clear-all: driver only consume credit */ 1031 vfop->state = BNX2X_VFOP_QFLR_TERMINATE; 1032 vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true); 1033 DP(BNX2X_MSG_IOV, 1034 "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d", 1035 vf->abs_vfid, vfop->rc); 1036 if (vfop->rc) 1037 goto op_err; 1038 return; 1039 1040 case BNX2X_VFOP_QFLR_TERMINATE: 1041 qstate = &vfop->op_p->qctor.qstate; 1042 memset(qstate , 0, sizeof(*qstate)); 1043 qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj); 1044 vfop->state = BNX2X_VFOP_QFLR_DONE; 1045 1046 DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n", 1047 vf->abs_vfid, qstate->q_obj->state); 1048 1049 if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) { 1050 qstate->q_obj->state = BNX2X_Q_STATE_STOPPED; 1051 qstate->cmd = BNX2X_Q_CMD_TERMINATE; 1052 vfop->rc = bnx2x_queue_state_change(bp, qstate); 1053 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND); 1054 } else { 1055 goto op_done; 1056 } 1057 1058 op_err: 1059 BNX2X_ERR("QFLR[%d:%d] error: rc %d\n", 1060 vf->abs_vfid, qid, vfop->rc); 1061 op_done: 1062 case BNX2X_VFOP_QFLR_DONE: 1063 bnx2x_vfop_end(bp, vf, vfop); 1064 return; 1065 default: 1066 bnx2x_vfop_default(state); 1067 } 1068 op_pending: 1069 return; 1070 } 1071 1072 static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp, 1073 struct bnx2x_virtf *vf, 1074 struct bnx2x_vfop_cmd *cmd, 1075 int qid) 1076 { 1077 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 1078 1079 if (vfop) { 1080 vfop->args.qx.qid = qid; 1081 bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN, 1082 bnx2x_vfop_qflr, cmd->done); 1083 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr, 1084 cmd->block); 1085 } 1086 return -ENOMEM; 1087 } 1088 1089 /* VFOP multi-casts */ 1090 static void bnx2x_vfop_mcast(struct 
/* VFOP multi-casts */
static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
        struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
        struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
        struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
        enum bnx2x_vfop_mcast_state state = vfop->state;
        int i;

        bnx2x_vfop_reset_wq(vf);

        if (vfop->rc < 0)
                goto op_err;

        DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

        switch (state) {
        case BNX2X_VFOP_MCAST_DEL:
                /* clear existing mcasts */
                vfop->state = BNX2X_VFOP_MCAST_ADD;
                vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

        case BNX2X_VFOP_MCAST_ADD:
                if (raw->check_pending(raw))
                        goto op_pending;

                if (args->mc_num) {
                        /* update mcast list on the ramrod params */
                        INIT_LIST_HEAD(&mcast->mcast_list);
                        for (i = 0; i < args->mc_num; i++)
                                list_add_tail(&(args->mc[i].link),
                                              &mcast->mcast_list);
                        /* add new mcasts */
                        vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
                        vfop->rc = bnx2x_config_mcast(bp, mcast,
                                                      BNX2X_MCAST_CMD_ADD);
                }
                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

        case BNX2X_VFOP_MCAST_CHK_DONE:
                vfop->rc = raw->check_pending(raw) ? 1 : 0;
                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
        default:
                bnx2x_vfop_default(state);
        }
op_err:
        BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
op_done:
        kfree(args->mc);
        bnx2x_vfop_end(bp, vf, vfop);
op_pending:
        return;
}

int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
                         struct bnx2x_virtf *vf,
                         struct bnx2x_vfop_cmd *cmd,
                         bnx2x_mac_addr_t *mcasts,
                         int mcast_num, bool drv_only)
{
        struct bnx2x_vfop *vfop = NULL;
        size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
        struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
                                           NULL;

        if (!mc_sz || mc) {
                vfop = bnx2x_vfop_add(bp, vf);
                if (vfop) {
                        int i;
                        struct bnx2x_mcast_ramrod_params *ramrod =
                                &vf->op_params.mcast;

                        /* set ramrod params */
                        memset(ramrod, 0, sizeof(*ramrod));
                        ramrod->mcast_obj = &vf->mcast_obj;
                        if (drv_only)
                                set_bit(RAMROD_DRV_CLR_ONLY,
                                        &ramrod->ramrod_flags);

                        /* copy mcasts pointers */
                        vfop->args.mc_list.mc_num = mcast_num;
                        vfop->args.mc_list.mc = mc;
                        for (i = 0; i < mcast_num; i++)
                                mc[i].mac = mcasts[i];

                        bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
                                         bnx2x_vfop_mcast, cmd->done);
                        return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
                                                     cmd->block);
                } else {
                        kfree(mc);
                }
        }
        return -ENOMEM;
}
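
/* The multicast op above always deletes the previous list before adding
 * the new one, so invoking it with mcast_num == 0 (as the FLR flow does)
 * simply flushes the VF's multicast configuration.
 */
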
/* VFOP rx-mode */
static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
        struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
        enum bnx2x_vfop_rxmode_state state = vfop->state;

        bnx2x_vfop_reset_wq(vf);

        if (vfop->rc < 0)
                goto op_err;

        DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

        switch (state) {
        case BNX2X_VFOP_RXMODE_CONFIG:
                /* next state */
                vfop->state = BNX2X_VFOP_RXMODE_DONE;

                vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
                BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
op_done:
        case BNX2X_VFOP_RXMODE_DONE:
                bnx2x_vfop_end(bp, vf, vfop);
                return;
        default:
                bnx2x_vfop_default(state);
        }
op_pending:
        return;
}

int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
                          struct bnx2x_virtf *vf,
                          struct bnx2x_vfop_cmd *cmd,
                          int qid, unsigned long accept_flags)
{
        struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

        if (vfop) {
                struct bnx2x_rx_mode_ramrod_params *ramrod =
                        &vf->op_params.rx_mode;

                memset(ramrod, 0, sizeof(*ramrod));

                /* Prepare ramrod parameters */
                ramrod->cid = vfq->cid;
                ramrod->cl_id = vfq_cl_id(vf, vfq);
                ramrod->rx_mode_obj = &bp->rx_mode_obj;
                ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);

                ramrod->rx_accept_flags = accept_flags;
                ramrod->tx_accept_flags = accept_flags;
                ramrod->pstate = &vf->filter_state;
                ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

                set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
                set_bit(RAMROD_RX, &ramrod->ramrod_flags);
                set_bit(RAMROD_TX, &ramrod->ramrod_flags);

                ramrod->rdata =
                        bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
                ramrod->rdata_mapping =
                        bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);

                bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
                                 bnx2x_vfop_rxmode, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
                                             cmd->block);
        }
        return -ENOMEM;
}

/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
 * queue destructor)
 */
static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
        int qid = vfop->args.qx.qid;
        enum bnx2x_vfop_qteardown_state state = vfop->state;
        struct bnx2x_vfop_cmd cmd;

        if (vfop->rc < 0)
                goto op_err;

        DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

        cmd.done = bnx2x_vfop_qdown;
        cmd.block = false;

        switch (state) {
        case BNX2X_VFOP_QTEARDOWN_RXMODE:
                /* Drop all */
                vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
                vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
                if (vfop->rc)
                        goto op_err;
                return;

        case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
                /* vlan-clear-all: don't consume credit */
                vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
                vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
                if (vfop->rc)
                        goto op_err;
                return;

        case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
                /* mac-clear-all: consume credit */
                vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
                vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
                if (vfop->rc)
                        goto op_err;
                return;

        case BNX2X_VFOP_QTEARDOWN_QDTOR:
                /* run the queue destruction flow */
                DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
                vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
                DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
                vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
                DP(BNX2X_MSG_IOV, "returned from cmd\n");
                if (vfop->rc)
                        goto op_err;
                return;
op_err:
        BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
                  vf->abs_vfid, qid, vfop->rc);

        case BNX2X_VFOP_QTEARDOWN_DONE:
                bnx2x_vfop_end(bp, vf, vfop);
                return;
        default:
                bnx2x_vfop_default(state);
        }
}

int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
                         struct bnx2x_virtf *vf,
                         struct bnx2x_vfop_cmd *cmd,
                         int qid)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

        if (vfop) {
                vfop->args.qx.qid = qid;
                bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE,
                                 bnx2x_vfop_qdown, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
                                             cmd->block);
        }

        return -ENOMEM;
}
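
/* Many routines below program VF-scoped registers by "pretending" to be
 * the VF: bnx2x_pretend_func() retargets the PF's GRC accesses to the
 * VF's function id and must always be undone, e.g.:
 *
 *      bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
 *      REG_WR(bp, PBF_REG_DISABLE_VF, 0);       // runs as the VF
 *      bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); // restore the PF
 */
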
/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
        REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
        REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
        REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
        REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
        REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
        u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
        u32 was_err_reg = 0;

        switch (was_err_group) {
        case 0:
                was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
                break;
        case 1:
                was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
                break;
        case 2:
                was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
                break;
        case 3:
                was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
                break;
        }
        REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}

static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        int i;
        u32 val;

        /* Set VF masks and configuration - pretend */
        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

        REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
        REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
        REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
        REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
        REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
        REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

        val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
        val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
        if (vf->cfg_flags & VF_CFG_INT_SIMD)
                val |= IGU_VF_CONF_SINGLE_ISR_EN;
        val &= ~IGU_VF_CONF_PARENT_MASK;
        val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT;  /* parent PF */
        REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

        DP(BNX2X_MSG_IOV,
           "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
           vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));

        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

        /* iterate over all queues, clear sb consumer */
        for (i = 0; i < vf_sb_count(vf); i++) {
                u8 igu_sb_id = vf_igu_sb(vf, i);

                /* zero prod memory */
                REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

                /* clear sb state machine */
                bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
                                       false /* VF */);

                /* disable + update */
                bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
                                    IGU_INT_DISABLE, 1);
        }
}

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
        /* set the VF-PF association in the FW */
        storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
        storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

        /* clear vf errors */
        bnx2x_vf_semi_clear_err(bp, abs_vfid);
        bnx2x_vf_pglue_clear_err(bp, abs_vfid);

        /* internal vf-enable - pretend */
        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
        DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
        bnx2x_vf_enable_internal(bp, true);
        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        /* Reset the vf in the IGU - interrupts are still disabled */
        bnx2x_vf_igu_reset(bp, vf);

        /* pretend to enable the vf with the PBF */
        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
        REG_WR(bp, PBF_REG_DISABLE_VF, 0);
        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
        struct pci_dev *dev;
        struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

        if (!vf)
                return false;

        dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
        if (dev)
                return bnx2x_is_pcie_pending(dev);
        return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
        /* Verify no pending pci transactions */
        if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
                BNX2X_ERR("PCIE Transactions still pending\n");

        return 0;
}

/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
{
        u16 vlan_count = 0;

        /* will be set only during VF-ACQUIRE */
        resc->num_rxqs = 0;
        resc->num_txqs = 0;

        /* no credit calculations for macs (just yet) */
        resc->num_mac_filters = 1;

        /* divvy up vlan rules */
        vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
        vlan_count = 1 << ilog2(vlan_count);
        resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);

        /* no real limitation */
        resc->num_mc_filters = 0;

        /* num_sbs already set */
}
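
/* FLR cleanup walks every rxq of the VF through the qflr state machine,
 * drops the multicast list, then performs the HW/FW final cleanup (DQ
 * usage counter, final cleanup command, TX flush) before returning the
 * VF's resources to the free pool and re-opening its mailbox.
 */
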
/* FLR routines: */
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        /* reset the state variables */
        bnx2x_iov_static_resc(bp, &vf->alloc_resc);
        vf->state = VF_FREE;
}

static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

        /* DQ usage counter */
        bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
        bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
                                        "DQ VF usage counter timed out",
                                        poll_cnt);
        bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

        /* FW cleanup command - poll for the results */
        if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
                                   poll_cnt))
                BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

        /* verify TX hw is flushed */
        bnx2x_tx_hw_flushed(bp, poll_cnt);
}

static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
        struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
        enum bnx2x_vfop_flr_state state = vfop->state;
        struct bnx2x_vfop_cmd cmd = {
                .done = bnx2x_vfop_flr,
                .block = false,
        };

        if (vfop->rc < 0)
                goto op_err;

        DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

        switch (state) {
        case BNX2X_VFOP_FLR_QUEUES:
                /* the cleanup operations are valid if and only if the VF
                 * was first acquired.
                 */
                if (++(qx->qid) < vf_rxq_count(vf)) {
                        vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
                                                       qx->qid);
                        if (vfop->rc)
                                goto op_err;
                        return;
                }
                /* remove multicasts */
                vfop->state = BNX2X_VFOP_FLR_HW;
                vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
                                                0, true);
                if (vfop->rc)
                        goto op_err;
                return;
        case BNX2X_VFOP_FLR_HW:

                /* dispatch final cleanup and wait for HW queues to flush */
                bnx2x_vf_flr_clnup_hw(bp, vf);

                /* release VF resources */
                bnx2x_vf_free_resc(bp, vf);

                /* re-open the mailbox */
                bnx2x_vf_enable_mbx(bp, vf->abs_vfid);

                goto op_done;
        default:
                bnx2x_vfop_default(state);
        }
op_err:
        BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
        vf->flr_clnup_stage = VF_FLR_ACK;
        bnx2x_vfop_end(bp, vf, vfop);
        bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
}

static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
                              struct bnx2x_virtf *vf,
                              vfop_handler_t done)
{
        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
        if (vfop) {
                vfop->args.qx.qid = -1; /* loop */
                bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
                                 bnx2x_vfop_flr, done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
        }
        return -ENOMEM;
}

static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
{
        int i = prev_vf ? prev_vf->index + 1 : 0;
        struct bnx2x_virtf *vf;

        /* find next VF to cleanup */
next_vf_to_clean:
        for (;
             i < BNX2X_NR_VIRTFN(bp) &&
             (bnx2x_vf(bp, i, state) != VF_RESET ||
              bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
             i++)
                ;

        DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
           BNX2X_NR_VIRTFN(bp));

        if (i < BNX2X_NR_VIRTFN(bp)) {
                vf = BP_VF(bp, i);

                /* lock the vf pf channel */
                bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

                /* invoke the VF FLR SM */
                if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
                        BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
                                  vf->abs_vfid);

                        /* mark the VF to be ACKED and continue */
                        vf->flr_clnup_stage = VF_FLR_ACK;
                        goto next_vf_to_clean;
                }
                return;
        }

        /* we are done, update vf records */
        for_each_vf(bp, i) {
                vf = BP_VF(bp, i);

                if (vf->flr_clnup_stage != VF_FLR_ACK)
                        continue;

                vf->flr_clnup_stage = VF_FLR_EPILOG;
        }

        /* Acknowledge the handled VFs.
         * We acknowledge all the VFs for which an FLR was requested, even
         * those we never opened, since the MCP will interrupt us immediately
         * again if we only ack some of the bits, resulting in an endless
         * loop. This can happen, for example, in KVM where an 'all ones'
         * FLR request is sometimes given by the hypervisor.
         */
        DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
           bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
        for (i = 0; i < FLRD_VFS_DWORDS; i++)
                SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
                          bp->vfdb->flrd_vfs[i]);

        bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

        /* clear the acked bits - better yet if the MCP implemented
         * write to clear semantics
         */
        for (i = 0; i < FLRD_VFS_DWORDS; i++)
                SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}

void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
        int i;

        /* Read FLR'd VFs */
        for (i = 0; i < FLRD_VFS_DWORDS; i++)
                bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

        DP(BNX2X_MSG_MCP,
           "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
           bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

        for_each_vf(bp, i) {
                struct bnx2x_virtf *vf = BP_VF(bp, i);
                u32 reset = 0;

                if (vf->abs_vfid < 32)
                        reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
                else
                        reset = bp->vfdb->flrd_vfs[1] &
                                (1 << (vf->abs_vfid - 32));

                if (reset) {
                        /* set as reset and ready for cleanup */
                        vf->state = VF_RESET;
                        vf->flr_clnup_stage = VF_FLR_CLN;

                        DP(BNX2X_MSG_IOV,
                           "Initiating Final cleanup for VF %d\n",
                           vf->abs_vfid);
                }
        }

        /* do the FLR cleanup for all marked VFs */
        bnx2x_vf_flr_clnup(bp, NULL);
}
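
/* The DQ setup below gives each VF a doorbell window of
 * (1 << BNX2X_VF_CID_WND) CIDs starting at BNX2X_FIRST_VF_CID, so a VF
 * doorbell decodes roughly (sketch only, not the exact HW formula) as:
 *
 *      cid = BNX2X_FIRST_VF_CID + abs_vfid * (1 << BNX2X_VF_CID_WND) + qid
 */
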
/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
        if (!IS_SRIOV(bp))
                return;

        /* Set the DQ such that the CID reflects the abs_vfid */
        REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
        REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

        /* Set VFs starting CID. If it's > 0, the preceding CIDs belong to
         * the PF L2 queues
         */
        REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

        /* The VF window size is the log2 of the max number of CIDs per VF */
        REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

        /* The VF doorbell size: 0 - 8B, 4 - 128B. We set it here to match
         * the PF doorbell size, although the two are independent.
         */
        REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
               BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);

        /* No security checks for now -
         * configure single rule (out of 16) mask = 0x1, value = 0x0,
         * CID range 0 - 0x1ffff
         */
        REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
        REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
        REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
        REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

        /* set the number of VF allowed doorbells to the full DQ range */
        REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);

        /* set the VF doorbell threshold */
        REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}

void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
        if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
                REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}

static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
        struct pci_dev *dev = bp->pdev;
        struct bnx2x_sriov *iov = &bp->vfdb->sriov;

        return dev->bus->number + ((dev->devfn + iov->offset +
                                    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
        struct pci_dev *dev = bp->pdev;
        struct bnx2x_sriov *iov = &bp->vfdb->sriov;

        return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}

static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
        int i, n;
        struct pci_dev *dev = bp->pdev;
        struct bnx2x_sriov *iov = &bp->vfdb->sriov;

        for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
                u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
                u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

                size /= iov->total;
                vf->bars[n].bar = start + size * vf->abs_vfid;
                vf->bars[n].size = size;
        }
}

static int bnx2x_ari_enabled(struct pci_dev *dev)
{
        return dev->bus->self && dev->bus->self->ari_enabled;
}

static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
        int sb_id;
        u32 val;
        u8 fid;

        /* IGU in normal mode - read CAM */
        for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
                val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
                if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
                        continue;
                fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
                if (!(fid & IGU_FID_ENCODE_IS_PF))
                        bnx2x_vf_set_igu_info(bp, sb_id,
                                              (fid & IGU_FID_VF_NUM_MASK));

                DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
                   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
                   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
                    (fid & IGU_FID_VF_NUM_MASK)), sb_id,
                   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
        }
}

static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
        if (bp->vfdb) {
                kfree(bp->vfdb->vfqs);
                kfree(bp->vfdb->vfs);
                kfree(bp->vfdb);
        }
        bp->vfdb = NULL;
}

static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
        int pos;
        struct pci_dev *dev = bp->pdev;

        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
        if (!pos) {
                BNX2X_ERR("failed to find SRIOV capability in device\n");
                return -ENODEV;
        }

        iov->pos = pos;
        DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
        pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
        pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
        pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
        pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
        pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
        pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
        pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
        pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

        return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
        u32 val;

        /* read the SRIOV capability structure
         * The fields can be read via configuration read or
         * directly from the device (starting at offset PCICFG_OFFSET)
         */
        if (bnx2x_sriov_pci_cfg_info(bp, iov))
                return -ENODEV;

        /* get the number of SRIOV bars */
        iov->nres = 0;

        /* read the first_vfid */
        val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
        iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
                               * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

        DP(BNX2X_MSG_IOV,
           "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
           BP_FUNC(bp),
           iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
           iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

        return 0;
}

static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
{
        int i;
        u8 queue_count = 0;

        if (IS_SRIOV(bp))
                for_each_vf(bp, i)
                        queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);

        return queue_count;
}
Abort SRIOV\n", 1929 BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID); 1930 return 0; 1931 } 1932 1933 /* SRIOV can be enabled only with MSIX */ 1934 if (int_mode_param == BNX2X_INT_MODE_MSI || 1935 int_mode_param == BNX2X_INT_MODE_INTX) { 1936 BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n"); 1937 return 0; 1938 } 1939 1940 err = -EIO; 1941 /* verify ari is enabled */ 1942 if (!bnx2x_ari_enabled(bp->pdev)) { 1943 BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n"); 1944 return 0; 1945 } 1946 1947 /* verify igu is in normal mode */ 1948 if (CHIP_INT_MODE_IS_BC(bp)) { 1949 BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n"); 1950 return 0; 1951 } 1952 1953 /* allocate the vfs database */ 1954 bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL); 1955 if (!bp->vfdb) { 1956 BNX2X_ERR("failed to allocate vf database\n"); 1957 err = -ENOMEM; 1958 goto failed; 1959 } 1960 1961 /* get the sriov info - Linux already collected all the pertinent 1962 * information, however the sriov structure is for the private use 1963 * of the pci module. Also we want this information regardless 1964 * of the hyper-visor. 1965 */ 1966 iov = &(bp->vfdb->sriov); 1967 err = bnx2x_sriov_info(bp, iov); 1968 if (err) 1969 goto failed; 1970 1971 /* SR-IOV capability was enabled but there are no VFs*/ 1972 if (iov->total == 0) 1973 goto failed; 1974 1975 iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param); 1976 1977 DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n", 1978 num_vfs_param, iov->nr_virtfn); 1979 1980 /* allocate the vf array */ 1981 bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) * 1982 BNX2X_NR_VIRTFN(bp), GFP_KERNEL); 1983 if (!bp->vfdb->vfs) { 1984 BNX2X_ERR("failed to allocate vf array\n"); 1985 err = -ENOMEM; 1986 goto failed; 1987 } 1988 1989 /* Initial VF init - index and abs_vfid - nr_virtfn must be set */ 1990 for_each_vf(bp, i) { 1991 bnx2x_vf(bp, i, index) = i; 1992 bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; 1993 bnx2x_vf(bp, i, state) = VF_FREE; 1994 INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head)); 1995 mutex_init(&bnx2x_vf(bp, i, op_mutex)); 1996 bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; 1997 } 1998 1999 /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ 2000 bnx2x_get_vf_igu_cam_info(bp); 2001 2002 /* get the total queue count and allocate the global queue arrays */ 2003 qcount = bnx2x_iov_get_max_queue_count(bp); 2004 2005 /* allocate the queue arrays for all VFs */ 2006 bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue), 2007 GFP_KERNEL); 2008 if (!bp->vfdb->vfqs) { 2009 BNX2X_ERR("failed to allocate vf queue array\n"); 2010 err = -ENOMEM; 2011 goto failed; 2012 } 2013 2014 return 0; 2015 failed: 2016 DP(BNX2X_MSG_IOV, "Failed err=%d\n", err); 2017 __bnx2x_iov_free_vfdb(bp); 2018 return err; 2019 } 2020 2021 void bnx2x_iov_remove_one(struct bnx2x *bp) 2022 { 2023 /* if SRIOV is not enabled there's nothing to do */ 2024 if (!IS_SRIOV(bp)) 2025 return; 2026 2027 DP(BNX2X_MSG_IOV, "about to call disable sriov\n"); 2028 pci_disable_sriov(bp->pdev); 2029 DP(BNX2X_MSG_IOV, "sriov disabled\n"); 2030 2031 /* free vf database */ 2032 __bnx2x_iov_free_vfdb(bp); 2033 } 2034 2035 void bnx2x_iov_free_mem(struct bnx2x *bp) 2036 { 2037 int i; 2038 2039 if (!IS_SRIOV(bp)) 2040 return; 2041 2042 /* free vfs hw contexts */ 2043 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 2044 struct hw_dma *cxt = &bp->vfdb->context[i]; 2045 BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size); 2046 } 2047 
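/* free the remaining PF-side SRIOV DMA areas (allocated in
 * bnx2x_iov_alloc_mem() below): the slow-path ramrod buffers, the
 * VF-PF mailboxes and the bulletin boards
 */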
2048 BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr, 2049 BP_VFDB(bp)->sp_dma.mapping, 2050 BP_VFDB(bp)->sp_dma.size); 2051 2052 BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr, 2053 BP_VF_MBX_DMA(bp)->mapping, 2054 BP_VF_MBX_DMA(bp)->size); 2055 2056 BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr, 2057 BP_VF_BULLETIN_DMA(bp)->mapping, 2058 BP_VF_BULLETIN_DMA(bp)->size); 2059 } 2060 2061 int bnx2x_iov_alloc_mem(struct bnx2x *bp) 2062 { 2063 size_t tot_size; 2064 int i, rc = 0; 2065 2066 if (!IS_SRIOV(bp)) 2067 return rc; 2068 2069 /* allocate vfs hw contexts */ 2070 tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) * 2071 BNX2X_CIDS_PER_VF * sizeof(union cdu_context); 2072 2073 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 2074 struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i); 2075 cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ); 2076 2077 if (cxt->size) { 2078 BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size); 2079 } else { 2080 cxt->addr = NULL; 2081 cxt->mapping = 0; 2082 } 2083 tot_size -= cxt->size; 2084 } 2085 2086 /* allocate vfs ramrods dma memory - client_init and set_mac */ 2087 tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp); 2088 BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping, 2089 tot_size); 2090 BP_VFDB(bp)->sp_dma.size = tot_size; 2091 2092 /* allocate mailboxes */ 2093 tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; 2094 BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping, 2095 tot_size); 2096 BP_VF_MBX_DMA(bp)->size = tot_size; 2097 2098 /* allocate local bulletin boards */ 2099 tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE; 2100 BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr, 2101 &BP_VF_BULLETIN_DMA(bp)->mapping, tot_size); 2102 BP_VF_BULLETIN_DMA(bp)->size = tot_size; 2103 2104 return 0; 2105 2106 alloc_mem_err: 2107 return -ENOMEM; 2108 } 2109 2110 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, 2111 struct bnx2x_vf_queue *q) 2112 { 2113 u8 cl_id = vfq_cl_id(vf, q); 2114 u8 func_id = FW_VF_HANDLE(vf->abs_vfid); 2115 unsigned long q_type = 0; 2116 2117 set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); 2118 set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); 2119 2120 /* Queue State object */ 2121 bnx2x_init_queue_obj(bp, &q->sp_obj, 2122 cl_id, &q->cid, 1, func_id, 2123 bnx2x_vf_sp(bp, vf, q_data), 2124 bnx2x_vf_sp_map(bp, vf, q_data), 2125 q_type); 2126 2127 DP(BNX2X_MSG_IOV, 2128 "initialized vf %d's queue object. 
func id set to %d\n", 2129 vf->abs_vfid, q->sp_obj.func_id); 2130 2131 /* mac/vlan objects are per queue, but only those 2132 * that belong to the leading queue are initialized 2133 */ 2134 if (vfq_is_leading(q)) { 2135 /* mac */ 2136 bnx2x_init_mac_obj(bp, &q->mac_obj, 2137 cl_id, q->cid, func_id, 2138 bnx2x_vf_sp(bp, vf, mac_rdata), 2139 bnx2x_vf_sp_map(bp, vf, mac_rdata), 2140 BNX2X_FILTER_MAC_PENDING, 2141 &vf->filter_state, 2142 BNX2X_OBJ_TYPE_RX_TX, 2143 &bp->macs_pool); 2144 /* vlan */ 2145 bnx2x_init_vlan_obj(bp, &q->vlan_obj, 2146 cl_id, q->cid, func_id, 2147 bnx2x_vf_sp(bp, vf, vlan_rdata), 2148 bnx2x_vf_sp_map(bp, vf, vlan_rdata), 2149 BNX2X_FILTER_VLAN_PENDING, 2150 &vf->filter_state, 2151 BNX2X_OBJ_TYPE_RX_TX, 2152 &bp->vlans_pool); 2153 2154 /* mcast */ 2155 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id, 2156 q->cid, func_id, func_id, 2157 bnx2x_vf_sp(bp, vf, mcast_rdata), 2158 bnx2x_vf_sp_map(bp, vf, mcast_rdata), 2159 BNX2X_FILTER_MCAST_PENDING, 2160 &vf->filter_state, 2161 BNX2X_OBJ_TYPE_RX_TX); 2162 2163 vf->leading_rss = cl_id; 2164 } 2165 } 2166 2167 /* called by bnx2x_nic_load */ 2168 int bnx2x_iov_nic_init(struct bnx2x *bp) 2169 { 2170 int vfid, qcount, i; 2171 2172 if (!IS_SRIOV(bp)) { 2173 DP(BNX2X_MSG_IOV, "vfdb was not allocated\n"); 2174 return 0; 2175 } 2176 2177 DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn); 2178 2179 /* let FLR complete ... */ 2180 msleep(100); 2181 2182 /* initialize vf database */ 2183 for_each_vf(bp, vfid) { 2184 struct bnx2x_virtf *vf = BP_VF(bp, vfid); 2185 2186 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) * 2187 BNX2X_CIDS_PER_VF; 2188 2189 union cdu_context *base_cxt = (union cdu_context *) 2190 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + 2191 (base_vf_cid & (ILT_PAGE_CIDS-1)); 2192 2193 DP(BNX2X_MSG_IOV, 2194 "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n", 2195 vf->abs_vfid, vf_sb_count(vf), base_vf_cid, 2196 BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt); 2197 2198 /* init statically provisioned resources */ 2199 bnx2x_iov_static_resc(bp, &vf->alloc_resc); 2200 2201 /* queues are initialized during VF-ACQUIRE */ 2202 2203 /* reserve the vf vlan credit */ 2204 bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf)); 2205 2206 vf->filter_state = 0; 2207 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); 2208 2209 /* init mcast object - This object will be re-initialized 2210 * during VF-ACQUIRE with the proper cl_id and cid. 2211 * It needs to be initialized here so that it can be safely 2212 * handled by a subsequent FLR flow. 
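 * The 0xFF values passed below are dummy ids; they are replaced with
 * the real cl_id and cid when the object is re-initialized at VF-ACQUIRE.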
2213 */ 2214 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF, 2215 0xFF, 0xFF, 0xFF, 2216 bnx2x_vf_sp(bp, vf, mcast_rdata), 2217 bnx2x_vf_sp_map(bp, vf, mcast_rdata), 2218 BNX2X_FILTER_MCAST_PENDING, 2219 &vf->filter_state, 2220 BNX2X_OBJ_TYPE_RX_TX); 2221 2222 /* set the mailbox message addresses */ 2223 BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *) 2224 (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid * 2225 MBX_MSG_ALIGNED_SIZE); 2226 2227 BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping + 2228 vfid * MBX_MSG_ALIGNED_SIZE; 2229 2230 /* Enable vf mailbox */ 2231 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); 2232 } 2233 2234 /* Final VF init */ 2235 qcount = 0; 2236 for_each_vf(bp, i) { 2237 struct bnx2x_virtf *vf = BP_VF(bp, i); 2238 2239 /* fill in the BDF and bars */ 2240 vf->bus = bnx2x_vf_bus(bp, i); 2241 vf->devfn = bnx2x_vf_devfn(bp, i); 2242 bnx2x_vf_set_bars(bp, vf); 2243 2244 DP(BNX2X_MSG_IOV, 2245 "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n", 2246 vf->abs_vfid, vf->bus, vf->devfn, 2247 (unsigned)vf->bars[0].bar, vf->bars[0].size, 2248 (unsigned)vf->bars[1].bar, vf->bars[1].size, 2249 (unsigned)vf->bars[2].bar, vf->bars[2].size); 2250 2251 /* set local queue arrays */ 2252 vf->vfqs = &bp->vfdb->vfqs[qcount]; 2253 qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs); 2254 } 2255 2256 return 0; 2257 } 2258 2259 /* called by bnx2x_chip_cleanup */ 2260 int bnx2x_iov_chip_cleanup(struct bnx2x *bp) 2261 { 2262 int i; 2263 2264 if (!IS_SRIOV(bp)) 2265 return 0; 2266 2267 /* release all the VFs */ 2268 for_each_vf(bp, i) 2269 bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */ 2270 2271 return 0; 2272 } 2273 2274 /* called by bnx2x_init_hw_func, returns the next ilt line */ 2275 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) 2276 { 2277 int i; 2278 struct bnx2x_ilt *ilt = BP_ILT(bp); 2279 2280 if (!IS_SRIOV(bp)) 2281 return line; 2282 2283 /* set vfs ilt lines */ 2284 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 2285 struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i); 2286 2287 ilt->lines[line+i].page = hw_cxt->addr; 2288 ilt->lines[line+i].page_mapping = hw_cxt->mapping; 2289 ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */ 2290 } 2291 return line + i; 2292 } 2293 2294 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid) 2295 { 2296 return ((cid >= BNX2X_FIRST_VF_CID) && 2297 ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS)); 2298 } 2299 2300 static 2301 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, 2302 struct bnx2x_vf_queue *vfq, 2303 union event_ring_elem *elem) 2304 { 2305 unsigned long ramrod_flags = 0; 2306 int rc = 0; 2307 2308 /* Always push next commands out, don't wait here */ 2309 set_bit(RAMROD_CONT, &ramrod_flags); 2310 2311 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { 2312 case BNX2X_FILTER_MAC_PENDING: 2313 rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, 2314 &ramrod_flags); 2315 break; 2316 case BNX2X_FILTER_VLAN_PENDING: 2317 rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem, 2318 &ramrod_flags); 2319 break; 2320 default: 2321 BNX2X_ERR("Unsupported classification command: %d\n", 2322 elem->message.data.eth_event.echo); 2323 return; 2324 } 2325 if (rc < 0) 2326 BNX2X_ERR("Failed to schedule new commands: %d\n", rc); 2327 else if (rc > 0) 2328 DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n"); 2329 } 2330 2331 static 2332 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp, 2333 struct bnx2x_virtf *vf) 2334 { 2335 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 
2336 int rc; 2337 2338 rparam.mcast_obj = &vf->mcast_obj; 2339 vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw); 2340 2341 /* If there are pending mcast commands - send them */ 2342 if (vf->mcast_obj.check_pending(&vf->mcast_obj)) { 2343 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 2344 if (rc < 0) 2345 BNX2X_ERR("Failed to send pending mcast commands: %d\n", 2346 rc); 2347 } 2348 } 2349 2350 static 2351 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, 2352 struct bnx2x_virtf *vf) 2353 { 2354 smp_mb__before_clear_bit(); 2355 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); 2356 smp_mb__after_clear_bit(); 2357 } 2358 2359 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) 2360 { 2361 struct bnx2x_virtf *vf; 2362 int qidx = 0, abs_vfid; 2363 u8 opcode; 2364 u16 cid = 0xffff; 2365 2366 if (!IS_SRIOV(bp)) 2367 return 1; 2368 2369 /* first get the cid - the only events we handle here are cfc-delete 2370 * and set-mac completion 2371 */ 2372 opcode = elem->message.opcode; 2373 2374 switch (opcode) { 2375 case EVENT_RING_OPCODE_CFC_DEL: 2376 cid = SW_CID((__force __le32) 2377 elem->message.data.cfc_del_event.cid); 2378 DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid); 2379 break; 2380 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 2381 case EVENT_RING_OPCODE_MULTICAST_RULES: 2382 case EVENT_RING_OPCODE_FILTERS_RULES: 2383 cid = (elem->message.data.eth_event.echo & 2384 BNX2X_SWCID_MASK); 2385 DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); 2386 break; 2387 case EVENT_RING_OPCODE_VF_FLR: 2388 abs_vfid = elem->message.data.vf_flr_event.vf_id; 2389 DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n", 2390 abs_vfid); 2391 goto get_vf; 2392 case EVENT_RING_OPCODE_MALICIOUS_VF: 2393 abs_vfid = elem->message.data.malicious_vf_event.vf_id; 2394 DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n", 2395 abs_vfid, elem->message.data.malicious_vf_event.err_id); 2396 goto get_vf; 2397 default: 2398 return 1; 2399 } 2400 2401 /* check if the cid is the VF range */ 2402 if (!bnx2x_iov_is_vf_cid(bp, cid)) { 2403 DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid); 2404 return 1; 2405 } 2406 2407 /* extract vf and rxq index from vf_cid - relies on the following: 2408 * 1. vfid on cid reflects the true abs_vfid 2409 * 2. 
The max number of VFs (per path) is 64 2410 */ 2411 qidx = cid & ((1 << BNX2X_VF_CID_WND)-1); 2412 abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 2413 get_vf: 2414 vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 2415 2416 if (!vf) { 2417 BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n", 2418 cid, abs_vfid); 2419 return 0; 2420 } 2421 2422 switch (opcode) { 2423 case EVENT_RING_OPCODE_CFC_DEL: 2424 DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n", 2425 vf->abs_vfid, qidx); 2426 vfq_get(vf, qidx)->sp_obj.complete_cmd(bp, 2427 &vfq_get(vf, 2428 qidx)->sp_obj, 2429 BNX2X_Q_CMD_CFC_DEL); 2430 break; 2431 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 2432 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n", 2433 vf->abs_vfid, qidx); 2434 bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem); 2435 break; 2436 case EVENT_RING_OPCODE_MULTICAST_RULES: 2437 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n", 2438 vf->abs_vfid, qidx); 2439 bnx2x_vf_handle_mcast_eqe(bp, vf); 2440 break; 2441 case EVENT_RING_OPCODE_FILTERS_RULES: 2442 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n", 2443 vf->abs_vfid, qidx); 2444 bnx2x_vf_handle_filters_eqe(bp, vf); 2445 break; 2446 case EVENT_RING_OPCODE_VF_FLR: 2447 DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n", 2448 vf->abs_vfid); 2449 /* Do nothing for now */ 2450 break; 2451 case EVENT_RING_OPCODE_MALICIOUS_VF: 2452 DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n", 2453 abs_vfid, elem->message.data.malicious_vf_event.err_id); 2454 /* Do nothing for now */ 2455 break; 2456 } 2457 /* SRIOV: reschedule any 'in_progress' operations */ 2458 bnx2x_iov_sp_event(bp, cid, false); 2459 2460 return 0; 2461 } 2462 2463 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) 2464 { 2465 /* extract the vf from vf_cid - relies on the following: 2466 * 1. vfid on cid reflects the true abs_vfid 2467 * 2. The max number of VFs (per path) is 64 2468 */ 2469 int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 2470 return bnx2x_vf_by_abs_fid(bp, abs_vfid); 2471 } 2472 2473 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, 2474 struct bnx2x_queue_sp_obj **q_obj) 2475 { 2476 struct bnx2x_virtf *vf; 2477 2478 if (!IS_SRIOV(bp)) 2479 return; 2480 2481 vf = bnx2x_vf_by_cid(bp, vf_cid); 2482 2483 if (vf) { 2484 /* extract queue index from vf_cid - relies on the following: 2485 * 1. vfid on cid reflects the true abs_vfid 2486 * 2. 
The max number of VFs (per path) is 64
2487 */
2488 int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
2489 *q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
2490 } else {
2491 BNX2X_ERR("No vf matching cid %d\n", vf_cid);
2492 }
2493 }
2494
2495 void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
2496 {
2497 struct bnx2x_virtf *vf;
2498
2499 /* check if the cid is the VF range */
2500 if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
2501 return;
2502
2503 vf = bnx2x_vf_by_cid(bp, vf_cid);
2504 if (vf) {
2505 /* set in_progress flag */
2506 atomic_set(&vf->op_in_progress, 1);
2507 if (queue_work)
2508 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2509 }
2510 }
2511
2512 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
2513 {
2514 int i;
2515 int first_queue_query_index, num_queues_req;
2516 dma_addr_t cur_data_offset;
2517 struct stats_query_entry *cur_query_entry;
2518 u8 stats_count = 0;
2519 bool is_fcoe = false;
2520
2521 if (!IS_SRIOV(bp))
2522 return;
2523
2524 if (!NO_FCOE(bp))
2525 is_fcoe = true;
2526
2527 /* fcoe adds one global request and one queue request */
2528 num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
2529 first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
2530 (is_fcoe ? 0 : 1);
2531
2532 DP(BNX2X_MSG_IOV,
2533 "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non-virtual statistics query index is %d. Will add queries on top of that\n",
2534 BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
2535 first_queue_query_index + num_queues_req);
2536
2537 cur_data_offset = bp->fw_stats_data_mapping +
2538 offsetof(struct bnx2x_fw_stats_data, queue_stats) +
2539 num_queues_req * sizeof(struct per_queue_stats);
2540
2541 cur_query_entry = &bp->fw_stats_req->
2542 query[first_queue_query_index + num_queues_req];
2543
2544 for_each_vf(bp, i) {
2545 int j;
2546 struct bnx2x_virtf *vf = BP_VF(bp, i);
2547
2548 if (vf->state != VF_ENABLED) {
2549 DP(BNX2X_MSG_IOV,
2550 "vf %d not enabled so no stats for it\n",
2551 vf->abs_vfid);
2552 continue;
2553 }
2554
2555 DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
2556 for_each_vfq(vf, j) {
2557 struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
2558
2559 /* collect stats from active queues only */
2560 if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
2561 BNX2X_Q_LOGICAL_STATE_STOPPED)
2562 continue;
2563
2564 /* create stats query entry for this queue */
2565 cur_query_entry->kind = STATS_TYPE_QUEUE;
2566 cur_query_entry->index = vfq_cl_id(vf, rxq);
2567 cur_query_entry->funcID =
2568 cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
2569 cur_query_entry->address.hi =
2570 cpu_to_le32(U64_HI(vf->fw_stat_map));
2571 cur_query_entry->address.lo =
2572 cpu_to_le32(U64_LO(vf->fw_stat_map));
2573 DP(BNX2X_MSG_IOV,
2574 "added address %x %x for vf %d queue %d client %d\n",
2575 cur_query_entry->address.hi,
2576 cur_query_entry->address.lo, cur_query_entry->funcID,
2577 j, cur_query_entry->index);
2578 cur_query_entry++;
2579 cur_data_offset += sizeof(struct per_queue_stats);
2580 stats_count++;
2581 }
2582 }
2583 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
2584 }
2585
2586 void bnx2x_iov_sp_task(struct bnx2x *bp)
2587 {
2588 int i;
2589
2590 if (!IS_SRIOV(bp))
2591 return;
2592 /* Iterate over all VFs and invoke state transition for VFs with
2593 * 'in-progress' slow-path operations
2594 */
2595 DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
2596 for_each_vf(bp, i) {
2597 struct bnx2x_virtf *vf = BP_VF(bp, i);
2598
2599 if (!list_empty(&vf->op_list_head) &&
2600 atomic_read(&vf->op_in_progress)) {
2601 DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
2602 bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
2603 }
2604 }
2605 }
2606
2607 static inline
2608 struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
2609 {
2610 int i;
2611 struct bnx2x_virtf *vf = NULL;
2612
2613 for_each_vf(bp, i) {
2614 vf = BP_VF(bp, i);
2615 if (stat_id >= vf->igu_base_id &&
2616 stat_id < vf->igu_base_id + vf_sb_count(vf))
2617 break;
2618 }
2619 return vf;
2620 }
2621
2622 /* VF API helpers */
2623 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
2624 u8 enable)
2625 {
2626 u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
2627 u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
2628
2629 REG_WR(bp, reg, val);
2630 }
2631
2632 static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
2633 {
2634 int i;
2635
2636 for_each_vfq(vf, i)
2637 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2638 vfq_qzone_id(vf, vfq_get(vf, i)), false);
2639 }
2640
2641 static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
2642 {
2643 u32 val;
2644
2645 /* clear the VF configuration - pretend */
2646 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
2647 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
2648 val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
2649 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
2650 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
2651 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
2652 }
2653
2654 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
2655 {
2656 return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
2657 BNX2X_VF_MAX_QUEUES);
2658 }
2659
2660 static
2661 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
2662 struct vf_pf_resc_request *req_resc)
2663 {
2664 u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2665 u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2666
2667 return ((req_resc->num_rxqs <= rxq_cnt) &&
2668 (req_resc->num_txqs <= txq_cnt) &&
2669 (req_resc->num_sbs <= vf_sb_count(vf)) &&
2670 (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
2671 (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
2672 }
2673
2674 /* CORE VF API */
2675 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
2676 struct vf_pf_resc_request *resc)
2677 {
2678 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
2679 BNX2X_CIDS_PER_VF;
2680
2681 union cdu_context *base_cxt = (union cdu_context *)
2682 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
2683 (base_vf_cid & (ILT_PAGE_CIDS-1));
2684 int i;
2685
2686 /* if state is 'acquired' the VF was not released or FLR'd, in
2687 * this case the returned resources match the already acquired
2688 * resources. Verify that the requested numbers do
2689 * not exceed the already acquired numbers.
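 * (this can happen, for example, if a VF driver is reloaded
 * without the VF being FLR'd in between)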
2690 */
2691 if (vf->state == VF_ACQUIRED) {
2692 DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
2693 vf->abs_vfid);
2694
2695 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2696 BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
2697 vf->abs_vfid);
2698 return -EINVAL;
2699 }
2700 return 0;
2701 }
2702
2703 /* Otherwise vf state must be 'free' or 'reset' */
2704 if (vf->state != VF_FREE && vf->state != VF_RESET) {
2705 BNX2X_ERR("VF[%d] Cannot acquire a VF with state %d\n",
2706 vf->abs_vfid, vf->state);
2707 return -EINVAL;
2708 }
2709
2710 /* static allocation:
2711 * the global maximum numbers are fixed per VF. Fail the request if
2712 * the requested numbers exceed these globals
2713 */
2714 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2715 DP(BNX2X_MSG_IOV,
2716 "cannot fulfill vf resource request. Placing maximal available values in response\n");
2717 /* set the max resource in the vf */
2718 return -ENOMEM;
2719 }
2720
2721 /* Set resources counters - 0 request means max available */
2722 vf_sb_count(vf) = resc->num_sbs;
2723 vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2724 vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2725 if (resc->num_mac_filters)
2726 vf_mac_rules_cnt(vf) = resc->num_mac_filters;
2727 if (resc->num_vlan_filters)
2728 vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;
2729
2730 DP(BNX2X_MSG_IOV,
2731 "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
2732 vf_sb_count(vf), vf_rxq_count(vf),
2733 vf_txq_count(vf), vf_mac_rules_cnt(vf),
2734 vf_vlan_rules_cnt(vf));
2735
2736 /* Initialize the queues */
2737 if (!vf->vfqs) {
2738 DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
2739 return -EINVAL;
2740 }
2741
2742 for_each_vfq(vf, i) {
2743 struct bnx2x_vf_queue *q = vfq_get(vf, i);
2744
2745 if (!q) {
2746 DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
2747 return -EINVAL;
2748 }
2749
2750 q->index = i;
2751 q->cxt = &((base_cxt + i)->eth);
2752 q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
2753
2754 DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
2755 vf->abs_vfid, i, q->index, q->cid, q->cxt);
2756
2757 /* init SP objects */
2758 bnx2x_vfq_init(bp, vf, q);
2759 }
2760 vf->state = VF_ACQUIRED;
2761 return 0;
2762 }
2763
2764 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
2765 {
2766 struct bnx2x_func_init_params func_init = {0};
2767 u16 flags = 0;
2768 int i;
2769
2770 /* the sb resources are initialized at this point, do the
2771 * FW/HW initializations
2772 */
2773 for_each_vf_sb(vf, i)
2774 bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
2775 vf_igu_sb(vf, i), vf_igu_sb(vf, i));
2776
2777 /* Sanity checks */
2778 if (vf->state != VF_ACQUIRED) {
2779 DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
2780 vf->abs_vfid, vf->state);
2781 return -EINVAL;
2782 }
2783
2784 /* let FLR complete ...
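 * (an FLR triggered shortly before this init may still be in
 * flight; the epilogue check below verifies HW cleanup is done)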
*/ 2785 msleep(100); 2786 2787 /* FLR cleanup epilogue */ 2788 if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid)) 2789 return -EBUSY; 2790 2791 /* reset IGU VF statistics: MSIX */ 2792 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0); 2793 2794 /* vf init */ 2795 if (vf->cfg_flags & VF_CFG_STATS) 2796 flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ); 2797 2798 if (vf->cfg_flags & VF_CFG_TPA) 2799 flags |= FUNC_FLG_TPA; 2800 2801 if (is_vf_multi(vf)) 2802 flags |= FUNC_FLG_RSS; 2803 2804 /* function setup */ 2805 func_init.func_flgs = flags; 2806 func_init.pf_id = BP_FUNC(bp); 2807 func_init.func_id = FW_VF_HANDLE(vf->abs_vfid); 2808 func_init.fw_stat_map = vf->fw_stat_map; 2809 func_init.spq_map = vf->spq_map; 2810 func_init.spq_prod = 0; 2811 bnx2x_func_init(bp, &func_init); 2812 2813 /* Enable the vf */ 2814 bnx2x_vf_enable_access(bp, vf->abs_vfid); 2815 bnx2x_vf_enable_traffic(bp, vf); 2816 2817 /* queue protection table */ 2818 for_each_vfq(vf, i) 2819 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, 2820 vfq_qzone_id(vf, vfq_get(vf, i)), true); 2821 2822 vf->state = VF_ENABLED; 2823 2824 /* update vf bulletin board */ 2825 bnx2x_post_vf_bulletin(bp, vf->index); 2826 2827 return 0; 2828 } 2829 2830 /* VFOP close (teardown the queues, delete mcasts and close HW) */ 2831 static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) 2832 { 2833 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 2834 struct bnx2x_vfop_args_qx *qx = &vfop->args.qx; 2835 enum bnx2x_vfop_close_state state = vfop->state; 2836 struct bnx2x_vfop_cmd cmd = { 2837 .done = bnx2x_vfop_close, 2838 .block = false, 2839 }; 2840 2841 if (vfop->rc < 0) 2842 goto op_err; 2843 2844 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); 2845 2846 switch (state) { 2847 case BNX2X_VFOP_CLOSE_QUEUES: 2848 2849 if (++(qx->qid) < vf_rxq_count(vf)) { 2850 vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid); 2851 if (vfop->rc) 2852 goto op_err; 2853 return; 2854 } 2855 2856 /* remove multicasts */ 2857 vfop->state = BNX2X_VFOP_CLOSE_HW; 2858 vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false); 2859 if (vfop->rc) 2860 goto op_err; 2861 return; 2862 2863 case BNX2X_VFOP_CLOSE_HW: 2864 2865 /* disable the interrupts */ 2866 DP(BNX2X_MSG_IOV, "disabling igu\n"); 2867 bnx2x_vf_igu_disable(bp, vf); 2868 2869 /* disable the VF */ 2870 DP(BNX2X_MSG_IOV, "clearing qtbl\n"); 2871 bnx2x_vf_clr_qtbl(bp, vf); 2872 2873 goto op_done; 2874 default: 2875 bnx2x_vfop_default(state); 2876 } 2877 op_err: 2878 BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc); 2879 op_done: 2880 vf->state = VF_ACQUIRED; 2881 DP(BNX2X_MSG_IOV, "set state to acquired\n"); 2882 bnx2x_vfop_end(bp, vf, vfop); 2883 } 2884 2885 int bnx2x_vfop_close_cmd(struct bnx2x *bp, 2886 struct bnx2x_virtf *vf, 2887 struct bnx2x_vfop_cmd *cmd) 2888 { 2889 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 2890 if (vfop) { 2891 vfop->args.qx.qid = -1; /* loop */ 2892 bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES, 2893 bnx2x_vfop_close, cmd->done); 2894 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close, 2895 cmd->block); 2896 } 2897 return -ENOMEM; 2898 } 2899 2900 /* VF release can be called either: 1. The VF was acquired but 2901 * not enabled 2. 
the vf was enabled or in the process of being 2902 * enabled 2903 */ 2904 static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf) 2905 { 2906 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 2907 struct bnx2x_vfop_cmd cmd = { 2908 .done = bnx2x_vfop_release, 2909 .block = false, 2910 }; 2911 2912 DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); 2913 2914 if (vfop->rc < 0) 2915 goto op_err; 2916 2917 DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid, 2918 vf->state == VF_FREE ? "Free" : 2919 vf->state == VF_ACQUIRED ? "Acquired" : 2920 vf->state == VF_ENABLED ? "Enabled" : 2921 vf->state == VF_RESET ? "Reset" : 2922 "Unknown"); 2923 2924 switch (vf->state) { 2925 case VF_ENABLED: 2926 vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd); 2927 if (vfop->rc) 2928 goto op_err; 2929 return; 2930 2931 case VF_ACQUIRED: 2932 DP(BNX2X_MSG_IOV, "about to free resources\n"); 2933 bnx2x_vf_free_resc(bp, vf); 2934 DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc); 2935 goto op_done; 2936 2937 case VF_FREE: 2938 case VF_RESET: 2939 /* do nothing */ 2940 goto op_done; 2941 default: 2942 bnx2x_vfop_default(vf->state); 2943 } 2944 op_err: 2945 BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc); 2946 op_done: 2947 bnx2x_vfop_end(bp, vf, vfop); 2948 } 2949 2950 int bnx2x_vfop_release_cmd(struct bnx2x *bp, 2951 struct bnx2x_virtf *vf, 2952 struct bnx2x_vfop_cmd *cmd) 2953 { 2954 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 2955 if (vfop) { 2956 bnx2x_vfop_opset(-1, /* use vf->state */ 2957 bnx2x_vfop_release, cmd->done); 2958 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release, 2959 cmd->block); 2960 } 2961 return -ENOMEM; 2962 } 2963 2964 /* VF release ~ VF close + VF release-resources 2965 * Release is the ultimate SW shutdown and is called whenever an 2966 * irrecoverable error is encountered. 
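 * In this file it is invoked for every VF from bnx2x_iov_chip_cleanup()
 * when the PF itself is being torn down.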
2967 */ 2968 void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block) 2969 { 2970 struct bnx2x_vfop_cmd cmd = { 2971 .done = NULL, 2972 .block = block, 2973 }; 2974 int rc; 2975 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); 2976 2977 rc = bnx2x_vfop_release_cmd(bp, vf, &cmd); 2978 if (rc) 2979 WARN(rc, 2980 "VF[%d] Failed to allocate resources for release op- rc=%d\n", 2981 vf->abs_vfid, rc); 2982 } 2983 2984 static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp, 2985 struct bnx2x_virtf *vf, u32 *sbdf) 2986 { 2987 *sbdf = vf->devfn | (vf->bus << 8); 2988 } 2989 2990 static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf, 2991 struct bnx2x_vf_bar_info *bar_info) 2992 { 2993 int n; 2994 2995 bar_info->nr_bars = bp->vfdb->sriov.nres; 2996 for (n = 0; n < bar_info->nr_bars; n++) 2997 bar_info->bars[n] = vf->bars[n]; 2998 } 2999 3000 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 3001 enum channel_tlvs tlv) 3002 { 3003 /* lock the channel */ 3004 mutex_lock(&vf->op_mutex); 3005 3006 /* record the locking op */ 3007 vf->op_current = tlv; 3008 3009 /* log the lock */ 3010 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n", 3011 vf->abs_vfid, tlv); 3012 } 3013 3014 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 3015 enum channel_tlvs expected_tlv) 3016 { 3017 WARN(expected_tlv != vf->op_current, 3018 "lock mismatch: expected %d found %d", expected_tlv, 3019 vf->op_current); 3020 3021 /* lock the channel */ 3022 mutex_unlock(&vf->op_mutex); 3023 3024 /* log the unlock */ 3025 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n", 3026 vf->abs_vfid, vf->op_current); 3027 3028 /* record the locking op */ 3029 vf->op_current = CHANNEL_TLV_NONE; 3030 } 3031 3032 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) 3033 { 3034 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); 3035 3036 DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", 3037 num_vfs_param, BNX2X_NR_VIRTFN(bp)); 3038 3039 /* HW channel is only operational when PF is up */ 3040 if (bp->state != BNX2X_STATE_OPEN) { 3041 BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n"); 3042 return -EINVAL; 3043 } 3044 3045 /* we are always bound by the total_vfs in the configuration space */ 3046 if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) { 3047 BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n", 3048 num_vfs_param, BNX2X_NR_VIRTFN(bp)); 3049 num_vfs_param = BNX2X_NR_VIRTFN(bp); 3050 } 3051 3052 bp->requested_nr_virtfn = num_vfs_param; 3053 if (num_vfs_param == 0) { 3054 pci_disable_sriov(dev); 3055 return 0; 3056 } else { 3057 return bnx2x_enable_sriov(bp); 3058 } 3059 } 3060 3061 int bnx2x_enable_sriov(struct bnx2x *bp) 3062 { 3063 int rc = 0, req_vfs = bp->requested_nr_virtfn; 3064 3065 rc = pci_enable_sriov(bp->pdev, req_vfs); 3066 if (rc) { 3067 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); 3068 return rc; 3069 } 3070 DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs); 3071 return req_vfs; 3072 } 3073 3074 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) 3075 { 3076 int vfidx; 3077 struct pf_vf_bulletin_content *bulletin; 3078 3079 DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n"); 3080 for_each_vf(bp, vfidx) { 3081 bulletin = BP_VF_BULLETIN(bp, vfidx); 3082 if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN) 3083 bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0); 3084 } 3085 } 3086 3087 void 
bnx2x_disable_sriov(struct bnx2x *bp) 3088 { 3089 pci_disable_sriov(bp->pdev); 3090 } 3091 3092 static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, 3093 struct bnx2x_virtf **vf, 3094 struct pf_vf_bulletin_content **bulletin) 3095 { 3096 if (bp->state != BNX2X_STATE_OPEN) { 3097 BNX2X_ERR("vf ndo called though PF is down\n"); 3098 return -EINVAL; 3099 } 3100 3101 if (!IS_SRIOV(bp)) { 3102 BNX2X_ERR("vf ndo called though sriov is disabled\n"); 3103 return -EINVAL; 3104 } 3105 3106 if (vfidx >= BNX2X_NR_VIRTFN(bp)) { 3107 BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n", 3108 vfidx, BNX2X_NR_VIRTFN(bp)); 3109 return -EINVAL; 3110 } 3111 3112 /* init members */ 3113 *vf = BP_VF(bp, vfidx); 3114 *bulletin = BP_VF_BULLETIN(bp, vfidx); 3115 3116 if (!*vf) { 3117 BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n", 3118 vfidx); 3119 return -EINVAL; 3120 } 3121 3122 if (!*bulletin) { 3123 BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n", 3124 vfidx); 3125 return -EINVAL; 3126 } 3127 3128 return 0; 3129 } 3130 3131 int bnx2x_get_vf_config(struct net_device *dev, int vfidx, 3132 struct ifla_vf_info *ivi) 3133 { 3134 struct bnx2x *bp = netdev_priv(dev); 3135 struct bnx2x_virtf *vf = NULL; 3136 struct pf_vf_bulletin_content *bulletin = NULL; 3137 struct bnx2x_vlan_mac_obj *mac_obj; 3138 struct bnx2x_vlan_mac_obj *vlan_obj; 3139 int rc; 3140 3141 /* sanity and init */ 3142 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 3143 if (rc) 3144 return rc; 3145 mac_obj = &bnx2x_vfq(vf, 0, mac_obj); 3146 vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj); 3147 if (!mac_obj || !vlan_obj) { 3148 BNX2X_ERR("VF partially initialized\n"); 3149 return -EINVAL; 3150 } 3151 3152 ivi->vf = vfidx; 3153 ivi->qos = 0; 3154 ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */ 3155 ivi->spoofchk = 1; /*always enabled */ 3156 if (vf->state == VF_ENABLED) { 3157 /* mac and vlan are in vlan_mac objects */ 3158 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, 3159 0, ETH_ALEN); 3160 vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan, 3161 0, VLAN_HLEN); 3162 } else { 3163 /* mac */ 3164 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) 3165 /* mac configured by ndo so its in bulletin board */ 3166 memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); 3167 else 3168 /* function has not been loaded yet. Show mac as 0s */ 3169 memset(&ivi->mac, 0, ETH_ALEN); 3170 3171 /* vlan */ 3172 if (bulletin->valid_bitmap & (1 << VLAN_VALID)) 3173 /* vlan configured by ndo so its in bulletin board */ 3174 memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN); 3175 else 3176 /* function has not been loaded yet. Show vlans as 0s */ 3177 memset(&ivi->vlan, 0, VLAN_HLEN); 3178 } 3179 3180 return 0; 3181 } 3182 3183 /* New mac for VF. Consider these cases: 3184 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and 3185 * supply at acquire. 3186 * 2. VF has already been acquired but has not yet initialized - store in local 3187 * bulletin board. mac will be posted on VF bulletin board after VF init. VF 3188 * will configure this mac when it is ready. 3189 * 3. VF has already initialized but has not yet setup a queue - post the new 3190 * mac on VF's bulletin board right now. VF will configure this mac when it 3191 * is ready. 3192 * 4. VF has already set a queue - delete any macs already configured for this 3193 * queue and manually config the new mac. 
3194 * In any event, once this function has been called, refuse any attempts by the
3195 * VF to configure any mac for itself except for this mac. In case of a race
3196 * where the VF fails to see the new post on its bulletin board before sending a
3197 * mac configuration request, the PF will simply fail the request and the VF can
3198 * try again after consulting its bulletin board.
3199 */
3200 int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
3201 {
3202 struct bnx2x *bp = netdev_priv(dev);
3203 int rc, q_logical_state;
3204 struct bnx2x_virtf *vf = NULL;
3205 struct pf_vf_bulletin_content *bulletin = NULL;
3206
3207 /* sanity and init */
3208 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
3209 if (rc)
3210 return rc;
3211 if (!is_valid_ether_addr(mac)) {
3212 BNX2X_ERR("mac address invalid\n");
3213 return -EINVAL;
3214 }
3215
3216 /* update PF's copy of the VF's bulletin. Will no longer accept mac
3217 * configuration requests from the vf unless they match this mac
3218 */
3219 bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
3220 memcpy(bulletin->mac, mac, ETH_ALEN);
3221
3222 /* Post update on VF's bulletin board */
3223 rc = bnx2x_post_vf_bulletin(bp, vfidx);
3224 if (rc) {
3225 BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
3226 return rc;
3227 }
3228
3229 /* is vf initialized and queue set up? */
3230 q_logical_state =
3231 bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
3232 if (vf->state == VF_ENABLED &&
3233 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
3234 /* configure the mac in device on this vf's queue */
3235 unsigned long ramrod_flags = 0;
3236 struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
3237
3238 /* must lock vfpf channel to protect against vf flows */
3239 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3240
3241 /* remove existing eth macs */
3242 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
3243 if (rc) {
3244 BNX2X_ERR("failed to delete eth macs\n");
/* release the channel before bailing out */
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3245 return -EINVAL;
3246 }
3247
3248 /* remove existing uc list macs */
3249 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
3250 if (rc) {
3251 BNX2X_ERR("failed to delete uc_list macs\n");
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3252 return -EINVAL;
3253 }
3254
3255 /* configure the new mac to device */
3256 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3257 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
3258 BNX2X_ETH_MAC, &ramrod_flags);
3259
3260 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3261 }
3262
3263 return 0;
3264 }
3265
3266 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3267 {
3268 struct bnx2x *bp = netdev_priv(dev);
3269 int rc, q_logical_state;
3270 struct bnx2x_virtf *vf = NULL;
3271 struct pf_vf_bulletin_content *bulletin = NULL;
3272
3273 /* sanity and init */
3274 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
3275 if (rc)
3276 return rc;
3277
3278 if (vlan > 4095) {
3279 BNX2X_ERR("illegal vlan value %d\n", vlan);
3280 return -EINVAL;
3281 }
3282
3283 DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
3284 vfidx, vlan, 0);
3285
3286 /* update PF's copy of the VF's bulletin. No point in posting the vlan
3287 * to the VF since it doesn't have anything to do with it. But it is
3288 * useful to store it here in case the VF is not up yet so we can
3289 * configure the vlan later when it comes up.
3290 */
3291 bulletin->valid_bitmap |= 1 << VLAN_VALID;
3292 bulletin->vlan = vlan;
3293
3294 /* is vf initialized and queue set up? */
3295 q_logical_state =
3296 bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
3297 if (vf->state == VF_ENABLED &&
3298 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
3299 /* configure the vlan in device on this vf's queue */
3300 unsigned long ramrod_flags = 0;
3301 unsigned long vlan_mac_flags = 0;
3302 struct bnx2x_vlan_mac_obj *vlan_obj =
3303 &bnx2x_vfq(vf, 0, vlan_obj);
3304 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
3305 struct bnx2x_queue_state_params q_params = {NULL};
3306 struct bnx2x_queue_update_params *update_params;
3307
3308 memset(&ramrod_param, 0, sizeof(ramrod_param));
3309
3310 /* must lock vfpf channel to protect against vf flows */
3311 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3312
3313 /* remove existing vlans */
3314 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3315 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
3316 &ramrod_flags);
3317 if (rc) {
3318 BNX2X_ERR("failed to delete vlans\n");
/* release the channel before bailing out */
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3319 return -EINVAL;
3320 }
3321
3322 /* send queue update ramrod to configure default vlan and silent
3323 * vlan removal
3324 */
3325 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3326 q_params.cmd = BNX2X_Q_CMD_UPDATE;
3327 q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj);
3328 update_params = &q_params.params.update;
3329 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
3330 &update_params->update_flags);
3331 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
3332 &update_params->update_flags);
3333
3334 if (vlan == 0) {
3335 /* if vlan is 0 then we want to leave the VF traffic
3336 * untagged, and leave the incoming traffic untouched
3337 * (i.e. do not remove any vlan tags).
3338 */
3339 __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3340 &update_params->update_flags);
3341 __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3342 &update_params->update_flags);
3343 } else {
3344 /* configure the new vlan to device */
3345 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3346 ramrod_param.vlan_mac_obj = vlan_obj;
3347 ramrod_param.ramrod_flags = ramrod_flags;
3348 ramrod_param.user_req.u.vlan.vlan = vlan;
3349 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
3350 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
3351 if (rc) {
3352 BNX2X_ERR("failed to configure vlan\n");
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3353 return -EINVAL;
3354 }
3355
3356 /* configure default vlan to vf queue and set silent
3357 * vlan removal (the vf remains unaware of this vlan).
3358 */
3359 update_params = &q_params.params.update;
3360 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3361 &update_params->update_flags);
3362 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3363 &update_params->update_flags);
3364 update_params->def_vlan = vlan;
3365 }
3366
3367 /* Update the Queue state */
3368 rc = bnx2x_queue_state_change(bp, &q_params);
3369 if (rc) {
3370 BNX2X_ERR("Failed to configure default VLAN\n");
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3371 return rc;
3372 }
3373
3374 /* clear the flag indicating that this VF needs its vlan
3375 * (will only be set if the HV configured the Vlan before the vf
3376 * was up and we were called because the VF came up later)
3377 */
3378 vf->cfg_flags &= ~VF_CFG_VLAN;
3379
3380 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3381 }
3382 return 0;
3383 }
3384
3385 /* crc is the first field in the bulletin board. Compute the crc over the
3386 * entire bulletin board excluding the crc field itself. Use the length field
3387 * as the Bulletin Board was posted by a PF with possibly a different version
3388 * from the vf which will sample it. Therefore, the length is computed by the
3389 * PF and then used blindly by the VF.
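 * (bnx2x_sample_bulletin() below is the consumer side of this scheme:
 * it re-reads the board until the computed crc matches the posted one)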
3390 */
3391 u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
3392 struct pf_vf_bulletin_content *bulletin)
3393 {
3394 return crc32(BULLETIN_CRC_SEED,
3395 ((u8 *)bulletin) + sizeof(bulletin->crc),
3396 bulletin->length - sizeof(bulletin->crc));
3397 }
3398
3399 /* Check for new posts on the bulletin board */
3400 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
3401 {
3402 struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
3403 int attempts;
3404
3405 /* bulletin board hasn't changed since last sample */
3406 if (bp->old_bulletin.version == bulletin.version)
3407 return PFVF_BULLETIN_UNCHANGED;
3408
3409 /* validate crc of new bulletin board */
3410 if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
3411 /* sampling the structure in mid-post may result in corrupted
3412 * data; validate the crc to ensure coherency.
3413 */
3414 for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
3415 bulletin = bp->pf2vf_bulletin->content;
3416 if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
3417 &bulletin))
3418 break;
3419 BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
3420 bulletin.crc,
3421 bnx2x_crc_vf_bulletin(bp, &bulletin));
3422 }
3423 if (attempts >= BULLETIN_ATTEMPTS) {
3424 BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
3425 attempts);
3426 return PFVF_BULLETIN_CRC_ERR;
3427 }
3428 }
3429
3430 /* the mac address in bulletin board is valid and is new */
3431 if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
3432 memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
3433 /* update new mac to net device */
3434 memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
3435 }
3436
3437 /* the vlan in bulletin board is valid and is new */
3438 if (bulletin.valid_bitmap & 1 << VLAN_VALID)
3439 memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);
3440
3441 /* copy new bulletin board to bp */
3442 bp->old_bulletin = bulletin;
3443
3444 return PFVF_BULLETIN_UPDATED;
3445 }
3446
3447 void bnx2x_timer_sriov(struct bnx2x *bp)
3448 {
3449 bnx2x_sample_bulletin(bp);
3450
3451 /* if channel is down we need to self destruct */
3452 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
3453 smp_mb__before_clear_bit();
3454 set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
3455 &bp->sp_rtnl_state);
3456 smp_mb__after_clear_bit();
3457 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3458 }
3459 }
3460
3461 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
3462 {
3463 /* vf doorbells are embedded within the regview */
3464 return bp->regview + PXP_VF_ADDR_DB_START;
3465 }
3466
3467 int bnx2x_vf_pci_alloc(struct bnx2x *bp)
3468 {
3469 mutex_init(&bp->vf2pf_mutex);
3470
3471 /* allocate vf2pf mailbox for vf to pf channel */
3472 BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
3473 sizeof(struct bnx2x_vf_mbx_msg));
3474
3475 /* allocate pf 2 vf bulletin board */
3476 BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
3477 sizeof(union pf_vf_bulletin));
3478
3479 return 0;
3480
3481 alloc_mem_err:
3482 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
3483 sizeof(struct bnx2x_vf_mbx_msg));
3484 BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
3485 sizeof(union pf_vf_bulletin));
3486 return -ENOMEM;
3487 }
3488
3489 int bnx2x_open_epilog(struct bnx2x *bp)
3490 {
3491 /* Enable sriov via delayed work. This must be done via delayed work
3492 * because it causes the probe of the vf devices to be run, which invokes
3493 * register_netdevice, which must have the rtnl lock taken. As we are
3494 * holding the lock right now, that could only work if the probe would
3495 * not take the lock. However, as the probe of the vf may be called from
3496 * other contexts as well (such as when passthrough to a vm fails) it
3497 * can't assume the lock is being held for it. Using delayed work here
3498 * allows the probe code to simply take the lock (i.e. wait for it to be
3499 * released if it is being held). We only want to do this if the number
3500 * of VFs was set before the PF driver was loaded.
3501 */
3502 if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
3503 smp_mb__before_clear_bit();
3504 set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
3505 smp_mb__after_clear_bit();
3506 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3507 }
3508
3509 return 0;
3510 }
3511
3512 void bnx2x_iov_channel_down(struct bnx2x *bp)
3513 {
3514 int vf_idx;
3515 struct pf_vf_bulletin_content *bulletin;
3516
3517 if (!IS_SRIOV(bp))
3518 return;
3519
3520 for_each_vf(bp, vf_idx) {
3521 /* locate this VF's bulletin board and update the channel down
3522 * bit
3523 */
3524 bulletin = BP_VF_BULLETIN(bp, vf_idx);
3525 bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;
3526
3527 /* update vf bulletin board */
3528 bnx2x_post_vf_bulletin(bp, vf_idx);
3529 }
3530 }
3531
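/* Note: bnx2x_sriov_configure() above is the natural candidate for the
 * driver's .sriov_configure PCI callback; with such wiring (hypothetical
 * example, <BDF> being the PF's PCI address):
 *   echo 2 > /sys/bus/pci/devices/<BDF>/sriov_numvfs
 * would invoke it with num_vfs_param == 2, while writing 0 disables
 * SRIOV via pci_disable_sriov().
 */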