1 /* bnx2x_sriov.c: Broadcom Everest network driver. 2 * 3 * Copyright 2009-2012 Broadcom Corporation 4 * 5 * Unless you and Broadcom execute a separate written software license 6 * agreement governing use of this software, this software is licensed to you 7 * under the terms of the GNU General Public License version 2, available 8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). 9 * 10 * Notwithstanding the above, under no circumstances may you combine this 11 * software in any way with any other Broadcom software provided under a 12 * license other than the GPL, without Broadcom's express prior written 13 * consent. 14 * 15 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 16 * Written by: Shmulik Ravid <shmulikr@broadcom.com> 17 * Ariel Elior <ariele@broadcom.com> 18 * 19 */ 20 #include "bnx2x.h" 21 #include "bnx2x_init.h" 22 #include "bnx2x_cmn.h" 23 #include "bnx2x_sriov.h" 24 25 /* General service functions */ 26 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, 27 u16 pf_id) 28 { 29 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), 30 pf_id); 31 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), 32 pf_id); 33 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), 34 pf_id); 35 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), 36 pf_id); 37 } 38 39 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, 40 u8 enable) 41 { 42 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), 43 enable); 44 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), 45 enable); 46 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid), 47 enable); 48 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), 49 enable); 50 } 51 52 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) 53 { 54 int idx; 55 56 for_each_vf(bp, idx) 57 if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid) 58 break; 59 return idx; 60 } 61 62 static 63 struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) 64 { 65 u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid); 66 return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL; 67 } 68 69 static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf, 70 u8 igu_sb_id, u8 segment, u16 index, u8 op, 71 u8 update) 72 { 73 /* acking a VF sb through the PF - use the GRC */ 74 u32 ctl; 75 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 76 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 77 u32 func_encode = vf->abs_vfid; 78 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id; 79 struct igu_regular cmd_data = {0}; 80 81 cmd_data.sb_id_and_flags = 82 ((index << IGU_REGULAR_SB_INDEX_SHIFT) | 83 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | 84 (update << IGU_REGULAR_BUPDATE_SHIFT) | 85 (op << IGU_REGULAR_ENABLE_INT_SHIFT)); 86 87 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | 88 func_encode << IGU_CTRL_REG_FID_SHIFT | 89 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; 90 91 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 92 cmd_data.sb_id_and_flags, igu_addr_data); 93 REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags); 94 mmiowb(); 95 barrier(); 96 97 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 98 ctl, igu_addr_ctl); 99 REG_WR(bp, igu_addr_ctl, ctl); 100 mmiowb(); 101 barrier(); 102 } 103 /* VFOP - VF slow-path operation support */ 104 105 /* VFOP operations states */ 106 enum bnx2x_vfop_qctor_state { 107 BNX2X_VFOP_QCTOR_INIT, 108 BNX2X_VFOP_QCTOR_SETUP, 109 BNX2X_VFOP_QCTOR_INT_EN 110 }; 111 112 enum bnx2x_vfop_vlan_mac_state { 113 BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE, 114 BNX2X_VFOP_VLAN_MAC_CLEAR, 115 BNX2X_VFOP_VLAN_MAC_CHK_DONE, 116 BNX2X_VFOP_MAC_CONFIG_LIST, 117 BNX2X_VFOP_VLAN_CONFIG_LIST, 118 BNX2X_VFOP_VLAN_CONFIG_LIST_0 119 }; 120 121 enum bnx2x_vfop_qsetup_state { 122 BNX2X_VFOP_QSETUP_CTOR, 123 BNX2X_VFOP_QSETUP_VLAN0, 124 BNX2X_VFOP_QSETUP_DONE 125 }; 126 127 #define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0) 128 129 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, 130 struct bnx2x_queue_init_params *init_params, 131 struct bnx2x_queue_setup_params *setup_params, 132 u16 q_idx, u16 sb_idx) 133 { 134 DP(BNX2X_MSG_IOV, 135 "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d", 136 vf->abs_vfid, 137 q_idx, 138 sb_idx, 139 init_params->tx.sb_cq_index, 140 init_params->tx.hc_rate, 141 setup_params->flags, 142 setup_params->txq_params.traffic_type); 143 } 144 145 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, 146 struct bnx2x_queue_init_params *init_params, 147 struct bnx2x_queue_setup_params *setup_params, 148 u16 q_idx, u16 sb_idx) 149 { 150 struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params; 151 152 DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n" 153 "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n", 154 vf->abs_vfid, 155 q_idx, 156 sb_idx, 157 init_params->rx.sb_cq_index, 158 init_params->rx.hc_rate, 159 setup_params->gen_params.mtu, 160 rxq_params->buf_sz, 161 rxq_params->sge_buf_sz, 162 rxq_params->max_sges_pkt, 163 rxq_params->tpa_agg_sz, 164 setup_params->flags, 165 rxq_params->drop_flags, 166 rxq_params->cache_line_log); 167 } 168 169 void bnx2x_vfop_qctor_prep(struct bnx2x *bp, 170 struct bnx2x_virtf *vf, 171 struct bnx2x_vf_queue *q, 172 struct bnx2x_vfop_qctor_params *p, 173 unsigned long q_type) 174 { 175 struct bnx2x_queue_init_params *init_p = &p->qstate.params.init; 176 struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup; 177 178 /* INIT */ 179 180 /* Enable host coalescing in the transition to INIT state */ 181 if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags)) 182 __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags); 183 184 if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags)) 185 __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags); 186 187 /* FW SB ID */ 188 init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx); 189 init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx); 190 191 /* context */ 192 init_p->cxts[0] = q->cxt; 193 194 /* SETUP */ 195 196 /* Setup-op general parameters */ 197 setup_p->gen_params.spcl_id = vf->sp_cl_id; 198 setup_p->gen_params.stat_id = vfq_stat_id(vf, q); 199 200 /* Setup-op pause params: 201 * Nothing to do, the pause thresholds are set by default to 0 which 202 * effectively turns off the feature for this queue. We don't want 203 * one queue (VF) to interfering with another queue (another VF) 204 */ 205 if (vf->cfg_flags & VF_CFG_FW_FC) 206 BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n", 207 vf->abs_vfid); 208 /* Setup-op flags: 209 * collect statistics, zero statistics, local-switching, security, 210 * OV for Flex10, RSS and MCAST for leading 211 */ 212 if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags)) 213 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags); 214 215 /* for VFs, enable tx switching, bd coherency, and mac address 216 * anti-spoofing 217 */ 218 __set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags); 219 __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags); 220 __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags); 221 222 if (vfq_is_leading(q)) { 223 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags); 224 __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags); 225 } 226 227 /* Setup-op rx parameters */ 228 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) { 229 struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params; 230 231 rxq_p->cl_qzone_id = vfq_qzone_id(vf, q); 232 rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx); 233 rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid); 234 235 if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags)) 236 rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES; 237 } 238 239 /* Setup-op tx parameters */ 240 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) { 241 setup_p->txq_params.tss_leading_cl_id = vf->leading_rss; 242 setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx); 243 } 244 } 245 246 /* VFOP queue construction */ 247 static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf) 248 { 249 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 250 struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor; 251 struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate; 252 enum bnx2x_vfop_qctor_state state = vfop->state; 253 254 bnx2x_vfop_reset_wq(vf); 255 256 if (vfop->rc < 0) 257 goto op_err; 258 259 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); 260 261 switch (state) { 262 case BNX2X_VFOP_QCTOR_INIT: 263 264 /* has this queue already been opened? */ 265 if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == 266 BNX2X_Q_LOGICAL_STATE_ACTIVE) { 267 DP(BNX2X_MSG_IOV, 268 "Entered qctor but queue was already up. Aborting gracefully\n"); 269 goto op_done; 270 } 271 272 /* next state */ 273 vfop->state = BNX2X_VFOP_QCTOR_SETUP; 274 275 q_params->cmd = BNX2X_Q_CMD_INIT; 276 vfop->rc = bnx2x_queue_state_change(bp, q_params); 277 278 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); 279 280 case BNX2X_VFOP_QCTOR_SETUP: 281 /* next state */ 282 vfop->state = BNX2X_VFOP_QCTOR_INT_EN; 283 284 /* copy pre-prepared setup params to the queue-state params */ 285 vfop->op_p->qctor.qstate.params.setup = 286 vfop->op_p->qctor.prep_qsetup; 287 288 q_params->cmd = BNX2X_Q_CMD_SETUP; 289 vfop->rc = bnx2x_queue_state_change(bp, q_params); 290 291 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); 292 293 case BNX2X_VFOP_QCTOR_INT_EN: 294 295 /* enable interrupts */ 296 bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx), 297 USTORM_ID, 0, IGU_INT_ENABLE, 0); 298 goto op_done; 299 default: 300 bnx2x_vfop_default(state); 301 } 302 op_err: 303 BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n", 304 vf->abs_vfid, args->qid, q_params->cmd, vfop->rc); 305 op_done: 306 bnx2x_vfop_end(bp, vf, vfop); 307 op_pending: 308 return; 309 } 310 311 static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp, 312 struct bnx2x_virtf *vf, 313 struct bnx2x_vfop_cmd *cmd, 314 int qid) 315 { 316 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 317 318 if (vfop) { 319 vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); 320 321 vfop->args.qctor.qid = qid; 322 vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx); 323 324 bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT, 325 bnx2x_vfop_qctor, cmd->done); 326 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor, 327 cmd->block); 328 } 329 return -ENOMEM; 330 } 331 332 static void 333 bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid) 334 { 335 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 336 if (vf) { 337 if (!vf_sb_count(vf)) 338 vf->igu_base_id = igu_sb_id; 339 ++vf_sb_count(vf); 340 } 341 } 342 343 /* VFOP MAC/VLAN helpers */ 344 static inline void bnx2x_vfop_credit(struct bnx2x *bp, 345 struct bnx2x_vfop *vfop, 346 struct bnx2x_vlan_mac_obj *obj) 347 { 348 struct bnx2x_vfop_args_filters *args = &vfop->args.filters; 349 350 /* update credit only if there is no error 351 * and a valid credit counter 352 */ 353 if (!vfop->rc && args->credit) { 354 int cnt = 0; 355 struct list_head *pos; 356 357 list_for_each(pos, &obj->head) 358 cnt++; 359 360 atomic_set(args->credit, cnt); 361 } 362 } 363 364 static int bnx2x_vfop_set_user_req(struct bnx2x *bp, 365 struct bnx2x_vfop_filter *pos, 366 struct bnx2x_vlan_mac_data *user_req) 367 { 368 user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD : 369 BNX2X_VLAN_MAC_DEL; 370 371 switch (pos->type) { 372 case BNX2X_VFOP_FILTER_MAC: 373 memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN); 374 break; 375 case BNX2X_VFOP_FILTER_VLAN: 376 user_req->u.vlan.vlan = pos->vid; 377 break; 378 default: 379 BNX2X_ERR("Invalid filter type, skipping\n"); 380 return 1; 381 } 382 return 0; 383 } 384 385 static int 386 bnx2x_vfop_config_vlan0(struct bnx2x *bp, 387 struct bnx2x_vlan_mac_ramrod_params *vlan_mac, 388 bool add) 389 { 390 int rc; 391 392 vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD : 393 BNX2X_VLAN_MAC_DEL; 394 vlan_mac->user_req.u.vlan.vlan = 0; 395 396 rc = bnx2x_config_vlan_mac(bp, vlan_mac); 397 if (rc == -EEXIST) 398 rc = 0; 399 return rc; 400 } 401 402 static int bnx2x_vfop_config_list(struct bnx2x *bp, 403 struct bnx2x_vfop_filters *filters, 404 struct bnx2x_vlan_mac_ramrod_params *vlan_mac) 405 { 406 struct bnx2x_vfop_filter *pos, *tmp; 407 struct list_head rollback_list, *filters_list = &filters->head; 408 struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req; 409 int rc = 0, cnt = 0; 410 411 INIT_LIST_HEAD(&rollback_list); 412 413 list_for_each_entry_safe(pos, tmp, filters_list, link) { 414 if (bnx2x_vfop_set_user_req(bp, pos, user_req)) 415 continue; 416 417 rc = bnx2x_config_vlan_mac(bp, vlan_mac); 418 if (rc >= 0) { 419 cnt += pos->add ? 1 : -1; 420 list_del(&pos->link); 421 list_add(&pos->link, &rollback_list); 422 rc = 0; 423 } else if (rc == -EEXIST) { 424 rc = 0; 425 } else { 426 BNX2X_ERR("Failed to add a new vlan_mac command\n"); 427 break; 428 } 429 } 430 431 /* rollback if error or too many rules added */ 432 if (rc || cnt > filters->add_cnt) { 433 BNX2X_ERR("error or too many rules added. Performing rollback\n"); 434 list_for_each_entry_safe(pos, tmp, &rollback_list, link) { 435 pos->add = !pos->add; /* reverse op */ 436 bnx2x_vfop_set_user_req(bp, pos, user_req); 437 bnx2x_config_vlan_mac(bp, vlan_mac); 438 list_del(&pos->link); 439 } 440 cnt = 0; 441 if (!rc) 442 rc = -EINVAL; 443 } 444 filters->add_cnt = cnt; 445 return rc; 446 } 447 448 /* VFOP set VLAN/MAC */ 449 static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf) 450 { 451 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 452 struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac; 453 struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj; 454 struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter; 455 456 enum bnx2x_vfop_vlan_mac_state state = vfop->state; 457 458 if (vfop->rc < 0) 459 goto op_err; 460 461 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); 462 463 bnx2x_vfop_reset_wq(vf); 464 465 switch (state) { 466 case BNX2X_VFOP_VLAN_MAC_CLEAR: 467 /* next state */ 468 vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; 469 470 /* do delete */ 471 vfop->rc = obj->delete_all(bp, obj, 472 &vlan_mac->user_req.vlan_mac_flags, 473 &vlan_mac->ramrod_flags); 474 475 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); 476 477 case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE: 478 /* next state */ 479 vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; 480 481 /* do config */ 482 vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); 483 if (vfop->rc == -EEXIST) 484 vfop->rc = 0; 485 486 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); 487 488 case BNX2X_VFOP_VLAN_MAC_CHK_DONE: 489 vfop->rc = !!obj->raw.check_pending(&obj->raw); 490 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); 491 492 case BNX2X_VFOP_MAC_CONFIG_LIST: 493 /* next state */ 494 vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; 495 496 /* do list config */ 497 vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); 498 if (vfop->rc) 499 goto op_err; 500 501 set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); 502 vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); 503 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); 504 505 case BNX2X_VFOP_VLAN_CONFIG_LIST: 506 /* next state */ 507 vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0; 508 509 /* remove vlan0 - could be no-op */ 510 vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false); 511 if (vfop->rc) 512 goto op_err; 513 514 /* Do vlan list config. if this operation fails we try to 515 * restore vlan0 to keep the queue is working order 516 */ 517 vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); 518 if (!vfop->rc) { 519 set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); 520 vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); 521 } 522 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */ 523 524 case BNX2X_VFOP_VLAN_CONFIG_LIST_0: 525 /* next state */ 526 vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE; 527 528 if (list_empty(&obj->head)) 529 /* add vlan0 */ 530 vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true); 531 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); 532 533 default: 534 bnx2x_vfop_default(state); 535 } 536 op_err: 537 BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc); 538 op_done: 539 kfree(filters); 540 bnx2x_vfop_credit(bp, vfop, obj); 541 bnx2x_vfop_end(bp, vf, vfop); 542 op_pending: 543 return; 544 } 545 546 struct bnx2x_vfop_vlan_mac_flags { 547 bool drv_only; 548 bool dont_consume; 549 bool single_cmd; 550 bool add; 551 }; 552 553 static void 554 bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod, 555 struct bnx2x_vfop_vlan_mac_flags *flags) 556 { 557 struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req; 558 559 memset(ramrod, 0, sizeof(*ramrod)); 560 561 /* ramrod flags */ 562 if (flags->drv_only) 563 set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags); 564 if (flags->single_cmd) 565 set_bit(RAMROD_EXEC, &ramrod->ramrod_flags); 566 567 /* mac_vlan flags */ 568 if (flags->dont_consume) 569 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags); 570 571 /* cmd */ 572 ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL; 573 } 574 575 int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp, 576 struct bnx2x_virtf *vf, 577 struct bnx2x_vfop_cmd *cmd, 578 int qid, u16 vid, bool add) 579 { 580 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 581 582 if (vfop) { 583 struct bnx2x_vfop_args_filters filters = { 584 .multi_filter = NULL, /* single command */ 585 .credit = &bnx2x_vfq(vf, qid, vlan_count), 586 }; 587 struct bnx2x_vfop_vlan_mac_flags flags = { 588 .drv_only = false, 589 .dont_consume = (filters.credit != NULL), 590 .single_cmd = true, 591 .add = add, 592 }; 593 struct bnx2x_vlan_mac_ramrod_params *ramrod = 594 &vf->op_params.vlan_mac; 595 596 /* set ramrod params */ 597 bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); 598 ramrod->user_req.u.vlan.vlan = vid; 599 600 /* set object */ 601 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); 602 603 /* set extra args */ 604 vfop->args.filters = filters; 605 606 bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE, 607 bnx2x_vfop_vlan_mac, cmd->done); 608 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac, 609 cmd->block); 610 } 611 return -ENOMEM; 612 } 613 614 /* VFOP queue setup (queue constructor + set vlan 0) */ 615 static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf) 616 { 617 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf); 618 int qid = vfop->args.qctor.qid; 619 enum bnx2x_vfop_qsetup_state state = vfop->state; 620 struct bnx2x_vfop_cmd cmd = { 621 .done = bnx2x_vfop_qsetup, 622 .block = false, 623 }; 624 625 if (vfop->rc < 0) 626 goto op_err; 627 628 DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state); 629 630 switch (state) { 631 case BNX2X_VFOP_QSETUP_CTOR: 632 /* init the queue ctor command */ 633 vfop->state = BNX2X_VFOP_QSETUP_VLAN0; 634 vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid); 635 if (vfop->rc) 636 goto op_err; 637 return; 638 639 case BNX2X_VFOP_QSETUP_VLAN0: 640 /* skip if non-leading or FPGA/EMU*/ 641 if (qid) 642 goto op_done; 643 644 /* init the queue set-vlan command (for vlan 0) */ 645 vfop->state = BNX2X_VFOP_QSETUP_DONE; 646 vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true); 647 if (vfop->rc) 648 goto op_err; 649 return; 650 op_err: 651 BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc); 652 op_done: 653 case BNX2X_VFOP_QSETUP_DONE: 654 bnx2x_vfop_end(bp, vf, vfop); 655 return; 656 default: 657 bnx2x_vfop_default(state); 658 } 659 } 660 661 int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp, 662 struct bnx2x_virtf *vf, 663 struct bnx2x_vfop_cmd *cmd, 664 int qid) 665 { 666 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 667 668 if (vfop) { 669 vfop->args.qctor.qid = qid; 670 671 bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR, 672 bnx2x_vfop_qsetup, cmd->done); 673 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup, 674 cmd->block); 675 } 676 return -ENOMEM; 677 } 678 679 /* VF enable primitives 680 * when pretend is required the caller is responsible 681 * for calling pretend prior to calling these routines 682 */ 683 684 /* called only on E1H or E2. 685 * When pretending to be PF, the pretend value is the function number 0...7 686 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID 687 * combination 688 */ 689 int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val) 690 { 691 u32 pretend_reg; 692 693 if (CHIP_IS_E1H(bp) && pretend_func_val > E1H_FUNC_MAX) 694 return -1; 695 696 /* get my own pretend register */ 697 pretend_reg = bnx2x_get_pretend_reg(bp); 698 REG_WR(bp, pretend_reg, pretend_func_val); 699 REG_RD(bp, pretend_reg); 700 return 0; 701 } 702 703 /* internal vf enable - until vf is enabled internally all transactions 704 * are blocked. this routine should always be called last with pretend. 705 */ 706 static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable) 707 { 708 REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0); 709 } 710 711 /* clears vf error in all semi blocks */ 712 static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid) 713 { 714 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid); 715 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid); 716 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid); 717 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid); 718 } 719 720 static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid) 721 { 722 u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5; 723 u32 was_err_reg = 0; 724 725 switch (was_err_group) { 726 case 0: 727 was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR; 728 break; 729 case 1: 730 was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR; 731 break; 732 case 2: 733 was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR; 734 break; 735 case 3: 736 was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR; 737 break; 738 } 739 REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f)); 740 } 741 742 static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf) 743 { 744 int i; 745 u32 val; 746 747 /* Set VF masks and configuration - pretend */ 748 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); 749 750 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); 751 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); 752 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0); 753 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0); 754 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0); 755 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0); 756 757 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); 758 val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN); 759 if (vf->cfg_flags & VF_CFG_INT_SIMD) 760 val |= IGU_VF_CONF_SINGLE_ISR_EN; 761 val &= ~IGU_VF_CONF_PARENT_MASK; 762 val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT; /* parent PF */ 763 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); 764 765 DP(BNX2X_MSG_IOV, 766 "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n", 767 vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION)); 768 769 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 770 771 /* iterate over all queues, clear sb consumer */ 772 for (i = 0; i < vf_sb_count(vf); i++) { 773 u8 igu_sb_id = vf_igu_sb(vf, i); 774 775 /* zero prod memory */ 776 REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0); 777 778 /* clear sb state machine */ 779 bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id, 780 false /* VF */); 781 782 /* disable + update */ 783 bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0, 784 IGU_INT_DISABLE, 1); 785 } 786 } 787 788 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid) 789 { 790 /* set the VF-PF association in the FW */ 791 storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp)); 792 storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1); 793 794 /* clear vf errors*/ 795 bnx2x_vf_semi_clear_err(bp, abs_vfid); 796 bnx2x_vf_pglue_clear_err(bp, abs_vfid); 797 798 /* internal vf-enable - pretend */ 799 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid)); 800 DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid); 801 bnx2x_vf_enable_internal(bp, true); 802 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 803 } 804 805 static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf) 806 { 807 /* Reset vf in IGU interrupts are still disabled */ 808 bnx2x_vf_igu_reset(bp, vf); 809 810 /* pretend to enable the vf with the PBF */ 811 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); 812 REG_WR(bp, PBF_REG_DISABLE_VF, 0); 813 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 814 } 815 816 static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid) 817 { 818 struct pci_dev *dev; 819 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 820 821 if (!vf) 822 goto unknown_dev; 823 824 dev = pci_get_bus_and_slot(vf->bus, vf->devfn); 825 if (dev) 826 return bnx2x_is_pcie_pending(dev); 827 828 unknown_dev: 829 BNX2X_ERR("Unknown device\n"); 830 return false; 831 } 832 833 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) 834 { 835 /* Wait 100ms */ 836 msleep(100); 837 838 /* Verify no pending pci transactions */ 839 if (bnx2x_vf_is_pcie_pending(bp, abs_vfid)) 840 BNX2X_ERR("PCIE Transactions still pending\n"); 841 842 return 0; 843 } 844 845 /* must be called after the number of PF queues and the number of VFs are 846 * both known 847 */ 848 static void 849 bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc) 850 { 851 u16 vlan_count = 0; 852 853 /* will be set only during VF-ACQUIRE */ 854 resc->num_rxqs = 0; 855 resc->num_txqs = 0; 856 857 /* no credit calculcis for macs (just yet) */ 858 resc->num_mac_filters = 1; 859 860 /* divvy up vlan rules */ 861 vlan_count = bp->vlans_pool.check(&bp->vlans_pool); 862 vlan_count = 1 << ilog2(vlan_count); 863 resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp); 864 865 /* no real limitation */ 866 resc->num_mc_filters = 0; 867 868 /* num_sbs already set */ 869 } 870 871 /* IOV global initialization routines */ 872 void bnx2x_iov_init_dq(struct bnx2x *bp) 873 { 874 if (!IS_SRIOV(bp)) 875 return; 876 877 /* Set the DQ such that the CID reflect the abs_vfid */ 878 REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0); 879 REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS)); 880 881 /* Set VFs starting CID. If its > 0 the preceding CIDs are belong to 882 * the PF L2 queues 883 */ 884 REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID); 885 886 /* The VF window size is the log2 of the max number of CIDs per VF */ 887 REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND); 888 889 /* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match 890 * the Pf doorbell size although the 2 are independent. 891 */ 892 REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 893 BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT); 894 895 /* No security checks for now - 896 * configure single rule (out of 16) mask = 0x1, value = 0x0, 897 * CID range 0 - 0x1ffff 898 */ 899 REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1); 900 REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0); 901 REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); 902 REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); 903 904 /* set the number of VF alllowed doorbells to the full DQ range */ 905 REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000); 906 907 /* set the VF doorbell threshold */ 908 REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4); 909 } 910 911 void bnx2x_iov_init_dmae(struct bnx2x *bp) 912 { 913 DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF"); 914 if (!IS_SRIOV(bp)) 915 return; 916 917 REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); 918 } 919 920 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) 921 { 922 struct pci_dev *dev = bp->pdev; 923 struct bnx2x_sriov *iov = &bp->vfdb->sriov; 924 925 return dev->bus->number + ((dev->devfn + iov->offset + 926 iov->stride * vfid) >> 8); 927 } 928 929 static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid) 930 { 931 struct pci_dev *dev = bp->pdev; 932 struct bnx2x_sriov *iov = &bp->vfdb->sriov; 933 934 return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff; 935 } 936 937 static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf) 938 { 939 int i, n; 940 struct pci_dev *dev = bp->pdev; 941 struct bnx2x_sriov *iov = &bp->vfdb->sriov; 942 943 for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) { 944 u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i); 945 u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i); 946 947 do_div(size, iov->total); 948 vf->bars[n].bar = start + size * vf->abs_vfid; 949 vf->bars[n].size = size; 950 } 951 } 952 953 static int bnx2x_ari_enabled(struct pci_dev *dev) 954 { 955 return dev->bus->self && dev->bus->self->ari_enabled; 956 } 957 958 static void 959 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) 960 { 961 int sb_id; 962 u32 val; 963 u8 fid; 964 965 /* IGU in normal mode - read CAM */ 966 for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) { 967 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4); 968 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) 969 continue; 970 fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); 971 if (!(fid & IGU_FID_ENCODE_IS_PF)) 972 bnx2x_vf_set_igu_info(bp, sb_id, 973 (fid & IGU_FID_VF_NUM_MASK)); 974 975 DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", 976 ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"), 977 ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) : 978 (fid & IGU_FID_VF_NUM_MASK)), sb_id, 979 GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)); 980 } 981 } 982 983 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp) 984 { 985 if (bp->vfdb) { 986 kfree(bp->vfdb->vfqs); 987 kfree(bp->vfdb->vfs); 988 kfree(bp->vfdb); 989 } 990 bp->vfdb = NULL; 991 } 992 993 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov) 994 { 995 int pos; 996 struct pci_dev *dev = bp->pdev; 997 998 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); 999 if (!pos) { 1000 BNX2X_ERR("failed to find SRIOV capability in device\n"); 1001 return -ENODEV; 1002 } 1003 1004 iov->pos = pos; 1005 DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos); 1006 pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl); 1007 pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total); 1008 pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial); 1009 pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset); 1010 pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride); 1011 pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); 1012 pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap); 1013 pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); 1014 1015 return 0; 1016 } 1017 1018 static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov) 1019 { 1020 u32 val; 1021 1022 /* read the SRIOV capability structure 1023 * The fields can be read via configuration read or 1024 * directly from the device (starting at offset PCICFG_OFFSET) 1025 */ 1026 if (bnx2x_sriov_pci_cfg_info(bp, iov)) 1027 return -ENODEV; 1028 1029 /* get the number of SRIOV bars */ 1030 iov->nres = 0; 1031 1032 /* read the first_vfid */ 1033 val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF); 1034 iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK) 1035 * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp)); 1036 1037 DP(BNX2X_MSG_IOV, 1038 "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n", 1039 BP_FUNC(bp), 1040 iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total, 1041 iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz); 1042 1043 return 0; 1044 } 1045 1046 static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp) 1047 { 1048 int i; 1049 u8 queue_count = 0; 1050 1051 if (IS_SRIOV(bp)) 1052 for_each_vf(bp, i) 1053 queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs); 1054 1055 return queue_count; 1056 } 1057 1058 /* must be called after PF bars are mapped */ 1059 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, 1060 int num_vfs_param) 1061 { 1062 int err, i, qcount; 1063 struct bnx2x_sriov *iov; 1064 struct pci_dev *dev = bp->pdev; 1065 1066 bp->vfdb = NULL; 1067 1068 /* verify is pf */ 1069 if (IS_VF(bp)) 1070 return 0; 1071 1072 /* verify sriov capability is present in configuration space */ 1073 if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV)) 1074 return 0; 1075 1076 /* verify chip revision */ 1077 if (CHIP_IS_E1x(bp)) 1078 return 0; 1079 1080 /* check if SRIOV support is turned off */ 1081 if (!num_vfs_param) 1082 return 0; 1083 1084 /* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */ 1085 if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) { 1086 BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n", 1087 BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID); 1088 return 0; 1089 } 1090 1091 /* SRIOV can be enabled only with MSIX */ 1092 if (int_mode_param == BNX2X_INT_MODE_MSI || 1093 int_mode_param == BNX2X_INT_MODE_INTX) 1094 BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n"); 1095 1096 err = -EIO; 1097 /* verify ari is enabled */ 1098 if (!bnx2x_ari_enabled(bp->pdev)) { 1099 BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n"); 1100 return err; 1101 } 1102 1103 /* verify igu is in normal mode */ 1104 if (CHIP_INT_MODE_IS_BC(bp)) { 1105 BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n"); 1106 return err; 1107 } 1108 1109 /* allocate the vfs database */ 1110 bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL); 1111 if (!bp->vfdb) { 1112 BNX2X_ERR("failed to allocate vf database\n"); 1113 err = -ENOMEM; 1114 goto failed; 1115 } 1116 1117 /* get the sriov info - Linux already collected all the pertinent 1118 * information, however the sriov structure is for the private use 1119 * of the pci module. Also we want this information regardless 1120 * of the hyper-visor. 1121 */ 1122 iov = &(bp->vfdb->sriov); 1123 err = bnx2x_sriov_info(bp, iov); 1124 if (err) 1125 goto failed; 1126 1127 /* SR-IOV capability was enabled but there are no VFs*/ 1128 if (iov->total == 0) 1129 goto failed; 1130 1131 /* calculate the actual number of VFs */ 1132 iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param); 1133 1134 /* allocate the vf array */ 1135 bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) * 1136 BNX2X_NR_VIRTFN(bp), GFP_KERNEL); 1137 if (!bp->vfdb->vfs) { 1138 BNX2X_ERR("failed to allocate vf array\n"); 1139 err = -ENOMEM; 1140 goto failed; 1141 } 1142 1143 /* Initial VF init - index and abs_vfid - nr_virtfn must be set */ 1144 for_each_vf(bp, i) { 1145 bnx2x_vf(bp, i, index) = i; 1146 bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; 1147 bnx2x_vf(bp, i, state) = VF_FREE; 1148 INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head)); 1149 mutex_init(&bnx2x_vf(bp, i, op_mutex)); 1150 bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; 1151 } 1152 1153 /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ 1154 bnx2x_get_vf_igu_cam_info(bp); 1155 1156 /* get the total queue count and allocate the global queue arrays */ 1157 qcount = bnx2x_iov_get_max_queue_count(bp); 1158 1159 /* allocate the queue arrays for all VFs */ 1160 bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue), 1161 GFP_KERNEL); 1162 if (!bp->vfdb->vfqs) { 1163 BNX2X_ERR("failed to allocate vf queue array\n"); 1164 err = -ENOMEM; 1165 goto failed; 1166 } 1167 1168 return 0; 1169 failed: 1170 DP(BNX2X_MSG_IOV, "Failed err=%d\n", err); 1171 __bnx2x_iov_free_vfdb(bp); 1172 return err; 1173 } 1174 1175 void bnx2x_iov_remove_one(struct bnx2x *bp) 1176 { 1177 /* if SRIOV is not enabled there's nothing to do */ 1178 if (!IS_SRIOV(bp)) 1179 return; 1180 1181 /* free vf database */ 1182 __bnx2x_iov_free_vfdb(bp); 1183 } 1184 1185 void bnx2x_iov_free_mem(struct bnx2x *bp) 1186 { 1187 int i; 1188 1189 if (!IS_SRIOV(bp)) 1190 return; 1191 1192 /* free vfs hw contexts */ 1193 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 1194 struct hw_dma *cxt = &bp->vfdb->context[i]; 1195 BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size); 1196 } 1197 1198 BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr, 1199 BP_VFDB(bp)->sp_dma.mapping, 1200 BP_VFDB(bp)->sp_dma.size); 1201 1202 BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr, 1203 BP_VF_MBX_DMA(bp)->mapping, 1204 BP_VF_MBX_DMA(bp)->size); 1205 } 1206 1207 int bnx2x_iov_alloc_mem(struct bnx2x *bp) 1208 { 1209 size_t tot_size; 1210 int i, rc = 0; 1211 1212 if (!IS_SRIOV(bp)) 1213 return rc; 1214 1215 /* allocate vfs hw contexts */ 1216 tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) * 1217 BNX2X_CIDS_PER_VF * sizeof(union cdu_context); 1218 1219 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 1220 struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i); 1221 cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ); 1222 1223 if (cxt->size) { 1224 BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size); 1225 } else { 1226 cxt->addr = NULL; 1227 cxt->mapping = 0; 1228 } 1229 tot_size -= cxt->size; 1230 } 1231 1232 /* allocate vfs ramrods dma memory - client_init and set_mac */ 1233 tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp); 1234 BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping, 1235 tot_size); 1236 BP_VFDB(bp)->sp_dma.size = tot_size; 1237 1238 /* allocate mailboxes */ 1239 tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; 1240 BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping, 1241 tot_size); 1242 BP_VF_MBX_DMA(bp)->size = tot_size; 1243 1244 return 0; 1245 1246 alloc_mem_err: 1247 return -ENOMEM; 1248 } 1249 1250 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, 1251 struct bnx2x_vf_queue *q) 1252 { 1253 u8 cl_id = vfq_cl_id(vf, q); 1254 u8 func_id = FW_VF_HANDLE(vf->abs_vfid); 1255 unsigned long q_type = 0; 1256 1257 set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); 1258 set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); 1259 1260 /* Queue State object */ 1261 bnx2x_init_queue_obj(bp, &q->sp_obj, 1262 cl_id, &q->cid, 1, func_id, 1263 bnx2x_vf_sp(bp, vf, q_data), 1264 bnx2x_vf_sp_map(bp, vf, q_data), 1265 q_type); 1266 1267 DP(BNX2X_MSG_IOV, 1268 "initialized vf %d's queue object. func id set to %d\n", 1269 vf->abs_vfid, q->sp_obj.func_id); 1270 1271 /* mac/vlan objects are per queue, but only those 1272 * that belong to the leading queue are initialized 1273 */ 1274 if (vfq_is_leading(q)) { 1275 /* mac */ 1276 bnx2x_init_mac_obj(bp, &q->mac_obj, 1277 cl_id, q->cid, func_id, 1278 bnx2x_vf_sp(bp, vf, mac_rdata), 1279 bnx2x_vf_sp_map(bp, vf, mac_rdata), 1280 BNX2X_FILTER_MAC_PENDING, 1281 &vf->filter_state, 1282 BNX2X_OBJ_TYPE_RX_TX, 1283 &bp->macs_pool); 1284 /* vlan */ 1285 bnx2x_init_vlan_obj(bp, &q->vlan_obj, 1286 cl_id, q->cid, func_id, 1287 bnx2x_vf_sp(bp, vf, vlan_rdata), 1288 bnx2x_vf_sp_map(bp, vf, vlan_rdata), 1289 BNX2X_FILTER_VLAN_PENDING, 1290 &vf->filter_state, 1291 BNX2X_OBJ_TYPE_RX_TX, 1292 &bp->vlans_pool); 1293 1294 /* mcast */ 1295 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id, 1296 q->cid, func_id, func_id, 1297 bnx2x_vf_sp(bp, vf, mcast_rdata), 1298 bnx2x_vf_sp_map(bp, vf, mcast_rdata), 1299 BNX2X_FILTER_MCAST_PENDING, 1300 &vf->filter_state, 1301 BNX2X_OBJ_TYPE_RX_TX); 1302 1303 vf->leading_rss = cl_id; 1304 } 1305 } 1306 1307 /* called by bnx2x_nic_load */ 1308 int bnx2x_iov_nic_init(struct bnx2x *bp) 1309 { 1310 int vfid, qcount, i; 1311 1312 if (!IS_SRIOV(bp)) { 1313 DP(BNX2X_MSG_IOV, "vfdb was not allocated\n"); 1314 return 0; 1315 } 1316 1317 DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn); 1318 1319 /* initialize vf database */ 1320 for_each_vf(bp, vfid) { 1321 struct bnx2x_virtf *vf = BP_VF(bp, vfid); 1322 1323 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) * 1324 BNX2X_CIDS_PER_VF; 1325 1326 union cdu_context *base_cxt = (union cdu_context *) 1327 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + 1328 (base_vf_cid & (ILT_PAGE_CIDS-1)); 1329 1330 DP(BNX2X_MSG_IOV, 1331 "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n", 1332 vf->abs_vfid, vf_sb_count(vf), base_vf_cid, 1333 BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt); 1334 1335 /* init statically provisioned resources */ 1336 bnx2x_iov_static_resc(bp, &vf->alloc_resc); 1337 1338 /* queues are initialized during VF-ACQUIRE */ 1339 1340 /* reserve the vf vlan credit */ 1341 bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf)); 1342 1343 vf->filter_state = 0; 1344 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); 1345 1346 /* init mcast object - This object will be re-initialized 1347 * during VF-ACQUIRE with the proper cl_id and cid. 1348 * It needs to be initialized here so that it can be safely 1349 * handled by a subsequent FLR flow. 1350 */ 1351 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF, 1352 0xFF, 0xFF, 0xFF, 1353 bnx2x_vf_sp(bp, vf, mcast_rdata), 1354 bnx2x_vf_sp_map(bp, vf, mcast_rdata), 1355 BNX2X_FILTER_MCAST_PENDING, 1356 &vf->filter_state, 1357 BNX2X_OBJ_TYPE_RX_TX); 1358 1359 /* set the mailbox message addresses */ 1360 BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *) 1361 (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid * 1362 MBX_MSG_ALIGNED_SIZE); 1363 1364 BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping + 1365 vfid * MBX_MSG_ALIGNED_SIZE; 1366 1367 /* Enable vf mailbox */ 1368 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); 1369 } 1370 1371 /* Final VF init */ 1372 qcount = 0; 1373 for_each_vf(bp, i) { 1374 struct bnx2x_virtf *vf = BP_VF(bp, i); 1375 1376 /* fill in the BDF and bars */ 1377 vf->bus = bnx2x_vf_bus(bp, i); 1378 vf->devfn = bnx2x_vf_devfn(bp, i); 1379 bnx2x_vf_set_bars(bp, vf); 1380 1381 DP(BNX2X_MSG_IOV, 1382 "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n", 1383 vf->abs_vfid, vf->bus, vf->devfn, 1384 (unsigned)vf->bars[0].bar, vf->bars[0].size, 1385 (unsigned)vf->bars[1].bar, vf->bars[1].size, 1386 (unsigned)vf->bars[2].bar, vf->bars[2].size); 1387 1388 /* set local queue arrays */ 1389 vf->vfqs = &bp->vfdb->vfqs[qcount]; 1390 qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs); 1391 } 1392 1393 return 0; 1394 } 1395 1396 /* called by bnx2x_init_hw_func, returns the next ilt line */ 1397 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) 1398 { 1399 int i; 1400 struct bnx2x_ilt *ilt = BP_ILT(bp); 1401 1402 if (!IS_SRIOV(bp)) 1403 return line; 1404 1405 /* set vfs ilt lines */ 1406 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 1407 struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i); 1408 1409 ilt->lines[line+i].page = hw_cxt->addr; 1410 ilt->lines[line+i].page_mapping = hw_cxt->mapping; 1411 ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */ 1412 } 1413 return line + i; 1414 } 1415 1416 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid) 1417 { 1418 return ((cid >= BNX2X_FIRST_VF_CID) && 1419 ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS)); 1420 } 1421 1422 static 1423 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, 1424 struct bnx2x_vf_queue *vfq, 1425 union event_ring_elem *elem) 1426 { 1427 unsigned long ramrod_flags = 0; 1428 int rc = 0; 1429 1430 /* Always push next commands out, don't wait here */ 1431 set_bit(RAMROD_CONT, &ramrod_flags); 1432 1433 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { 1434 case BNX2X_FILTER_MAC_PENDING: 1435 rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, 1436 &ramrod_flags); 1437 break; 1438 case BNX2X_FILTER_VLAN_PENDING: 1439 rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem, 1440 &ramrod_flags); 1441 break; 1442 default: 1443 BNX2X_ERR("Unsupported classification command: %d\n", 1444 elem->message.data.eth_event.echo); 1445 return; 1446 } 1447 if (rc < 0) 1448 BNX2X_ERR("Failed to schedule new commands: %d\n", rc); 1449 else if (rc > 0) 1450 DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n"); 1451 } 1452 1453 static 1454 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp, 1455 struct bnx2x_virtf *vf) 1456 { 1457 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 1458 int rc; 1459 1460 rparam.mcast_obj = &vf->mcast_obj; 1461 vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw); 1462 1463 /* If there are pending mcast commands - send them */ 1464 if (vf->mcast_obj.check_pending(&vf->mcast_obj)) { 1465 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 1466 if (rc < 0) 1467 BNX2X_ERR("Failed to send pending mcast commands: %d\n", 1468 rc); 1469 } 1470 } 1471 1472 static 1473 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, 1474 struct bnx2x_virtf *vf) 1475 { 1476 smp_mb__before_clear_bit(); 1477 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); 1478 smp_mb__after_clear_bit(); 1479 } 1480 1481 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) 1482 { 1483 struct bnx2x_virtf *vf; 1484 int qidx = 0, abs_vfid; 1485 u8 opcode; 1486 u16 cid = 0xffff; 1487 1488 if (!IS_SRIOV(bp)) 1489 return 1; 1490 1491 /* first get the cid - the only events we handle here are cfc-delete 1492 * and set-mac completion 1493 */ 1494 opcode = elem->message.opcode; 1495 1496 switch (opcode) { 1497 case EVENT_RING_OPCODE_CFC_DEL: 1498 cid = SW_CID((__force __le32) 1499 elem->message.data.cfc_del_event.cid); 1500 DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid); 1501 break; 1502 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 1503 case EVENT_RING_OPCODE_MULTICAST_RULES: 1504 case EVENT_RING_OPCODE_FILTERS_RULES: 1505 cid = (elem->message.data.eth_event.echo & 1506 BNX2X_SWCID_MASK); 1507 DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); 1508 break; 1509 case EVENT_RING_OPCODE_VF_FLR: 1510 abs_vfid = elem->message.data.vf_flr_event.vf_id; 1511 DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n", 1512 abs_vfid); 1513 goto get_vf; 1514 case EVENT_RING_OPCODE_MALICIOUS_VF: 1515 abs_vfid = elem->message.data.malicious_vf_event.vf_id; 1516 DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n", 1517 abs_vfid); 1518 goto get_vf; 1519 default: 1520 return 1; 1521 } 1522 1523 /* check if the cid is the VF range */ 1524 if (!bnx2x_iov_is_vf_cid(bp, cid)) { 1525 DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid); 1526 return 1; 1527 } 1528 1529 /* extract vf and rxq index from vf_cid - relies on the following: 1530 * 1. vfid on cid reflects the true abs_vfid 1531 * 2. the max number of VFs (per path) is 64 1532 */ 1533 qidx = cid & ((1 << BNX2X_VF_CID_WND)-1); 1534 abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 1535 get_vf: 1536 vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 1537 1538 if (!vf) { 1539 BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n", 1540 cid, abs_vfid); 1541 return 0; 1542 } 1543 1544 switch (opcode) { 1545 case EVENT_RING_OPCODE_CFC_DEL: 1546 DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n", 1547 vf->abs_vfid, qidx); 1548 vfq_get(vf, qidx)->sp_obj.complete_cmd(bp, 1549 &vfq_get(vf, 1550 qidx)->sp_obj, 1551 BNX2X_Q_CMD_CFC_DEL); 1552 break; 1553 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 1554 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n", 1555 vf->abs_vfid, qidx); 1556 bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem); 1557 break; 1558 case EVENT_RING_OPCODE_MULTICAST_RULES: 1559 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n", 1560 vf->abs_vfid, qidx); 1561 bnx2x_vf_handle_mcast_eqe(bp, vf); 1562 break; 1563 case EVENT_RING_OPCODE_FILTERS_RULES: 1564 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n", 1565 vf->abs_vfid, qidx); 1566 bnx2x_vf_handle_filters_eqe(bp, vf); 1567 break; 1568 case EVENT_RING_OPCODE_VF_FLR: 1569 DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n", 1570 vf->abs_vfid); 1571 /* Do nothing for now */ 1572 break; 1573 case EVENT_RING_OPCODE_MALICIOUS_VF: 1574 DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n", 1575 vf->abs_vfid); 1576 /* Do nothing for now */ 1577 break; 1578 } 1579 /* SRIOV: reschedule any 'in_progress' operations */ 1580 bnx2x_iov_sp_event(bp, cid, false); 1581 1582 return 0; 1583 } 1584 1585 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) 1586 { 1587 /* extract the vf from vf_cid - relies on the following: 1588 * 1. vfid on cid reflects the true abs_vfid 1589 * 2. the max number of VFs (per path) is 64 1590 */ 1591 int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 1592 return bnx2x_vf_by_abs_fid(bp, abs_vfid); 1593 } 1594 1595 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, 1596 struct bnx2x_queue_sp_obj **q_obj) 1597 { 1598 struct bnx2x_virtf *vf; 1599 1600 if (!IS_SRIOV(bp)) 1601 return; 1602 1603 vf = bnx2x_vf_by_cid(bp, vf_cid); 1604 1605 if (vf) { 1606 /* extract queue index from vf_cid - relies on the following: 1607 * 1. vfid on cid reflects the true abs_vfid 1608 * 2. the max number of VFs (per path) is 64 1609 */ 1610 int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1); 1611 *q_obj = &bnx2x_vfq(vf, q_index, sp_obj); 1612 } else { 1613 BNX2X_ERR("No vf matching cid %d\n", vf_cid); 1614 } 1615 } 1616 1617 void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work) 1618 { 1619 struct bnx2x_virtf *vf; 1620 1621 /* check if the cid is the VF range */ 1622 if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid)) 1623 return; 1624 1625 vf = bnx2x_vf_by_cid(bp, vf_cid); 1626 if (vf) { 1627 /* set in_progress flag */ 1628 atomic_set(&vf->op_in_progress, 1); 1629 if (queue_work) 1630 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); 1631 } 1632 } 1633 1634 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) 1635 { 1636 int i; 1637 int first_queue_query_index, num_queues_req; 1638 dma_addr_t cur_data_offset; 1639 struct stats_query_entry *cur_query_entry; 1640 u8 stats_count = 0; 1641 bool is_fcoe = false; 1642 1643 if (!IS_SRIOV(bp)) 1644 return; 1645 1646 if (!NO_FCOE(bp)) 1647 is_fcoe = true; 1648 1649 /* fcoe adds one global request and one queue request */ 1650 num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe; 1651 first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1652 (is_fcoe ? 0 : 1); 1653 1654 DP(BNX2X_MSG_IOV, 1655 "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n", 1656 BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, 1657 first_queue_query_index + num_queues_req); 1658 1659 cur_data_offset = bp->fw_stats_data_mapping + 1660 offsetof(struct bnx2x_fw_stats_data, queue_stats) + 1661 num_queues_req * sizeof(struct per_queue_stats); 1662 1663 cur_query_entry = &bp->fw_stats_req-> 1664 query[first_queue_query_index + num_queues_req]; 1665 1666 for_each_vf(bp, i) { 1667 int j; 1668 struct bnx2x_virtf *vf = BP_VF(bp, i); 1669 1670 if (vf->state != VF_ENABLED) { 1671 DP(BNX2X_MSG_IOV, 1672 "vf %d not enabled so no stats for it\n", 1673 vf->abs_vfid); 1674 continue; 1675 } 1676 1677 DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid); 1678 for_each_vfq(vf, j) { 1679 struct bnx2x_vf_queue *rxq = vfq_get(vf, j); 1680 1681 /* collect stats fro active queues only */ 1682 if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) == 1683 BNX2X_Q_LOGICAL_STATE_STOPPED) 1684 continue; 1685 1686 /* create stats query entry for this queue */ 1687 cur_query_entry->kind = STATS_TYPE_QUEUE; 1688 cur_query_entry->index = vfq_cl_id(vf, rxq); 1689 cur_query_entry->funcID = 1690 cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid)); 1691 cur_query_entry->address.hi = 1692 cpu_to_le32(U64_HI(vf->fw_stat_map)); 1693 cur_query_entry->address.lo = 1694 cpu_to_le32(U64_LO(vf->fw_stat_map)); 1695 DP(BNX2X_MSG_IOV, 1696 "added address %x %x for vf %d queue %d client %d\n", 1697 cur_query_entry->address.hi, 1698 cur_query_entry->address.lo, cur_query_entry->funcID, 1699 j, cur_query_entry->index); 1700 cur_query_entry++; 1701 cur_data_offset += sizeof(struct per_queue_stats); 1702 stats_count++; 1703 } 1704 } 1705 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; 1706 } 1707 1708 void bnx2x_iov_sp_task(struct bnx2x *bp) 1709 { 1710 int i; 1711 1712 if (!IS_SRIOV(bp)) 1713 return; 1714 /* Iterate over all VFs and invoke state transition for VFs with 1715 * 'in-progress' slow-path operations 1716 */ 1717 DP(BNX2X_MSG_IOV, "searching for pending vf operations\n"); 1718 for_each_vf(bp, i) { 1719 struct bnx2x_virtf *vf = BP_VF(bp, i); 1720 1721 if (!list_empty(&vf->op_list_head) && 1722 atomic_read(&vf->op_in_progress)) { 1723 DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i); 1724 bnx2x_vfop_cur(bp, vf)->transition(bp, vf); 1725 } 1726 } 1727 } 1728 1729 static inline 1730 struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id) 1731 { 1732 int i; 1733 struct bnx2x_virtf *vf = NULL; 1734 1735 for_each_vf(bp, i) { 1736 vf = BP_VF(bp, i); 1737 if (stat_id >= vf->igu_base_id && 1738 stat_id < vf->igu_base_id + vf_sb_count(vf)) 1739 break; 1740 } 1741 return vf; 1742 } 1743 1744 /* VF API helpers */ 1745 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid, 1746 u8 enable) 1747 { 1748 u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4; 1749 u32 val = enable ? (abs_vfid | (1 << 6)) : 0; 1750 1751 REG_WR(bp, reg, val); 1752 } 1753 1754 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf) 1755 { 1756 return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF), 1757 BNX2X_VF_MAX_QUEUES); 1758 } 1759 1760 static 1761 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf, 1762 struct vf_pf_resc_request *req_resc) 1763 { 1764 u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); 1765 u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); 1766 1767 return ((req_resc->num_rxqs <= rxq_cnt) && 1768 (req_resc->num_txqs <= txq_cnt) && 1769 (req_resc->num_sbs <= vf_sb_count(vf)) && 1770 (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) && 1771 (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf))); 1772 } 1773 1774 /* CORE VF API */ 1775 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, 1776 struct vf_pf_resc_request *resc) 1777 { 1778 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) * 1779 BNX2X_CIDS_PER_VF; 1780 1781 union cdu_context *base_cxt = (union cdu_context *) 1782 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + 1783 (base_vf_cid & (ILT_PAGE_CIDS-1)); 1784 int i; 1785 1786 /* if state is 'acquired' the VF was not released or FLR'd, in 1787 * this case the returned resources match the acquired already 1788 * acquired resources. Verify that the requested numbers do 1789 * not exceed the already acquired numbers. 1790 */ 1791 if (vf->state == VF_ACQUIRED) { 1792 DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n", 1793 vf->abs_vfid); 1794 1795 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { 1796 BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= then previously acquired numbers\n", 1797 vf->abs_vfid); 1798 return -EINVAL; 1799 } 1800 return 0; 1801 } 1802 1803 /* Otherwise vf state must be 'free' or 'reset' */ 1804 if (vf->state != VF_FREE && vf->state != VF_RESET) { 1805 BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n", 1806 vf->abs_vfid, vf->state); 1807 return -EINVAL; 1808 } 1809 1810 /* static allocation: 1811 * the global maximum number are fixed per VF. fail the request if 1812 * requested number exceed these globals 1813 */ 1814 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { 1815 DP(BNX2X_MSG_IOV, 1816 "cannot fulfill vf resource request. Placing maximal available values in response\n"); 1817 /* set the max resource in the vf */ 1818 return -ENOMEM; 1819 } 1820 1821 /* Set resources counters - 0 request means max available */ 1822 vf_sb_count(vf) = resc->num_sbs; 1823 vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf); 1824 vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf); 1825 if (resc->num_mac_filters) 1826 vf_mac_rules_cnt(vf) = resc->num_mac_filters; 1827 if (resc->num_vlan_filters) 1828 vf_vlan_rules_cnt(vf) = resc->num_vlan_filters; 1829 1830 DP(BNX2X_MSG_IOV, 1831 "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n", 1832 vf_sb_count(vf), vf_rxq_count(vf), 1833 vf_txq_count(vf), vf_mac_rules_cnt(vf), 1834 vf_vlan_rules_cnt(vf)); 1835 1836 /* Initialize the queues */ 1837 if (!vf->vfqs) { 1838 DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n"); 1839 return -EINVAL; 1840 } 1841 1842 for_each_vfq(vf, i) { 1843 struct bnx2x_vf_queue *q = vfq_get(vf, i); 1844 1845 if (!q) { 1846 DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i); 1847 return -EINVAL; 1848 } 1849 1850 q->index = i; 1851 q->cxt = &((base_cxt + i)->eth); 1852 q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i; 1853 1854 DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n", 1855 vf->abs_vfid, i, q->index, q->cid, q->cxt); 1856 1857 /* init SP objects */ 1858 bnx2x_vfq_init(bp, vf, q); 1859 } 1860 vf->state = VF_ACQUIRED; 1861 return 0; 1862 } 1863 1864 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) 1865 { 1866 struct bnx2x_func_init_params func_init = {0}; 1867 u16 flags = 0; 1868 int i; 1869 1870 /* the sb resources are initialized at this point, do the 1871 * FW/HW initializations 1872 */ 1873 for_each_vf_sb(vf, i) 1874 bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true, 1875 vf_igu_sb(vf, i), vf_igu_sb(vf, i)); 1876 1877 /* Sanity checks */ 1878 if (vf->state != VF_ACQUIRED) { 1879 DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n", 1880 vf->abs_vfid, vf->state); 1881 return -EINVAL; 1882 } 1883 /* FLR cleanup epilogue */ 1884 if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid)) 1885 return -EBUSY; 1886 1887 /* reset IGU VF statistics: MSIX */ 1888 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0); 1889 1890 /* vf init */ 1891 if (vf->cfg_flags & VF_CFG_STATS) 1892 flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ); 1893 1894 if (vf->cfg_flags & VF_CFG_TPA) 1895 flags |= FUNC_FLG_TPA; 1896 1897 if (is_vf_multi(vf)) 1898 flags |= FUNC_FLG_RSS; 1899 1900 /* function setup */ 1901 func_init.func_flgs = flags; 1902 func_init.pf_id = BP_FUNC(bp); 1903 func_init.func_id = FW_VF_HANDLE(vf->abs_vfid); 1904 func_init.fw_stat_map = vf->fw_stat_map; 1905 func_init.spq_map = vf->spq_map; 1906 func_init.spq_prod = 0; 1907 bnx2x_func_init(bp, &func_init); 1908 1909 /* Enable the vf */ 1910 bnx2x_vf_enable_access(bp, vf->abs_vfid); 1911 bnx2x_vf_enable_traffic(bp, vf); 1912 1913 /* queue protection table */ 1914 for_each_vfq(vf, i) 1915 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, 1916 vfq_qzone_id(vf, vfq_get(vf, i)), true); 1917 1918 vf->state = VF_ENABLED; 1919 1920 return 0; 1921 } 1922 1923 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 1924 enum channel_tlvs tlv) 1925 { 1926 /* lock the channel */ 1927 mutex_lock(&vf->op_mutex); 1928 1929 /* record the locking op */ 1930 vf->op_current = tlv; 1931 1932 /* log the lock */ 1933 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n", 1934 vf->abs_vfid, tlv); 1935 } 1936 1937 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 1938 enum channel_tlvs expected_tlv) 1939 { 1940 WARN(expected_tlv != vf->op_current, 1941 "lock mismatch: expected %d found %d", expected_tlv, 1942 vf->op_current); 1943 1944 /* lock the channel */ 1945 mutex_unlock(&vf->op_mutex); 1946 1947 /* log the unlock */ 1948 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n", 1949 vf->abs_vfid, vf->op_current); 1950 1951 /* record the locking op */ 1952 vf->op_current = CHANNEL_TLV_NONE; 1953 } 1954