/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>

/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}

static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
				u8 update)
{
	/* acking a VF sb through the PF - use the GRC */
	u32 ctl;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
	mmiowb();
	barrier();

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();
}

static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      bool print_err)
{
	if (!bnx2x_leading_vfq(vf, sp_initialized)) {
		if (print_err)
			BNX2X_ERR("Slowpath objects not yet initialized!\n");
		else
			DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
		return false;
	}
	return true;
}

/* VFOP operations states */
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	DP(BNX2X_MSG_IOV,
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->tx.sb_cq_index,
	   init_params->tx.hc_rate,
	   setup_params->flags,
	   setup_params->txq_params.traffic_type);
}

void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->rx.sb_cq_index,
	   init_params->rx.hc_rate,
	   setup_params->gen_params.mtu,
	   rxq_params->buf_sz,
	   rxq_params->sge_buf_sz,
	   rxq_params->max_sges_pkt,
	   rxq_params->tpa_agg_sz,
	   setup_params->flags,
	   rxq_params->drop_flags,
	   rxq_params->cache_line_log);
}

void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vf_queue_construct_params *p,
			   unsigned long q_type)
{
	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

	/* INIT */

	/* Enable host coalescing in the transition to INIT state */
	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

	/* FW SB ID */
	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

	/* context */
	init_p->cxts[0] = q->cxt;

	/* SETUP */

	/* Setup-op general parameters */
	setup_p->gen_params.spcl_id = vf->sp_cl_id;
	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);

	/* Setup-op pause params:
	 * Nothing to do, the pause thresholds are set by default to 0 which
	 * effectively turns off the feature for this queue. We don't want
	 * one queue (VF) to interfere with another queue (another VF)
	 */
	if (vf->cfg_flags & VF_CFG_FW_FC)
		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
			  vf->abs_vfid);
	/* Setup-op flags:
	 * collect statistics, zero statistics, local-switching, security,
	 * OV for Flex10, RSS and MCAST for leading
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

	/* for VFs, enable tx switching, bd coherency, and mac address
	 * anti-spoofing
	 */
	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

	/* Setup-op rx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
	}

	/* Setup-op tx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	}
}

static int bnx2x_vf_queue_create(struct bnx2x *bp,
				 struct bnx2x_virtf *vf, int qid,
				 struct bnx2x_vf_queue_construct_params *qctor)
{
	struct bnx2x_queue_state_params *q_params;
	int rc = 0;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* Prepare ramrod information */
	q_params = &qctor->qstate;
	q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
	set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);

	if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
	    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
		goto out;
	}

	/* Run Queue 'construction' ramrods */
	q_params->cmd = BNX2X_Q_CMD_INIT;
	rc = bnx2x_queue_state_change(bp, q_params);
	if (rc)
		goto out;

	memcpy(&q_params->params.setup, &qctor->prep_qsetup,
	       sizeof(struct bnx2x_queue_setup_params));
	q_params->cmd = BNX2X_Q_CMD_SETUP;
	rc = bnx2x_queue_state_change(bp, q_params);
	if (rc)
		goto out;

	/* enable interrupts */
	bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
			    USTORM_ID, 0, IGU_INT_ENABLE, 0);
out:
	return rc;
}

static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  int qid)
{
	enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
				       BNX2X_Q_CMD_TERMINATE,
				       BNX2X_Q_CMD_CFC_DEL};
	struct bnx2x_queue_state_params q_params;
	int rc, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Prepare ramrod information */
	memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
	q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
	set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
	    BNX2X_Q_LOGICAL_STATE_STOPPED) {
		DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
		goto out;
	}

	/* Run Queue 'destruction' ramrods */
	for (i = 0; i < ARRAY_SIZE(cmds); i++) {
		q_params.cmd = cmds[i];
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
			return rc;
		}
	}
out:
	/* Clean Context */
	if (bnx2x_vfq(vf, qid, cxt)) {
		bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
		bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
	}

	return 0;
}

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
	if (vf) {
		/* the first igu entry belonging to VFs of this PF */
		if (!BP_VFDB(bp)->first_vf_igu_entry)
			BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;

		/* the first igu entry belonging to this VF */
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;

		++vf_sb_count(vf);
		++vf->sb_count;
	}
	BP_VFDB(bp)->vf_sbs_pool++;
}

static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
					struct bnx2x_vlan_mac_obj *obj,
					atomic_t *counter)
{
	struct list_head *pos;
	int read_lock;
	int cnt = 0;

	read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
	if (read_lock)
		DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");

	list_for_each(pos, &obj->head)
		cnt++;

	if (!read_lock)
		bnx2x_vlan_mac_h_read_unlock(bp, obj);

	atomic_set(counter, cnt);
}

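/* Note: the helpers below drive the per-queue vlan_mac slowpath objects -
 * bnx2x_vf_vlan_mac_clear() removes every configured entry of one type
 * (MACs or VLANs) while bnx2x_vf_mac_vlan_config() adds or removes a single
 * filter; both support a driver-only mode used during FLR cleanup.
 */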
"MACs" : "VLANs"); 362 363 /* Prepare ramrod params */ 364 memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); 365 if (mac) { 366 set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); 367 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); 368 } else { 369 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 370 &ramrod.user_req.vlan_mac_flags); 371 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); 372 } 373 ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL; 374 375 set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); 376 if (drv_only) 377 set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); 378 else 379 set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); 380 381 /* Start deleting */ 382 rc = ramrod.vlan_mac_obj->delete_all(bp, 383 ramrod.vlan_mac_obj, 384 &ramrod.user_req.vlan_mac_flags, 385 &ramrod.ramrod_flags); 386 if (rc) { 387 BNX2X_ERR("Failed to delete all %s\n", 388 mac ? "MACs" : "VLANs"); 389 return rc; 390 } 391 392 /* Clear the vlan counters */ 393 if (!mac) 394 atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0); 395 396 return 0; 397 } 398 399 static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, 400 struct bnx2x_virtf *vf, int qid, 401 struct bnx2x_vf_mac_vlan_filter *filter, 402 bool drv_only) 403 { 404 struct bnx2x_vlan_mac_ramrod_params ramrod; 405 int rc; 406 407 DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n", 408 vf->abs_vfid, filter->add ? "Adding" : "Deleting", 409 filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN"); 410 411 /* Prepare ramrod params */ 412 memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); 413 if (filter->type == BNX2X_VF_FILTER_VLAN) { 414 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 415 &ramrod.user_req.vlan_mac_flags); 416 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); 417 ramrod.user_req.u.vlan.vlan = filter->vid; 418 } else { 419 set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); 420 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); 421 memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN); 422 } 423 ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD : 424 BNX2X_VLAN_MAC_DEL; 425 426 /* Verify there are available vlan credits */ 427 if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN && 428 (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >= 429 vf_vlan_rules_cnt(vf))) { 430 BNX2X_ERR("No credits for vlan\n"); 431 return -ENOMEM; 432 } 433 434 set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); 435 if (drv_only) 436 set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); 437 else 438 set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); 439 440 /* Add/Remove the filter */ 441 rc = bnx2x_config_vlan_mac(bp, &ramrod); 442 if (rc && rc != -EEXIST) { 443 BNX2X_ERR("Failed to %s %s\n", 444 filter->add ? "add" : "delete", 445 filter->type == BNX2X_VF_FILTER_MAC ? 
"MAC" : 446 "VLAN"); 447 return rc; 448 } 449 450 /* Update the vlan counters */ 451 if (filter->type == BNX2X_VF_FILTER_VLAN) 452 bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj, 453 &bnx2x_vfq(vf, qid, vlan_count)); 454 455 return 0; 456 } 457 458 int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, 459 struct bnx2x_vf_mac_vlan_filters *filters, 460 int qid, bool drv_only) 461 { 462 int rc = 0, i; 463 464 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 465 466 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) 467 return -EINVAL; 468 469 /* Prepare ramrod params */ 470 for (i = 0; i < filters->count; i++) { 471 rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, 472 &filters->filters[i], drv_only); 473 if (rc) 474 break; 475 } 476 477 /* Rollback if needed */ 478 if (i != filters->count) { 479 BNX2X_ERR("Managed only %d/%d filters - rolling back\n", 480 i, filters->count + 1); 481 while (--i >= 0) { 482 filters->filters[i].add = !filters->filters[i].add; 483 bnx2x_vf_mac_vlan_config(bp, vf, qid, 484 &filters->filters[i], 485 drv_only); 486 } 487 } 488 489 /* It's our responsibility to free the filters */ 490 kfree(filters); 491 492 return rc; 493 } 494 495 int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, 496 struct bnx2x_vf_queue_construct_params *qctor) 497 { 498 int rc; 499 500 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); 501 502 rc = bnx2x_vf_queue_create(bp, vf, qid, qctor); 503 if (rc) 504 goto op_err; 505 506 /* Configure vlan0 for leading queue */ 507 if (!qid) { 508 struct bnx2x_vf_mac_vlan_filter filter; 509 510 memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter)); 511 filter.type = BNX2X_VF_FILTER_VLAN; 512 filter.add = true; 513 filter.vid = 0; 514 rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false); 515 if (rc) 516 goto op_err; 517 } 518 519 /* Schedule the configuration of any pending vlan filters */ 520 vf->cfg_flags |= VF_CFG_VLAN; 521 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN, 522 BNX2X_MSG_IOV); 523 return 0; 524 op_err: 525 BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); 526 return rc; 527 } 528 529 static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf, 530 int qid) 531 { 532 int rc; 533 534 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); 535 536 /* If needed, clean the filtering data base */ 537 if ((qid == LEADING_IDX) && 538 bnx2x_validate_vf_sp_objs(bp, vf, false)) { 539 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false); 540 if (rc) 541 goto op_err; 542 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true); 543 if (rc) 544 goto op_err; 545 } 546 547 /* Terminate queue */ 548 if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) { 549 struct bnx2x_queue_state_params qstate; 550 551 memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); 552 qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); 553 qstate.q_obj->state = BNX2X_Q_STATE_STOPPED; 554 qstate.cmd = BNX2X_Q_CMD_TERMINATE; 555 set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); 556 rc = bnx2x_queue_state_change(bp, &qstate); 557 if (rc) 558 goto op_err; 559 } 560 561 return 0; 562 op_err: 563 BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); 564 return rc; 565 } 566 567 int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, 568 bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only) 569 { 570 struct bnx2x_mcast_list_elem *mc = NULL; 571 struct bnx2x_mcast_ramrod_params mcast; 572 int rc, i; 573 574 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 575 576 /* Prepare Multicast command */ 577 
	memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
	mcast.mcast_obj = &vf->mcast_obj;
	if (drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
	else
		set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
	if (mc_num) {
		mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem),
			     GFP_KERNEL);
		if (!mc) {
			BNX2X_ERR("Cannot Configure multicasts due to lack of memory\n");
			return -ENOMEM;
		}
	}

	/* clear existing mcasts */
	mcast.mcast_list_len = vf->mcast_list_len;
	vf->mcast_list_len = mc_num;
	rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
	if (rc) {
		BNX2X_ERR("Failed to remove multicasts\n");
		kfree(mc);
		return rc;
	}

	/* update mcast list on the ramrod params */
	if (mc_num) {
		INIT_LIST_HEAD(&mcast.mcast_list);
		for (i = 0; i < mc_num; i++) {
			mc[i].mac = mcasts[i];
			list_add_tail(&mc[i].link,
				      &mcast.mcast_list);
		}

		/* add new mcasts */
		rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
		if (rc)
			BNX2X_ERR("Failed to add multicasts\n");
		kfree(mc);
	}

	return rc;
}

static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
				  struct bnx2x_rx_mode_ramrod_params *ramrod,
				  struct bnx2x_virtf *vf,
				  unsigned long accept_flags)
{
	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);

	memset(ramrod, 0, sizeof(*ramrod));
	ramrod->cid = vfq->cid;
	ramrod->cl_id = vfq_cl_id(vf, vfq);
	ramrod->rx_mode_obj = &bp->rx_mode_obj;
	ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
	ramrod->rx_accept_flags = accept_flags;
	ramrod->tx_accept_flags = accept_flags;
	ramrod->pstate = &vf->filter_state;
	ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	set_bit(RAMROD_RX, &ramrod->ramrod_flags);
	set_bit(RAMROD_TX, &ramrod->ramrod_flags);

	ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
}

int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
		    int qid, unsigned long accept_flags)
{
	struct bnx2x_rx_mode_ramrod_params ramrod;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
	set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
	vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
	return bnx2x_config_rx_mode(bp, &ramrod);
}

int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
{
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* Remove all classification configuration for leading queue */
	if (qid == LEADING_IDX) {
		rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
		if (rc)
			goto op_err;

		/* Remove filtering if feasible */
		if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
						     false, false);
			if (rc)
				goto op_err;
			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
						     false, true);
			if (rc)
				goto op_err;
			rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
			if (rc)
				goto op_err;
		}
	}

	/* Destroy queue */
	rc = bnx2x_vf_queue_destroy(bp, vf, qid);
	if (rc)
		goto op_err;
	return rc;
op_err:
	BNX2X_ERR("vf[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, rc);
	return rc;
}

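/* Note: bnx2x_vf_queue_teardown() above deliberately clears rx-mode and the
 * classification rules (MACs, VLANs, multicasts) of the leading queue before
 * issuing the queue destruction ramrods, since only the leading queue owns
 * the classification slowpath objects.
 */
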
/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}

static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;
	u32 val;

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	if (vf->cfg_flags & VF_CFG_INT_SIMD)
		val |= IGU_VF_CONF_SINGLE_ISR_EN;
	val &= ~IGU_VF_CONF_PARENT_MASK;
	val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	DP(BNX2X_MSG_IOV,
	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
	   vf->abs_vfid, val);

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
				       false /* VF */);

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
				    IGU_INT_DISABLE, 1);
	}
}

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		return false;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}

/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct vf_pf_resc_request *resc = &vf->alloc_resc;
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;

	/* divvy up vlan rules */
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
	resc->num_sbs = vf->sb_count;
}

/* FLR routines: */
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* reset the state variables */
	bnx2x_iov_static_resc(bp, vf);
	vf->state = VF_FREE;
}

static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	/* DQ usage counter */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
					"DQ VF usage counter timed out",
					poll_cnt);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* FW cleanup command - poll for the results */
	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
				   poll_cnt))
		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

	/* verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);
}

static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* the cleanup operations are valid if and only if the VF
	 * was first acquired.
	 */
	for (i = 0; i < vf_rxq_count(vf); i++) {
		rc = bnx2x_vf_queue_flr(bp, vf, i);
		if (rc)
			goto out;
	}

	/* remove multicasts */
	bnx2x_vf_mcast(bp, vf, NULL, 0, true);

	/* dispatch final cleanup and wait for HW queues to flush */
	bnx2x_vf_flr_clnup_hw(bp, vf);

	/* release VF resources */
	bnx2x_vf_free_resc(bp, vf);

	/* re-open the mailbox */
	bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	return;
out:
	BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
		  vf->abs_vfid, i, rc);
}

static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
{
	struct bnx2x_virtf *vf;
	int i;

	for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
		/* VF should be RESET & in FLR cleanup states */
		if (bnx2x_vf(bp, i, state) != VF_RESET ||
		    !bnx2x_vf(bp, i, flr_clnup_stage))
			continue;

		DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
		   i, BNX2X_NR_VIRTFN(bp));

		vf = BP_VF(bp, i);

		/* lock the vf pf channel */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

		/* invoke the VF FLR SM */
		bnx2x_vf_flr(bp, vf);

		/* mark the VF to be ACKED and continue */
		vf->flr_clnup_stage = false;
		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
	}

	/* Acknowledge the handled VFs.
	 * We acknowledge all the VFs for which an FLR was requested, even
	 * those we never opened, since the MCP will interrupt us immediately
	 * again if we only ack some of the bits, resulting in an endless
	 * loop. This can happen for example in KVM where an 'all ones' FLR
	 * request is sometimes given by the hypervisor
	 */
	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
			  bp->vfdb->flrd_vfs[i]);

	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

	/* clear the acked bits - better yet if the MCP implemented
	 * write to clear semantics
	 */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}

void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
	int i;

	/* Read FLR'd VFs */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

	DP(BNX2X_MSG_MCP,
	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);
		u32 reset = 0;

		if (vf->abs_vfid < 32)
			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
		else
			reset = bp->vfdb->flrd_vfs[1] &
				(1 << (vf->abs_vfid - 32));

		if (reset) {
			/* set as reset and ready for cleanup */
			vf->state = VF_RESET;
			vf->flr_clnup_stage = true;

			DP(BNX2X_MSG_IOV,
			   "Initiating Final cleanup for VF %d\n",
			   vf->abs_vfid);
		}
	}

	/* do the FLR cleanup for all marked VFs */
	bnx2x_vf_flr_clnup(bp);
}
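/* Note on the FLR flow: the MCP reports FLR'd VFs through mcp_vf_disabled,
 * bnx2x_vf_handle_flr_event() marks the affected VFs as VF_RESET with
 * flr_clnup_stage set, bnx2x_vf_flr_clnup() then runs the per-VF cleanup
 * under the VF-PF channel lock and finally ACKs the complete bitmask back
 * via drv_ack_vf_disabled so the MCP does not re-interrupt the driver.
 */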

/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflect the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set VFs starting CID. If it's > 0 the preceding CIDs belong to
	 * the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match
	 * the PF doorbell size although the two are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the VF doorbell threshold */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}

void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}

static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}

static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		size /= iov->total;
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}

static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}

static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid, current_pf = 0;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (fid & IGU_FID_ENCODE_IS_PF)
			current_pf = fid & IGU_FID_PF_NUM_MASK;
		else if (current_pf == BP_FUNC(bp))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));
		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		    (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
	DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
}

static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}

static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}

/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify this is a PF */
	if (IS_VF(bp))
		return 0;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
		return 0;
	}

	err = -EIO;
	/* verify ari is enabled */
	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
		return 0;
	}

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
		return 0;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hypervisor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs */
	if (iov->total == 0)
		goto failed;

	iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);

	DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
	   num_vfs_param, iov->nr_virtfn);

	/* allocate the vf array */
	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	bnx2x_get_vf_igu_cam_info(bp);

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kzalloc(
		BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
		GFP_KERNEL);

	DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);

	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Prepare the VFs event synchronization mechanism */
	mutex_init(&bp->vfdb->event_mutex);

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}

void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	int vf_idx;

	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	DP(BNX2X_MSG_IOV, "about to call disable sriov\n");
	pci_disable_sriov(bp->pdev);
	DP(BNX2X_MSG_IOV, "sriov disabled\n");

	/* disable access to all VFs */
	for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
		bnx2x_pretend_func(bp,
				   HW_VF_HANDLE(bp,
						bp->vfdb->sriov.first_vf_in_pf +
						vf_idx));
		DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
		   bp->vfdb->sriov.first_vf_in_pf + vf_idx);
		bnx2x_vf_enable_internal(bp, 0);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
	}

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}

void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];
		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);

	BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
		       BP_VF_BULLETIN_DMA(bp)->mapping,
		       BP_VF_BULLETIN_DMA(bp)->size);
}

int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	size_t tot_size;
	int i, rc = 0;

	if (!IS_SRIOV(bp))
		return rc;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

		if (cxt->size) {
			cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
			if (!cxt->addr)
				goto alloc_mem_err;
		} else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
						   tot_size);
	if (!BP_VFDB(bp)->sp_dma.addr)
		goto alloc_mem_err;
	BP_VFDB(bp)->sp_dma.size = tot_size;

	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
						  tot_size);
	if (!BP_VF_MBX_DMA(bp)->addr)
		goto alloc_mem_err;

	BP_VF_MBX_DMA(bp)->size = tot_size;

	/* allocate local bulletin boards */
	tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
	BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
						       tot_size);
	if (!BP_VF_BULLETIN_DMA(bp)->addr)
		goto alloc_mem_err;

	BP_VF_BULLETIN_DMA(bp)->size = tot_size;

	return 0;

alloc_mem_err:
	return -ENOMEM;
}

static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
	unsigned long q_type = 0;

	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

	/* Queue State object */
	bnx2x_init_queue_obj(bp, &q->sp_obj,
			     cl_id, &q->cid, 1, func_id,
			     bnx2x_vf_sp(bp, vf, q_data),
			     bnx2x_vf_sp_map(bp, vf, q_data),
			     q_type);

	/* sp indication is set only when vlan/mac/etc. are initialized */
	q->sp_initialized = false;

	DP(BNX2X_MSG_IOV,
	   "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
	   vf->abs_vfid, q->sp_obj.func_id, q->cid);
}

/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
	int vfid;

	if (!IS_SRIOV(bp)) {
		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
		return 0;
	}

	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);

	/* let FLR complete ... */
	msleep(100);

	/* initialize vf database */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
			BNX2X_CIDS_PER_VF;

		union cdu_context *base_cxt = (union cdu_context *)
			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
			(base_vf_cid & (ILT_PAGE_CIDS-1));

		DP(BNX2X_MSG_IOV,
		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);

		/* init statically provisioned resources */
		bnx2x_iov_static_resc(bp, vf);

		/* queues are initialized during VF-ACQUIRE */

		/* reserve the vf vlan credit */
		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));

		vf->filter_state = 0;
		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);

		/* init mcast object - This object will be re-initialized
		 * during VF-ACQUIRE with the proper cl_id and cid.
		 * It needs to be initialized here so that it can be safely
		 * handled by a subsequent FLR flow.
		 */
		vf->mcast_list_len = 0;
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
				     0xFF, 0xFF, 0xFF,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		/* set the mailbox message addresses */
		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
			 MBX_MSG_ALIGNED_SIZE);

		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
			vfid * MBX_MSG_ALIGNED_SIZE;

		/* Enable vf mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	}

	/* Final VF init */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		/* fill in the BDF and bars */
		vf->bus = bnx2x_vf_bus(bp, vfid);
		vf->devfn = bnx2x_vf_devfn(bp, vfid);
		bnx2x_vf_set_bars(bp, vf);

		DP(BNX2X_MSG_IOV,
		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
		   vf->abs_vfid, vf->bus, vf->devfn,
		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
	}

	return 0;
}

/* called by bnx2x_chip_cleanup */
int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return 0;

	/* release all the VFs */
	for_each_vf(bp, i)
		bnx2x_vf_release(bp, BP_VF(bp, i));

	return 0;
}

/* called by bnx2x_init_hw_func, returns the next ilt line */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
{
	int i;
	struct bnx2x_ilt *ilt = BP_ILT(bp);

	if (!IS_SRIOV(bp))
		return line;

	/* set vfs ilt lines */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);

		ilt->lines[line+i].page = hw_cxt->addr;
		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
	}
	return line + i;
}

static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
{
	return ((cid >= BNX2X_FIRST_VF_CID) &&
		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
}

static
void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
					struct bnx2x_vf_queue *vfq,
					union event_ring_elem *elem)
{
	unsigned long ramrod_flags = 0;
	int rc = 0;

	/* Always push next commands out, don't wait here */
	set_bit(RAMROD_CONT, &ramrod_flags);

	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
	case BNX2X_FILTER_MAC_PENDING:
		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
					   &ramrod_flags);
		break;
	case BNX2X_FILTER_VLAN_PENDING:
		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
					    &ramrod_flags);
		break;
	default:
		BNX2X_ERR("Unsupported classification command: %d\n",
			  elem->message.data.eth_event.echo);
		return;
	}
	if (rc < 0)
		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
	else if (rc > 0)
		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
}

static
void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
			       struct bnx2x_virtf *vf)
{
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc;

	rparam.mcast_obj = &vf->mcast_obj;
	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
		if (rc < 0)
			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
				  rc);
	}
}

static
void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
				 struct bnx2x_virtf *vf)
{
	smp_mb__before_clear_bit();
	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	smp_mb__after_clear_bit();
}

static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
					   struct bnx2x_virtf *vf)
{
	vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);
}

int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
{
	struct bnx2x_virtf *vf;
	int qidx = 0, abs_vfid;
	u8 opcode;
	u16 cid = 0xffff;

	if (!IS_SRIOV(bp))
		return 1;

	/* first get the cid - the only events we handle here are cfc-delete
	 * and set-mac completion
	 */
	opcode = elem->message.opcode;

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		cid = SW_CID((__force __le32)
			     elem->message.data.cfc_del_event.cid);
		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
	case EVENT_RING_OPCODE_MULTICAST_RULES:
	case EVENT_RING_OPCODE_FILTERS_RULES:
	case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
		cid = (elem->message.data.eth_event.echo &
		       BNX2X_SWCID_MASK);
		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		abs_vfid = elem->message.data.vf_flr_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
err_id=0x%x\n", 1677 abs_vfid, 1678 elem->message.data.malicious_vf_event.err_id); 1679 goto get_vf; 1680 default: 1681 return 1; 1682 } 1683 1684 /* check if the cid is the VF range */ 1685 if (!bnx2x_iov_is_vf_cid(bp, cid)) { 1686 DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid); 1687 return 1; 1688 } 1689 1690 /* extract vf and rxq index from vf_cid - relies on the following: 1691 * 1. vfid on cid reflects the true abs_vfid 1692 * 2. The max number of VFs (per path) is 64 1693 */ 1694 qidx = cid & ((1 << BNX2X_VF_CID_WND)-1); 1695 abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 1696 get_vf: 1697 vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 1698 1699 if (!vf) { 1700 BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n", 1701 cid, abs_vfid); 1702 return 0; 1703 } 1704 1705 switch (opcode) { 1706 case EVENT_RING_OPCODE_CFC_DEL: 1707 DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n", 1708 vf->abs_vfid, qidx); 1709 vfq_get(vf, qidx)->sp_obj.complete_cmd(bp, 1710 &vfq_get(vf, 1711 qidx)->sp_obj, 1712 BNX2X_Q_CMD_CFC_DEL); 1713 break; 1714 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 1715 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n", 1716 vf->abs_vfid, qidx); 1717 bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem); 1718 break; 1719 case EVENT_RING_OPCODE_MULTICAST_RULES: 1720 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n", 1721 vf->abs_vfid, qidx); 1722 bnx2x_vf_handle_mcast_eqe(bp, vf); 1723 break; 1724 case EVENT_RING_OPCODE_FILTERS_RULES: 1725 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n", 1726 vf->abs_vfid, qidx); 1727 bnx2x_vf_handle_filters_eqe(bp, vf); 1728 break; 1729 case EVENT_RING_OPCODE_RSS_UPDATE_RULES: 1730 DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n", 1731 vf->abs_vfid, qidx); 1732 bnx2x_vf_handle_rss_update_eqe(bp, vf); 1733 case EVENT_RING_OPCODE_VF_FLR: 1734 case EVENT_RING_OPCODE_MALICIOUS_VF: 1735 /* Do nothing for now */ 1736 return 0; 1737 } 1738 1739 return 0; 1740 } 1741 1742 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) 1743 { 1744 /* extract the vf from vf_cid - relies on the following: 1745 * 1. vfid on cid reflects the true abs_vfid 1746 * 2. The max number of VFs (per path) is 64 1747 */ 1748 int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 1749 return bnx2x_vf_by_abs_fid(bp, abs_vfid); 1750 } 1751 1752 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, 1753 struct bnx2x_queue_sp_obj **q_obj) 1754 { 1755 struct bnx2x_virtf *vf; 1756 1757 if (!IS_SRIOV(bp)) 1758 return; 1759 1760 vf = bnx2x_vf_by_cid(bp, vf_cid); 1761 1762 if (vf) { 1763 /* extract queue index from vf_cid - relies on the following: 1764 * 1. vfid on cid reflects the true abs_vfid 1765 * 2. 
The max number of VFs (per path) is 64 1766 */ 1767 int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1); 1768 *q_obj = &bnx2x_vfq(vf, q_index, sp_obj); 1769 } else { 1770 BNX2X_ERR("No vf matching cid %d\n", vf_cid); 1771 } 1772 } 1773 1774 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) 1775 { 1776 int i; 1777 int first_queue_query_index, num_queues_req; 1778 dma_addr_t cur_data_offset; 1779 struct stats_query_entry *cur_query_entry; 1780 u8 stats_count = 0; 1781 bool is_fcoe = false; 1782 1783 if (!IS_SRIOV(bp)) 1784 return; 1785 1786 if (!NO_FCOE(bp)) 1787 is_fcoe = true; 1788 1789 /* fcoe adds one global request and one queue request */ 1790 num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe; 1791 first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1792 (is_fcoe ? 0 : 1); 1793 1794 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), 1795 "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n", 1796 BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, 1797 first_queue_query_index + num_queues_req); 1798 1799 cur_data_offset = bp->fw_stats_data_mapping + 1800 offsetof(struct bnx2x_fw_stats_data, queue_stats) + 1801 num_queues_req * sizeof(struct per_queue_stats); 1802 1803 cur_query_entry = &bp->fw_stats_req-> 1804 query[first_queue_query_index + num_queues_req]; 1805 1806 for_each_vf(bp, i) { 1807 int j; 1808 struct bnx2x_virtf *vf = BP_VF(bp, i); 1809 1810 if (vf->state != VF_ENABLED) { 1811 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), 1812 "vf %d not enabled so no stats for it\n", 1813 vf->abs_vfid); 1814 continue; 1815 } 1816 1817 DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid); 1818 for_each_vfq(vf, j) { 1819 struct bnx2x_vf_queue *rxq = vfq_get(vf, j); 1820 1821 dma_addr_t q_stats_addr = 1822 vf->fw_stat_map + j * vf->stats_stride; 1823 1824 /* collect stats fro active queues only */ 1825 if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) == 1826 BNX2X_Q_LOGICAL_STATE_STOPPED) 1827 continue; 1828 1829 /* create stats query entry for this queue */ 1830 cur_query_entry->kind = STATS_TYPE_QUEUE; 1831 cur_query_entry->index = vfq_stat_id(vf, rxq); 1832 cur_query_entry->funcID = 1833 cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid)); 1834 cur_query_entry->address.hi = 1835 cpu_to_le32(U64_HI(q_stats_addr)); 1836 cur_query_entry->address.lo = 1837 cpu_to_le32(U64_LO(q_stats_addr)); 1838 DP(BNX2X_MSG_IOV, 1839 "added address %x %x for vf %d queue %d client %d\n", 1840 cur_query_entry->address.hi, 1841 cur_query_entry->address.lo, cur_query_entry->funcID, 1842 j, cur_query_entry->index); 1843 cur_query_entry++; 1844 cur_data_offset += sizeof(struct per_queue_stats); 1845 stats_count++; 1846 1847 /* all stats are coalesced to the leading queue */ 1848 if (vf->cfg_flags & VF_CFG_STATS_COALESCE) 1849 break; 1850 } 1851 } 1852 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; 1853 } 1854 1855 static inline 1856 struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id) 1857 { 1858 int i; 1859 struct bnx2x_virtf *vf = NULL; 1860 1861 for_each_vf(bp, i) { 1862 vf = BP_VF(bp, i); 1863 if (stat_id >= vf->igu_base_id && 1864 stat_id < vf->igu_base_id + vf_sb_count(vf)) 1865 break; 1866 } 1867 return vf; 1868 } 1869 1870 /* VF API helpers */ 1871 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid, 1872 u8 enable) 1873 { 1874 u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4; 1875 u32 val = enable ? 
static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;

	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), false);
}

static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 val;

	/* clear the VF configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
		 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
		     BNX2X_VF_MAX_QUEUES);
}

static
int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
			    struct vf_pf_resc_request *req_resc)
{
	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);

	return ((req_resc->num_rxqs <= rxq_cnt) &&
		(req_resc->num_txqs <= txq_cnt) &&
		(req_resc->num_sbs <= vf_sb_count(vf)) &&
		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
}

/* CORE VF API */
int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
		     struct vf_pf_resc_request *resc)
{
	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
		BNX2X_CIDS_PER_VF;

	union cdu_context *base_cxt = (union cdu_context *)
		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
		(base_vf_cid & (ILT_PAGE_CIDS-1));
	int i;

	/* if state is 'acquired' the VF was not released or FLR'd, in
	 * this case the returned resources match the previously acquired
	 * resources. Verify that the requested numbers do not exceed the
	 * already acquired numbers.
	 */
	if (vf->state == VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
		   vf->abs_vfid);

		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
				  vf->abs_vfid);
			return -EINVAL;
		}
		return 0;
	}

	/* Otherwise vf state must be 'free' or 'reset' */
	if (vf->state != VF_FREE && vf->state != VF_RESET) {
		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
			  vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* static allocation:
	 * the global maximum numbers are fixed per VF. Fail the request if
	 * the requested numbers exceed these globals
	 */
	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
		DP(BNX2X_MSG_IOV,
		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
		/* set the max resource in the vf */
		return -ENOMEM;
	}

	/* Set resource counters - 0 request means max available */
	vf_sb_count(vf) = resc->num_sbs;
	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	vf_txq_count(vf) = resc->num_txqs ?
: bnx2x_vf_max_queue_cnt(bp, vf); 1973 if (resc->num_mac_filters) 1974 vf_mac_rules_cnt(vf) = resc->num_mac_filters; 1975 if (resc->num_vlan_filters) 1976 vf_vlan_rules_cnt(vf) = resc->num_vlan_filters; 1977 1978 DP(BNX2X_MSG_IOV, 1979 "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n", 1980 vf_sb_count(vf), vf_rxq_count(vf), 1981 vf_txq_count(vf), vf_mac_rules_cnt(vf), 1982 vf_vlan_rules_cnt(vf)); 1983 1984 /* Initialize the queues */ 1985 if (!vf->vfqs) { 1986 DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n"); 1987 return -EINVAL; 1988 } 1989 1990 for_each_vfq(vf, i) { 1991 struct bnx2x_vf_queue *q = vfq_get(vf, i); 1992 1993 if (!q) { 1994 BNX2X_ERR("q number %d was not allocated\n", i); 1995 return -EINVAL; 1996 } 1997 1998 q->index = i; 1999 q->cxt = &((base_cxt + i)->eth); 2000 q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i; 2001 2002 DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n", 2003 vf->abs_vfid, i, q->index, q->cid, q->cxt); 2004 2005 /* init SP objects */ 2006 bnx2x_vfq_init(bp, vf, q); 2007 } 2008 vf->state = VF_ACQUIRED; 2009 return 0; 2010 } 2011 2012 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) 2013 { 2014 struct bnx2x_func_init_params func_init = {0}; 2015 u16 flags = 0; 2016 int i; 2017 2018 /* the sb resources are initialized at this point, do the 2019 * FW/HW initializations 2020 */ 2021 for_each_vf_sb(vf, i) 2022 bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true, 2023 vf_igu_sb(vf, i), vf_igu_sb(vf, i)); 2024 2025 /* Sanity checks */ 2026 if (vf->state != VF_ACQUIRED) { 2027 DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n", 2028 vf->abs_vfid, vf->state); 2029 return -EINVAL; 2030 } 2031 2032 /* let FLR complete ... 
*/ 2033 msleep(100); 2034 2035 /* FLR cleanup epilogue */ 2036 if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid)) 2037 return -EBUSY; 2038 2039 /* reset IGU VF statistics: MSIX */ 2040 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0); 2041 2042 /* vf init */ 2043 if (vf->cfg_flags & VF_CFG_STATS) 2044 flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ); 2045 2046 if (vf->cfg_flags & VF_CFG_TPA) 2047 flags |= FUNC_FLG_TPA; 2048 2049 if (is_vf_multi(vf)) 2050 flags |= FUNC_FLG_RSS; 2051 2052 /* function setup */ 2053 func_init.func_flgs = flags; 2054 func_init.pf_id = BP_FUNC(bp); 2055 func_init.func_id = FW_VF_HANDLE(vf->abs_vfid); 2056 func_init.fw_stat_map = vf->fw_stat_map; 2057 func_init.spq_map = vf->spq_map; 2058 func_init.spq_prod = 0; 2059 bnx2x_func_init(bp, &func_init); 2060 2061 /* Enable the vf */ 2062 bnx2x_vf_enable_access(bp, vf->abs_vfid); 2063 bnx2x_vf_enable_traffic(bp, vf); 2064 2065 /* queue protection table */ 2066 for_each_vfq(vf, i) 2067 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, 2068 vfq_qzone_id(vf, vfq_get(vf, i)), true); 2069 2070 vf->state = VF_ENABLED; 2071 2072 /* update vf bulletin board */ 2073 bnx2x_post_vf_bulletin(bp, vf->index); 2074 2075 return 0; 2076 } 2077 2078 struct set_vf_state_cookie { 2079 struct bnx2x_virtf *vf; 2080 u8 state; 2081 }; 2082 2083 static void bnx2x_set_vf_state(void *cookie) 2084 { 2085 struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie; 2086 2087 p->vf->state = p->state; 2088 } 2089 2090 int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf) 2091 { 2092 int rc = 0, i; 2093 2094 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 2095 2096 /* Close all queues */ 2097 for (i = 0; i < vf_rxq_count(vf); i++) { 2098 rc = bnx2x_vf_queue_teardown(bp, vf, i); 2099 if (rc) 2100 goto op_err; 2101 } 2102 2103 /* disable the interrupts */ 2104 DP(BNX2X_MSG_IOV, "disabling igu\n"); 2105 bnx2x_vf_igu_disable(bp, vf); 2106 2107 /* disable the VF */ 2108 DP(BNX2X_MSG_IOV, "clearing qtbl\n"); 2109 bnx2x_vf_clr_qtbl(bp, vf); 2110 2111 /* need to make sure there are no outstanding stats ramrods which may 2112 * cause the device to access the VF's stats buffer which it will free 2113 * as soon as we return from the close flow. 2114 */ 2115 { 2116 struct set_vf_state_cookie cookie; 2117 2118 cookie.vf = vf; 2119 cookie.state = VF_ACQUIRED; 2120 bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); 2121 } 2122 2123 DP(BNX2X_MSG_IOV, "set state to acquired\n"); 2124 2125 return 0; 2126 op_err: 2127 BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc); 2128 return rc; 2129 } 2130 2131 /* VF release can be called either: 1. The VF was acquired but 2132 * not enabled 2. the vf was enabled or in the process of being 2133 * enabled 2134 */ 2135 int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf) 2136 { 2137 int rc; 2138 2139 DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid, 2140 vf->state == VF_FREE ? "Free" : 2141 vf->state == VF_ACQUIRED ? "Acquired" : 2142 vf->state == VF_ENABLED ? "Enabled" : 2143 vf->state == VF_RESET ? 
"Reset" : 2144 "Unknown"); 2145 2146 switch (vf->state) { 2147 case VF_ENABLED: 2148 rc = bnx2x_vf_close(bp, vf); 2149 if (rc) 2150 goto op_err; 2151 /* Fallthrough to release resources */ 2152 case VF_ACQUIRED: 2153 DP(BNX2X_MSG_IOV, "about to free resources\n"); 2154 bnx2x_vf_free_resc(bp, vf); 2155 break; 2156 2157 case VF_FREE: 2158 case VF_RESET: 2159 default: 2160 break; 2161 } 2162 return 0; 2163 op_err: 2164 BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc); 2165 return rc; 2166 } 2167 2168 int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, 2169 struct bnx2x_config_rss_params *rss) 2170 { 2171 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 2172 set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags); 2173 return bnx2x_config_rss(bp, rss); 2174 } 2175 2176 int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, 2177 struct vfpf_tpa_tlv *tlv, 2178 struct bnx2x_queue_update_tpa_params *params) 2179 { 2180 aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr; 2181 struct bnx2x_queue_state_params qstate; 2182 int qid, rc = 0; 2183 2184 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 2185 2186 /* Set ramrod params */ 2187 memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); 2188 memcpy(&qstate.params.update_tpa, params, 2189 sizeof(struct bnx2x_queue_update_tpa_params)); 2190 qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA; 2191 set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); 2192 2193 for (qid = 0; qid < vf_rxq_count(vf); qid++) { 2194 qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); 2195 qstate.params.update_tpa.sge_map = sge_addr[qid]; 2196 DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n", 2197 vf->abs_vfid, qid, U64_HI(sge_addr[qid]), 2198 U64_LO(sge_addr[qid])); 2199 rc = bnx2x_queue_state_change(bp, &qstate); 2200 if (rc) { 2201 BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n", 2202 U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]), 2203 vf->abs_vfid, qid); 2204 return rc; 2205 } 2206 } 2207 2208 return rc; 2209 } 2210 2211 /* VF release ~ VF close + VF release-resources 2212 * Release is the ultimate SW shutdown and is called whenever an 2213 * irrecoverable error is encountered. 2214 */ 2215 int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf) 2216 { 2217 int rc; 2218 2219 DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid); 2220 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); 2221 2222 rc = bnx2x_vf_free(bp, vf); 2223 if (rc) 2224 WARN(rc, 2225 "VF[%d] Failed to allocate resources for release op- rc=%d\n", 2226 vf->abs_vfid, rc); 2227 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); 2228 return rc; 2229 } 2230 2231 static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp, 2232 struct bnx2x_virtf *vf, u32 *sbdf) 2233 { 2234 *sbdf = vf->devfn | (vf->bus << 8); 2235 } 2236 2237 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 2238 enum channel_tlvs tlv) 2239 { 2240 /* we don't lock the channel for unsupported tlvs */ 2241 if (!bnx2x_tlv_supported(tlv)) { 2242 BNX2X_ERR("attempting to lock with unsupported tlv. 
Aborting\n"); 2243 return; 2244 } 2245 2246 /* lock the channel */ 2247 mutex_lock(&vf->op_mutex); 2248 2249 /* record the locking op */ 2250 vf->op_current = tlv; 2251 2252 /* log the lock */ 2253 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n", 2254 vf->abs_vfid, tlv); 2255 } 2256 2257 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 2258 enum channel_tlvs expected_tlv) 2259 { 2260 enum channel_tlvs current_tlv; 2261 2262 if (!vf) { 2263 BNX2X_ERR("VF was %p\n", vf); 2264 return; 2265 } 2266 2267 current_tlv = vf->op_current; 2268 2269 /* we don't unlock the channel for unsupported tlvs */ 2270 if (!bnx2x_tlv_supported(expected_tlv)) 2271 return; 2272 2273 WARN(expected_tlv != vf->op_current, 2274 "lock mismatch: expected %d found %d", expected_tlv, 2275 vf->op_current); 2276 2277 /* record the locking op */ 2278 vf->op_current = CHANNEL_TLV_NONE; 2279 2280 /* lock the channel */ 2281 mutex_unlock(&vf->op_mutex); 2282 2283 /* log the unlock */ 2284 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n", 2285 vf->abs_vfid, vf->op_current); 2286 } 2287 2288 static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable) 2289 { 2290 struct bnx2x_queue_state_params q_params; 2291 u32 prev_flags; 2292 int i, rc; 2293 2294 /* Verify changes are needed and record current Tx switching state */ 2295 prev_flags = bp->flags; 2296 if (enable) 2297 bp->flags |= TX_SWITCHING; 2298 else 2299 bp->flags &= ~TX_SWITCHING; 2300 if (prev_flags == bp->flags) 2301 return 0; 2302 2303 /* Verify state enables the sending of queue ramrods */ 2304 if ((bp->state != BNX2X_STATE_OPEN) || 2305 (bnx2x_get_q_logical_state(bp, 2306 &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) != 2307 BNX2X_Q_LOGICAL_STATE_ACTIVE)) 2308 return 0; 2309 2310 /* send q. update ramrod to configure Tx switching */ 2311 memset(&q_params, 0, sizeof(q_params)); 2312 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 2313 q_params.cmd = BNX2X_Q_CMD_UPDATE; 2314 __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, 2315 &q_params.params.update.update_flags); 2316 if (enable) 2317 __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING, 2318 &q_params.params.update.update_flags); 2319 else 2320 __clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING, 2321 &q_params.params.update.update_flags); 2322 2323 /* send the ramrod on all the queues of the PF */ 2324 for_each_eth_queue(bp, i) { 2325 struct bnx2x_fastpath *fp = &bp->fp[i]; 2326 2327 /* Set the appropriate Queue object */ 2328 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 2329 2330 /* Update the Queue state */ 2331 rc = bnx2x_queue_state_change(bp, &q_params); 2332 if (rc) { 2333 BNX2X_ERR("Failed to configure Tx switching\n"); 2334 return rc; 2335 } 2336 } 2337 2338 DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled"); 2339 return 0; 2340 } 2341 2342 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) 2343 { 2344 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); 2345 2346 if (!IS_SRIOV(bp)) { 2347 BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. 
Check dmesg for errors in probe stage\n"); 2348 return -EINVAL; 2349 } 2350 2351 DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", 2352 num_vfs_param, BNX2X_NR_VIRTFN(bp)); 2353 2354 /* HW channel is only operational when PF is up */ 2355 if (bp->state != BNX2X_STATE_OPEN) { 2356 BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n"); 2357 return -EINVAL; 2358 } 2359 2360 /* we are always bound by the total_vfs in the configuration space */ 2361 if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) { 2362 BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n", 2363 num_vfs_param, BNX2X_NR_VIRTFN(bp)); 2364 num_vfs_param = BNX2X_NR_VIRTFN(bp); 2365 } 2366 2367 bp->requested_nr_virtfn = num_vfs_param; 2368 if (num_vfs_param == 0) { 2369 bnx2x_set_pf_tx_switching(bp, false); 2370 pci_disable_sriov(dev); 2371 return 0; 2372 } else { 2373 return bnx2x_enable_sriov(bp); 2374 } 2375 } 2376 2377 #define IGU_ENTRY_SIZE 4 2378 2379 int bnx2x_enable_sriov(struct bnx2x *bp) 2380 { 2381 int rc = 0, req_vfs = bp->requested_nr_virtfn; 2382 int vf_idx, sb_idx, vfq_idx, qcount, first_vf; 2383 u32 igu_entry, address; 2384 u16 num_vf_queues; 2385 2386 if (req_vfs == 0) 2387 return 0; 2388 2389 first_vf = bp->vfdb->sriov.first_vf_in_pf; 2390 2391 /* statically distribute vf sb pool between VFs */ 2392 num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES, 2393 BP_VFDB(bp)->vf_sbs_pool / req_vfs); 2394 2395 /* zero previous values learned from igu cam */ 2396 for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) { 2397 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); 2398 2399 vf->sb_count = 0; 2400 vf_sb_count(BP_VF(bp, vf_idx)) = 0; 2401 } 2402 bp->vfdb->vf_sbs_pool = 0; 2403 2404 /* prepare IGU cam */ 2405 sb_idx = BP_VFDB(bp)->first_vf_igu_entry; 2406 address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE; 2407 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { 2408 for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) { 2409 igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT | 2410 vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT | 2411 IGU_REG_MAPPING_MEMORY_VALID; 2412 DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n", 2413 sb_idx, vf_idx); 2414 REG_WR(bp, address, igu_entry); 2415 sb_idx++; 2416 address += IGU_ENTRY_SIZE; 2417 } 2418 } 2419 2420 /* Reinitialize vf database according to igu cam */ 2421 bnx2x_get_vf_igu_cam_info(bp); 2422 2423 DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n", 2424 BP_VFDB(bp)->vf_sbs_pool, num_vf_queues); 2425 2426 qcount = 0; 2427 for_each_vf(bp, vf_idx) { 2428 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); 2429 2430 /* set local queue arrays */ 2431 vf->vfqs = &bp->vfdb->vfqs[qcount]; 2432 qcount += vf_sb_count(vf); 2433 bnx2x_iov_static_resc(bp, vf); 2434 } 2435 2436 /* prepare msix vectors in VF configuration space - the value in the 2437 * PCI configuration space should be the index of the last entry, 2438 * namely one less than the actual size of the table 2439 */ 2440 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { 2441 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); 2442 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, 2443 num_vf_queues - 1); 2444 DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n", 2445 vf_idx, num_vf_queues - 1); 2446 } 2447 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 2448 2449 /* enable sriov. This will probe all the VFs, and consequentially cause 2450 * the "acquire" messages to appear on the VF PF channel. 
2451 */ 2452 DP(BNX2X_MSG_IOV, "about to call enable sriov\n"); 2453 bnx2x_disable_sriov(bp); 2454 2455 rc = bnx2x_set_pf_tx_switching(bp, true); 2456 if (rc) 2457 return rc; 2458 2459 rc = pci_enable_sriov(bp->pdev, req_vfs); 2460 if (rc) { 2461 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); 2462 return rc; 2463 } 2464 DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs); 2465 return req_vfs; 2466 } 2467 2468 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) 2469 { 2470 int vfidx; 2471 struct pf_vf_bulletin_content *bulletin; 2472 2473 DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n"); 2474 for_each_vf(bp, vfidx) { 2475 bulletin = BP_VF_BULLETIN(bp, vfidx); 2476 if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN) 2477 bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0); 2478 } 2479 } 2480 2481 void bnx2x_disable_sriov(struct bnx2x *bp) 2482 { 2483 pci_disable_sriov(bp->pdev); 2484 } 2485 2486 static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, 2487 struct bnx2x_virtf **vf, 2488 struct pf_vf_bulletin_content **bulletin) 2489 { 2490 if (bp->state != BNX2X_STATE_OPEN) { 2491 BNX2X_ERR("vf ndo called though PF is down\n"); 2492 return -EINVAL; 2493 } 2494 2495 if (!IS_SRIOV(bp)) { 2496 BNX2X_ERR("vf ndo called though sriov is disabled\n"); 2497 return -EINVAL; 2498 } 2499 2500 if (vfidx >= BNX2X_NR_VIRTFN(bp)) { 2501 BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n", 2502 vfidx, BNX2X_NR_VIRTFN(bp)); 2503 return -EINVAL; 2504 } 2505 2506 /* init members */ 2507 *vf = BP_VF(bp, vfidx); 2508 *bulletin = BP_VF_BULLETIN(bp, vfidx); 2509 2510 if (!*vf) { 2511 BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n", 2512 vfidx); 2513 return -EINVAL; 2514 } 2515 2516 if (!(*vf)->vfqs) { 2517 BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n", 2518 vfidx); 2519 return -EINVAL; 2520 } 2521 2522 if (!*bulletin) { 2523 BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n", 2524 vfidx); 2525 return -EINVAL; 2526 } 2527 2528 return 0; 2529 } 2530 2531 int bnx2x_get_vf_config(struct net_device *dev, int vfidx, 2532 struct ifla_vf_info *ivi) 2533 { 2534 struct bnx2x *bp = netdev_priv(dev); 2535 struct bnx2x_virtf *vf = NULL; 2536 struct pf_vf_bulletin_content *bulletin = NULL; 2537 struct bnx2x_vlan_mac_obj *mac_obj; 2538 struct bnx2x_vlan_mac_obj *vlan_obj; 2539 int rc; 2540 2541 /* sanity and init */ 2542 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 2543 if (rc) 2544 return rc; 2545 mac_obj = &bnx2x_leading_vfq(vf, mac_obj); 2546 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); 2547 if (!mac_obj || !vlan_obj) { 2548 BNX2X_ERR("VF partially initialized\n"); 2549 return -EINVAL; 2550 } 2551 2552 ivi->vf = vfidx; 2553 ivi->qos = 0; 2554 ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */ 2555 ivi->spoofchk = 1; /*always enabled */ 2556 if (vf->state == VF_ENABLED) { 2557 /* mac and vlan are in vlan_mac objects */ 2558 if (bnx2x_validate_vf_sp_objs(bp, vf, false)) { 2559 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, 2560 0, ETH_ALEN); 2561 vlan_obj->get_n_elements(bp, vlan_obj, 1, 2562 (u8 *)&ivi->vlan, 0, 2563 VLAN_HLEN); 2564 } 2565 } else { 2566 /* mac */ 2567 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) 2568 /* mac configured by ndo so its in bulletin board */ 2569 memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); 2570 else 2571 /* function has not been loaded yet. 
Show mac as 0s */ 2572 memset(&ivi->mac, 0, ETH_ALEN); 2573 2574 /* vlan */ 2575 if (bulletin->valid_bitmap & (1 << VLAN_VALID)) 2576 /* vlan configured by ndo so its in bulletin board */ 2577 memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN); 2578 else 2579 /* function has not been loaded yet. Show vlans as 0s */ 2580 memset(&ivi->vlan, 0, VLAN_HLEN); 2581 } 2582 2583 return 0; 2584 } 2585 2586 /* New mac for VF. Consider these cases: 2587 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and 2588 * supply at acquire. 2589 * 2. VF has already been acquired but has not yet initialized - store in local 2590 * bulletin board. mac will be posted on VF bulletin board after VF init. VF 2591 * will configure this mac when it is ready. 2592 * 3. VF has already initialized but has not yet setup a queue - post the new 2593 * mac on VF's bulletin board right now. VF will configure this mac when it 2594 * is ready. 2595 * 4. VF has already set a queue - delete any macs already configured for this 2596 * queue and manually config the new mac. 2597 * In any event, once this function has been called refuse any attempts by the 2598 * VF to configure any mac for itself except for this mac. In case of a race 2599 * where the VF fails to see the new post on its bulletin board before sending a 2600 * mac configuration request, the PF will simply fail the request and VF can try 2601 * again after consulting its bulletin board. 2602 */ 2603 int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) 2604 { 2605 struct bnx2x *bp = netdev_priv(dev); 2606 int rc, q_logical_state; 2607 struct bnx2x_virtf *vf = NULL; 2608 struct pf_vf_bulletin_content *bulletin = NULL; 2609 2610 /* sanity and init */ 2611 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 2612 if (rc) 2613 return rc; 2614 if (!is_valid_ether_addr(mac)) { 2615 BNX2X_ERR("mac address invalid\n"); 2616 return -EINVAL; 2617 } 2618 2619 /* update PF's copy of the VF's bulletin. 
Will no longer accept mac 2620 * configuration requests from vf unless match this mac 2621 */ 2622 bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID; 2623 memcpy(bulletin->mac, mac, ETH_ALEN); 2624 2625 /* Post update on VF's bulletin board */ 2626 rc = bnx2x_post_vf_bulletin(bp, vfidx); 2627 if (rc) { 2628 BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx); 2629 return rc; 2630 } 2631 2632 q_logical_state = 2633 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); 2634 if (vf->state == VF_ENABLED && 2635 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { 2636 /* configure the mac in device on this vf's queue */ 2637 unsigned long ramrod_flags = 0; 2638 struct bnx2x_vlan_mac_obj *mac_obj; 2639 2640 /* User should be able to see failure reason in system logs */ 2641 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) 2642 return -EINVAL; 2643 2644 /* must lock vfpf channel to protect against vf flows */ 2645 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 2646 2647 /* remove existing eth macs */ 2648 mac_obj = &bnx2x_leading_vfq(vf, mac_obj); 2649 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); 2650 if (rc) { 2651 BNX2X_ERR("failed to delete eth macs\n"); 2652 rc = -EINVAL; 2653 goto out; 2654 } 2655 2656 /* remove existing uc list macs */ 2657 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true); 2658 if (rc) { 2659 BNX2X_ERR("failed to delete uc_list macs\n"); 2660 rc = -EINVAL; 2661 goto out; 2662 } 2663 2664 /* configure the new mac to device */ 2665 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 2666 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, 2667 BNX2X_ETH_MAC, &ramrod_flags); 2668 2669 out: 2670 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 2671 } 2672 2673 return 0; 2674 } 2675 2676 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) 2677 { 2678 struct bnx2x_queue_state_params q_params = {NULL}; 2679 struct bnx2x_vlan_mac_ramrod_params ramrod_param; 2680 struct bnx2x_queue_update_params *update_params; 2681 struct pf_vf_bulletin_content *bulletin = NULL; 2682 struct bnx2x_rx_mode_ramrod_params rx_ramrod; 2683 struct bnx2x *bp = netdev_priv(dev); 2684 struct bnx2x_vlan_mac_obj *vlan_obj; 2685 unsigned long vlan_mac_flags = 0; 2686 unsigned long ramrod_flags = 0; 2687 struct bnx2x_virtf *vf = NULL; 2688 unsigned long accept_flags; 2689 int rc; 2690 2691 /* sanity and init */ 2692 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 2693 if (rc) 2694 return rc; 2695 2696 if (vlan > 4095) { 2697 BNX2X_ERR("illegal vlan value %d\n", vlan); 2698 return -EINVAL; 2699 } 2700 2701 DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n", 2702 vfidx, vlan, 0); 2703 2704 /* update PF's copy of the VF's bulletin. No point in posting the vlan 2705 * to the VF since it doesn't have anything to do with it. But it useful 2706 * to store it here in case the VF is not up yet and we can only 2707 * configure the vlan later when it does. Treat vlan id 0 as remove the 2708 * Host tag. 2709 */ 2710 if (vlan > 0) 2711 bulletin->valid_bitmap |= 1 << VLAN_VALID; 2712 else 2713 bulletin->valid_bitmap &= ~(1 << VLAN_VALID); 2714 bulletin->vlan = vlan; 2715 2716 /* is vf initialized and queue set up? 
*/ 2717 if (vf->state != VF_ENABLED || 2718 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != 2719 BNX2X_Q_LOGICAL_STATE_ACTIVE) 2720 return rc; 2721 2722 /* User should be able to see error in system logs */ 2723 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) 2724 return -EINVAL; 2725 2726 /* must lock vfpf channel to protect against vf flows */ 2727 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 2728 2729 /* remove existing vlans */ 2730 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 2731 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); 2732 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, 2733 &ramrod_flags); 2734 if (rc) { 2735 BNX2X_ERR("failed to delete vlans\n"); 2736 rc = -EINVAL; 2737 goto out; 2738 } 2739 2740 /* need to remove/add the VF's accept_any_vlan bit */ 2741 accept_flags = bnx2x_leading_vfq(vf, accept_flags); 2742 if (vlan) 2743 clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); 2744 else 2745 set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); 2746 2747 bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, 2748 accept_flags); 2749 bnx2x_leading_vfq(vf, accept_flags) = accept_flags; 2750 bnx2x_config_rx_mode(bp, &rx_ramrod); 2751 2752 /* configure the new vlan to device */ 2753 memset(&ramrod_param, 0, sizeof(ramrod_param)); 2754 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 2755 ramrod_param.vlan_mac_obj = vlan_obj; 2756 ramrod_param.ramrod_flags = ramrod_flags; 2757 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 2758 &ramrod_param.user_req.vlan_mac_flags); 2759 ramrod_param.user_req.u.vlan.vlan = vlan; 2760 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; 2761 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); 2762 if (rc) { 2763 BNX2X_ERR("failed to configure vlan\n"); 2764 rc = -EINVAL; 2765 goto out; 2766 } 2767 2768 /* send queue update ramrod to configure default vlan and silent 2769 * vlan removal 2770 */ 2771 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 2772 q_params.cmd = BNX2X_Q_CMD_UPDATE; 2773 q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); 2774 update_params = &q_params.params.update; 2775 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, 2776 &update_params->update_flags); 2777 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, 2778 &update_params->update_flags); 2779 if (vlan == 0) { 2780 /* if vlan is 0 then we want to leave the VF traffic 2781 * untagged, and leave the incoming traffic untouched 2782 * (i.e. do not remove any vlan tags). 2783 */ 2784 __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, 2785 &update_params->update_flags); 2786 __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 2787 &update_params->update_flags); 2788 } else { 2789 /* configure default vlan to vf queue and set silent 2790 * vlan removal (the vf remains unaware of this vlan). 
2791 */ 2792 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, 2793 &update_params->update_flags); 2794 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 2795 &update_params->update_flags); 2796 update_params->def_vlan = vlan; 2797 update_params->silent_removal_value = 2798 vlan & VLAN_VID_MASK; 2799 update_params->silent_removal_mask = VLAN_VID_MASK; 2800 } 2801 2802 /* Update the Queue state */ 2803 rc = bnx2x_queue_state_change(bp, &q_params); 2804 if (rc) { 2805 BNX2X_ERR("Failed to configure default VLAN\n"); 2806 goto out; 2807 } 2808 2809 2810 /* clear the flag indicating that this VF needs its vlan 2811 * (will only be set if the HV configured the Vlan before vf was 2812 * up and we were called because the VF came up later 2813 */ 2814 out: 2815 vf->cfg_flags &= ~VF_CFG_VLAN; 2816 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 2817 2818 return rc; 2819 } 2820 2821 /* crc is the first field in the bulletin board. Compute the crc over the 2822 * entire bulletin board excluding the crc field itself. Use the length field 2823 * as the Bulletin Board was posted by a PF with possibly a different version 2824 * from the vf which will sample it. Therefore, the length is computed by the 2825 * PF and the used blindly by the VF. 2826 */ 2827 u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, 2828 struct pf_vf_bulletin_content *bulletin) 2829 { 2830 return crc32(BULLETIN_CRC_SEED, 2831 ((u8 *)bulletin) + sizeof(bulletin->crc), 2832 bulletin->length - sizeof(bulletin->crc)); 2833 } 2834 2835 /* Check for new posts on the bulletin board */ 2836 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) 2837 { 2838 struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content; 2839 int attempts; 2840 2841 /* bulletin board hasn't changed since last sample */ 2842 if (bp->old_bulletin.version == bulletin.version) 2843 return PFVF_BULLETIN_UNCHANGED; 2844 2845 /* validate crc of new bulletin board */ 2846 if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) { 2847 /* sampling structure in mid post may result with corrupted data 2848 * validate crc to ensure coherency. 2849 */ 2850 for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) { 2851 bulletin = bp->pf2vf_bulletin->content; 2852 if (bulletin.crc == bnx2x_crc_vf_bulletin(bp, 2853 &bulletin)) 2854 break; 2855 BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n", 2856 bulletin.crc, 2857 bnx2x_crc_vf_bulletin(bp, &bulletin)); 2858 } 2859 if (attempts >= BULLETIN_ATTEMPTS) { 2860 BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. 
Aborting\n", 2861 attempts); 2862 return PFVF_BULLETIN_CRC_ERR; 2863 } 2864 } 2865 2866 /* the mac address in bulletin board is valid and is new */ 2867 if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID && 2868 !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) { 2869 /* update new mac to net device */ 2870 memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN); 2871 } 2872 2873 /* the vlan in bulletin board is valid and is new */ 2874 if (bulletin.valid_bitmap & 1 << VLAN_VALID) 2875 memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN); 2876 2877 /* copy new bulletin board to bp */ 2878 bp->old_bulletin = bulletin; 2879 2880 return PFVF_BULLETIN_UPDATED; 2881 } 2882 2883 void bnx2x_timer_sriov(struct bnx2x *bp) 2884 { 2885 bnx2x_sample_bulletin(bp); 2886 2887 /* if channel is down we need to self destruct */ 2888 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) 2889 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, 2890 BNX2X_MSG_IOV); 2891 } 2892 2893 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) 2894 { 2895 /* vf doorbells are embedded within the regview */ 2896 return bp->regview + PXP_VF_ADDR_DB_START; 2897 } 2898 2899 int bnx2x_vf_pci_alloc(struct bnx2x *bp) 2900 { 2901 mutex_init(&bp->vf2pf_mutex); 2902 2903 /* allocate vf2pf mailbox for vf to pf channel */ 2904 bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping, 2905 sizeof(struct bnx2x_vf_mbx_msg)); 2906 if (!bp->vf2pf_mbox) 2907 goto alloc_mem_err; 2908 2909 /* allocate pf 2 vf bulletin board */ 2910 bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping, 2911 sizeof(union pf_vf_bulletin)); 2912 if (!bp->pf2vf_bulletin) 2913 goto alloc_mem_err; 2914 2915 return 0; 2916 2917 alloc_mem_err: 2918 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, 2919 sizeof(struct bnx2x_vf_mbx_msg)); 2920 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, 2921 sizeof(union pf_vf_bulletin)); 2922 return -ENOMEM; 2923 } 2924 2925 void bnx2x_iov_channel_down(struct bnx2x *bp) 2926 { 2927 int vf_idx; 2928 struct pf_vf_bulletin_content *bulletin; 2929 2930 if (!IS_SRIOV(bp)) 2931 return; 2932 2933 for_each_vf(bp, vf_idx) { 2934 /* locate this VFs bulletin board and update the channel down 2935 * bit 2936 */ 2937 bulletin = BP_VF_BULLETIN(bp, vf_idx); 2938 bulletin->valid_bitmap |= 1 << CHANNEL_DOWN; 2939 2940 /* update vf bulletin board */ 2941 bnx2x_post_vf_bulletin(bp, vf_idx); 2942 } 2943 } 2944 2945 void bnx2x_iov_task(struct work_struct *work) 2946 { 2947 struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work); 2948 2949 if (!netif_running(bp->dev)) 2950 return; 2951 2952 if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR, 2953 &bp->iov_task_state)) 2954 bnx2x_vf_handle_flr_event(bp); 2955 2956 if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG, 2957 &bp->iov_task_state)) 2958 bnx2x_vf_mbx(bp); 2959 } 2960 2961 void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) 2962 { 2963 smp_mb__before_clear_bit(); 2964 set_bit(flag, &bp->iov_task_state); 2965 smp_mb__after_clear_bit(); 2966 DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); 2967 queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0); 2968 } 2969