1 /* bnx2x_sriov.c: Broadcom Everest network driver. 2 * 3 * Copyright 2009-2013 Broadcom Corporation 4 * 5 * Unless you and Broadcom execute a separate written software license 6 * agreement governing use of this software, this software is licensed to you 7 * under the terms of the GNU General Public License version 2, available 8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). 9 * 10 * Notwithstanding the above, under no circumstances may you combine this 11 * software in any way with any other Broadcom software provided under a 12 * license other than the GPL, without Broadcom's express prior written 13 * consent. 14 * 15 * Maintained by: Ariel Elior <ariel.elior@qlogic.com> 16 * Written by: Shmulik Ravid 17 * Ariel Elior <ariel.elior@qlogic.com> 18 * 19 */ 20 #include "bnx2x.h" 21 #include "bnx2x_init.h" 22 #include "bnx2x_cmn.h" 23 #include "bnx2x_sp.h" 24 #include <linux/crc32.h> 25 #include <linux/if_vlan.h> 26 27 /* General service functions */ 28 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, 29 u16 pf_id) 30 { 31 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), 32 pf_id); 33 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), 34 pf_id); 35 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), 36 pf_id); 37 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), 38 pf_id); 39 } 40 41 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, 42 u8 enable) 43 { 44 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), 45 enable); 46 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), 47 enable); 48 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid), 49 enable); 50 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), 51 enable); 52 } 53 54 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) 55 { 56 int idx; 57 58 for_each_vf(bp, idx) 59 if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid) 60 break; 61 return idx; 62 } 63 64 static 65 struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) 66 { 67 u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid); 68 return (idx < BNX2X_NR_VIRTFN(bp)) ? 
BP_VF(bp, idx) : NULL; 69 } 70 71 static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf, 72 u8 igu_sb_id, u8 segment, u16 index, u8 op, 73 u8 update) 74 { 75 /* acking a VF sb through the PF - use the GRC */ 76 u32 ctl; 77 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 78 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 79 u32 func_encode = vf->abs_vfid; 80 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id; 81 struct igu_regular cmd_data = {0}; 82 83 cmd_data.sb_id_and_flags = 84 ((index << IGU_REGULAR_SB_INDEX_SHIFT) | 85 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | 86 (update << IGU_REGULAR_BUPDATE_SHIFT) | 87 (op << IGU_REGULAR_ENABLE_INT_SHIFT)); 88 89 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | 90 func_encode << IGU_CTRL_REG_FID_SHIFT | 91 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; 92 93 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 94 cmd_data.sb_id_and_flags, igu_addr_data); 95 REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags); 96 mmiowb(); 97 barrier(); 98 99 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 100 ctl, igu_addr_ctl); 101 REG_WR(bp, igu_addr_ctl, ctl); 102 mmiowb(); 103 barrier(); 104 } 105 106 static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp, 107 struct bnx2x_virtf *vf, 108 bool print_err) 109 { 110 if (!bnx2x_leading_vfq(vf, sp_initialized)) { 111 if (print_err) 112 BNX2X_ERR("Slowpath objects not yet initialized!\n"); 113 else 114 DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n"); 115 return false; 116 } 117 return true; 118 } 119 120 /* VFOP operations states */ 121 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, 122 struct bnx2x_queue_init_params *init_params, 123 struct bnx2x_queue_setup_params *setup_params, 124 u16 q_idx, u16 sb_idx) 125 { 126 DP(BNX2X_MSG_IOV, 127 "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d", 128 vf->abs_vfid, 129 q_idx, 130 sb_idx, 131 init_params->tx.sb_cq_index, 132 init_params->tx.hc_rate, 133 setup_params->flags, 134 setup_params->txq_params.traffic_type); 135 } 136 137 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, 138 struct bnx2x_queue_init_params *init_params, 139 struct bnx2x_queue_setup_params *setup_params, 140 u16 q_idx, u16 sb_idx) 141 { 142 struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params; 143 144 DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n" 145 "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n", 146 vf->abs_vfid, 147 q_idx, 148 sb_idx, 149 init_params->rx.sb_cq_index, 150 init_params->rx.hc_rate, 151 setup_params->gen_params.mtu, 152 rxq_params->buf_sz, 153 rxq_params->sge_buf_sz, 154 rxq_params->max_sges_pkt, 155 rxq_params->tpa_agg_sz, 156 setup_params->flags, 157 rxq_params->drop_flags, 158 rxq_params->cache_line_log); 159 } 160 161 void bnx2x_vfop_qctor_prep(struct bnx2x *bp, 162 struct bnx2x_virtf *vf, 163 struct bnx2x_vf_queue *q, 164 struct bnx2x_vf_queue_construct_params *p, 165 unsigned long q_type) 166 { 167 struct bnx2x_queue_init_params *init_p = &p->qstate.params.init; 168 struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup; 169 170 /* INIT */ 171 172 /* Enable host coalescing in the transition to INIT state */ 173 if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags)) 174 __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags); 175 176 if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags)) 177 __set_bit(BNX2X_Q_FLG_HC_EN, 
&init_p->tx.flags); 178 179 /* FW SB ID */ 180 init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx); 181 init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx); 182 183 /* context */ 184 init_p->cxts[0] = q->cxt; 185 186 /* SETUP */ 187 188 /* Setup-op general parameters */ 189 setup_p->gen_params.spcl_id = vf->sp_cl_id; 190 setup_p->gen_params.stat_id = vfq_stat_id(vf, q); 191 192 /* Setup-op pause params: 193 * Nothing to do, the pause thresholds are set by default to 0 which 194 * effectively turns off the feature for this queue. We don't want 195 * one queue (VF) to interfere with another queue (another VF) 196 */ 197 if (vf->cfg_flags & VF_CFG_FW_FC) 198 BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n", 199 vf->abs_vfid); 200 /* Setup-op flags: 201 * collect statistics, zero statistics, local-switching, security, 202 * OV for Flex10, RSS and MCAST for leading 203 */ 204 if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags)) 205 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags); 206 207 /* for VFs, enable tx switching, bd coherency, and mac address 208 * anti-spoofing 209 */ 210 __set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags); 211 __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags); 212 __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags); 213 214 /* Setup-op rx parameters */ 215 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) { 216 struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params; 217 218 rxq_p->cl_qzone_id = vfq_qzone_id(vf, q); 219 rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx); 220 rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid); 221 222 if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags)) 223 rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES; 224 } 225 226 /* Setup-op tx parameters */ 227 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) { 228 setup_p->txq_params.tss_leading_cl_id = vf->leading_rss; 229 setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx); 230 } 231 } 232 233 static int bnx2x_vf_queue_create(struct bnx2x *bp, 234 struct bnx2x_virtf *vf, int qid, 235 struct bnx2x_vf_queue_construct_params *qctor) 236 { 237 struct bnx2x_queue_state_params *q_params; 238 int rc = 0; 239 240 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); 241 242 /* Prepare ramrod information */ 243 q_params = &qctor->qstate; 244 q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj); 245 set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags); 246 247 if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == 248 BNX2X_Q_LOGICAL_STATE_ACTIVE) { 249 DP(BNX2X_MSG_IOV, "queue was already up.
Aborting gracefully\n"); 250 goto out; 251 } 252 253 /* Run Queue 'construction' ramrods */ 254 q_params->cmd = BNX2X_Q_CMD_INIT; 255 rc = bnx2x_queue_state_change(bp, q_params); 256 if (rc) 257 goto out; 258 259 memcpy(&q_params->params.setup, &qctor->prep_qsetup, 260 sizeof(struct bnx2x_queue_setup_params)); 261 q_params->cmd = BNX2X_Q_CMD_SETUP; 262 rc = bnx2x_queue_state_change(bp, q_params); 263 if (rc) 264 goto out; 265 266 /* enable interrupts */ 267 bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)), 268 USTORM_ID, 0, IGU_INT_ENABLE, 0); 269 out: 270 return rc; 271 } 272 273 static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf, 274 int qid) 275 { 276 enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT, 277 BNX2X_Q_CMD_TERMINATE, 278 BNX2X_Q_CMD_CFC_DEL}; 279 struct bnx2x_queue_state_params q_params; 280 int rc, i; 281 282 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 283 284 /* Prepare ramrod information */ 285 memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params)); 286 q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj); 287 set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 288 289 if (bnx2x_get_q_logical_state(bp, q_params.q_obj) == 290 BNX2X_Q_LOGICAL_STATE_STOPPED) { 291 DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n"); 292 goto out; 293 } 294 295 /* Run Queue 'destruction' ramrods */ 296 for (i = 0; i < ARRAY_SIZE(cmds); i++) { 297 q_params.cmd = cmds[i]; 298 rc = bnx2x_queue_state_change(bp, &q_params); 299 if (rc) { 300 BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]); 301 return rc; 302 } 303 } 304 out: 305 /* Clean Context */ 306 if (bnx2x_vfq(vf, qid, cxt)) { 307 bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0; 308 bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0; 309 } 310 311 return 0; 312 } 313 314 static void 315 bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid) 316 { 317 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 318 if (vf) { 319 /* the first igu entry belonging to VFs of this PF */ 320 if (!BP_VFDB(bp)->first_vf_igu_entry) 321 BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id; 322 323 /* the first igu entry belonging to this VF */ 324 if (!vf_sb_count(vf)) 325 vf->igu_base_id = igu_sb_id; 326 327 ++vf_sb_count(vf); 328 ++vf->sb_count; 329 } 330 BP_VFDB(bp)->vf_sbs_pool++; 331 } 332 333 static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp, 334 struct bnx2x_vlan_mac_obj *obj, 335 atomic_t *counter) 336 { 337 struct list_head *pos; 338 int read_lock; 339 int cnt = 0; 340 341 read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj); 342 if (read_lock) 343 DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n"); 344 345 list_for_each(pos, &obj->head) 346 cnt++; 347 348 if (!read_lock) 349 bnx2x_vlan_mac_h_read_unlock(bp, obj); 350 351 atomic_set(counter, cnt); 352 } 353 354 static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf, 355 int qid, bool drv_only, bool mac) 356 { 357 struct bnx2x_vlan_mac_ramrod_params ramrod; 358 int rc; 359 360 DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid, 361 mac ? 
"MACs" : "VLANs"); 362 363 /* Prepare ramrod params */ 364 memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); 365 if (mac) { 366 set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); 367 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); 368 } else { 369 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 370 &ramrod.user_req.vlan_mac_flags); 371 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); 372 } 373 ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL; 374 375 set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); 376 if (drv_only) 377 set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); 378 else 379 set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); 380 381 /* Start deleting */ 382 rc = ramrod.vlan_mac_obj->delete_all(bp, 383 ramrod.vlan_mac_obj, 384 &ramrod.user_req.vlan_mac_flags, 385 &ramrod.ramrod_flags); 386 if (rc) { 387 BNX2X_ERR("Failed to delete all %s\n", 388 mac ? "MACs" : "VLANs"); 389 return rc; 390 } 391 392 /* Clear the vlan counters */ 393 if (!mac) 394 atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0); 395 396 return 0; 397 } 398 399 static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, 400 struct bnx2x_virtf *vf, int qid, 401 struct bnx2x_vf_mac_vlan_filter *filter, 402 bool drv_only) 403 { 404 struct bnx2x_vlan_mac_ramrod_params ramrod; 405 int rc; 406 407 DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n", 408 vf->abs_vfid, filter->add ? "Adding" : "Deleting", 409 filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN"); 410 411 /* Prepare ramrod params */ 412 memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); 413 if (filter->type == BNX2X_VF_FILTER_VLAN) { 414 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 415 &ramrod.user_req.vlan_mac_flags); 416 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); 417 ramrod.user_req.u.vlan.vlan = filter->vid; 418 } else { 419 set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); 420 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); 421 memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN); 422 } 423 ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD : 424 BNX2X_VLAN_MAC_DEL; 425 426 /* Verify there are available vlan credits */ 427 if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN && 428 (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >= 429 vf_vlan_rules_cnt(vf))) { 430 BNX2X_ERR("No credits for vlan [%d >= %d]\n", 431 atomic_read(&bnx2x_vfq(vf, qid, vlan_count)), 432 vf_vlan_rules_cnt(vf)); 433 return -ENOMEM; 434 } 435 436 set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); 437 if (drv_only) 438 set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); 439 else 440 set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); 441 442 /* Add/Remove the filter */ 443 rc = bnx2x_config_vlan_mac(bp, &ramrod); 444 if (rc && rc != -EEXIST) { 445 BNX2X_ERR("Failed to %s %s\n", 446 filter->add ? "add" : "delete", 447 filter->type == BNX2X_VF_FILTER_MAC ? 
"MAC" : 448 "VLAN"); 449 return rc; 450 } 451 452 /* Update the vlan counters */ 453 if (filter->type == BNX2X_VF_FILTER_VLAN) 454 bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj, 455 &bnx2x_vfq(vf, qid, vlan_count)); 456 457 return 0; 458 } 459 460 int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, 461 struct bnx2x_vf_mac_vlan_filters *filters, 462 int qid, bool drv_only) 463 { 464 int rc = 0, i; 465 466 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 467 468 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) 469 return -EINVAL; 470 471 /* Prepare ramrod params */ 472 for (i = 0; i < filters->count; i++) { 473 rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, 474 &filters->filters[i], drv_only); 475 if (rc) 476 break; 477 } 478 479 /* Rollback if needed */ 480 if (i != filters->count) { 481 BNX2X_ERR("Managed only %d/%d filters - rolling back\n", 482 i, filters->count + 1); 483 while (--i >= 0) { 484 filters->filters[i].add = !filters->filters[i].add; 485 bnx2x_vf_mac_vlan_config(bp, vf, qid, 486 &filters->filters[i], 487 drv_only); 488 } 489 } 490 491 /* It's our responsibility to free the filters */ 492 kfree(filters); 493 494 return rc; 495 } 496 497 int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, 498 struct bnx2x_vf_queue_construct_params *qctor) 499 { 500 int rc; 501 502 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); 503 504 rc = bnx2x_vf_queue_create(bp, vf, qid, qctor); 505 if (rc) 506 goto op_err; 507 508 /* Configure vlan0 for leading queue */ 509 if (!qid) { 510 struct bnx2x_vf_mac_vlan_filter filter; 511 512 memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter)); 513 filter.type = BNX2X_VF_FILTER_VLAN; 514 filter.add = true; 515 filter.vid = 0; 516 rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false); 517 if (rc) 518 goto op_err; 519 } 520 521 /* Schedule the configuration of any pending vlan filters */ 522 vf->cfg_flags |= VF_CFG_VLAN; 523 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN, 524 BNX2X_MSG_IOV); 525 return 0; 526 op_err: 527 BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); 528 return rc; 529 } 530 531 static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf, 532 int qid) 533 { 534 int rc; 535 536 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); 537 538 /* If needed, clean the filtering data base */ 539 if ((qid == LEADING_IDX) && 540 bnx2x_validate_vf_sp_objs(bp, vf, false)) { 541 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false); 542 if (rc) 543 goto op_err; 544 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true); 545 if (rc) 546 goto op_err; 547 } 548 549 /* Terminate queue */ 550 if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) { 551 struct bnx2x_queue_state_params qstate; 552 553 memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); 554 qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); 555 qstate.q_obj->state = BNX2X_Q_STATE_STOPPED; 556 qstate.cmd = BNX2X_Q_CMD_TERMINATE; 557 set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); 558 rc = bnx2x_queue_state_change(bp, &qstate); 559 if (rc) 560 goto op_err; 561 } 562 563 return 0; 564 op_err: 565 BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); 566 return rc; 567 } 568 569 int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, 570 bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only) 571 { 572 struct bnx2x_mcast_list_elem *mc = NULL; 573 struct bnx2x_mcast_ramrod_params mcast; 574 int rc, i; 575 576 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 577 578 /* Prepare Multicast command */ 579 
memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params)); 580 mcast.mcast_obj = &vf->mcast_obj; 581 if (drv_only) 582 set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags); 583 else 584 set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags); 585 if (mc_num) { 586 mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem), 587 GFP_KERNEL); 588 if (!mc) { 589 BNX2X_ERR("Cannot configure multicasts due to lack of memory\n"); 590 return -ENOMEM; 591 } 592 } 593 594 /* clear existing mcasts */ 595 mcast.mcast_list_len = vf->mcast_list_len; 596 vf->mcast_list_len = mc_num; 597 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL); 598 if (rc) { 599 BNX2X_ERR("Failed to remove multicasts\n"); 600 if (mc) 601 kfree(mc); 602 return rc; 603 } 604 605 /* update mcast list on the ramrod params */ 606 if (mc_num) { 607 INIT_LIST_HEAD(&mcast.mcast_list); 608 for (i = 0; i < mc_num; i++) { 609 mc[i].mac = mcasts[i]; 610 list_add_tail(&mc[i].link, 611 &mcast.mcast_list); 612 } 613 614 /* add new mcasts */ 615 mcast.mcast_list_len = mc_num; 616 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD); 617 if (rc) 618 BNX2X_ERR("Failed to add multicasts\n"); 619 kfree(mc); 620 } 621 622 return rc; 623 } 624 625 static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid, 626 struct bnx2x_rx_mode_ramrod_params *ramrod, 627 struct bnx2x_virtf *vf, 628 unsigned long accept_flags) 629 { 630 struct bnx2x_vf_queue *vfq = vfq_get(vf, qid); 631 632 memset(ramrod, 0, sizeof(*ramrod)); 633 ramrod->cid = vfq->cid; 634 ramrod->cl_id = vfq_cl_id(vf, vfq); 635 ramrod->rx_mode_obj = &bp->rx_mode_obj; 636 ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid); 637 ramrod->rx_accept_flags = accept_flags; 638 ramrod->tx_accept_flags = accept_flags; 639 ramrod->pstate = &vf->filter_state; 640 ramrod->state = BNX2X_FILTER_RX_MODE_PENDING; 641 642 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); 643 set_bit(RAMROD_RX, &ramrod->ramrod_flags); 644 set_bit(RAMROD_TX, &ramrod->ramrod_flags); 645 646 ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2); 647 ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); 648 } 649 650 int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf, 651 int qid, unsigned long accept_flags) 652 { 653 struct bnx2x_rx_mode_ramrod_params ramrod; 654 655 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 656 657 bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags); 658 set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); 659 vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags; 660 return bnx2x_config_rx_mode(bp, &ramrod); 661 } 662 663 int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid) 664 { 665 int rc; 666 667 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); 668 669 /* Remove all classification configuration for leading queue */ 670 if (qid == LEADING_IDX) { 671 rc = bnx2x_vf_rxmode(bp, vf, qid, 0); 672 if (rc) 673 goto op_err; 674 675 /* Remove filtering if feasible */ 676 if (bnx2x_validate_vf_sp_objs(bp, vf, true)) { 677 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, 678 false, false); 679 if (rc) 680 goto op_err; 681 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, 682 false, true); 683 if (rc) 684 goto op_err; 685 rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false); 686 if (rc) 687 goto op_err; 688 } 689 } 690 691 /* Destroy queue */ 692 rc = bnx2x_vf_queue_destroy(bp, vf, qid); 693 if (rc) 694 goto op_err; 695 return rc; 696 op_err: 697 BNX2X_ERR("vf[%d:%d] error: rc %d\n", 698 vf->abs_vfid, qid, rc); 699 return rc; 700 } 701 702 /* VF enable primitives 703 * when pretend is
required the caller is responsible 704 * for calling pretend prior to calling these routines 705 */ 706 707 /* internal vf enable - until vf is enabled internally all transactions 708 * are blocked. This routine should always be called last with pretend. 709 */ 710 static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable) 711 { 712 REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0); 713 } 714 715 /* clears vf error in all semi blocks */ 716 static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid) 717 { 718 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid); 719 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid); 720 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid); 721 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid); 722 } 723 724 static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid) 725 { 726 u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5; 727 u32 was_err_reg = 0; 728 729 switch (was_err_group) { 730 case 0: 731 was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR; 732 break; 733 case 1: 734 was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR; 735 break; 736 case 2: 737 was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR; 738 break; 739 case 3: 740 was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR; 741 break; 742 } 743 REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f)); 744 } 745 746 static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf) 747 { 748 int i; 749 u32 val; 750 751 /* Set VF masks and configuration - pretend */ 752 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); 753 754 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); 755 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); 756 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0); 757 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0); 758 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0); 759 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0); 760 761 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); 762 val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN); 763 if (vf->cfg_flags & VF_CFG_INT_SIMD) 764 val |= IGU_VF_CONF_SINGLE_ISR_EN; 765 val &= ~IGU_VF_CONF_PARENT_MASK; 766 val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT; 767 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); 768 769 DP(BNX2X_MSG_IOV, 770 "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n", 771 vf->abs_vfid, val); 772 773 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 774 775 /* iterate over all queues, clear sb consumer */ 776 for (i = 0; i < vf_sb_count(vf); i++) { 777 u8 igu_sb_id = vf_igu_sb(vf, i); 778 779 /* zero prod memory */ 780 REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0); 781 782 /* clear sb state machine */ 783 bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id, 784 false /* VF */); 785 786 /* disable + update */ 787 bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0, 788 IGU_INT_DISABLE, 1); 789 } 790 } 791 792 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid) 793 { 794 /* set the VF-PF association in the FW */ 795 storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp)); 796 storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1); 797 798 /* clear vf errors*/ 799 bnx2x_vf_semi_clear_err(bp, abs_vfid); 800 bnx2x_vf_pglue_clear_err(bp, abs_vfid); 801 802 /* internal vf-enable - pretend */ 803 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid)); 804 DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid); 805 bnx2x_vf_enable_internal(bp, true); 806 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 807 } 808 809 static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf) 810 { 811 /* Reset vf in IGU interrupts are still 
disabled */ 812 bnx2x_vf_igu_reset(bp, vf); 813 814 /* pretend to enable the vf with the PBF */ 815 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); 816 REG_WR(bp, PBF_REG_DISABLE_VF, 0); 817 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 818 } 819 820 static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid) 821 { 822 struct pci_dev *dev; 823 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 824 825 if (!vf) 826 return false; 827 828 dev = pci_get_bus_and_slot(vf->bus, vf->devfn); 829 if (dev) 830 return bnx2x_is_pcie_pending(dev); 831 return false; 832 } 833 834 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) 835 { 836 /* Verify no pending pci transactions */ 837 if (bnx2x_vf_is_pcie_pending(bp, abs_vfid)) 838 BNX2X_ERR("PCIE Transactions still pending\n"); 839 840 return 0; 841 } 842 843 static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp, 844 struct bnx2x_virtf *vf, 845 int new) 846 { 847 int num = vf_vlan_rules_cnt(vf); 848 int diff = new - num; 849 bool rc = true; 850 851 DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n", 852 vf->abs_vfid, new, num); 853 854 if (diff > 0) 855 rc = bp->vlans_pool.get(&bp->vlans_pool, diff); 856 else if (diff < 0) 857 rc = bp->vlans_pool.put(&bp->vlans_pool, -diff); 858 859 if (rc) 860 vf_vlan_rules_cnt(vf) = new; 861 else 862 DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n", 863 vf->abs_vfid); 864 } 865 866 /* must be called after the number of PF queues and the number of VFs are 867 * both known 868 */ 869 static void 870 bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) 871 { 872 struct vf_pf_resc_request *resc = &vf->alloc_resc; 873 u16 vlan_count = 0; 874 875 /* will be set only during VF-ACQUIRE */ 876 resc->num_rxqs = 0; 877 resc->num_txqs = 0; 878 879 /* no credit calculations for macs (just yet) */ 880 resc->num_mac_filters = 1; 881 882 /* divvy up vlan rules */ 883 bnx2x_iov_re_set_vlan_filters(bp, vf, 0); 884 vlan_count = bp->vlans_pool.check(&bp->vlans_pool); 885 vlan_count = 1 << ilog2(vlan_count); 886 bnx2x_iov_re_set_vlan_filters(bp, vf, 887 vlan_count / BNX2X_NR_VIRTFN(bp)); 888 889 /* no real limitation */ 890 resc->num_mc_filters = 0; 891 892 /* num_sbs already set */ 893 resc->num_sbs = vf->sb_count; 894 } 895 896 /* FLR routines: */ 897 static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) 898 { 899 /* reset the state variables */ 900 bnx2x_iov_static_resc(bp, vf); 901 vf->state = VF_FREE; 902 } 903 904 static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf) 905 { 906 u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp); 907 908 /* DQ usage counter */ 909 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); 910 bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT, 911 "DQ VF usage counter timed out", 912 poll_cnt); 913 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 914 915 /* FW cleanup command - poll for the results */ 916 if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid), 917 poll_cnt)) 918 BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid); 919 920 /* verify TX hw is flushed */ 921 bnx2x_tx_hw_flushed(bp, poll_cnt); 922 } 923 924 static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) 925 { 926 int rc, i; 927 928 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 929 930 /* the cleanup operations are valid if and only if the VF 931 * was first acquired. 
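 * (a VF that was never acquired has vf_rxq_count() == 0, so the
 * per-queue loop below simply does nothing in that case)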
932 */ 933 for (i = 0; i < vf_rxq_count(vf); i++) { 934 rc = bnx2x_vf_queue_flr(bp, vf, i); 935 if (rc) 936 goto out; 937 } 938 939 /* remove multicasts */ 940 bnx2x_vf_mcast(bp, vf, NULL, 0, true); 941 942 /* dispatch final cleanup and wait for HW queues to flush */ 943 bnx2x_vf_flr_clnup_hw(bp, vf); 944 945 /* release VF resources */ 946 bnx2x_vf_free_resc(bp, vf); 947 948 /* re-open the mailbox */ 949 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); 950 return; 951 out: 952 BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n", 953 vf->abs_vfid, i, rc); 954 } 955 956 static void bnx2x_vf_flr_clnup(struct bnx2x *bp) 957 { 958 struct bnx2x_virtf *vf; 959 int i; 960 961 for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) { 962 /* VF should be RESET & in FLR cleanup states */ 963 if (bnx2x_vf(bp, i, state) != VF_RESET || 964 !bnx2x_vf(bp, i, flr_clnup_stage)) 965 continue; 966 967 DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", 968 i, BNX2X_NR_VIRTFN(bp)); 969 970 vf = BP_VF(bp, i); 971 972 /* lock the vf pf channel */ 973 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); 974 975 /* invoke the VF FLR SM */ 976 bnx2x_vf_flr(bp, vf); 977 978 /* mark the VF to be ACKED and continue */ 979 vf->flr_clnup_stage = false; 980 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); 981 } 982 983 /* Acknowledge the handled VFs. 984 * We acknowledge all the VFs for which an FLR was requested, even 985 * if among them there are some that we never opened, since the MCP 986 * will interrupt us immediately again if we only ack some of the bits, 987 * resulting in an endless loop. This can happen for example in KVM 988 * where an 'all ones' FLR request is sometimes given by the hypervisor 989 */ 990 DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n", 991 bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]); 992 for (i = 0; i < FLRD_VFS_DWORDS; i++) 993 SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 994 bp->vfdb->flrd_vfs[i]); 995 996 bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0); 997 998 /* clear the acked bits - better yet if the MCP implemented 999 * write-to-clear semantics 1000 */ 1001 for (i = 0; i < FLRD_VFS_DWORDS; i++) 1002 SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0); 1003 } 1004 1005 void bnx2x_vf_handle_flr_event(struct bnx2x *bp) 1006 { 1007 int i; 1008 1009 /* Read FLR'd VFs */ 1010 for (i = 0; i < FLRD_VFS_DWORDS; i++) 1011 bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]); 1012 1013 DP(BNX2X_MSG_MCP, 1014 "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n", 1015 bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]); 1016 1017 for_each_vf(bp, i) { 1018 struct bnx2x_virtf *vf = BP_VF(bp, i); 1019 u32 reset = 0; 1020 1021 if (vf->abs_vfid < 32) 1022 reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid); 1023 else 1024 reset = bp->vfdb->flrd_vfs[1] & 1025 (1 << (vf->abs_vfid - 32)); 1026 1027 if (reset) { 1028 /* set as reset and ready for cleanup */ 1029 vf->state = VF_RESET; 1030 vf->flr_clnup_stage = true; 1031 1032 DP(BNX2X_MSG_IOV, 1033 "Initiating Final cleanup for VF %d\n", 1034 vf->abs_vfid); 1035 } 1036 } 1037 1038 /* do the FLR cleanup for all marked VFs */ 1039 bnx2x_vf_flr_clnup(bp); 1040 } 1041 1042 /* IOV global initialization routines */ 1043 void bnx2x_iov_init_dq(struct bnx2x *bp) 1044 { 1045 if (!IS_SRIOV(bp)) 1046 return; 1047 1048 /* Set the DQ such that the CID reflects the abs_vfid */ 1049 REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0); 1050 REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS)); 1051 1052 /* Set VFs starting CID.
If it's > 0, the preceding CIDs belong to 1053 * the PF L2 queues 1054 */ 1055 REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID); 1056 1057 /* The VF window size is the log2 of the max number of CIDs per VF */ 1058 REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND); 1059 1060 /* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match 1061 * the PF doorbell size although the 2 are independent. 1062 */ 1063 REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3); 1064 1065 /* No security checks for now - 1066 * configure single rule (out of 16) mask = 0x1, value = 0x0, 1067 * CID range 0 - 0x1ffff 1068 */ 1069 REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1); 1070 REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0); 1071 REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); 1072 REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); 1073 1074 /* set the VF doorbell threshold. This threshold represents the number 1075 * of doorbells allowed in the main DORQ fifo for a specific VF. 1076 */ 1077 REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64); 1078 } 1079 1080 void bnx2x_iov_init_dmae(struct bnx2x *bp) 1081 { 1082 if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) 1083 REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); 1084 } 1085 1086 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) 1087 { 1088 struct pci_dev *dev = bp->pdev; 1089 struct bnx2x_sriov *iov = &bp->vfdb->sriov; 1090 1091 return dev->bus->number + ((dev->devfn + iov->offset + 1092 iov->stride * vfid) >> 8); 1093 } 1094 1095 static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid) 1096 { 1097 struct pci_dev *dev = bp->pdev; 1098 struct bnx2x_sriov *iov = &bp->vfdb->sriov; 1099 1100 return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff; 1101 } 1102 1103 static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf) 1104 { 1105 int i, n; 1106 struct pci_dev *dev = bp->pdev; 1107 struct bnx2x_sriov *iov = &bp->vfdb->sriov; 1108 1109 for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) { 1110 u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i); 1111 u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i); 1112 1113 size /= iov->total; 1114 vf->bars[n].bar = start + size * vf->abs_vfid; 1115 vf->bars[n].size = size; 1116 } 1117 } 1118 1119 static int bnx2x_ari_enabled(struct pci_dev *dev) 1120 { 1121 return dev->bus->self && dev->bus->self->ari_enabled; 1122 } 1123 1124 static void 1125 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) 1126 { 1127 int sb_id; 1128 u32 val; 1129 u8 fid, current_pf = 0; 1130 1131 /* IGU in normal mode - read CAM */ 1132 for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) { 1133 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4); 1134 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) 1135 continue; 1136 fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); 1137 if (fid & IGU_FID_ENCODE_IS_PF) 1138 current_pf = fid & IGU_FID_PF_NUM_MASK; 1139 else if (current_pf == BP_FUNC(bp)) 1140 bnx2x_vf_set_igu_info(bp, sb_id, 1141 (fid & IGU_FID_VF_NUM_MASK)); 1142 DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", 1143 ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"), 1144 ((fid & IGU_FID_ENCODE_IS_PF) ?
(fid & IGU_FID_PF_NUM_MASK) : 1145 (fid & IGU_FID_VF_NUM_MASK)), sb_id, 1146 GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)); 1147 } 1148 DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool); 1149 } 1150 1151 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp) 1152 { 1153 if (bp->vfdb) { 1154 kfree(bp->vfdb->vfqs); 1155 kfree(bp->vfdb->vfs); 1156 kfree(bp->vfdb); 1157 } 1158 bp->vfdb = NULL; 1159 } 1160 1161 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov) 1162 { 1163 int pos; 1164 struct pci_dev *dev = bp->pdev; 1165 1166 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); 1167 if (!pos) { 1168 BNX2X_ERR("failed to find SRIOV capability in device\n"); 1169 return -ENODEV; 1170 } 1171 1172 iov->pos = pos; 1173 DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos); 1174 pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl); 1175 pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total); 1176 pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial); 1177 pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset); 1178 pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride); 1179 pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); 1180 pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap); 1181 pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); 1182 1183 return 0; 1184 } 1185 1186 static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov) 1187 { 1188 u32 val; 1189 1190 /* read the SRIOV capability structure 1191 * The fields can be read via configuration read or 1192 * directly from the device (starting at offset PCICFG_OFFSET) 1193 */ 1194 if (bnx2x_sriov_pci_cfg_info(bp, iov)) 1195 return -ENODEV; 1196 1197 /* get the number of SRIOV bars */ 1198 iov->nres = 0; 1199 1200 /* read the first_vfid */ 1201 val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF); 1202 iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK) 1203 * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp)); 1204 1205 DP(BNX2X_MSG_IOV, 1206 "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n", 1207 BP_FUNC(bp), 1208 iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total, 1209 iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz); 1210 1211 return 0; 1212 } 1213 1214 /* must be called after PF bars are mapped */ 1215 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, 1216 int num_vfs_param) 1217 { 1218 int err, i; 1219 struct bnx2x_sriov *iov; 1220 struct pci_dev *dev = bp->pdev; 1221 1222 bp->vfdb = NULL; 1223 1224 /* verify is pf */ 1225 if (IS_VF(bp)) 1226 return 0; 1227 1228 /* verify sriov capability is present in configuration space */ 1229 if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV)) 1230 return 0; 1231 1232 /* verify chip revision */ 1233 if (CHIP_IS_E1x(bp)) 1234 return 0; 1235 1236 /* check if SRIOV support is turned off */ 1237 if (!num_vfs_param) 1238 return 0; 1239 1240 /* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */ 1241 if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) { 1242 BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). 
Abort SRIOV\n", 1243 BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID); 1244 return 0; 1245 } 1246 1247 /* SRIOV can be enabled only with MSIX */ 1248 if (int_mode_param == BNX2X_INT_MODE_MSI || 1249 int_mode_param == BNX2X_INT_MODE_INTX) { 1250 BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n"); 1251 return 0; 1252 } 1253 1254 err = -EIO; 1255 /* verify ari is enabled */ 1256 if (!bnx2x_ari_enabled(bp->pdev)) { 1257 BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n"); 1258 return 0; 1259 } 1260 1261 /* verify igu is in normal mode */ 1262 if (CHIP_INT_MODE_IS_BC(bp)) { 1263 BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n"); 1264 return 0; 1265 } 1266 1267 /* allocate the vfs database */ 1268 bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL); 1269 if (!bp->vfdb) { 1270 BNX2X_ERR("failed to allocate vf database\n"); 1271 err = -ENOMEM; 1272 goto failed; 1273 } 1274 1275 /* get the sriov info - Linux already collected all the pertinent 1276 * information, however the sriov structure is for the private use 1277 * of the pci module. Also we want this information regardless 1278 * of the hyper-visor. 1279 */ 1280 iov = &(bp->vfdb->sriov); 1281 err = bnx2x_sriov_info(bp, iov); 1282 if (err) 1283 goto failed; 1284 1285 /* SR-IOV capability was enabled but there are no VFs*/ 1286 if (iov->total == 0) 1287 goto failed; 1288 1289 iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param); 1290 1291 DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n", 1292 num_vfs_param, iov->nr_virtfn); 1293 1294 /* allocate the vf array */ 1295 bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) * 1296 BNX2X_NR_VIRTFN(bp), GFP_KERNEL); 1297 if (!bp->vfdb->vfs) { 1298 BNX2X_ERR("failed to allocate vf array\n"); 1299 err = -ENOMEM; 1300 goto failed; 1301 } 1302 1303 /* Initial VF init - index and abs_vfid - nr_virtfn must be set */ 1304 for_each_vf(bp, i) { 1305 bnx2x_vf(bp, i, index) = i; 1306 bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; 1307 bnx2x_vf(bp, i, state) = VF_FREE; 1308 mutex_init(&bnx2x_vf(bp, i, op_mutex)); 1309 bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; 1310 } 1311 1312 /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ 1313 bnx2x_get_vf_igu_cam_info(bp); 1314 1315 /* allocate the queue arrays for all VFs */ 1316 bp->vfdb->vfqs = kzalloc( 1317 BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue), 1318 GFP_KERNEL); 1319 1320 DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs); 1321 1322 if (!bp->vfdb->vfqs) { 1323 BNX2X_ERR("failed to allocate vf queue array\n"); 1324 err = -ENOMEM; 1325 goto failed; 1326 } 1327 1328 /* Prepare the VFs event synchronization mechanism */ 1329 mutex_init(&bp->vfdb->event_mutex); 1330 1331 return 0; 1332 failed: 1333 DP(BNX2X_MSG_IOV, "Failed err=%d\n", err); 1334 __bnx2x_iov_free_vfdb(bp); 1335 return err; 1336 } 1337 1338 void bnx2x_iov_remove_one(struct bnx2x *bp) 1339 { 1340 int vf_idx; 1341 1342 /* if SRIOV is not enabled there's nothing to do */ 1343 if (!IS_SRIOV(bp)) 1344 return; 1345 1346 DP(BNX2X_MSG_IOV, "about to call disable sriov\n"); 1347 pci_disable_sriov(bp->pdev); 1348 DP(BNX2X_MSG_IOV, "sriov disabled\n"); 1349 1350 /* disable access to all VFs */ 1351 for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) { 1352 bnx2x_pretend_func(bp, 1353 HW_VF_HANDLE(bp, 1354 bp->vfdb->sriov.first_vf_in_pf + 1355 vf_idx)); 1356 DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n", 1357 bp->vfdb->sriov.first_vf_in_pf + vf_idx); 1358 
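/* still pretending to be this VF: clear its internal-access enable bit,
 * then drop the pretend below by restoring the PF's own function id
 */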
bnx2x_vf_enable_internal(bp, 0); 1359 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 1360 } 1361 1362 /* free vf database */ 1363 __bnx2x_iov_free_vfdb(bp); 1364 } 1365 1366 void bnx2x_iov_free_mem(struct bnx2x *bp) 1367 { 1368 int i; 1369 1370 if (!IS_SRIOV(bp)) 1371 return; 1372 1373 /* free vfs hw contexts */ 1374 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 1375 struct hw_dma *cxt = &bp->vfdb->context[i]; 1376 BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size); 1377 } 1378 1379 BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr, 1380 BP_VFDB(bp)->sp_dma.mapping, 1381 BP_VFDB(bp)->sp_dma.size); 1382 1383 BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr, 1384 BP_VF_MBX_DMA(bp)->mapping, 1385 BP_VF_MBX_DMA(bp)->size); 1386 1387 BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr, 1388 BP_VF_BULLETIN_DMA(bp)->mapping, 1389 BP_VF_BULLETIN_DMA(bp)->size); 1390 } 1391 1392 int bnx2x_iov_alloc_mem(struct bnx2x *bp) 1393 { 1394 size_t tot_size; 1395 int i, rc = 0; 1396 1397 if (!IS_SRIOV(bp)) 1398 return rc; 1399 1400 /* allocate vfs hw contexts */ 1401 tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) * 1402 BNX2X_CIDS_PER_VF * sizeof(union cdu_context); 1403 1404 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 1405 struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i); 1406 cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ); 1407 1408 if (cxt->size) { 1409 cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size); 1410 if (!cxt->addr) 1411 goto alloc_mem_err; 1412 } else { 1413 cxt->addr = NULL; 1414 cxt->mapping = 0; 1415 } 1416 tot_size -= cxt->size; 1417 } 1418 1419 /* allocate vfs ramrods dma memory - client_init and set_mac */ 1420 tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp); 1421 BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping, 1422 tot_size); 1423 if (!BP_VFDB(bp)->sp_dma.addr) 1424 goto alloc_mem_err; 1425 BP_VFDB(bp)->sp_dma.size = tot_size; 1426 1427 /* allocate mailboxes */ 1428 tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; 1429 BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping, 1430 tot_size); 1431 if (!BP_VF_MBX_DMA(bp)->addr) 1432 goto alloc_mem_err; 1433 1434 BP_VF_MBX_DMA(bp)->size = tot_size; 1435 1436 /* allocate local bulletin boards */ 1437 tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE; 1438 BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping, 1439 tot_size); 1440 if (!BP_VF_BULLETIN_DMA(bp)->addr) 1441 goto alloc_mem_err; 1442 1443 BP_VF_BULLETIN_DMA(bp)->size = tot_size; 1444 1445 return 0; 1446 1447 alloc_mem_err: 1448 return -ENOMEM; 1449 } 1450 1451 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, 1452 struct bnx2x_vf_queue *q) 1453 { 1454 u8 cl_id = vfq_cl_id(vf, q); 1455 u8 func_id = FW_VF_HANDLE(vf->abs_vfid); 1456 unsigned long q_type = 0; 1457 1458 set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); 1459 set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); 1460 1461 /* Queue State object */ 1462 bnx2x_init_queue_obj(bp, &q->sp_obj, 1463 cl_id, &q->cid, 1, func_id, 1464 bnx2x_vf_sp(bp, vf, q_data), 1465 bnx2x_vf_sp_map(bp, vf, q_data), 1466 q_type); 1467 1468 /* sp indication is set only when vlan/mac/etc. are initialized */ 1469 q->sp_initialized = false; 1470 1471 DP(BNX2X_MSG_IOV, 1472 "initialized vf %d's queue object. func id set to %d. 
cid set to 0x%x\n", 1473 vf->abs_vfid, q->sp_obj.func_id, q->cid); 1474 } 1475 1476 /* called by bnx2x_nic_load */ 1477 int bnx2x_iov_nic_init(struct bnx2x *bp) 1478 { 1479 int vfid; 1480 1481 if (!IS_SRIOV(bp)) { 1482 DP(BNX2X_MSG_IOV, "vfdb was not allocated\n"); 1483 return 0; 1484 } 1485 1486 DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn); 1487 1488 /* let FLR complete ... */ 1489 msleep(100); 1490 1491 /* initialize vf database */ 1492 for_each_vf(bp, vfid) { 1493 struct bnx2x_virtf *vf = BP_VF(bp, vfid); 1494 1495 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) * 1496 BNX2X_CIDS_PER_VF; 1497 1498 union cdu_context *base_cxt = (union cdu_context *) 1499 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + 1500 (base_vf_cid & (ILT_PAGE_CIDS-1)); 1501 1502 DP(BNX2X_MSG_IOV, 1503 "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n", 1504 vf->abs_vfid, vf_sb_count(vf), base_vf_cid, 1505 BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt); 1506 1507 /* init statically provisioned resources */ 1508 bnx2x_iov_static_resc(bp, vf); 1509 1510 /* queues are initialized during VF-ACQUIRE */ 1511 vf->filter_state = 0; 1512 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); 1513 1514 /* init mcast object - This object will be re-initialized 1515 * during VF-ACQUIRE with the proper cl_id and cid. 1516 * It needs to be initialized here so that it can be safely 1517 * handled by a subsequent FLR flow. 1518 */ 1519 vf->mcast_list_len = 0; 1520 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF, 1521 0xFF, 0xFF, 0xFF, 1522 bnx2x_vf_sp(bp, vf, mcast_rdata), 1523 bnx2x_vf_sp_map(bp, vf, mcast_rdata), 1524 BNX2X_FILTER_MCAST_PENDING, 1525 &vf->filter_state, 1526 BNX2X_OBJ_TYPE_RX_TX); 1527 1528 /* set the mailbox message addresses */ 1529 BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *) 1530 (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid * 1531 MBX_MSG_ALIGNED_SIZE); 1532 1533 BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping + 1534 vfid * MBX_MSG_ALIGNED_SIZE; 1535 1536 /* Enable vf mailbox */ 1537 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); 1538 } 1539 1540 /* Final VF init */ 1541 for_each_vf(bp, vfid) { 1542 struct bnx2x_virtf *vf = BP_VF(bp, vfid); 1543 1544 /* fill in the BDF and bars */ 1545 vf->bus = bnx2x_vf_bus(bp, vfid); 1546 vf->devfn = bnx2x_vf_devfn(bp, vfid); 1547 bnx2x_vf_set_bars(bp, vf); 1548 1549 DP(BNX2X_MSG_IOV, 1550 "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n", 1551 vf->abs_vfid, vf->bus, vf->devfn, 1552 (unsigned)vf->bars[0].bar, vf->bars[0].size, 1553 (unsigned)vf->bars[1].bar, vf->bars[1].size, 1554 (unsigned)vf->bars[2].bar, vf->bars[2].size); 1555 } 1556 1557 return 0; 1558 } 1559 1560 /* called by bnx2x_chip_cleanup */ 1561 int bnx2x_iov_chip_cleanup(struct bnx2x *bp) 1562 { 1563 int i; 1564 1565 if (!IS_SRIOV(bp)) 1566 return 0; 1567 1568 /* release all the VFs */ 1569 for_each_vf(bp, i) 1570 bnx2x_vf_release(bp, BP_VF(bp, i)); 1571 1572 return 0; 1573 } 1574 1575 /* called by bnx2x_init_hw_func, returns the next ilt line */ 1576 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) 1577 { 1578 int i; 1579 struct bnx2x_ilt *ilt = BP_ILT(bp); 1580 1581 if (!IS_SRIOV(bp)) 1582 return line; 1583 1584 /* set vfs ilt lines */ 1585 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 1586 struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i); 1587 1588 ilt->lines[line+i].page = hw_cxt->addr; 1589 ilt->lines[line+i].page_mapping = hw_cxt->mapping; 1590 ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */ 1591 } 
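/* report the first ILT line following the VF context pages; the caller
 * continues its own ILT setup from the returned line (the function is
 * documented above as returning the next ilt line)
 */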
1592 return line + i; 1593 } 1594 1595 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid) 1596 { 1597 return ((cid >= BNX2X_FIRST_VF_CID) && 1598 ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS)); 1599 } 1600 1601 static 1602 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, 1603 struct bnx2x_vf_queue *vfq, 1604 union event_ring_elem *elem) 1605 { 1606 unsigned long ramrod_flags = 0; 1607 int rc = 0; 1608 1609 /* Always push next commands out, don't wait here */ 1610 set_bit(RAMROD_CONT, &ramrod_flags); 1611 1612 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { 1613 case BNX2X_FILTER_MAC_PENDING: 1614 rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, 1615 &ramrod_flags); 1616 break; 1617 case BNX2X_FILTER_VLAN_PENDING: 1618 rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem, 1619 &ramrod_flags); 1620 break; 1621 default: 1622 BNX2X_ERR("Unsupported classification command: %d\n", 1623 elem->message.data.eth_event.echo); 1624 return; 1625 } 1626 if (rc < 0) 1627 BNX2X_ERR("Failed to schedule new commands: %d\n", rc); 1628 else if (rc > 0) 1629 DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n"); 1630 } 1631 1632 static 1633 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp, 1634 struct bnx2x_virtf *vf) 1635 { 1636 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 1637 int rc; 1638 1639 rparam.mcast_obj = &vf->mcast_obj; 1640 vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw); 1641 1642 /* If there are pending mcast commands - send them */ 1643 if (vf->mcast_obj.check_pending(&vf->mcast_obj)) { 1644 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 1645 if (rc < 0) 1646 BNX2X_ERR("Failed to send pending mcast commands: %d\n", 1647 rc); 1648 } 1649 } 1650 1651 static 1652 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, 1653 struct bnx2x_virtf *vf) 1654 { 1655 smp_mb__before_atomic(); 1656 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); 1657 smp_mb__after_atomic(); 1658 } 1659 1660 static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp, 1661 struct bnx2x_virtf *vf) 1662 { 1663 vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw); 1664 } 1665 1666 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) 1667 { 1668 struct bnx2x_virtf *vf; 1669 int qidx = 0, abs_vfid; 1670 u8 opcode; 1671 u16 cid = 0xffff; 1672 1673 if (!IS_SRIOV(bp)) 1674 return 1; 1675 1676 /* first get the cid - the only events we handle here are cfc-delete 1677 * and set-mac completion 1678 */ 1679 opcode = elem->message.opcode; 1680 1681 switch (opcode) { 1682 case EVENT_RING_OPCODE_CFC_DEL: 1683 cid = SW_CID((__force __le32) 1684 elem->message.data.cfc_del_event.cid); 1685 DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid); 1686 break; 1687 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 1688 case EVENT_RING_OPCODE_MULTICAST_RULES: 1689 case EVENT_RING_OPCODE_FILTERS_RULES: 1690 case EVENT_RING_OPCODE_RSS_UPDATE_RULES: 1691 cid = (elem->message.data.eth_event.echo & 1692 BNX2X_SWCID_MASK); 1693 DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); 1694 break; 1695 case EVENT_RING_OPCODE_VF_FLR: 1696 abs_vfid = elem->message.data.vf_flr_event.vf_id; 1697 DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n", 1698 abs_vfid); 1699 goto get_vf; 1700 case EVENT_RING_OPCODE_MALICIOUS_VF: 1701 abs_vfid = elem->message.data.malicious_vf_event.vf_id; 1702 BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n", 1703 abs_vfid, 1704 elem->message.data.malicious_vf_event.err_id); 1705 goto get_vf; 1706 default: 1707 return 1; 
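/* events with any other opcode are not VF-related; return 1, as in the
 * !IS_SRIOV() case above, to indicate the event was not consumed here
 */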
1708 } 1709 1710 /* check if the cid is the VF range */ 1711 if (!bnx2x_iov_is_vf_cid(bp, cid)) { 1712 DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid); 1713 return 1; 1714 } 1715 1716 /* extract vf and rxq index from vf_cid - relies on the following: 1717 * 1. vfid on cid reflects the true abs_vfid 1718 * 2. The max number of VFs (per path) is 64 1719 */ 1720 qidx = cid & ((1 << BNX2X_VF_CID_WND)-1); 1721 abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 1722 get_vf: 1723 vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 1724 1725 if (!vf) { 1726 BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n", 1727 cid, abs_vfid); 1728 return 0; 1729 } 1730 1731 switch (opcode) { 1732 case EVENT_RING_OPCODE_CFC_DEL: 1733 DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n", 1734 vf->abs_vfid, qidx); 1735 vfq_get(vf, qidx)->sp_obj.complete_cmd(bp, 1736 &vfq_get(vf, 1737 qidx)->sp_obj, 1738 BNX2X_Q_CMD_CFC_DEL); 1739 break; 1740 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 1741 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n", 1742 vf->abs_vfid, qidx); 1743 bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem); 1744 break; 1745 case EVENT_RING_OPCODE_MULTICAST_RULES: 1746 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n", 1747 vf->abs_vfid, qidx); 1748 bnx2x_vf_handle_mcast_eqe(bp, vf); 1749 break; 1750 case EVENT_RING_OPCODE_FILTERS_RULES: 1751 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n", 1752 vf->abs_vfid, qidx); 1753 bnx2x_vf_handle_filters_eqe(bp, vf); 1754 break; 1755 case EVENT_RING_OPCODE_RSS_UPDATE_RULES: 1756 DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n", 1757 vf->abs_vfid, qidx); 1758 bnx2x_vf_handle_rss_update_eqe(bp, vf); 1759 case EVENT_RING_OPCODE_VF_FLR: 1760 case EVENT_RING_OPCODE_MALICIOUS_VF: 1761 /* Do nothing for now */ 1762 return 0; 1763 } 1764 1765 return 0; 1766 } 1767 1768 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) 1769 { 1770 /* extract the vf from vf_cid - relies on the following: 1771 * 1. vfid on cid reflects the true abs_vfid 1772 * 2. The max number of VFs (per path) is 64 1773 */ 1774 int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 1775 return bnx2x_vf_by_abs_fid(bp, abs_vfid); 1776 } 1777 1778 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, 1779 struct bnx2x_queue_sp_obj **q_obj) 1780 { 1781 struct bnx2x_virtf *vf; 1782 1783 if (!IS_SRIOV(bp)) 1784 return; 1785 1786 vf = bnx2x_vf_by_cid(bp, vf_cid); 1787 1788 if (vf) { 1789 /* extract queue index from vf_cid - relies on the following: 1790 * 1. vfid on cid reflects the true abs_vfid 1791 * 2. The max number of VFs (per path) is 64 1792 */ 1793 int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1); 1794 *q_obj = &bnx2x_vfq(vf, q_index, sp_obj); 1795 } else { 1796 BNX2X_ERR("No vf matching cid %d\n", vf_cid); 1797 } 1798 } 1799 1800 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) 1801 { 1802 int i; 1803 int first_queue_query_index, num_queues_req; 1804 dma_addr_t cur_data_offset; 1805 struct stats_query_entry *cur_query_entry; 1806 u8 stats_count = 0; 1807 bool is_fcoe = false; 1808 1809 if (!IS_SRIOV(bp)) 1810 return; 1811 1812 if (!NO_FCOE(bp)) 1813 is_fcoe = true; 1814 1815 /* fcoe adds one global request and one queue request */ 1816 num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe; 1817 first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1818 (is_fcoe ? 
0 : 1); 1819 1820 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), 1821 "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n", 1822 BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, 1823 first_queue_query_index + num_queues_req); 1824 1825 cur_data_offset = bp->fw_stats_data_mapping + 1826 offsetof(struct bnx2x_fw_stats_data, queue_stats) + 1827 num_queues_req * sizeof(struct per_queue_stats); 1828 1829 cur_query_entry = &bp->fw_stats_req-> 1830 query[first_queue_query_index + num_queues_req]; 1831 1832 for_each_vf(bp, i) { 1833 int j; 1834 struct bnx2x_virtf *vf = BP_VF(bp, i); 1835 1836 if (vf->state != VF_ENABLED) { 1837 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), 1838 "vf %d not enabled so no stats for it\n", 1839 vf->abs_vfid); 1840 continue; 1841 } 1842 1843 DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid); 1844 for_each_vfq(vf, j) { 1845 struct bnx2x_vf_queue *rxq = vfq_get(vf, j); 1846 1847 dma_addr_t q_stats_addr = 1848 vf->fw_stat_map + j * vf->stats_stride; 1849 1850 /* collect stats for active queues only */ 1851 if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) == 1852 BNX2X_Q_LOGICAL_STATE_STOPPED) 1853 continue; 1854 1855 /* create stats query entry for this queue */ 1856 cur_query_entry->kind = STATS_TYPE_QUEUE; 1857 cur_query_entry->index = vfq_stat_id(vf, rxq); 1858 cur_query_entry->funcID = 1859 cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid)); 1860 cur_query_entry->address.hi = 1861 cpu_to_le32(U64_HI(q_stats_addr)); 1862 cur_query_entry->address.lo = 1863 cpu_to_le32(U64_LO(q_stats_addr)); 1864 DP(BNX2X_MSG_IOV, 1865 "added address %x %x for vf %d queue %d client %d\n", 1866 cur_query_entry->address.hi, 1867 cur_query_entry->address.lo, cur_query_entry->funcID, 1868 j, cur_query_entry->index); 1869 cur_query_entry++; 1870 cur_data_offset += sizeof(struct per_queue_stats); 1871 stats_count++; 1872 1873 /* all stats are coalesced to the leading queue */ 1874 if (vf->cfg_flags & VF_CFG_STATS_COALESCE) 1875 break; 1876 } 1877 } 1878 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; 1879 } 1880 1881 static inline 1882 struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id) 1883 { 1884 int i; 1885 struct bnx2x_virtf *vf = NULL; 1886 1887 for_each_vf(bp, i) { 1888 vf = BP_VF(bp, i); 1889 if (stat_id >= vf->igu_base_id && 1890 stat_id < vf->igu_base_id + vf_sb_count(vf)) 1891 break; 1892 } 1893 return vf; 1894 } 1895 1896 /* VF API helpers */ 1897 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid, 1898 u8 enable) 1899 { 1900 u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4; 1901 u32 val = enable ?
(abs_vfid | (1 << 6)) : 0; 1902 1903 REG_WR(bp, reg, val); 1904 } 1905 1906 static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf) 1907 { 1908 int i; 1909 1910 for_each_vfq(vf, i) 1911 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, 1912 vfq_qzone_id(vf, vfq_get(vf, i)), false); 1913 } 1914 1915 static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf) 1916 { 1917 u32 val; 1918 1919 /* clear the VF configuration - pretend */ 1920 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); 1921 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); 1922 val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN | 1923 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK); 1924 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); 1925 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 1926 } 1927 1928 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf) 1929 { 1930 return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF), 1931 BNX2X_VF_MAX_QUEUES); 1932 } 1933 1934 static 1935 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf, 1936 struct vf_pf_resc_request *req_resc) 1937 { 1938 u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); 1939 u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); 1940 1941 /* Save a vlan filter for the Hypervisor */ 1942 return ((req_resc->num_rxqs <= rxq_cnt) && 1943 (req_resc->num_txqs <= txq_cnt) && 1944 (req_resc->num_sbs <= vf_sb_count(vf)) && 1945 (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) && 1946 (req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf))); 1947 } 1948 1949 /* CORE VF API */ 1950 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, 1951 struct vf_pf_resc_request *resc) 1952 { 1953 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) * 1954 BNX2X_CIDS_PER_VF; 1955 1956 union cdu_context *base_cxt = (union cdu_context *) 1957 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + 1958 (base_vf_cid & (ILT_PAGE_CIDS-1)); 1959 int i; 1960 1961 /* if state is 'acquired' the VF was not released or FLR'd; in 1962 * this case the returned resources match the already acquired 1963 * resources. Verify that the requested numbers do 1964 * not exceed the already acquired numbers. 1965 */ 1966 if (vf->state == VF_ACQUIRED) { 1967 DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n", 1968 vf->abs_vfid); 1969 1970 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { 1971 BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n", 1972 vf->abs_vfid); 1973 return -EINVAL; 1974 } 1975 return 0; 1976 } 1977 1978 /* Otherwise vf state must be 'free' or 'reset' */ 1979 if (vf->state != VF_FREE && vf->state != VF_RESET) { 1980 BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n", 1981 vf->abs_vfid, vf->state); 1982 return -EINVAL; 1983 } 1984 1985 /* static allocation: 1986 * the global maximum numbers are fixed per VF. Fail the request if 1987 * the requested numbers exceed these globals 1988 */ 1989 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { 1990 DP(BNX2X_MSG_IOV, 1991 "cannot fulfill vf resource request. Placing maximal available values in response\n"); 1992 /* set the max resource in the vf */ 1993 return -ENOMEM; 1994 } 1995 1996 /* Set resource counters - 0 request means max available */ 1997 vf_sb_count(vf) = resc->num_sbs; 1998 vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf); 1999 vf_txq_count(vf) = resc->num_txqs ?
: bnx2x_vf_max_queue_cnt(bp, vf); 2000 if (resc->num_mac_filters) 2001 vf_mac_rules_cnt(vf) = resc->num_mac_filters; 2002 /* Add an additional vlan filter credit for the hypervisor */ 2003 bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1); 2004 2005 DP(BNX2X_MSG_IOV, 2006 "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n", 2007 vf_sb_count(vf), vf_rxq_count(vf), 2008 vf_txq_count(vf), vf_mac_rules_cnt(vf), 2009 vf_vlan_rules_visible_cnt(vf)); 2010 2011 /* Initialize the queues */ 2012 if (!vf->vfqs) { 2013 DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n"); 2014 return -EINVAL; 2015 } 2016 2017 for_each_vfq(vf, i) { 2018 struct bnx2x_vf_queue *q = vfq_get(vf, i); 2019 2020 if (!q) { 2021 BNX2X_ERR("q number %d was not allocated\n", i); 2022 return -EINVAL; 2023 } 2024 2025 q->index = i; 2026 q->cxt = &((base_cxt + i)->eth); 2027 q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i; 2028 2029 DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n", 2030 vf->abs_vfid, i, q->index, q->cid, q->cxt); 2031 2032 /* init SP objects */ 2033 bnx2x_vfq_init(bp, vf, q); 2034 } 2035 vf->state = VF_ACQUIRED; 2036 return 0; 2037 } 2038 2039 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) 2040 { 2041 struct bnx2x_func_init_params func_init = {0}; 2042 u16 flags = 0; 2043 int i; 2044 2045 /* the sb resources are initialized at this point, do the 2046 * FW/HW initializations 2047 */ 2048 for_each_vf_sb(vf, i) 2049 bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true, 2050 vf_igu_sb(vf, i), vf_igu_sb(vf, i)); 2051 2052 /* Sanity checks */ 2053 if (vf->state != VF_ACQUIRED) { 2054 DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n", 2055 vf->abs_vfid, vf->state); 2056 return -EINVAL; 2057 } 2058 2059 /* let FLR complete ... 
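(give the FW time to finish any pending FLR handling for this function; the fixed 100ms sleep below is the driver's wait, after which the FLR cleanup epilogue is checked)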
*/ 2060 msleep(100); 2061 2062 /* FLR cleanup epilogue */ 2063 if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid)) 2064 return -EBUSY; 2065 2066 /* reset IGU VF statistics: MSIX */ 2067 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0); 2068 2069 /* vf init */ 2070 if (vf->cfg_flags & VF_CFG_STATS) 2071 flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ); 2072 2073 if (vf->cfg_flags & VF_CFG_TPA) 2074 flags |= FUNC_FLG_TPA; 2075 2076 if (is_vf_multi(vf)) 2077 flags |= FUNC_FLG_RSS; 2078 2079 /* function setup */ 2080 func_init.func_flgs = flags; 2081 func_init.pf_id = BP_FUNC(bp); 2082 func_init.func_id = FW_VF_HANDLE(vf->abs_vfid); 2083 func_init.fw_stat_map = vf->fw_stat_map; 2084 func_init.spq_map = vf->spq_map; 2085 func_init.spq_prod = 0; 2086 bnx2x_func_init(bp, &func_init); 2087 2088 /* Enable the vf */ 2089 bnx2x_vf_enable_access(bp, vf->abs_vfid); 2090 bnx2x_vf_enable_traffic(bp, vf); 2091 2092 /* queue protection table */ 2093 for_each_vfq(vf, i) 2094 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, 2095 vfq_qzone_id(vf, vfq_get(vf, i)), true); 2096 2097 vf->state = VF_ENABLED; 2098 2099 /* update vf bulletin board */ 2100 bnx2x_post_vf_bulletin(bp, vf->index); 2101 2102 return 0; 2103 } 2104 2105 struct set_vf_state_cookie { 2106 struct bnx2x_virtf *vf; 2107 u8 state; 2108 }; 2109 2110 static void bnx2x_set_vf_state(void *cookie) 2111 { 2112 struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie; 2113 2114 p->vf->state = p->state; 2115 } 2116 2117 int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf) 2118 { 2119 int rc = 0, i; 2120 2121 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 2122 2123 /* Close all queues */ 2124 for (i = 0; i < vf_rxq_count(vf); i++) { 2125 rc = bnx2x_vf_queue_teardown(bp, vf, i); 2126 if (rc) 2127 goto op_err; 2128 } 2129 2130 /* disable the interrupts */ 2131 DP(BNX2X_MSG_IOV, "disabling igu\n"); 2132 bnx2x_vf_igu_disable(bp, vf); 2133 2134 /* disable the VF */ 2135 DP(BNX2X_MSG_IOV, "clearing qtbl\n"); 2136 bnx2x_vf_clr_qtbl(bp, vf); 2137 2138 /* need to make sure there are no outstanding stats ramrods which may 2139 * cause the device to access the VF's stats buffer which it will free 2140 * as soon as we return from the close flow. 2141 */ 2142 { 2143 struct set_vf_state_cookie cookie; 2144 2145 cookie.vf = vf; 2146 cookie.state = VF_ACQUIRED; 2147 bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); 2148 } 2149 2150 DP(BNX2X_MSG_IOV, "set state to acquired\n"); 2151 2152 return 0; 2153 op_err: 2154 BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc); 2155 return rc; 2156 } 2157 2158 /* VF release can be called either: 1. The VF was acquired but 2159 * not enabled 2. the vf was enabled or in the process of being 2160 * enabled 2161 */ 2162 int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf) 2163 { 2164 int rc; 2165 2166 DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid, 2167 vf->state == VF_FREE ? "Free" : 2168 vf->state == VF_ACQUIRED ? "Acquired" : 2169 vf->state == VF_ENABLED ? "Enabled" : 2170 vf->state == VF_RESET ? 
"Reset" : 2171 "Unknown"); 2172 2173 switch (vf->state) { 2174 case VF_ENABLED: 2175 rc = bnx2x_vf_close(bp, vf); 2176 if (rc) 2177 goto op_err; 2178 /* Fallthrough to release resources */ 2179 case VF_ACQUIRED: 2180 DP(BNX2X_MSG_IOV, "about to free resources\n"); 2181 bnx2x_vf_free_resc(bp, vf); 2182 break; 2183 2184 case VF_FREE: 2185 case VF_RESET: 2186 default: 2187 break; 2188 } 2189 return 0; 2190 op_err: 2191 BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc); 2192 return rc; 2193 } 2194 2195 int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, 2196 struct bnx2x_config_rss_params *rss) 2197 { 2198 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 2199 set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags); 2200 return bnx2x_config_rss(bp, rss); 2201 } 2202 2203 int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, 2204 struct vfpf_tpa_tlv *tlv, 2205 struct bnx2x_queue_update_tpa_params *params) 2206 { 2207 aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr; 2208 struct bnx2x_queue_state_params qstate; 2209 int qid, rc = 0; 2210 2211 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 2212 2213 /* Set ramrod params */ 2214 memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); 2215 memcpy(&qstate.params.update_tpa, params, 2216 sizeof(struct bnx2x_queue_update_tpa_params)); 2217 qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA; 2218 set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); 2219 2220 for (qid = 0; qid < vf_rxq_count(vf); qid++) { 2221 qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); 2222 qstate.params.update_tpa.sge_map = sge_addr[qid]; 2223 DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n", 2224 vf->abs_vfid, qid, U64_HI(sge_addr[qid]), 2225 U64_LO(sge_addr[qid])); 2226 rc = bnx2x_queue_state_change(bp, &qstate); 2227 if (rc) { 2228 BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n", 2229 U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]), 2230 vf->abs_vfid, qid); 2231 return rc; 2232 } 2233 } 2234 2235 return rc; 2236 } 2237 2238 /* VF release ~ VF close + VF release-resources 2239 * Release is the ultimate SW shutdown and is called whenever an 2240 * irrecoverable error is encountered. 2241 */ 2242 int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf) 2243 { 2244 int rc; 2245 2246 DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid); 2247 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); 2248 2249 rc = bnx2x_vf_free(bp, vf); 2250 if (rc) 2251 WARN(rc, 2252 "VF[%d] Failed to allocate resources for release op- rc=%d\n", 2253 vf->abs_vfid, rc); 2254 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); 2255 return rc; 2256 } 2257 2258 static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp, 2259 struct bnx2x_virtf *vf, u32 *sbdf) 2260 { 2261 *sbdf = vf->devfn | (vf->bus << 8); 2262 } 2263 2264 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 2265 enum channel_tlvs tlv) 2266 { 2267 /* we don't lock the channel for unsupported tlvs */ 2268 if (!bnx2x_tlv_supported(tlv)) { 2269 BNX2X_ERR("attempting to lock with unsupported tlv. 
Aborting\n"); 2270 return; 2271 } 2272 2273 /* lock the channel */ 2274 mutex_lock(&vf->op_mutex); 2275 2276 /* record the locking op */ 2277 vf->op_current = tlv; 2278 2279 /* log the lock */ 2280 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n", 2281 vf->abs_vfid, tlv); 2282 } 2283 2284 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 2285 enum channel_tlvs expected_tlv) 2286 { 2287 enum channel_tlvs current_tlv; 2288 2289 if (!vf) { 2290 BNX2X_ERR("VF was %p\n", vf); 2291 return; 2292 } 2293 2294 current_tlv = vf->op_current; 2295 2296 /* we don't unlock the channel for unsupported tlvs */ 2297 if (!bnx2x_tlv_supported(expected_tlv)) 2298 return; 2299 2300 WARN(expected_tlv != vf->op_current, 2301 "lock mismatch: expected %d found %d", expected_tlv, 2302 vf->op_current); 2303 2304 /* clear the locking op */ 2305 vf->op_current = CHANNEL_TLV_NONE; 2306 2307 /* unlock the channel */ 2308 mutex_unlock(&vf->op_mutex); 2309 2310 /* log the unlock */ 2311 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n", 2312 vf->abs_vfid, current_tlv); 2313 } 2314 2315 static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable) 2316 { 2317 struct bnx2x_queue_state_params q_params; 2318 u32 prev_flags; 2319 int i, rc; 2320 2321 /* Verify changes are needed and record current Tx switching state */ 2322 prev_flags = bp->flags; 2323 if (enable) 2324 bp->flags |= TX_SWITCHING; 2325 else 2326 bp->flags &= ~TX_SWITCHING; 2327 if (prev_flags == bp->flags) 2328 return 0; 2329 2330 /* Verify state enables the sending of queue ramrods */ 2331 if ((bp->state != BNX2X_STATE_OPEN) || 2332 (bnx2x_get_q_logical_state(bp, 2333 &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) != 2334 BNX2X_Q_LOGICAL_STATE_ACTIVE)) 2335 return 0; 2336 2337 /* send q. update ramrod to configure Tx switching */ 2338 memset(&q_params, 0, sizeof(q_params)); 2339 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 2340 q_params.cmd = BNX2X_Q_CMD_UPDATE; 2341 __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, 2342 &q_params.params.update.update_flags); 2343 if (enable) 2344 __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING, 2345 &q_params.params.update.update_flags); 2346 else 2347 __clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING, 2348 &q_params.params.update.update_flags); 2349 2350 /* send the ramrod on all the queues of the PF */ 2351 for_each_eth_queue(bp, i) { 2352 struct bnx2x_fastpath *fp = &bp->fp[i]; 2353 2354 /* Set the appropriate Queue object */ 2355 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 2356 2357 /* Update the Queue state */ 2358 rc = bnx2x_queue_state_change(bp, &q_params); 2359 if (rc) { 2360 BNX2X_ERR("Failed to configure Tx switching\n"); 2361 return rc; 2362 } 2363 } 2364 2365 DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled"); 2366 return 0; 2367 } 2368 2369 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) 2370 { 2371 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); 2372 2373 if (!IS_SRIOV(bp)) { 2374 BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated.
Check dmesg for errors in probe stage\n"); 2375 return -EINVAL; 2376 } 2377 2378 DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", 2379 num_vfs_param, BNX2X_NR_VIRTFN(bp)); 2380 2381 /* HW channel is only operational when PF is up */ 2382 if (bp->state != BNX2X_STATE_OPEN) { 2383 BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n"); 2384 return -EINVAL; 2385 } 2386 2387 /* we are always bound by the total_vfs in the configuration space */ 2388 if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) { 2389 BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n", 2390 num_vfs_param, BNX2X_NR_VIRTFN(bp)); 2391 num_vfs_param = BNX2X_NR_VIRTFN(bp); 2392 } 2393 2394 bp->requested_nr_virtfn = num_vfs_param; 2395 if (num_vfs_param == 0) { 2396 bnx2x_set_pf_tx_switching(bp, false); 2397 pci_disable_sriov(dev); 2398 return 0; 2399 } else { 2400 return bnx2x_enable_sriov(bp); 2401 } 2402 } 2403 2404 #define IGU_ENTRY_SIZE 4 2405 2406 int bnx2x_enable_sriov(struct bnx2x *bp) 2407 { 2408 int rc = 0, req_vfs = bp->requested_nr_virtfn; 2409 int vf_idx, sb_idx, vfq_idx, qcount, first_vf; 2410 u32 igu_entry, address; 2411 u16 num_vf_queues; 2412 2413 if (req_vfs == 0) 2414 return 0; 2415 2416 first_vf = bp->vfdb->sriov.first_vf_in_pf; 2417 2418 /* statically distribute vf sb pool between VFs */ 2419 num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES, 2420 BP_VFDB(bp)->vf_sbs_pool / req_vfs); 2421 2422 /* zero previous values learned from igu cam */ 2423 for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) { 2424 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); 2425 2426 vf->sb_count = 0; 2427 vf_sb_count(BP_VF(bp, vf_idx)) = 0; 2428 } 2429 bp->vfdb->vf_sbs_pool = 0; 2430 2431 /* prepare IGU cam */ 2432 sb_idx = BP_VFDB(bp)->first_vf_igu_entry; 2433 address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE; 2434 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { 2435 for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) { 2436 igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT | 2437 vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT | 2438 IGU_REG_MAPPING_MEMORY_VALID; 2439 DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n", 2440 sb_idx, vf_idx); 2441 REG_WR(bp, address, igu_entry); 2442 sb_idx++; 2443 address += IGU_ENTRY_SIZE; 2444 } 2445 } 2446 2447 /* Reinitialize vf database according to igu cam */ 2448 bnx2x_get_vf_igu_cam_info(bp); 2449 2450 DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n", 2451 BP_VFDB(bp)->vf_sbs_pool, num_vf_queues); 2452 2453 qcount = 0; 2454 for_each_vf(bp, vf_idx) { 2455 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); 2456 2457 /* set local queue arrays */ 2458 vf->vfqs = &bp->vfdb->vfqs[qcount]; 2459 qcount += vf_sb_count(vf); 2460 bnx2x_iov_static_resc(bp, vf); 2461 } 2462 2463 /* prepare msix vectors in VF configuration space - the value in the 2464 * PCI configuration space should be the index of the last entry, 2465 * namely one less than the actual size of the table 2466 */ 2467 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { 2468 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); 2469 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, 2470 num_vf_queues - 1); 2471 DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n", 2472 vf_idx, num_vf_queues - 1); 2473 } 2474 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 2475 2476 /* enable sriov. This will probe all the VFs, and consequentially cause 2477 * the "acquire" messages to appear on the VF PF channel. 
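* Note that stale SR-IOV state is torn down first (bnx2x_disable_sriov below) and PF Tx switching is enabled before pci_enable_sriov() is actually called. From user space this path is normally reached through the PF's sriov_numvfs sysfs attribute, e.g. "echo <N> > /sys/bus/pci/devices/<BDF>/sriov_numvfs".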
2478 */ 2479 DP(BNX2X_MSG_IOV, "about to call enable sriov\n"); 2480 bnx2x_disable_sriov(bp); 2481 2482 rc = bnx2x_set_pf_tx_switching(bp, true); 2483 if (rc) 2484 return rc; 2485 2486 rc = pci_enable_sriov(bp->pdev, req_vfs); 2487 if (rc) { 2488 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); 2489 return rc; 2490 } 2491 DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs); 2492 return req_vfs; 2493 } 2494 2495 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) 2496 { 2497 int vfidx; 2498 struct pf_vf_bulletin_content *bulletin; 2499 2500 DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n"); 2501 for_each_vf(bp, vfidx) { 2502 bulletin = BP_VF_BULLETIN(bp, vfidx); 2503 if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN) 2504 bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0); 2505 } 2506 } 2507 2508 void bnx2x_disable_sriov(struct bnx2x *bp) 2509 { 2510 pci_disable_sriov(bp->pdev); 2511 } 2512 2513 static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, 2514 struct bnx2x_virtf **vf, 2515 struct pf_vf_bulletin_content **bulletin) 2516 { 2517 if (bp->state != BNX2X_STATE_OPEN) { 2518 BNX2X_ERR("vf ndo called though PF is down\n"); 2519 return -EINVAL; 2520 } 2521 2522 if (!IS_SRIOV(bp)) { 2523 BNX2X_ERR("vf ndo called though sriov is disabled\n"); 2524 return -EINVAL; 2525 } 2526 2527 if (vfidx >= BNX2X_NR_VIRTFN(bp)) { 2528 BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n", 2529 vfidx, BNX2X_NR_VIRTFN(bp)); 2530 return -EINVAL; 2531 } 2532 2533 /* init members */ 2534 *vf = BP_VF(bp, vfidx); 2535 *bulletin = BP_VF_BULLETIN(bp, vfidx); 2536 2537 if (!*vf) { 2538 BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n", 2539 vfidx); 2540 return -EINVAL; 2541 } 2542 2543 if (!(*vf)->vfqs) { 2544 BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n", 2545 vfidx); 2546 return -EINVAL; 2547 } 2548 2549 if (!*bulletin) { 2550 BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n", 2551 vfidx); 2552 return -EINVAL; 2553 } 2554 2555 return 0; 2556 } 2557 2558 int bnx2x_get_vf_config(struct net_device *dev, int vfidx, 2559 struct ifla_vf_info *ivi) 2560 { 2561 struct bnx2x *bp = netdev_priv(dev); 2562 struct bnx2x_virtf *vf = NULL; 2563 struct pf_vf_bulletin_content *bulletin = NULL; 2564 struct bnx2x_vlan_mac_obj *mac_obj; 2565 struct bnx2x_vlan_mac_obj *vlan_obj; 2566 int rc; 2567 2568 /* sanity and init */ 2569 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 2570 if (rc) 2571 return rc; 2572 mac_obj = &bnx2x_leading_vfq(vf, mac_obj); 2573 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); 2574 if (!mac_obj || !vlan_obj) { 2575 BNX2X_ERR("VF partially initialized\n"); 2576 return -EINVAL; 2577 } 2578 2579 ivi->vf = vfidx; 2580 ivi->qos = 0; 2581 ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */ 2582 ivi->min_tx_rate = 0; 2583 ivi->spoofchk = 1; /*always enabled */ 2584 if (vf->state == VF_ENABLED) { 2585 /* mac and vlan are in vlan_mac objects */ 2586 if (bnx2x_validate_vf_sp_objs(bp, vf, false)) { 2587 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, 2588 0, ETH_ALEN); 2589 vlan_obj->get_n_elements(bp, vlan_obj, 1, 2590 (u8 *)&ivi->vlan, 0, 2591 VLAN_HLEN); 2592 } 2593 } else { 2594 /* mac */ 2595 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) 2596 /* mac configured by ndo so its in bulletin board */ 2597 memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); 2598 else 2599 /* function has not been loaded yet. 
Show mac as 0s */ 2600 memset(&ivi->mac, 0, ETH_ALEN); 2601 2602 /* vlan */ 2603 if (bulletin->valid_bitmap & (1 << VLAN_VALID)) 2604 /* vlan configured by ndo so its in bulletin board */ 2605 memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN); 2606 else 2607 /* function has not been loaded yet. Show vlans as 0s */ 2608 memset(&ivi->vlan, 0, VLAN_HLEN); 2609 } 2610 2611 return 0; 2612 } 2613 2614 /* New mac for VF. Consider these cases: 2615 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and 2616 * supply at acquire. 2617 * 2. VF has already been acquired but has not yet initialized - store in local 2618 * bulletin board. mac will be posted on VF bulletin board after VF init. VF 2619 * will configure this mac when it is ready. 2620 * 3. VF has already initialized but has not yet setup a queue - post the new 2621 * mac on VF's bulletin board right now. VF will configure this mac when it 2622 * is ready. 2623 * 4. VF has already set a queue - delete any macs already configured for this 2624 * queue and manually config the new mac. 2625 * In any event, once this function has been called refuse any attempts by the 2626 * VF to configure any mac for itself except for this mac. In case of a race 2627 * where the VF fails to see the new post on its bulletin board before sending a 2628 * mac configuration request, the PF will simply fail the request and VF can try 2629 * again after consulting its bulletin board. 2630 */ 2631 int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) 2632 { 2633 struct bnx2x *bp = netdev_priv(dev); 2634 int rc, q_logical_state; 2635 struct bnx2x_virtf *vf = NULL; 2636 struct pf_vf_bulletin_content *bulletin = NULL; 2637 2638 /* sanity and init */ 2639 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 2640 if (rc) 2641 return rc; 2642 if (!is_valid_ether_addr(mac)) { 2643 BNX2X_ERR("mac address invalid\n"); 2644 return -EINVAL; 2645 } 2646 2647 /* update PF's copy of the VF's bulletin. 
Will no longer accept mac 2648 * configuration requests from the VF unless they match this mac 2649 */ 2650 bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID; 2651 memcpy(bulletin->mac, mac, ETH_ALEN); 2652 2653 /* Post update on VF's bulletin board */ 2654 rc = bnx2x_post_vf_bulletin(bp, vfidx); 2655 if (rc) { 2656 BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx); 2657 return rc; 2658 } 2659 2660 q_logical_state = 2661 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); 2662 if (vf->state == VF_ENABLED && 2663 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { 2664 /* configure the mac in device on this vf's queue */ 2665 unsigned long ramrod_flags = 0; 2666 struct bnx2x_vlan_mac_obj *mac_obj; 2667 2668 /* User should be able to see failure reason in system logs */ 2669 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) 2670 return -EINVAL; 2671 2672 /* must lock vfpf channel to protect against vf flows */ 2673 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 2674 2675 /* remove existing eth macs */ 2676 mac_obj = &bnx2x_leading_vfq(vf, mac_obj); 2677 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); 2678 if (rc) { 2679 BNX2X_ERR("failed to delete eth macs\n"); 2680 rc = -EINVAL; 2681 goto out; 2682 } 2683 2684 /* remove existing uc list macs */ 2685 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true); 2686 if (rc) { 2687 BNX2X_ERR("failed to delete uc_list macs\n"); 2688 rc = -EINVAL; 2689 goto out; 2690 } 2691 2692 /* configure the new mac to device */ 2693 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 2694 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, 2695 BNX2X_ETH_MAC, &ramrod_flags); 2696 2697 out: 2698 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 2699 } 2700 2701 return rc; 2702 } 2703 2704 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) 2705 { 2706 struct bnx2x_queue_state_params q_params = {NULL}; 2707 struct bnx2x_vlan_mac_ramrod_params ramrod_param; 2708 struct bnx2x_queue_update_params *update_params; 2709 struct pf_vf_bulletin_content *bulletin = NULL; 2710 struct bnx2x_rx_mode_ramrod_params rx_ramrod; 2711 struct bnx2x *bp = netdev_priv(dev); 2712 struct bnx2x_vlan_mac_obj *vlan_obj; 2713 unsigned long vlan_mac_flags = 0; 2714 unsigned long ramrod_flags = 0; 2715 struct bnx2x_virtf *vf = NULL; 2716 unsigned long accept_flags; 2717 int rc; 2718 2719 /* sanity and init */ 2720 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 2721 if (rc) 2722 return rc; 2723 2724 if (vlan > 4095) { 2725 BNX2X_ERR("illegal vlan value %d\n", vlan); 2726 return -EINVAL; 2727 } 2728 2729 DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n", 2730 vfidx, vlan, 0); 2731 2732 /* update PF's copy of the VF's bulletin. No point in posting the vlan 2733 * to the VF since it doesn't have anything to do with it. But it is useful 2734 * to store it here in case the VF is not up yet and we can only 2735 * configure the vlan later when it comes up. Treat vlan id 0 as removing 2736 * the Host tag. 2737 */ 2738 if (vlan > 0) 2739 bulletin->valid_bitmap |= 1 << VLAN_VALID; 2740 else 2741 bulletin->valid_bitmap &= ~(1 << VLAN_VALID); 2742 bulletin->vlan = vlan; 2743 2744 /* is vf initialized and queue set up?
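If not, the bulletin update above is all we can do for now; the VF will pick the vlan up from its bulletin board when it loads.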
*/ 2745 if (vf->state != VF_ENABLED || 2746 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != 2747 BNX2X_Q_LOGICAL_STATE_ACTIVE) 2748 return rc; 2749 2750 /* User should be able to see error in system logs */ 2751 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) 2752 return -EINVAL; 2753 2754 /* must lock vfpf channel to protect against vf flows */ 2755 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 2756 2757 /* remove existing vlans */ 2758 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 2759 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); 2760 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, 2761 &ramrod_flags); 2762 if (rc) { 2763 BNX2X_ERR("failed to delete vlans\n"); 2764 rc = -EINVAL; 2765 goto out; 2766 } 2767 2768 /* need to remove/add the VF's accept_any_vlan bit */ 2769 accept_flags = bnx2x_leading_vfq(vf, accept_flags); 2770 if (vlan) 2771 clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); 2772 else 2773 set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); 2774 2775 bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, 2776 accept_flags); 2777 bnx2x_leading_vfq(vf, accept_flags) = accept_flags; 2778 bnx2x_config_rx_mode(bp, &rx_ramrod); 2779 2780 /* configure the new vlan to device */ 2781 memset(&ramrod_param, 0, sizeof(ramrod_param)); 2782 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 2783 ramrod_param.vlan_mac_obj = vlan_obj; 2784 ramrod_param.ramrod_flags = ramrod_flags; 2785 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 2786 &ramrod_param.user_req.vlan_mac_flags); 2787 ramrod_param.user_req.u.vlan.vlan = vlan; 2788 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; 2789 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); 2790 if (rc) { 2791 BNX2X_ERR("failed to configure vlan\n"); 2792 rc = -EINVAL; 2793 goto out; 2794 } 2795 2796 /* send queue update ramrod to configure default vlan and silent 2797 * vlan removal 2798 */ 2799 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 2800 q_params.cmd = BNX2X_Q_CMD_UPDATE; 2801 q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); 2802 update_params = &q_params.params.update; 2803 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, 2804 &update_params->update_flags); 2805 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, 2806 &update_params->update_flags); 2807 if (vlan == 0) { 2808 /* if vlan is 0 then we want to leave the VF traffic 2809 * untagged, and leave the incoming traffic untouched 2810 * (i.e. do not remove any vlan tags). 2811 */ 2812 __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, 2813 &update_params->update_flags); 2814 __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 2815 &update_params->update_flags); 2816 } else { 2817 /* configure default vlan to vf queue and set silent 2818 * vlan removal (the vf remains unaware of this vlan). 
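* Silent removal here means the HW strips the hypervisor-assigned tag from received traffic, so the VF keeps seeing untagged frames.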
2819 */ 2820 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, 2821 &update_params->update_flags); 2822 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 2823 &update_params->update_flags); 2824 update_params->def_vlan = vlan; 2825 update_params->silent_removal_value = 2826 vlan & VLAN_VID_MASK; 2827 update_params->silent_removal_mask = VLAN_VID_MASK; 2828 } 2829 2830 /* Update the Queue state */ 2831 rc = bnx2x_queue_state_change(bp, &q_params); 2832 if (rc) { 2833 BNX2X_ERR("Failed to configure default VLAN\n"); 2834 goto out; 2835 } 2836 2837 2838 /* clear the flag indicating that this VF needs its vlan 2839 * (will only be set if the HV configured the Vlan before the vf was 2840 * up and we were called because the VF came up later) 2841 */ 2842 out: 2843 vf->cfg_flags &= ~VF_CFG_VLAN; 2844 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 2845 2846 return rc; 2847 } 2848 2849 /* crc is the first field in the bulletin board. Compute the crc over the 2850 * entire bulletin board excluding the crc field itself. Use the length field 2851 * as the Bulletin Board was posted by a PF with possibly a different version 2852 * from the vf which will sample it. Therefore, the length is computed by the 2853 * PF and then used blindly by the VF. 2854 */ 2855 u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, 2856 struct pf_vf_bulletin_content *bulletin) 2857 { 2858 return crc32(BULLETIN_CRC_SEED, 2859 ((u8 *)bulletin) + sizeof(bulletin->crc), 2860 bulletin->length - sizeof(bulletin->crc)); 2861 } 2862 2863 /* Check for new posts on the bulletin board */ 2864 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) 2865 { 2866 struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content; 2867 int attempts; 2868 2869 /* bulletin board hasn't changed since last sample */ 2870 if (bp->old_bulletin.version == bulletin.version) 2871 return PFVF_BULLETIN_UNCHANGED; 2872 2873 /* validate crc of new bulletin board */ 2874 if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) { 2875 /* sampling the structure in mid-post may result in corrupted data; 2876 * validate the crc to ensure coherency. 2877 */ 2878 for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) { 2879 bulletin = bp->pf2vf_bulletin->content; 2880 if (bulletin.crc == bnx2x_crc_vf_bulletin(bp, 2881 &bulletin)) 2882 break; 2883 BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n", 2884 bulletin.crc, 2885 bnx2x_crc_vf_bulletin(bp, &bulletin)); 2886 } 2887 if (attempts >= BULLETIN_ATTEMPTS) { 2888 BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times.
Aborting\n", 2889 attempts); 2890 return PFVF_BULLETIN_CRC_ERR; 2891 } 2892 } 2893 2894 /* the mac address in bulletin board is valid and is new */ 2895 if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID && 2896 !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) { 2897 /* update new mac to net device */ 2898 memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN); 2899 } 2900 2901 /* the vlan in bulletin board is valid and is new */ 2902 if (bulletin.valid_bitmap & 1 << VLAN_VALID) 2903 memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN); 2904 2905 /* copy new bulletin board to bp */ 2906 bp->old_bulletin = bulletin; 2907 2908 return PFVF_BULLETIN_UPDATED; 2909 } 2910 2911 void bnx2x_timer_sriov(struct bnx2x *bp) 2912 { 2913 bnx2x_sample_bulletin(bp); 2914 2915 /* if channel is down we need to self destruct */ 2916 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) 2917 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, 2918 BNX2X_MSG_IOV); 2919 } 2920 2921 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) 2922 { 2923 /* vf doorbells are embedded within the regview */ 2924 return bp->regview + PXP_VF_ADDR_DB_START; 2925 } 2926 2927 void bnx2x_vf_pci_dealloc(struct bnx2x *bp) 2928 { 2929 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, 2930 sizeof(struct bnx2x_vf_mbx_msg)); 2931 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, 2932 sizeof(union pf_vf_bulletin)); 2933 } 2934 2935 int bnx2x_vf_pci_alloc(struct bnx2x *bp) 2936 { 2937 mutex_init(&bp->vf2pf_mutex); 2938 2939 /* allocate vf2pf mailbox for vf to pf channel */ 2940 bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping, 2941 sizeof(struct bnx2x_vf_mbx_msg)); 2942 if (!bp->vf2pf_mbox) 2943 goto alloc_mem_err; 2944 2945 /* allocate pf 2 vf bulletin board */ 2946 bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping, 2947 sizeof(union pf_vf_bulletin)); 2948 if (!bp->pf2vf_bulletin) 2949 goto alloc_mem_err; 2950 2951 return 0; 2952 2953 alloc_mem_err: 2954 bnx2x_vf_pci_dealloc(bp); 2955 return -ENOMEM; 2956 } 2957 2958 void bnx2x_iov_channel_down(struct bnx2x *bp) 2959 { 2960 int vf_idx; 2961 struct pf_vf_bulletin_content *bulletin; 2962 2963 if (!IS_SRIOV(bp)) 2964 return; 2965 2966 for_each_vf(bp, vf_idx) { 2967 /* locate this VFs bulletin board and update the channel down 2968 * bit 2969 */ 2970 bulletin = BP_VF_BULLETIN(bp, vf_idx); 2971 bulletin->valid_bitmap |= 1 << CHANNEL_DOWN; 2972 2973 /* update vf bulletin board */ 2974 bnx2x_post_vf_bulletin(bp, vf_idx); 2975 } 2976 } 2977 2978 void bnx2x_iov_task(struct work_struct *work) 2979 { 2980 struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work); 2981 2982 if (!netif_running(bp->dev)) 2983 return; 2984 2985 if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR, 2986 &bp->iov_task_state)) 2987 bnx2x_vf_handle_flr_event(bp); 2988 2989 if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG, 2990 &bp->iov_task_state)) 2991 bnx2x_vf_mbx(bp); 2992 } 2993 2994 void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) 2995 { 2996 smp_mb__before_atomic(); 2997 set_bit(flag, &bp->iov_task_state); 2998 smp_mb__after_atomic(); 2999 DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); 3000 queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0); 3001 } 3002