1 /* bnx2x_sriov.c: Broadcom Everest network driver. 2 * 3 * Copyright 2009-2013 Broadcom Corporation 4 * 5 * Unless you and Broadcom execute a separate written software license 6 * agreement governing use of this software, this software is licensed to you 7 * under the terms of the GNU General Public License version 2, available 8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). 9 * 10 * Notwithstanding the above, under no circumstances may you combine this 11 * software in any way with any other Broadcom software provided under a 12 * license other than the GPL, without Broadcom's express prior written 13 * consent. 14 * 15 * Maintained by: Ariel Elior <ariel.elior@qlogic.com> 16 * Written by: Shmulik Ravid 17 * Ariel Elior <ariel.elior@qlogic.com> 18 * 19 */ 20 #include "bnx2x.h" 21 #include "bnx2x_init.h" 22 #include "bnx2x_cmn.h" 23 #include "bnx2x_sp.h" 24 #include <linux/crc32.h> 25 #include <linux/if_vlan.h> 26 27 /* General service functions */ 28 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, 29 u16 pf_id) 30 { 31 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), 32 pf_id); 33 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), 34 pf_id); 35 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), 36 pf_id); 37 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), 38 pf_id); 39 } 40 41 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, 42 u8 enable) 43 { 44 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), 45 enable); 46 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), 47 enable); 48 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid), 49 enable); 50 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), 51 enable); 52 } 53 54 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) 55 { 56 int idx; 57 58 for_each_vf(bp, idx) 59 if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid) 60 break; 61 return idx; 62 } 63 64 static 65 struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) 66 { 67 u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid); 68 return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL; 69 } 70 71 static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf, 72 u8 igu_sb_id, u8 segment, u16 index, u8 op, 73 u8 update) 74 { 75 /* acking a VF sb through the PF - use the GRC */ 76 u32 ctl; 77 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 78 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 79 u32 func_encode = vf->abs_vfid; 80 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id; 81 struct igu_regular cmd_data = {0}; 82 83 cmd_data.sb_id_and_flags = 84 ((index << IGU_REGULAR_SB_INDEX_SHIFT) | 85 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | 86 (update << IGU_REGULAR_BUPDATE_SHIFT) | 87 (op << IGU_REGULAR_ENABLE_INT_SHIFT)); 88 89 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | 90 func_encode << IGU_CTRL_REG_FID_SHIFT | 91 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; 92 93 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 94 cmd_data.sb_id_and_flags, igu_addr_data); 95 REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags); 96 mmiowb(); 97 barrier(); 98 99 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 100 ctl, igu_addr_ctl); 101 REG_WR(bp, igu_addr_ctl, ctl); 102 mmiowb(); 103 barrier(); 104 } 105 106 static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp, 107 struct bnx2x_virtf *vf, 108 bool print_err) 109 { 110 if (!bnx2x_leading_vfq(vf, sp_initialized)) { 111 if (print_err) 112 BNX2X_ERR("Slowpath objects not yet initialized!\n"); 113 else 114 DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n"); 115 return false; 116 } 117 return true; 118 } 119 120 /* VFOP operations states */ 121 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, 122 struct bnx2x_queue_init_params *init_params, 123 struct bnx2x_queue_setup_params *setup_params, 124 u16 q_idx, u16 sb_idx) 125 { 126 DP(BNX2X_MSG_IOV, 127 "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d", 128 vf->abs_vfid, 129 q_idx, 130 sb_idx, 131 init_params->tx.sb_cq_index, 132 init_params->tx.hc_rate, 133 setup_params->flags, 134 setup_params->txq_params.traffic_type); 135 } 136 137 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, 138 struct bnx2x_queue_init_params *init_params, 139 struct bnx2x_queue_setup_params *setup_params, 140 u16 q_idx, u16 sb_idx) 141 { 142 struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params; 143 144 DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n" 145 "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n", 146 vf->abs_vfid, 147 q_idx, 148 sb_idx, 149 init_params->rx.sb_cq_index, 150 init_params->rx.hc_rate, 151 setup_params->gen_params.mtu, 152 rxq_params->buf_sz, 153 rxq_params->sge_buf_sz, 154 rxq_params->max_sges_pkt, 155 rxq_params->tpa_agg_sz, 156 setup_params->flags, 157 rxq_params->drop_flags, 158 rxq_params->cache_line_log); 159 } 160 161 void bnx2x_vfop_qctor_prep(struct bnx2x *bp, 162 struct bnx2x_virtf *vf, 163 struct bnx2x_vf_queue *q, 164 struct bnx2x_vf_queue_construct_params *p, 165 unsigned long q_type) 166 { 167 struct bnx2x_queue_init_params *init_p = &p->qstate.params.init; 168 struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup; 169 170 /* INIT */ 171 172 /* Enable host coalescing in the transition to INIT state */ 173 if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags)) 174 __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags); 175 176 if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags)) 177 __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags); 178 179 /* FW SB ID */ 180 init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx); 181 init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx); 182 183 /* context */ 184 init_p->cxts[0] = q->cxt; 185 186 /* SETUP */ 187 188 /* Setup-op general parameters */ 189 setup_p->gen_params.spcl_id = vf->sp_cl_id; 190 setup_p->gen_params.stat_id = vfq_stat_id(vf, q); 191 192 /* Setup-op pause params: 193 * Nothing to do, the pause thresholds are set by default to 0 which 194 * effectively turns off the feature for this queue. We don't want 195 * one queue (VF) to interfering with another queue (another VF) 196 */ 197 if (vf->cfg_flags & VF_CFG_FW_FC) 198 BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n", 199 vf->abs_vfid); 200 /* Setup-op flags: 201 * collect statistics, zero statistics, local-switching, security, 202 * OV for Flex10, RSS and MCAST for leading 203 */ 204 if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags)) 205 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags); 206 207 /* for VFs, enable tx switching, bd coherency, and mac address 208 * anti-spoofing 209 */ 210 __set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags); 211 __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags); 212 __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags); 213 214 /* Setup-op rx parameters */ 215 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) { 216 struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params; 217 218 rxq_p->cl_qzone_id = vfq_qzone_id(vf, q); 219 rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx); 220 rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid); 221 222 if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags)) 223 rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES; 224 } 225 226 /* Setup-op tx parameters */ 227 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) { 228 setup_p->txq_params.tss_leading_cl_id = vf->leading_rss; 229 setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx); 230 } 231 } 232 233 static int bnx2x_vf_queue_create(struct bnx2x *bp, 234 struct bnx2x_virtf *vf, int qid, 235 struct bnx2x_vf_queue_construct_params *qctor) 236 { 237 struct bnx2x_queue_state_params *q_params; 238 int rc = 0; 239 240 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); 241 242 /* Prepare ramrod information */ 243 q_params = &qctor->qstate; 244 q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj); 245 set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags); 246 247 if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == 248 BNX2X_Q_LOGICAL_STATE_ACTIVE) { 249 DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n"); 250 goto out; 251 } 252 253 /* Run Queue 'construction' ramrods */ 254 q_params->cmd = BNX2X_Q_CMD_INIT; 255 rc = bnx2x_queue_state_change(bp, q_params); 256 if (rc) 257 goto out; 258 259 memcpy(&q_params->params.setup, &qctor->prep_qsetup, 260 sizeof(struct bnx2x_queue_setup_params)); 261 q_params->cmd = BNX2X_Q_CMD_SETUP; 262 rc = bnx2x_queue_state_change(bp, q_params); 263 if (rc) 264 goto out; 265 266 /* enable interrupts */ 267 bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)), 268 USTORM_ID, 0, IGU_INT_ENABLE, 0); 269 out: 270 return rc; 271 } 272 273 static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf, 274 int qid) 275 { 276 enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT, 277 BNX2X_Q_CMD_TERMINATE, 278 BNX2X_Q_CMD_CFC_DEL}; 279 struct bnx2x_queue_state_params q_params; 280 int rc, i; 281 282 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 283 284 /* Prepare ramrod information */ 285 memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params)); 286 q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj); 287 set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 288 289 if (bnx2x_get_q_logical_state(bp, q_params.q_obj) == 290 BNX2X_Q_LOGICAL_STATE_STOPPED) { 291 DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n"); 292 goto out; 293 } 294 295 /* Run Queue 'destruction' ramrods */ 296 for (i = 0; i < ARRAY_SIZE(cmds); i++) { 297 q_params.cmd = cmds[i]; 298 rc = bnx2x_queue_state_change(bp, &q_params); 299 if (rc) { 300 BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]); 301 return rc; 302 } 303 } 304 out: 305 /* Clean Context */ 306 if (bnx2x_vfq(vf, qid, cxt)) { 307 bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0; 308 bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0; 309 } 310 311 return 0; 312 } 313 314 static void 315 bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid) 316 { 317 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 318 if (vf) { 319 /* the first igu entry belonging to VFs of this PF */ 320 if (!BP_VFDB(bp)->first_vf_igu_entry) 321 BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id; 322 323 /* the first igu entry belonging to this VF */ 324 if (!vf_sb_count(vf)) 325 vf->igu_base_id = igu_sb_id; 326 327 ++vf_sb_count(vf); 328 ++vf->sb_count; 329 } 330 BP_VFDB(bp)->vf_sbs_pool++; 331 } 332 333 static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp, 334 struct bnx2x_vlan_mac_obj *obj, 335 atomic_t *counter) 336 { 337 struct list_head *pos; 338 int read_lock; 339 int cnt = 0; 340 341 read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj); 342 if (read_lock) 343 DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n"); 344 345 list_for_each(pos, &obj->head) 346 cnt++; 347 348 if (!read_lock) 349 bnx2x_vlan_mac_h_read_unlock(bp, obj); 350 351 atomic_set(counter, cnt); 352 } 353 354 static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf, 355 int qid, bool drv_only, bool mac) 356 { 357 struct bnx2x_vlan_mac_ramrod_params ramrod; 358 int rc; 359 360 DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid, 361 mac ? "MACs" : "VLANs"); 362 363 /* Prepare ramrod params */ 364 memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); 365 if (mac) { 366 set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); 367 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); 368 } else { 369 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 370 &ramrod.user_req.vlan_mac_flags); 371 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); 372 } 373 ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL; 374 375 set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); 376 if (drv_only) 377 set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); 378 else 379 set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); 380 381 /* Start deleting */ 382 rc = ramrod.vlan_mac_obj->delete_all(bp, 383 ramrod.vlan_mac_obj, 384 &ramrod.user_req.vlan_mac_flags, 385 &ramrod.ramrod_flags); 386 if (rc) { 387 BNX2X_ERR("Failed to delete all %s\n", 388 mac ? "MACs" : "VLANs"); 389 return rc; 390 } 391 392 /* Clear the vlan counters */ 393 if (!mac) 394 atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0); 395 396 return 0; 397 } 398 399 static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, 400 struct bnx2x_virtf *vf, int qid, 401 struct bnx2x_vf_mac_vlan_filter *filter, 402 bool drv_only) 403 { 404 struct bnx2x_vlan_mac_ramrod_params ramrod; 405 int rc; 406 407 DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n", 408 vf->abs_vfid, filter->add ? "Adding" : "Deleting", 409 filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN"); 410 411 /* Prepare ramrod params */ 412 memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); 413 if (filter->type == BNX2X_VF_FILTER_VLAN) { 414 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 415 &ramrod.user_req.vlan_mac_flags); 416 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); 417 ramrod.user_req.u.vlan.vlan = filter->vid; 418 } else { 419 set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); 420 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); 421 memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN); 422 } 423 ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD : 424 BNX2X_VLAN_MAC_DEL; 425 426 /* Verify there are available vlan credits */ 427 if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN && 428 (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >= 429 vf_vlan_rules_cnt(vf))) { 430 BNX2X_ERR("No credits for vlan [%d >= %d]\n", 431 atomic_read(&bnx2x_vfq(vf, qid, vlan_count)), 432 vf_vlan_rules_cnt(vf)); 433 return -ENOMEM; 434 } 435 436 set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); 437 if (drv_only) 438 set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); 439 else 440 set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); 441 442 /* Add/Remove the filter */ 443 rc = bnx2x_config_vlan_mac(bp, &ramrod); 444 if (rc && rc != -EEXIST) { 445 BNX2X_ERR("Failed to %s %s\n", 446 filter->add ? "add" : "delete", 447 filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : 448 "VLAN"); 449 return rc; 450 } 451 452 /* Update the vlan counters */ 453 if (filter->type == BNX2X_VF_FILTER_VLAN) 454 bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj, 455 &bnx2x_vfq(vf, qid, vlan_count)); 456 457 return 0; 458 } 459 460 int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, 461 struct bnx2x_vf_mac_vlan_filters *filters, 462 int qid, bool drv_only) 463 { 464 int rc = 0, i; 465 466 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 467 468 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) 469 return -EINVAL; 470 471 /* Prepare ramrod params */ 472 for (i = 0; i < filters->count; i++) { 473 rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, 474 &filters->filters[i], drv_only); 475 if (rc) 476 break; 477 } 478 479 /* Rollback if needed */ 480 if (i != filters->count) { 481 BNX2X_ERR("Managed only %d/%d filters - rolling back\n", 482 i, filters->count + 1); 483 while (--i >= 0) { 484 filters->filters[i].add = !filters->filters[i].add; 485 bnx2x_vf_mac_vlan_config(bp, vf, qid, 486 &filters->filters[i], 487 drv_only); 488 } 489 } 490 491 /* It's our responsibility to free the filters */ 492 kfree(filters); 493 494 return rc; 495 } 496 497 int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, 498 struct bnx2x_vf_queue_construct_params *qctor) 499 { 500 int rc; 501 502 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); 503 504 rc = bnx2x_vf_queue_create(bp, vf, qid, qctor); 505 if (rc) 506 goto op_err; 507 508 /* Configure vlan0 for leading queue */ 509 if (!qid) { 510 struct bnx2x_vf_mac_vlan_filter filter; 511 512 memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter)); 513 filter.type = BNX2X_VF_FILTER_VLAN; 514 filter.add = true; 515 filter.vid = 0; 516 rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false); 517 if (rc) 518 goto op_err; 519 } 520 521 /* Schedule the configuration of any pending vlan filters */ 522 vf->cfg_flags |= VF_CFG_VLAN; 523 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN, 524 BNX2X_MSG_IOV); 525 return 0; 526 op_err: 527 BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); 528 return rc; 529 } 530 531 static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf, 532 int qid) 533 { 534 int rc; 535 536 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); 537 538 /* If needed, clean the filtering data base */ 539 if ((qid == LEADING_IDX) && 540 bnx2x_validate_vf_sp_objs(bp, vf, false)) { 541 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false); 542 if (rc) 543 goto op_err; 544 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true); 545 if (rc) 546 goto op_err; 547 } 548 549 /* Terminate queue */ 550 if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) { 551 struct bnx2x_queue_state_params qstate; 552 553 memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); 554 qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); 555 qstate.q_obj->state = BNX2X_Q_STATE_STOPPED; 556 qstate.cmd = BNX2X_Q_CMD_TERMINATE; 557 set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); 558 rc = bnx2x_queue_state_change(bp, &qstate); 559 if (rc) 560 goto op_err; 561 } 562 563 return 0; 564 op_err: 565 BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); 566 return rc; 567 } 568 569 int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, 570 bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only) 571 { 572 struct bnx2x_mcast_list_elem *mc = NULL; 573 struct bnx2x_mcast_ramrod_params mcast; 574 int rc, i; 575 576 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 577 578 /* Prepare Multicast command */ 579 memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params)); 580 mcast.mcast_obj = &vf->mcast_obj; 581 if (drv_only) 582 set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags); 583 else 584 set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags); 585 if (mc_num) { 586 mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem), 587 GFP_KERNEL); 588 if (!mc) { 589 BNX2X_ERR("Cannot Configure mulicasts due to lack of memory\n"); 590 return -ENOMEM; 591 } 592 } 593 594 /* clear existing mcasts */ 595 mcast.mcast_list_len = vf->mcast_list_len; 596 vf->mcast_list_len = mc_num; 597 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL); 598 if (rc) { 599 BNX2X_ERR("Failed to remove multicasts\n"); 600 kfree(mc); 601 return rc; 602 } 603 604 /* update mcast list on the ramrod params */ 605 if (mc_num) { 606 INIT_LIST_HEAD(&mcast.mcast_list); 607 for (i = 0; i < mc_num; i++) { 608 mc[i].mac = mcasts[i]; 609 list_add_tail(&mc[i].link, 610 &mcast.mcast_list); 611 } 612 613 /* add new mcasts */ 614 mcast.mcast_list_len = mc_num; 615 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD); 616 if (rc) 617 BNX2X_ERR("Faled to add multicasts\n"); 618 kfree(mc); 619 } 620 621 return rc; 622 } 623 624 static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid, 625 struct bnx2x_rx_mode_ramrod_params *ramrod, 626 struct bnx2x_virtf *vf, 627 unsigned long accept_flags) 628 { 629 struct bnx2x_vf_queue *vfq = vfq_get(vf, qid); 630 631 memset(ramrod, 0, sizeof(*ramrod)); 632 ramrod->cid = vfq->cid; 633 ramrod->cl_id = vfq_cl_id(vf, vfq); 634 ramrod->rx_mode_obj = &bp->rx_mode_obj; 635 ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid); 636 ramrod->rx_accept_flags = accept_flags; 637 ramrod->tx_accept_flags = accept_flags; 638 ramrod->pstate = &vf->filter_state; 639 ramrod->state = BNX2X_FILTER_RX_MODE_PENDING; 640 641 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); 642 set_bit(RAMROD_RX, &ramrod->ramrod_flags); 643 set_bit(RAMROD_TX, &ramrod->ramrod_flags); 644 645 ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2); 646 ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); 647 } 648 649 int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf, 650 int qid, unsigned long accept_flags) 651 { 652 struct bnx2x_rx_mode_ramrod_params ramrod; 653 654 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 655 656 bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags); 657 set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); 658 vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags; 659 return bnx2x_config_rx_mode(bp, &ramrod); 660 } 661 662 int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid) 663 { 664 int rc; 665 666 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); 667 668 /* Remove all classification configuration for leading queue */ 669 if (qid == LEADING_IDX) { 670 rc = bnx2x_vf_rxmode(bp, vf, qid, 0); 671 if (rc) 672 goto op_err; 673 674 /* Remove filtering if feasible */ 675 if (bnx2x_validate_vf_sp_objs(bp, vf, true)) { 676 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, 677 false, false); 678 if (rc) 679 goto op_err; 680 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, 681 false, true); 682 if (rc) 683 goto op_err; 684 rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false); 685 if (rc) 686 goto op_err; 687 } 688 } 689 690 /* Destroy queue */ 691 rc = bnx2x_vf_queue_destroy(bp, vf, qid); 692 if (rc) 693 goto op_err; 694 return rc; 695 op_err: 696 BNX2X_ERR("vf[%d:%d] error: rc %d\n", 697 vf->abs_vfid, qid, rc); 698 return rc; 699 } 700 701 /* VF enable primitives 702 * when pretend is required the caller is responsible 703 * for calling pretend prior to calling these routines 704 */ 705 706 /* internal vf enable - until vf is enabled internally all transactions 707 * are blocked. This routine should always be called last with pretend. 708 */ 709 static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable) 710 { 711 REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0); 712 } 713 714 /* clears vf error in all semi blocks */ 715 static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid) 716 { 717 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid); 718 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid); 719 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid); 720 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid); 721 } 722 723 static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid) 724 { 725 u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5; 726 u32 was_err_reg = 0; 727 728 switch (was_err_group) { 729 case 0: 730 was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR; 731 break; 732 case 1: 733 was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR; 734 break; 735 case 2: 736 was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR; 737 break; 738 case 3: 739 was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR; 740 break; 741 } 742 REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f)); 743 } 744 745 static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf) 746 { 747 int i; 748 u32 val; 749 750 /* Set VF masks and configuration - pretend */ 751 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); 752 753 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); 754 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); 755 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0); 756 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0); 757 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0); 758 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0); 759 760 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); 761 val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN); 762 if (vf->cfg_flags & VF_CFG_INT_SIMD) 763 val |= IGU_VF_CONF_SINGLE_ISR_EN; 764 val &= ~IGU_VF_CONF_PARENT_MASK; 765 val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT; 766 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); 767 768 DP(BNX2X_MSG_IOV, 769 "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n", 770 vf->abs_vfid, val); 771 772 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 773 774 /* iterate over all queues, clear sb consumer */ 775 for (i = 0; i < vf_sb_count(vf); i++) { 776 u8 igu_sb_id = vf_igu_sb(vf, i); 777 778 /* zero prod memory */ 779 REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0); 780 781 /* clear sb state machine */ 782 bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id, 783 false /* VF */); 784 785 /* disable + update */ 786 bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0, 787 IGU_INT_DISABLE, 1); 788 } 789 } 790 791 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid) 792 { 793 /* set the VF-PF association in the FW */ 794 storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp)); 795 storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1); 796 797 /* clear vf errors*/ 798 bnx2x_vf_semi_clear_err(bp, abs_vfid); 799 bnx2x_vf_pglue_clear_err(bp, abs_vfid); 800 801 /* internal vf-enable - pretend */ 802 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid)); 803 DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid); 804 bnx2x_vf_enable_internal(bp, true); 805 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 806 } 807 808 static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf) 809 { 810 /* Reset vf in IGU interrupts are still disabled */ 811 bnx2x_vf_igu_reset(bp, vf); 812 813 /* pretend to enable the vf with the PBF */ 814 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); 815 REG_WR(bp, PBF_REG_DISABLE_VF, 0); 816 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 817 } 818 819 static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid) 820 { 821 struct pci_dev *dev; 822 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 823 824 if (!vf) 825 return false; 826 827 dev = pci_get_bus_and_slot(vf->bus, vf->devfn); 828 if (dev) 829 return bnx2x_is_pcie_pending(dev); 830 return false; 831 } 832 833 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) 834 { 835 /* Verify no pending pci transactions */ 836 if (bnx2x_vf_is_pcie_pending(bp, abs_vfid)) 837 BNX2X_ERR("PCIE Transactions still pending\n"); 838 839 return 0; 840 } 841 842 static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp, 843 struct bnx2x_virtf *vf, 844 int new) 845 { 846 int num = vf_vlan_rules_cnt(vf); 847 int diff = new - num; 848 bool rc = true; 849 850 DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n", 851 vf->abs_vfid, new, num); 852 853 if (diff > 0) 854 rc = bp->vlans_pool.get(&bp->vlans_pool, diff); 855 else if (diff < 0) 856 rc = bp->vlans_pool.put(&bp->vlans_pool, -diff); 857 858 if (rc) 859 vf_vlan_rules_cnt(vf) = new; 860 else 861 DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n", 862 vf->abs_vfid); 863 } 864 865 /* must be called after the number of PF queues and the number of VFs are 866 * both known 867 */ 868 static void 869 bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) 870 { 871 struct vf_pf_resc_request *resc = &vf->alloc_resc; 872 u16 vlan_count = 0; 873 874 /* will be set only during VF-ACQUIRE */ 875 resc->num_rxqs = 0; 876 resc->num_txqs = 0; 877 878 /* no credit calculations for macs (just yet) */ 879 resc->num_mac_filters = 1; 880 881 /* divvy up vlan rules */ 882 bnx2x_iov_re_set_vlan_filters(bp, vf, 0); 883 vlan_count = bp->vlans_pool.check(&bp->vlans_pool); 884 vlan_count = 1 << ilog2(vlan_count); 885 bnx2x_iov_re_set_vlan_filters(bp, vf, 886 vlan_count / BNX2X_NR_VIRTFN(bp)); 887 888 /* no real limitation */ 889 resc->num_mc_filters = 0; 890 891 /* num_sbs already set */ 892 resc->num_sbs = vf->sb_count; 893 } 894 895 /* FLR routines: */ 896 static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) 897 { 898 /* reset the state variables */ 899 bnx2x_iov_static_resc(bp, vf); 900 vf->state = VF_FREE; 901 } 902 903 static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf) 904 { 905 u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp); 906 907 /* DQ usage counter */ 908 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); 909 bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT, 910 "DQ VF usage counter timed out", 911 poll_cnt); 912 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 913 914 /* FW cleanup command - poll for the results */ 915 if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid), 916 poll_cnt)) 917 BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid); 918 919 /* verify TX hw is flushed */ 920 bnx2x_tx_hw_flushed(bp, poll_cnt); 921 } 922 923 static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) 924 { 925 int rc, i; 926 927 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 928 929 /* the cleanup operations are valid if and only if the VF 930 * was first acquired. 931 */ 932 for (i = 0; i < vf_rxq_count(vf); i++) { 933 rc = bnx2x_vf_queue_flr(bp, vf, i); 934 if (rc) 935 goto out; 936 } 937 938 /* remove multicasts */ 939 bnx2x_vf_mcast(bp, vf, NULL, 0, true); 940 941 /* dispatch final cleanup and wait for HW queues to flush */ 942 bnx2x_vf_flr_clnup_hw(bp, vf); 943 944 /* release VF resources */ 945 bnx2x_vf_free_resc(bp, vf); 946 947 /* re-open the mailbox */ 948 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); 949 return; 950 out: 951 BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n", 952 vf->abs_vfid, i, rc); 953 } 954 955 static void bnx2x_vf_flr_clnup(struct bnx2x *bp) 956 { 957 struct bnx2x_virtf *vf; 958 int i; 959 960 for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) { 961 /* VF should be RESET & in FLR cleanup states */ 962 if (bnx2x_vf(bp, i, state) != VF_RESET || 963 !bnx2x_vf(bp, i, flr_clnup_stage)) 964 continue; 965 966 DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", 967 i, BNX2X_NR_VIRTFN(bp)); 968 969 vf = BP_VF(bp, i); 970 971 /* lock the vf pf channel */ 972 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); 973 974 /* invoke the VF FLR SM */ 975 bnx2x_vf_flr(bp, vf); 976 977 /* mark the VF to be ACKED and continue */ 978 vf->flr_clnup_stage = false; 979 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); 980 } 981 982 /* Acknowledge the handled VFs. 983 * we are acknowledge all the vfs which an flr was requested for, even 984 * if amongst them there are such that we never opened, since the mcp 985 * will interrupt us immediately again if we only ack some of the bits, 986 * resulting in an endless loop. This can happen for example in KVM 987 * where an 'all ones' flr request is sometimes given by hyper visor 988 */ 989 DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n", 990 bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]); 991 for (i = 0; i < FLRD_VFS_DWORDS; i++) 992 SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 993 bp->vfdb->flrd_vfs[i]); 994 995 bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0); 996 997 /* clear the acked bits - better yet if the MCP implemented 998 * write to clear semantics 999 */ 1000 for (i = 0; i < FLRD_VFS_DWORDS; i++) 1001 SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0); 1002 } 1003 1004 void bnx2x_vf_handle_flr_event(struct bnx2x *bp) 1005 { 1006 int i; 1007 1008 /* Read FLR'd VFs */ 1009 for (i = 0; i < FLRD_VFS_DWORDS; i++) 1010 bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]); 1011 1012 DP(BNX2X_MSG_MCP, 1013 "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n", 1014 bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]); 1015 1016 for_each_vf(bp, i) { 1017 struct bnx2x_virtf *vf = BP_VF(bp, i); 1018 u32 reset = 0; 1019 1020 if (vf->abs_vfid < 32) 1021 reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid); 1022 else 1023 reset = bp->vfdb->flrd_vfs[1] & 1024 (1 << (vf->abs_vfid - 32)); 1025 1026 if (reset) { 1027 /* set as reset and ready for cleanup */ 1028 vf->state = VF_RESET; 1029 vf->flr_clnup_stage = true; 1030 1031 DP(BNX2X_MSG_IOV, 1032 "Initiating Final cleanup for VF %d\n", 1033 vf->abs_vfid); 1034 } 1035 } 1036 1037 /* do the FLR cleanup for all marked VFs*/ 1038 bnx2x_vf_flr_clnup(bp); 1039 } 1040 1041 /* IOV global initialization routines */ 1042 void bnx2x_iov_init_dq(struct bnx2x *bp) 1043 { 1044 if (!IS_SRIOV(bp)) 1045 return; 1046 1047 /* Set the DQ such that the CID reflect the abs_vfid */ 1048 REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0); 1049 REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS)); 1050 1051 /* Set VFs starting CID. If its > 0 the preceding CIDs are belong to 1052 * the PF L2 queues 1053 */ 1054 REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID); 1055 1056 /* The VF window size is the log2 of the max number of CIDs per VF */ 1057 REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND); 1058 1059 /* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match 1060 * the Pf doorbell size although the 2 are independent. 1061 */ 1062 REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3); 1063 1064 /* No security checks for now - 1065 * configure single rule (out of 16) mask = 0x1, value = 0x0, 1066 * CID range 0 - 0x1ffff 1067 */ 1068 REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1); 1069 REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0); 1070 REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); 1071 REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); 1072 1073 /* set the VF doorbell threshold. This threshold represents the amount 1074 * of doorbells allowed in the main DORQ fifo for a specific VF. 1075 */ 1076 REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64); 1077 } 1078 1079 void bnx2x_iov_init_dmae(struct bnx2x *bp) 1080 { 1081 if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) 1082 REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); 1083 } 1084 1085 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) 1086 { 1087 struct pci_dev *dev = bp->pdev; 1088 struct bnx2x_sriov *iov = &bp->vfdb->sriov; 1089 1090 return dev->bus->number + ((dev->devfn + iov->offset + 1091 iov->stride * vfid) >> 8); 1092 } 1093 1094 static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid) 1095 { 1096 struct pci_dev *dev = bp->pdev; 1097 struct bnx2x_sriov *iov = &bp->vfdb->sriov; 1098 1099 return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff; 1100 } 1101 1102 static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf) 1103 { 1104 int i, n; 1105 struct pci_dev *dev = bp->pdev; 1106 struct bnx2x_sriov *iov = &bp->vfdb->sriov; 1107 1108 for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) { 1109 u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i); 1110 u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i); 1111 1112 size /= iov->total; 1113 vf->bars[n].bar = start + size * vf->abs_vfid; 1114 vf->bars[n].size = size; 1115 } 1116 } 1117 1118 static int bnx2x_ari_enabled(struct pci_dev *dev) 1119 { 1120 return dev->bus->self && dev->bus->self->ari_enabled; 1121 } 1122 1123 static void 1124 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) 1125 { 1126 int sb_id; 1127 u32 val; 1128 u8 fid, current_pf = 0; 1129 1130 /* IGU in normal mode - read CAM */ 1131 for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) { 1132 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4); 1133 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) 1134 continue; 1135 fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); 1136 if (fid & IGU_FID_ENCODE_IS_PF) 1137 current_pf = fid & IGU_FID_PF_NUM_MASK; 1138 else if (current_pf == BP_FUNC(bp)) 1139 bnx2x_vf_set_igu_info(bp, sb_id, 1140 (fid & IGU_FID_VF_NUM_MASK)); 1141 DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", 1142 ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"), 1143 ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) : 1144 (fid & IGU_FID_VF_NUM_MASK)), sb_id, 1145 GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)); 1146 } 1147 DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool); 1148 } 1149 1150 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp) 1151 { 1152 if (bp->vfdb) { 1153 kfree(bp->vfdb->vfqs); 1154 kfree(bp->vfdb->vfs); 1155 kfree(bp->vfdb); 1156 } 1157 bp->vfdb = NULL; 1158 } 1159 1160 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov) 1161 { 1162 int pos; 1163 struct pci_dev *dev = bp->pdev; 1164 1165 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); 1166 if (!pos) { 1167 BNX2X_ERR("failed to find SRIOV capability in device\n"); 1168 return -ENODEV; 1169 } 1170 1171 iov->pos = pos; 1172 DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos); 1173 pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl); 1174 pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total); 1175 pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial); 1176 pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset); 1177 pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride); 1178 pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); 1179 pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap); 1180 pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); 1181 1182 return 0; 1183 } 1184 1185 static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov) 1186 { 1187 u32 val; 1188 1189 /* read the SRIOV capability structure 1190 * The fields can be read via configuration read or 1191 * directly from the device (starting at offset PCICFG_OFFSET) 1192 */ 1193 if (bnx2x_sriov_pci_cfg_info(bp, iov)) 1194 return -ENODEV; 1195 1196 /* get the number of SRIOV bars */ 1197 iov->nres = 0; 1198 1199 /* read the first_vfid */ 1200 val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF); 1201 iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK) 1202 * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp)); 1203 1204 DP(BNX2X_MSG_IOV, 1205 "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n", 1206 BP_FUNC(bp), 1207 iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total, 1208 iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz); 1209 1210 return 0; 1211 } 1212 1213 /* must be called after PF bars are mapped */ 1214 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, 1215 int num_vfs_param) 1216 { 1217 int err, i; 1218 struct bnx2x_sriov *iov; 1219 struct pci_dev *dev = bp->pdev; 1220 1221 bp->vfdb = NULL; 1222 1223 /* verify is pf */ 1224 if (IS_VF(bp)) 1225 return 0; 1226 1227 /* verify sriov capability is present in configuration space */ 1228 if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV)) 1229 return 0; 1230 1231 /* verify chip revision */ 1232 if (CHIP_IS_E1x(bp)) 1233 return 0; 1234 1235 /* check if SRIOV support is turned off */ 1236 if (!num_vfs_param) 1237 return 0; 1238 1239 /* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */ 1240 if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) { 1241 BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n", 1242 BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID); 1243 return 0; 1244 } 1245 1246 /* SRIOV can be enabled only with MSIX */ 1247 if (int_mode_param == BNX2X_INT_MODE_MSI || 1248 int_mode_param == BNX2X_INT_MODE_INTX) { 1249 BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n"); 1250 return 0; 1251 } 1252 1253 err = -EIO; 1254 /* verify ari is enabled */ 1255 if (!bnx2x_ari_enabled(bp->pdev)) { 1256 BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n"); 1257 return 0; 1258 } 1259 1260 /* verify igu is in normal mode */ 1261 if (CHIP_INT_MODE_IS_BC(bp)) { 1262 BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n"); 1263 return 0; 1264 } 1265 1266 /* allocate the vfs database */ 1267 bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL); 1268 if (!bp->vfdb) { 1269 BNX2X_ERR("failed to allocate vf database\n"); 1270 err = -ENOMEM; 1271 goto failed; 1272 } 1273 1274 /* get the sriov info - Linux already collected all the pertinent 1275 * information, however the sriov structure is for the private use 1276 * of the pci module. Also we want this information regardless 1277 * of the hyper-visor. 1278 */ 1279 iov = &(bp->vfdb->sriov); 1280 err = bnx2x_sriov_info(bp, iov); 1281 if (err) 1282 goto failed; 1283 1284 /* SR-IOV capability was enabled but there are no VFs*/ 1285 if (iov->total == 0) 1286 goto failed; 1287 1288 iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param); 1289 1290 DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n", 1291 num_vfs_param, iov->nr_virtfn); 1292 1293 /* allocate the vf array */ 1294 bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) * 1295 BNX2X_NR_VIRTFN(bp), GFP_KERNEL); 1296 if (!bp->vfdb->vfs) { 1297 BNX2X_ERR("failed to allocate vf array\n"); 1298 err = -ENOMEM; 1299 goto failed; 1300 } 1301 1302 /* Initial VF init - index and abs_vfid - nr_virtfn must be set */ 1303 for_each_vf(bp, i) { 1304 bnx2x_vf(bp, i, index) = i; 1305 bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; 1306 bnx2x_vf(bp, i, state) = VF_FREE; 1307 mutex_init(&bnx2x_vf(bp, i, op_mutex)); 1308 bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; 1309 } 1310 1311 /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ 1312 bnx2x_get_vf_igu_cam_info(bp); 1313 1314 /* allocate the queue arrays for all VFs */ 1315 bp->vfdb->vfqs = kzalloc( 1316 BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue), 1317 GFP_KERNEL); 1318 1319 DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs); 1320 1321 if (!bp->vfdb->vfqs) { 1322 BNX2X_ERR("failed to allocate vf queue array\n"); 1323 err = -ENOMEM; 1324 goto failed; 1325 } 1326 1327 /* Prepare the VFs event synchronization mechanism */ 1328 mutex_init(&bp->vfdb->event_mutex); 1329 1330 return 0; 1331 failed: 1332 DP(BNX2X_MSG_IOV, "Failed err=%d\n", err); 1333 __bnx2x_iov_free_vfdb(bp); 1334 return err; 1335 } 1336 1337 void bnx2x_iov_remove_one(struct bnx2x *bp) 1338 { 1339 int vf_idx; 1340 1341 /* if SRIOV is not enabled there's nothing to do */ 1342 if (!IS_SRIOV(bp)) 1343 return; 1344 1345 DP(BNX2X_MSG_IOV, "about to call disable sriov\n"); 1346 pci_disable_sriov(bp->pdev); 1347 DP(BNX2X_MSG_IOV, "sriov disabled\n"); 1348 1349 /* disable access to all VFs */ 1350 for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) { 1351 bnx2x_pretend_func(bp, 1352 HW_VF_HANDLE(bp, 1353 bp->vfdb->sriov.first_vf_in_pf + 1354 vf_idx)); 1355 DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n", 1356 bp->vfdb->sriov.first_vf_in_pf + vf_idx); 1357 bnx2x_vf_enable_internal(bp, 0); 1358 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 1359 } 1360 1361 /* free vf database */ 1362 __bnx2x_iov_free_vfdb(bp); 1363 } 1364 1365 void bnx2x_iov_free_mem(struct bnx2x *bp) 1366 { 1367 int i; 1368 1369 if (!IS_SRIOV(bp)) 1370 return; 1371 1372 /* free vfs hw contexts */ 1373 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 1374 struct hw_dma *cxt = &bp->vfdb->context[i]; 1375 BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size); 1376 } 1377 1378 BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr, 1379 BP_VFDB(bp)->sp_dma.mapping, 1380 BP_VFDB(bp)->sp_dma.size); 1381 1382 BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr, 1383 BP_VF_MBX_DMA(bp)->mapping, 1384 BP_VF_MBX_DMA(bp)->size); 1385 1386 BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr, 1387 BP_VF_BULLETIN_DMA(bp)->mapping, 1388 BP_VF_BULLETIN_DMA(bp)->size); 1389 } 1390 1391 int bnx2x_iov_alloc_mem(struct bnx2x *bp) 1392 { 1393 size_t tot_size; 1394 int i, rc = 0; 1395 1396 if (!IS_SRIOV(bp)) 1397 return rc; 1398 1399 /* allocate vfs hw contexts */ 1400 tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) * 1401 BNX2X_CIDS_PER_VF * sizeof(union cdu_context); 1402 1403 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 1404 struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i); 1405 cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ); 1406 1407 if (cxt->size) { 1408 cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size); 1409 if (!cxt->addr) 1410 goto alloc_mem_err; 1411 } else { 1412 cxt->addr = NULL; 1413 cxt->mapping = 0; 1414 } 1415 tot_size -= cxt->size; 1416 } 1417 1418 /* allocate vfs ramrods dma memory - client_init and set_mac */ 1419 tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp); 1420 BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping, 1421 tot_size); 1422 if (!BP_VFDB(bp)->sp_dma.addr) 1423 goto alloc_mem_err; 1424 BP_VFDB(bp)->sp_dma.size = tot_size; 1425 1426 /* allocate mailboxes */ 1427 tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; 1428 BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping, 1429 tot_size); 1430 if (!BP_VF_MBX_DMA(bp)->addr) 1431 goto alloc_mem_err; 1432 1433 BP_VF_MBX_DMA(bp)->size = tot_size; 1434 1435 /* allocate local bulletin boards */ 1436 tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE; 1437 BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping, 1438 tot_size); 1439 if (!BP_VF_BULLETIN_DMA(bp)->addr) 1440 goto alloc_mem_err; 1441 1442 BP_VF_BULLETIN_DMA(bp)->size = tot_size; 1443 1444 return 0; 1445 1446 alloc_mem_err: 1447 return -ENOMEM; 1448 } 1449 1450 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, 1451 struct bnx2x_vf_queue *q) 1452 { 1453 u8 cl_id = vfq_cl_id(vf, q); 1454 u8 func_id = FW_VF_HANDLE(vf->abs_vfid); 1455 unsigned long q_type = 0; 1456 1457 set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); 1458 set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); 1459 1460 /* Queue State object */ 1461 bnx2x_init_queue_obj(bp, &q->sp_obj, 1462 cl_id, &q->cid, 1, func_id, 1463 bnx2x_vf_sp(bp, vf, q_data), 1464 bnx2x_vf_sp_map(bp, vf, q_data), 1465 q_type); 1466 1467 /* sp indication is set only when vlan/mac/etc. are initialized */ 1468 q->sp_initialized = false; 1469 1470 DP(BNX2X_MSG_IOV, 1471 "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n", 1472 vf->abs_vfid, q->sp_obj.func_id, q->cid); 1473 } 1474 1475 /* called by bnx2x_nic_load */ 1476 int bnx2x_iov_nic_init(struct bnx2x *bp) 1477 { 1478 int vfid; 1479 1480 if (!IS_SRIOV(bp)) { 1481 DP(BNX2X_MSG_IOV, "vfdb was not allocated\n"); 1482 return 0; 1483 } 1484 1485 DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn); 1486 1487 /* let FLR complete ... */ 1488 msleep(100); 1489 1490 /* initialize vf database */ 1491 for_each_vf(bp, vfid) { 1492 struct bnx2x_virtf *vf = BP_VF(bp, vfid); 1493 1494 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) * 1495 BNX2X_CIDS_PER_VF; 1496 1497 union cdu_context *base_cxt = (union cdu_context *) 1498 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + 1499 (base_vf_cid & (ILT_PAGE_CIDS-1)); 1500 1501 DP(BNX2X_MSG_IOV, 1502 "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n", 1503 vf->abs_vfid, vf_sb_count(vf), base_vf_cid, 1504 BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt); 1505 1506 /* init statically provisioned resources */ 1507 bnx2x_iov_static_resc(bp, vf); 1508 1509 /* queues are initialized during VF-ACQUIRE */ 1510 vf->filter_state = 0; 1511 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); 1512 1513 /* init mcast object - This object will be re-initialized 1514 * during VF-ACQUIRE with the proper cl_id and cid. 1515 * It needs to be initialized here so that it can be safely 1516 * handled by a subsequent FLR flow. 1517 */ 1518 vf->mcast_list_len = 0; 1519 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF, 1520 0xFF, 0xFF, 0xFF, 1521 bnx2x_vf_sp(bp, vf, mcast_rdata), 1522 bnx2x_vf_sp_map(bp, vf, mcast_rdata), 1523 BNX2X_FILTER_MCAST_PENDING, 1524 &vf->filter_state, 1525 BNX2X_OBJ_TYPE_RX_TX); 1526 1527 /* set the mailbox message addresses */ 1528 BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *) 1529 (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid * 1530 MBX_MSG_ALIGNED_SIZE); 1531 1532 BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping + 1533 vfid * MBX_MSG_ALIGNED_SIZE; 1534 1535 /* Enable vf mailbox */ 1536 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); 1537 } 1538 1539 /* Final VF init */ 1540 for_each_vf(bp, vfid) { 1541 struct bnx2x_virtf *vf = BP_VF(bp, vfid); 1542 1543 /* fill in the BDF and bars */ 1544 vf->bus = bnx2x_vf_bus(bp, vfid); 1545 vf->devfn = bnx2x_vf_devfn(bp, vfid); 1546 bnx2x_vf_set_bars(bp, vf); 1547 1548 DP(BNX2X_MSG_IOV, 1549 "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n", 1550 vf->abs_vfid, vf->bus, vf->devfn, 1551 (unsigned)vf->bars[0].bar, vf->bars[0].size, 1552 (unsigned)vf->bars[1].bar, vf->bars[1].size, 1553 (unsigned)vf->bars[2].bar, vf->bars[2].size); 1554 } 1555 1556 return 0; 1557 } 1558 1559 /* called by bnx2x_chip_cleanup */ 1560 int bnx2x_iov_chip_cleanup(struct bnx2x *bp) 1561 { 1562 int i; 1563 1564 if (!IS_SRIOV(bp)) 1565 return 0; 1566 1567 /* release all the VFs */ 1568 for_each_vf(bp, i) 1569 bnx2x_vf_release(bp, BP_VF(bp, i)); 1570 1571 return 0; 1572 } 1573 1574 /* called by bnx2x_init_hw_func, returns the next ilt line */ 1575 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) 1576 { 1577 int i; 1578 struct bnx2x_ilt *ilt = BP_ILT(bp); 1579 1580 if (!IS_SRIOV(bp)) 1581 return line; 1582 1583 /* set vfs ilt lines */ 1584 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 1585 struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i); 1586 1587 ilt->lines[line+i].page = hw_cxt->addr; 1588 ilt->lines[line+i].page_mapping = hw_cxt->mapping; 1589 ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */ 1590 } 1591 return line + i; 1592 } 1593 1594 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid) 1595 { 1596 return ((cid >= BNX2X_FIRST_VF_CID) && 1597 ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS)); 1598 } 1599 1600 static 1601 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, 1602 struct bnx2x_vf_queue *vfq, 1603 union event_ring_elem *elem) 1604 { 1605 unsigned long ramrod_flags = 0; 1606 int rc = 0; 1607 1608 /* Always push next commands out, don't wait here */ 1609 set_bit(RAMROD_CONT, &ramrod_flags); 1610 1611 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { 1612 case BNX2X_FILTER_MAC_PENDING: 1613 rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, 1614 &ramrod_flags); 1615 break; 1616 case BNX2X_FILTER_VLAN_PENDING: 1617 rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem, 1618 &ramrod_flags); 1619 break; 1620 default: 1621 BNX2X_ERR("Unsupported classification command: %d\n", 1622 elem->message.data.eth_event.echo); 1623 return; 1624 } 1625 if (rc < 0) 1626 BNX2X_ERR("Failed to schedule new commands: %d\n", rc); 1627 else if (rc > 0) 1628 DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n"); 1629 } 1630 1631 static 1632 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp, 1633 struct bnx2x_virtf *vf) 1634 { 1635 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 1636 int rc; 1637 1638 rparam.mcast_obj = &vf->mcast_obj; 1639 vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw); 1640 1641 /* If there are pending mcast commands - send them */ 1642 if (vf->mcast_obj.check_pending(&vf->mcast_obj)) { 1643 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 1644 if (rc < 0) 1645 BNX2X_ERR("Failed to send pending mcast commands: %d\n", 1646 rc); 1647 } 1648 } 1649 1650 static 1651 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, 1652 struct bnx2x_virtf *vf) 1653 { 1654 smp_mb__before_atomic(); 1655 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); 1656 smp_mb__after_atomic(); 1657 } 1658 1659 static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp, 1660 struct bnx2x_virtf *vf) 1661 { 1662 vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw); 1663 } 1664 1665 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) 1666 { 1667 struct bnx2x_virtf *vf; 1668 int qidx = 0, abs_vfid; 1669 u8 opcode; 1670 u16 cid = 0xffff; 1671 1672 if (!IS_SRIOV(bp)) 1673 return 1; 1674 1675 /* first get the cid - the only events we handle here are cfc-delete 1676 * and set-mac completion 1677 */ 1678 opcode = elem->message.opcode; 1679 1680 switch (opcode) { 1681 case EVENT_RING_OPCODE_CFC_DEL: 1682 cid = SW_CID((__force __le32) 1683 elem->message.data.cfc_del_event.cid); 1684 DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid); 1685 break; 1686 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 1687 case EVENT_RING_OPCODE_MULTICAST_RULES: 1688 case EVENT_RING_OPCODE_FILTERS_RULES: 1689 case EVENT_RING_OPCODE_RSS_UPDATE_RULES: 1690 cid = (elem->message.data.eth_event.echo & 1691 BNX2X_SWCID_MASK); 1692 DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); 1693 break; 1694 case EVENT_RING_OPCODE_VF_FLR: 1695 abs_vfid = elem->message.data.vf_flr_event.vf_id; 1696 DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n", 1697 abs_vfid); 1698 goto get_vf; 1699 case EVENT_RING_OPCODE_MALICIOUS_VF: 1700 abs_vfid = elem->message.data.malicious_vf_event.vf_id; 1701 BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n", 1702 abs_vfid, 1703 elem->message.data.malicious_vf_event.err_id); 1704 goto get_vf; 1705 default: 1706 return 1; 1707 } 1708 1709 /* check if the cid is the VF range */ 1710 if (!bnx2x_iov_is_vf_cid(bp, cid)) { 1711 DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid); 1712 return 1; 1713 } 1714 1715 /* extract vf and rxq index from vf_cid - relies on the following: 1716 * 1. vfid on cid reflects the true abs_vfid 1717 * 2. The max number of VFs (per path) is 64 1718 */ 1719 qidx = cid & ((1 << BNX2X_VF_CID_WND)-1); 1720 abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 1721 get_vf: 1722 vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 1723 1724 if (!vf) { 1725 BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n", 1726 cid, abs_vfid); 1727 return 0; 1728 } 1729 1730 switch (opcode) { 1731 case EVENT_RING_OPCODE_CFC_DEL: 1732 DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n", 1733 vf->abs_vfid, qidx); 1734 vfq_get(vf, qidx)->sp_obj.complete_cmd(bp, 1735 &vfq_get(vf, 1736 qidx)->sp_obj, 1737 BNX2X_Q_CMD_CFC_DEL); 1738 break; 1739 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 1740 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n", 1741 vf->abs_vfid, qidx); 1742 bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem); 1743 break; 1744 case EVENT_RING_OPCODE_MULTICAST_RULES: 1745 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n", 1746 vf->abs_vfid, qidx); 1747 bnx2x_vf_handle_mcast_eqe(bp, vf); 1748 break; 1749 case EVENT_RING_OPCODE_FILTERS_RULES: 1750 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n", 1751 vf->abs_vfid, qidx); 1752 bnx2x_vf_handle_filters_eqe(bp, vf); 1753 break; 1754 case EVENT_RING_OPCODE_RSS_UPDATE_RULES: 1755 DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n", 1756 vf->abs_vfid, qidx); 1757 bnx2x_vf_handle_rss_update_eqe(bp, vf); 1758 case EVENT_RING_OPCODE_VF_FLR: 1759 case EVENT_RING_OPCODE_MALICIOUS_VF: 1760 /* Do nothing for now */ 1761 return 0; 1762 } 1763 1764 return 0; 1765 } 1766 1767 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) 1768 { 1769 /* extract the vf from vf_cid - relies on the following: 1770 * 1. vfid on cid reflects the true abs_vfid 1771 * 2. The max number of VFs (per path) is 64 1772 */ 1773 int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 1774 return bnx2x_vf_by_abs_fid(bp, abs_vfid); 1775 } 1776 1777 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, 1778 struct bnx2x_queue_sp_obj **q_obj) 1779 { 1780 struct bnx2x_virtf *vf; 1781 1782 if (!IS_SRIOV(bp)) 1783 return; 1784 1785 vf = bnx2x_vf_by_cid(bp, vf_cid); 1786 1787 if (vf) { 1788 /* extract queue index from vf_cid - relies on the following: 1789 * 1. vfid on cid reflects the true abs_vfid 1790 * 2. The max number of VFs (per path) is 64 1791 */ 1792 int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1); 1793 *q_obj = &bnx2x_vfq(vf, q_index, sp_obj); 1794 } else { 1795 BNX2X_ERR("No vf matching cid %d\n", vf_cid); 1796 } 1797 } 1798 1799 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) 1800 { 1801 int i; 1802 int first_queue_query_index, num_queues_req; 1803 dma_addr_t cur_data_offset; 1804 struct stats_query_entry *cur_query_entry; 1805 u8 stats_count = 0; 1806 bool is_fcoe = false; 1807 1808 if (!IS_SRIOV(bp)) 1809 return; 1810 1811 if (!NO_FCOE(bp)) 1812 is_fcoe = true; 1813 1814 /* fcoe adds one global request and one queue request */ 1815 num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe; 1816 first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1817 (is_fcoe ? 0 : 1); 1818 1819 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), 1820 "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n", 1821 BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, 1822 first_queue_query_index + num_queues_req); 1823 1824 cur_data_offset = bp->fw_stats_data_mapping + 1825 offsetof(struct bnx2x_fw_stats_data, queue_stats) + 1826 num_queues_req * sizeof(struct per_queue_stats); 1827 1828 cur_query_entry = &bp->fw_stats_req-> 1829 query[first_queue_query_index + num_queues_req]; 1830 1831 for_each_vf(bp, i) { 1832 int j; 1833 struct bnx2x_virtf *vf = BP_VF(bp, i); 1834 1835 if (vf->state != VF_ENABLED) { 1836 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), 1837 "vf %d not enabled so no stats for it\n", 1838 vf->abs_vfid); 1839 continue; 1840 } 1841 1842 DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid); 1843 for_each_vfq(vf, j) { 1844 struct bnx2x_vf_queue *rxq = vfq_get(vf, j); 1845 1846 dma_addr_t q_stats_addr = 1847 vf->fw_stat_map + j * vf->stats_stride; 1848 1849 /* collect stats fro active queues only */ 1850 if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) == 1851 BNX2X_Q_LOGICAL_STATE_STOPPED) 1852 continue; 1853 1854 /* create stats query entry for this queue */ 1855 cur_query_entry->kind = STATS_TYPE_QUEUE; 1856 cur_query_entry->index = vfq_stat_id(vf, rxq); 1857 cur_query_entry->funcID = 1858 cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid)); 1859 cur_query_entry->address.hi = 1860 cpu_to_le32(U64_HI(q_stats_addr)); 1861 cur_query_entry->address.lo = 1862 cpu_to_le32(U64_LO(q_stats_addr)); 1863 DP(BNX2X_MSG_IOV, 1864 "added address %x %x for vf %d queue %d client %d\n", 1865 cur_query_entry->address.hi, 1866 cur_query_entry->address.lo, cur_query_entry->funcID, 1867 j, cur_query_entry->index); 1868 cur_query_entry++; 1869 cur_data_offset += sizeof(struct per_queue_stats); 1870 stats_count++; 1871 1872 /* all stats are coalesced to the leading queue */ 1873 if (vf->cfg_flags & VF_CFG_STATS_COALESCE) 1874 break; 1875 } 1876 } 1877 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; 1878 } 1879 1880 static inline 1881 struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id) 1882 { 1883 int i; 1884 struct bnx2x_virtf *vf = NULL; 1885 1886 for_each_vf(bp, i) { 1887 vf = BP_VF(bp, i); 1888 if (stat_id >= vf->igu_base_id && 1889 stat_id < vf->igu_base_id + vf_sb_count(vf)) 1890 break; 1891 } 1892 return vf; 1893 } 1894 1895 /* VF API helpers */ 1896 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid, 1897 u8 enable) 1898 { 1899 u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4; 1900 u32 val = enable ? (abs_vfid | (1 << 6)) : 0; 1901 1902 REG_WR(bp, reg, val); 1903 } 1904 1905 static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf) 1906 { 1907 int i; 1908 1909 for_each_vfq(vf, i) 1910 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, 1911 vfq_qzone_id(vf, vfq_get(vf, i)), false); 1912 } 1913 1914 static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf) 1915 { 1916 u32 val; 1917 1918 /* clear the VF configuration - pretend */ 1919 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); 1920 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); 1921 val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN | 1922 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK); 1923 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); 1924 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 1925 } 1926 1927 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf) 1928 { 1929 return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF), 1930 BNX2X_VF_MAX_QUEUES); 1931 } 1932 1933 static 1934 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf, 1935 struct vf_pf_resc_request *req_resc) 1936 { 1937 u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); 1938 u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); 1939 1940 /* Save a vlan filter for the Hypervisor */ 1941 return ((req_resc->num_rxqs <= rxq_cnt) && 1942 (req_resc->num_txqs <= txq_cnt) && 1943 (req_resc->num_sbs <= vf_sb_count(vf)) && 1944 (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) && 1945 (req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf))); 1946 } 1947 1948 /* CORE VF API */ 1949 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, 1950 struct vf_pf_resc_request *resc) 1951 { 1952 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) * 1953 BNX2X_CIDS_PER_VF; 1954 1955 union cdu_context *base_cxt = (union cdu_context *) 1956 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + 1957 (base_vf_cid & (ILT_PAGE_CIDS-1)); 1958 int i; 1959 1960 /* if state is 'acquired' the VF was not released or FLR'd, in 1961 * this case the returned resources match the acquired already 1962 * acquired resources. Verify that the requested numbers do 1963 * not exceed the already acquired numbers. 1964 */ 1965 if (vf->state == VF_ACQUIRED) { 1966 DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n", 1967 vf->abs_vfid); 1968 1969 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { 1970 BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= then previously acquired numbers\n", 1971 vf->abs_vfid); 1972 return -EINVAL; 1973 } 1974 return 0; 1975 } 1976 1977 /* Otherwise vf state must be 'free' or 'reset' */ 1978 if (vf->state != VF_FREE && vf->state != VF_RESET) { 1979 BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n", 1980 vf->abs_vfid, vf->state); 1981 return -EINVAL; 1982 } 1983 1984 /* static allocation: 1985 * the global maximum number are fixed per VF. Fail the request if 1986 * requested number exceed these globals 1987 */ 1988 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { 1989 DP(BNX2X_MSG_IOV, 1990 "cannot fulfill vf resource request. Placing maximal available values in response\n"); 1991 /* set the max resource in the vf */ 1992 return -ENOMEM; 1993 } 1994 1995 /* Set resources counters - 0 request means max available */ 1996 vf_sb_count(vf) = resc->num_sbs; 1997 vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf); 1998 vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf); 1999 if (resc->num_mac_filters) 2000 vf_mac_rules_cnt(vf) = resc->num_mac_filters; 2001 /* Add an additional vlan filter credit for the hypervisor */ 2002 bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1); 2003 2004 DP(BNX2X_MSG_IOV, 2005 "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n", 2006 vf_sb_count(vf), vf_rxq_count(vf), 2007 vf_txq_count(vf), vf_mac_rules_cnt(vf), 2008 vf_vlan_rules_visible_cnt(vf)); 2009 2010 /* Initialize the queues */ 2011 if (!vf->vfqs) { 2012 DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n"); 2013 return -EINVAL; 2014 } 2015 2016 for_each_vfq(vf, i) { 2017 struct bnx2x_vf_queue *q = vfq_get(vf, i); 2018 2019 if (!q) { 2020 BNX2X_ERR("q number %d was not allocated\n", i); 2021 return -EINVAL; 2022 } 2023 2024 q->index = i; 2025 q->cxt = &((base_cxt + i)->eth); 2026 q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i; 2027 2028 DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n", 2029 vf->abs_vfid, i, q->index, q->cid, q->cxt); 2030 2031 /* init SP objects */ 2032 bnx2x_vfq_init(bp, vf, q); 2033 } 2034 vf->state = VF_ACQUIRED; 2035 return 0; 2036 } 2037 2038 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) 2039 { 2040 struct bnx2x_func_init_params func_init = {0}; 2041 u16 flags = 0; 2042 int i; 2043 2044 /* the sb resources are initialized at this point, do the 2045 * FW/HW initializations 2046 */ 2047 for_each_vf_sb(vf, i) 2048 bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true, 2049 vf_igu_sb(vf, i), vf_igu_sb(vf, i)); 2050 2051 /* Sanity checks */ 2052 if (vf->state != VF_ACQUIRED) { 2053 DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n", 2054 vf->abs_vfid, vf->state); 2055 return -EINVAL; 2056 } 2057 2058 /* let FLR complete ... */ 2059 msleep(100); 2060 2061 /* FLR cleanup epilogue */ 2062 if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid)) 2063 return -EBUSY; 2064 2065 /* reset IGU VF statistics: MSIX */ 2066 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0); 2067 2068 /* vf init */ 2069 if (vf->cfg_flags & VF_CFG_STATS) 2070 flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ); 2071 2072 if (vf->cfg_flags & VF_CFG_TPA) 2073 flags |= FUNC_FLG_TPA; 2074 2075 if (is_vf_multi(vf)) 2076 flags |= FUNC_FLG_RSS; 2077 2078 /* function setup */ 2079 func_init.func_flgs = flags; 2080 func_init.pf_id = BP_FUNC(bp); 2081 func_init.func_id = FW_VF_HANDLE(vf->abs_vfid); 2082 func_init.fw_stat_map = vf->fw_stat_map; 2083 func_init.spq_map = vf->spq_map; 2084 func_init.spq_prod = 0; 2085 bnx2x_func_init(bp, &func_init); 2086 2087 /* Enable the vf */ 2088 bnx2x_vf_enable_access(bp, vf->abs_vfid); 2089 bnx2x_vf_enable_traffic(bp, vf); 2090 2091 /* queue protection table */ 2092 for_each_vfq(vf, i) 2093 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, 2094 vfq_qzone_id(vf, vfq_get(vf, i)), true); 2095 2096 vf->state = VF_ENABLED; 2097 2098 /* update vf bulletin board */ 2099 bnx2x_post_vf_bulletin(bp, vf->index); 2100 2101 return 0; 2102 } 2103 2104 struct set_vf_state_cookie { 2105 struct bnx2x_virtf *vf; 2106 u8 state; 2107 }; 2108 2109 static void bnx2x_set_vf_state(void *cookie) 2110 { 2111 struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie; 2112 2113 p->vf->state = p->state; 2114 } 2115 2116 int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf) 2117 { 2118 int rc = 0, i; 2119 2120 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 2121 2122 /* Close all queues */ 2123 for (i = 0; i < vf_rxq_count(vf); i++) { 2124 rc = bnx2x_vf_queue_teardown(bp, vf, i); 2125 if (rc) 2126 goto op_err; 2127 } 2128 2129 /* disable the interrupts */ 2130 DP(BNX2X_MSG_IOV, "disabling igu\n"); 2131 bnx2x_vf_igu_disable(bp, vf); 2132 2133 /* disable the VF */ 2134 DP(BNX2X_MSG_IOV, "clearing qtbl\n"); 2135 bnx2x_vf_clr_qtbl(bp, vf); 2136 2137 /* need to make sure there are no outstanding stats ramrods which may 2138 * cause the device to access the VF's stats buffer which it will free 2139 * as soon as we return from the close flow. 2140 */ 2141 { 2142 struct set_vf_state_cookie cookie; 2143 2144 cookie.vf = vf; 2145 cookie.state = VF_ACQUIRED; 2146 bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); 2147 } 2148 2149 DP(BNX2X_MSG_IOV, "set state to acquired\n"); 2150 2151 return 0; 2152 op_err: 2153 BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc); 2154 return rc; 2155 } 2156 2157 /* VF release can be called either: 1. The VF was acquired but 2158 * not enabled 2. the vf was enabled or in the process of being 2159 * enabled 2160 */ 2161 int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf) 2162 { 2163 int rc; 2164 2165 DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid, 2166 vf->state == VF_FREE ? "Free" : 2167 vf->state == VF_ACQUIRED ? "Acquired" : 2168 vf->state == VF_ENABLED ? "Enabled" : 2169 vf->state == VF_RESET ? "Reset" : 2170 "Unknown"); 2171 2172 switch (vf->state) { 2173 case VF_ENABLED: 2174 rc = bnx2x_vf_close(bp, vf); 2175 if (rc) 2176 goto op_err; 2177 /* Fallthrough to release resources */ 2178 case VF_ACQUIRED: 2179 DP(BNX2X_MSG_IOV, "about to free resources\n"); 2180 bnx2x_vf_free_resc(bp, vf); 2181 break; 2182 2183 case VF_FREE: 2184 case VF_RESET: 2185 default: 2186 break; 2187 } 2188 return 0; 2189 op_err: 2190 BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc); 2191 return rc; 2192 } 2193 2194 int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, 2195 struct bnx2x_config_rss_params *rss) 2196 { 2197 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 2198 set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags); 2199 return bnx2x_config_rss(bp, rss); 2200 } 2201 2202 int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, 2203 struct vfpf_tpa_tlv *tlv, 2204 struct bnx2x_queue_update_tpa_params *params) 2205 { 2206 aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr; 2207 struct bnx2x_queue_state_params qstate; 2208 int qid, rc = 0; 2209 2210 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 2211 2212 /* Set ramrod params */ 2213 memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); 2214 memcpy(&qstate.params.update_tpa, params, 2215 sizeof(struct bnx2x_queue_update_tpa_params)); 2216 qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA; 2217 set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); 2218 2219 for (qid = 0; qid < vf_rxq_count(vf); qid++) { 2220 qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); 2221 qstate.params.update_tpa.sge_map = sge_addr[qid]; 2222 DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n", 2223 vf->abs_vfid, qid, U64_HI(sge_addr[qid]), 2224 U64_LO(sge_addr[qid])); 2225 rc = bnx2x_queue_state_change(bp, &qstate); 2226 if (rc) { 2227 BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n", 2228 U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]), 2229 vf->abs_vfid, qid); 2230 return rc; 2231 } 2232 } 2233 2234 return rc; 2235 } 2236 2237 /* VF release ~ VF close + VF release-resources 2238 * Release is the ultimate SW shutdown and is called whenever an 2239 * irrecoverable error is encountered. 2240 */ 2241 int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf) 2242 { 2243 int rc; 2244 2245 DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid); 2246 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); 2247 2248 rc = bnx2x_vf_free(bp, vf); 2249 if (rc) 2250 WARN(rc, 2251 "VF[%d] Failed to allocate resources for release op- rc=%d\n", 2252 vf->abs_vfid, rc); 2253 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); 2254 return rc; 2255 } 2256 2257 static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp, 2258 struct bnx2x_virtf *vf, u32 *sbdf) 2259 { 2260 *sbdf = vf->devfn | (vf->bus << 8); 2261 } 2262 2263 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 2264 enum channel_tlvs tlv) 2265 { 2266 /* we don't lock the channel for unsupported tlvs */ 2267 if (!bnx2x_tlv_supported(tlv)) { 2268 BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n"); 2269 return; 2270 } 2271 2272 /* lock the channel */ 2273 mutex_lock(&vf->op_mutex); 2274 2275 /* record the locking op */ 2276 vf->op_current = tlv; 2277 2278 /* log the lock */ 2279 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n", 2280 vf->abs_vfid, tlv); 2281 } 2282 2283 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 2284 enum channel_tlvs expected_tlv) 2285 { 2286 enum channel_tlvs current_tlv; 2287 2288 if (!vf) { 2289 BNX2X_ERR("VF was %p\n", vf); 2290 return; 2291 } 2292 2293 current_tlv = vf->op_current; 2294 2295 /* we don't unlock the channel for unsupported tlvs */ 2296 if (!bnx2x_tlv_supported(expected_tlv)) 2297 return; 2298 2299 WARN(expected_tlv != vf->op_current, 2300 "lock mismatch: expected %d found %d", expected_tlv, 2301 vf->op_current); 2302 2303 /* record the locking op */ 2304 vf->op_current = CHANNEL_TLV_NONE; 2305 2306 /* lock the channel */ 2307 mutex_unlock(&vf->op_mutex); 2308 2309 /* log the unlock */ 2310 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n", 2311 vf->abs_vfid, vf->op_current); 2312 } 2313 2314 static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable) 2315 { 2316 struct bnx2x_queue_state_params q_params; 2317 u32 prev_flags; 2318 int i, rc; 2319 2320 /* Verify changes are needed and record current Tx switching state */ 2321 prev_flags = bp->flags; 2322 if (enable) 2323 bp->flags |= TX_SWITCHING; 2324 else 2325 bp->flags &= ~TX_SWITCHING; 2326 if (prev_flags == bp->flags) 2327 return 0; 2328 2329 /* Verify state enables the sending of queue ramrods */ 2330 if ((bp->state != BNX2X_STATE_OPEN) || 2331 (bnx2x_get_q_logical_state(bp, 2332 &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) != 2333 BNX2X_Q_LOGICAL_STATE_ACTIVE)) 2334 return 0; 2335 2336 /* send q. update ramrod to configure Tx switching */ 2337 memset(&q_params, 0, sizeof(q_params)); 2338 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 2339 q_params.cmd = BNX2X_Q_CMD_UPDATE; 2340 __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, 2341 &q_params.params.update.update_flags); 2342 if (enable) 2343 __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING, 2344 &q_params.params.update.update_flags); 2345 else 2346 __clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING, 2347 &q_params.params.update.update_flags); 2348 2349 /* send the ramrod on all the queues of the PF */ 2350 for_each_eth_queue(bp, i) { 2351 struct bnx2x_fastpath *fp = &bp->fp[i]; 2352 2353 /* Set the appropriate Queue object */ 2354 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 2355 2356 /* Update the Queue state */ 2357 rc = bnx2x_queue_state_change(bp, &q_params); 2358 if (rc) { 2359 BNX2X_ERR("Failed to configure Tx switching\n"); 2360 return rc; 2361 } 2362 } 2363 2364 DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled"); 2365 return 0; 2366 } 2367 2368 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) 2369 { 2370 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); 2371 2372 if (!IS_SRIOV(bp)) { 2373 BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n"); 2374 return -EINVAL; 2375 } 2376 2377 DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", 2378 num_vfs_param, BNX2X_NR_VIRTFN(bp)); 2379 2380 /* HW channel is only operational when PF is up */ 2381 if (bp->state != BNX2X_STATE_OPEN) { 2382 BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n"); 2383 return -EINVAL; 2384 } 2385 2386 /* we are always bound by the total_vfs in the configuration space */ 2387 if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) { 2388 BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n", 2389 num_vfs_param, BNX2X_NR_VIRTFN(bp)); 2390 num_vfs_param = BNX2X_NR_VIRTFN(bp); 2391 } 2392 2393 bp->requested_nr_virtfn = num_vfs_param; 2394 if (num_vfs_param == 0) { 2395 bnx2x_set_pf_tx_switching(bp, false); 2396 pci_disable_sriov(dev); 2397 return 0; 2398 } else { 2399 return bnx2x_enable_sriov(bp); 2400 } 2401 } 2402 2403 #define IGU_ENTRY_SIZE 4 2404 2405 int bnx2x_enable_sriov(struct bnx2x *bp) 2406 { 2407 int rc = 0, req_vfs = bp->requested_nr_virtfn; 2408 int vf_idx, sb_idx, vfq_idx, qcount, first_vf; 2409 u32 igu_entry, address; 2410 u16 num_vf_queues; 2411 2412 if (req_vfs == 0) 2413 return 0; 2414 2415 first_vf = bp->vfdb->sriov.first_vf_in_pf; 2416 2417 /* statically distribute vf sb pool between VFs */ 2418 num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES, 2419 BP_VFDB(bp)->vf_sbs_pool / req_vfs); 2420 2421 /* zero previous values learned from igu cam */ 2422 for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) { 2423 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); 2424 2425 vf->sb_count = 0; 2426 vf_sb_count(BP_VF(bp, vf_idx)) = 0; 2427 } 2428 bp->vfdb->vf_sbs_pool = 0; 2429 2430 /* prepare IGU cam */ 2431 sb_idx = BP_VFDB(bp)->first_vf_igu_entry; 2432 address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE; 2433 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { 2434 for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) { 2435 igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT | 2436 vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT | 2437 IGU_REG_MAPPING_MEMORY_VALID; 2438 DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n", 2439 sb_idx, vf_idx); 2440 REG_WR(bp, address, igu_entry); 2441 sb_idx++; 2442 address += IGU_ENTRY_SIZE; 2443 } 2444 } 2445 2446 /* Reinitialize vf database according to igu cam */ 2447 bnx2x_get_vf_igu_cam_info(bp); 2448 2449 DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n", 2450 BP_VFDB(bp)->vf_sbs_pool, num_vf_queues); 2451 2452 qcount = 0; 2453 for_each_vf(bp, vf_idx) { 2454 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); 2455 2456 /* set local queue arrays */ 2457 vf->vfqs = &bp->vfdb->vfqs[qcount]; 2458 qcount += vf_sb_count(vf); 2459 bnx2x_iov_static_resc(bp, vf); 2460 } 2461 2462 /* prepare msix vectors in VF configuration space - the value in the 2463 * PCI configuration space should be the index of the last entry, 2464 * namely one less than the actual size of the table 2465 */ 2466 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { 2467 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); 2468 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, 2469 num_vf_queues - 1); 2470 DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n", 2471 vf_idx, num_vf_queues - 1); 2472 } 2473 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 2474 2475 /* enable sriov. This will probe all the VFs, and consequentially cause 2476 * the "acquire" messages to appear on the VF PF channel. 2477 */ 2478 DP(BNX2X_MSG_IOV, "about to call enable sriov\n"); 2479 bnx2x_disable_sriov(bp); 2480 2481 rc = bnx2x_set_pf_tx_switching(bp, true); 2482 if (rc) 2483 return rc; 2484 2485 rc = pci_enable_sriov(bp->pdev, req_vfs); 2486 if (rc) { 2487 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); 2488 return rc; 2489 } 2490 DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs); 2491 return req_vfs; 2492 } 2493 2494 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) 2495 { 2496 int vfidx; 2497 struct pf_vf_bulletin_content *bulletin; 2498 2499 DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n"); 2500 for_each_vf(bp, vfidx) { 2501 bulletin = BP_VF_BULLETIN(bp, vfidx); 2502 if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN) 2503 bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0); 2504 } 2505 } 2506 2507 void bnx2x_disable_sriov(struct bnx2x *bp) 2508 { 2509 pci_disable_sriov(bp->pdev); 2510 } 2511 2512 static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, 2513 struct bnx2x_virtf **vf, 2514 struct pf_vf_bulletin_content **bulletin) 2515 { 2516 if (bp->state != BNX2X_STATE_OPEN) { 2517 BNX2X_ERR("vf ndo called though PF is down\n"); 2518 return -EINVAL; 2519 } 2520 2521 if (!IS_SRIOV(bp)) { 2522 BNX2X_ERR("vf ndo called though sriov is disabled\n"); 2523 return -EINVAL; 2524 } 2525 2526 if (vfidx >= BNX2X_NR_VIRTFN(bp)) { 2527 BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n", 2528 vfidx, BNX2X_NR_VIRTFN(bp)); 2529 return -EINVAL; 2530 } 2531 2532 /* init members */ 2533 *vf = BP_VF(bp, vfidx); 2534 *bulletin = BP_VF_BULLETIN(bp, vfidx); 2535 2536 if (!*vf) { 2537 BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n", 2538 vfidx); 2539 return -EINVAL; 2540 } 2541 2542 if (!(*vf)->vfqs) { 2543 BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n", 2544 vfidx); 2545 return -EINVAL; 2546 } 2547 2548 if (!*bulletin) { 2549 BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n", 2550 vfidx); 2551 return -EINVAL; 2552 } 2553 2554 return 0; 2555 } 2556 2557 int bnx2x_get_vf_config(struct net_device *dev, int vfidx, 2558 struct ifla_vf_info *ivi) 2559 { 2560 struct bnx2x *bp = netdev_priv(dev); 2561 struct bnx2x_virtf *vf = NULL; 2562 struct pf_vf_bulletin_content *bulletin = NULL; 2563 struct bnx2x_vlan_mac_obj *mac_obj; 2564 struct bnx2x_vlan_mac_obj *vlan_obj; 2565 int rc; 2566 2567 /* sanity and init */ 2568 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 2569 if (rc) 2570 return rc; 2571 mac_obj = &bnx2x_leading_vfq(vf, mac_obj); 2572 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); 2573 if (!mac_obj || !vlan_obj) { 2574 BNX2X_ERR("VF partially initialized\n"); 2575 return -EINVAL; 2576 } 2577 2578 ivi->vf = vfidx; 2579 ivi->qos = 0; 2580 ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */ 2581 ivi->min_tx_rate = 0; 2582 ivi->spoofchk = 1; /*always enabled */ 2583 if (vf->state == VF_ENABLED) { 2584 /* mac and vlan are in vlan_mac objects */ 2585 if (bnx2x_validate_vf_sp_objs(bp, vf, false)) { 2586 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, 2587 0, ETH_ALEN); 2588 vlan_obj->get_n_elements(bp, vlan_obj, 1, 2589 (u8 *)&ivi->vlan, 0, 2590 VLAN_HLEN); 2591 } 2592 } else { 2593 /* mac */ 2594 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) 2595 /* mac configured by ndo so its in bulletin board */ 2596 memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); 2597 else 2598 /* function has not been loaded yet. Show mac as 0s */ 2599 memset(&ivi->mac, 0, ETH_ALEN); 2600 2601 /* vlan */ 2602 if (bulletin->valid_bitmap & (1 << VLAN_VALID)) 2603 /* vlan configured by ndo so its in bulletin board */ 2604 memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN); 2605 else 2606 /* function has not been loaded yet. Show vlans as 0s */ 2607 memset(&ivi->vlan, 0, VLAN_HLEN); 2608 } 2609 2610 return 0; 2611 } 2612 2613 /* New mac for VF. Consider these cases: 2614 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and 2615 * supply at acquire. 2616 * 2. VF has already been acquired but has not yet initialized - store in local 2617 * bulletin board. mac will be posted on VF bulletin board after VF init. VF 2618 * will configure this mac when it is ready. 2619 * 3. VF has already initialized but has not yet setup a queue - post the new 2620 * mac on VF's bulletin board right now. VF will configure this mac when it 2621 * is ready. 2622 * 4. VF has already set a queue - delete any macs already configured for this 2623 * queue and manually config the new mac. 2624 * In any event, once this function has been called refuse any attempts by the 2625 * VF to configure any mac for itself except for this mac. In case of a race 2626 * where the VF fails to see the new post on its bulletin board before sending a 2627 * mac configuration request, the PF will simply fail the request and VF can try 2628 * again after consulting its bulletin board. 2629 */ 2630 int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) 2631 { 2632 struct bnx2x *bp = netdev_priv(dev); 2633 int rc, q_logical_state; 2634 struct bnx2x_virtf *vf = NULL; 2635 struct pf_vf_bulletin_content *bulletin = NULL; 2636 2637 /* sanity and init */ 2638 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 2639 if (rc) 2640 return rc; 2641 if (!is_valid_ether_addr(mac)) { 2642 BNX2X_ERR("mac address invalid\n"); 2643 return -EINVAL; 2644 } 2645 2646 /* update PF's copy of the VF's bulletin. Will no longer accept mac 2647 * configuration requests from vf unless match this mac 2648 */ 2649 bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID; 2650 memcpy(bulletin->mac, mac, ETH_ALEN); 2651 2652 /* Post update on VF's bulletin board */ 2653 rc = bnx2x_post_vf_bulletin(bp, vfidx); 2654 if (rc) { 2655 BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx); 2656 return rc; 2657 } 2658 2659 q_logical_state = 2660 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); 2661 if (vf->state == VF_ENABLED && 2662 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { 2663 /* configure the mac in device on this vf's queue */ 2664 unsigned long ramrod_flags = 0; 2665 struct bnx2x_vlan_mac_obj *mac_obj; 2666 2667 /* User should be able to see failure reason in system logs */ 2668 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) 2669 return -EINVAL; 2670 2671 /* must lock vfpf channel to protect against vf flows */ 2672 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 2673 2674 /* remove existing eth macs */ 2675 mac_obj = &bnx2x_leading_vfq(vf, mac_obj); 2676 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); 2677 if (rc) { 2678 BNX2X_ERR("failed to delete eth macs\n"); 2679 rc = -EINVAL; 2680 goto out; 2681 } 2682 2683 /* remove existing uc list macs */ 2684 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true); 2685 if (rc) { 2686 BNX2X_ERR("failed to delete uc_list macs\n"); 2687 rc = -EINVAL; 2688 goto out; 2689 } 2690 2691 /* configure the new mac to device */ 2692 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 2693 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, 2694 BNX2X_ETH_MAC, &ramrod_flags); 2695 2696 out: 2697 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 2698 } 2699 2700 return rc; 2701 } 2702 2703 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) 2704 { 2705 struct bnx2x_queue_state_params q_params = {NULL}; 2706 struct bnx2x_vlan_mac_ramrod_params ramrod_param; 2707 struct bnx2x_queue_update_params *update_params; 2708 struct pf_vf_bulletin_content *bulletin = NULL; 2709 struct bnx2x_rx_mode_ramrod_params rx_ramrod; 2710 struct bnx2x *bp = netdev_priv(dev); 2711 struct bnx2x_vlan_mac_obj *vlan_obj; 2712 unsigned long vlan_mac_flags = 0; 2713 unsigned long ramrod_flags = 0; 2714 struct bnx2x_virtf *vf = NULL; 2715 unsigned long accept_flags; 2716 int rc; 2717 2718 /* sanity and init */ 2719 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 2720 if (rc) 2721 return rc; 2722 2723 if (vlan > 4095) { 2724 BNX2X_ERR("illegal vlan value %d\n", vlan); 2725 return -EINVAL; 2726 } 2727 2728 DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n", 2729 vfidx, vlan, 0); 2730 2731 /* update PF's copy of the VF's bulletin. No point in posting the vlan 2732 * to the VF since it doesn't have anything to do with it. But it useful 2733 * to store it here in case the VF is not up yet and we can only 2734 * configure the vlan later when it does. Treat vlan id 0 as remove the 2735 * Host tag. 2736 */ 2737 if (vlan > 0) 2738 bulletin->valid_bitmap |= 1 << VLAN_VALID; 2739 else 2740 bulletin->valid_bitmap &= ~(1 << VLAN_VALID); 2741 bulletin->vlan = vlan; 2742 2743 /* is vf initialized and queue set up? */ 2744 if (vf->state != VF_ENABLED || 2745 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != 2746 BNX2X_Q_LOGICAL_STATE_ACTIVE) 2747 return rc; 2748 2749 /* User should be able to see error in system logs */ 2750 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) 2751 return -EINVAL; 2752 2753 /* must lock vfpf channel to protect against vf flows */ 2754 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 2755 2756 /* remove existing vlans */ 2757 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 2758 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); 2759 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, 2760 &ramrod_flags); 2761 if (rc) { 2762 BNX2X_ERR("failed to delete vlans\n"); 2763 rc = -EINVAL; 2764 goto out; 2765 } 2766 2767 /* need to remove/add the VF's accept_any_vlan bit */ 2768 accept_flags = bnx2x_leading_vfq(vf, accept_flags); 2769 if (vlan) 2770 clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); 2771 else 2772 set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); 2773 2774 bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, 2775 accept_flags); 2776 bnx2x_leading_vfq(vf, accept_flags) = accept_flags; 2777 bnx2x_config_rx_mode(bp, &rx_ramrod); 2778 2779 /* configure the new vlan to device */ 2780 memset(&ramrod_param, 0, sizeof(ramrod_param)); 2781 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 2782 ramrod_param.vlan_mac_obj = vlan_obj; 2783 ramrod_param.ramrod_flags = ramrod_flags; 2784 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 2785 &ramrod_param.user_req.vlan_mac_flags); 2786 ramrod_param.user_req.u.vlan.vlan = vlan; 2787 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; 2788 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); 2789 if (rc) { 2790 BNX2X_ERR("failed to configure vlan\n"); 2791 rc = -EINVAL; 2792 goto out; 2793 } 2794 2795 /* send queue update ramrod to configure default vlan and silent 2796 * vlan removal 2797 */ 2798 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 2799 q_params.cmd = BNX2X_Q_CMD_UPDATE; 2800 q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); 2801 update_params = &q_params.params.update; 2802 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, 2803 &update_params->update_flags); 2804 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, 2805 &update_params->update_flags); 2806 if (vlan == 0) { 2807 /* if vlan is 0 then we want to leave the VF traffic 2808 * untagged, and leave the incoming traffic untouched 2809 * (i.e. do not remove any vlan tags). 2810 */ 2811 __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, 2812 &update_params->update_flags); 2813 __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 2814 &update_params->update_flags); 2815 } else { 2816 /* configure default vlan to vf queue and set silent 2817 * vlan removal (the vf remains unaware of this vlan). 2818 */ 2819 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, 2820 &update_params->update_flags); 2821 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 2822 &update_params->update_flags); 2823 update_params->def_vlan = vlan; 2824 update_params->silent_removal_value = 2825 vlan & VLAN_VID_MASK; 2826 update_params->silent_removal_mask = VLAN_VID_MASK; 2827 } 2828 2829 /* Update the Queue state */ 2830 rc = bnx2x_queue_state_change(bp, &q_params); 2831 if (rc) { 2832 BNX2X_ERR("Failed to configure default VLAN\n"); 2833 goto out; 2834 } 2835 2836 2837 /* clear the flag indicating that this VF needs its vlan 2838 * (will only be set if the HV configured the Vlan before vf was 2839 * up and we were called because the VF came up later 2840 */ 2841 out: 2842 vf->cfg_flags &= ~VF_CFG_VLAN; 2843 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 2844 2845 return rc; 2846 } 2847 2848 /* crc is the first field in the bulletin board. Compute the crc over the 2849 * entire bulletin board excluding the crc field itself. Use the length field 2850 * as the Bulletin Board was posted by a PF with possibly a different version 2851 * from the vf which will sample it. Therefore, the length is computed by the 2852 * PF and the used blindly by the VF. 2853 */ 2854 u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, 2855 struct pf_vf_bulletin_content *bulletin) 2856 { 2857 return crc32(BULLETIN_CRC_SEED, 2858 ((u8 *)bulletin) + sizeof(bulletin->crc), 2859 bulletin->length - sizeof(bulletin->crc)); 2860 } 2861 2862 /* Check for new posts on the bulletin board */ 2863 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) 2864 { 2865 struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content; 2866 int attempts; 2867 2868 /* bulletin board hasn't changed since last sample */ 2869 if (bp->old_bulletin.version == bulletin.version) 2870 return PFVF_BULLETIN_UNCHANGED; 2871 2872 /* validate crc of new bulletin board */ 2873 if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) { 2874 /* sampling structure in mid post may result with corrupted data 2875 * validate crc to ensure coherency. 2876 */ 2877 for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) { 2878 bulletin = bp->pf2vf_bulletin->content; 2879 if (bulletin.crc == bnx2x_crc_vf_bulletin(bp, 2880 &bulletin)) 2881 break; 2882 BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n", 2883 bulletin.crc, 2884 bnx2x_crc_vf_bulletin(bp, &bulletin)); 2885 } 2886 if (attempts >= BULLETIN_ATTEMPTS) { 2887 BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n", 2888 attempts); 2889 return PFVF_BULLETIN_CRC_ERR; 2890 } 2891 } 2892 2893 /* the mac address in bulletin board is valid and is new */ 2894 if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID && 2895 !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) { 2896 /* update new mac to net device */ 2897 memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN); 2898 } 2899 2900 /* the vlan in bulletin board is valid and is new */ 2901 if (bulletin.valid_bitmap & 1 << VLAN_VALID) 2902 memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN); 2903 2904 /* copy new bulletin board to bp */ 2905 bp->old_bulletin = bulletin; 2906 2907 return PFVF_BULLETIN_UPDATED; 2908 } 2909 2910 void bnx2x_timer_sriov(struct bnx2x *bp) 2911 { 2912 bnx2x_sample_bulletin(bp); 2913 2914 /* if channel is down we need to self destruct */ 2915 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) 2916 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, 2917 BNX2X_MSG_IOV); 2918 } 2919 2920 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) 2921 { 2922 /* vf doorbells are embedded within the regview */ 2923 return bp->regview + PXP_VF_ADDR_DB_START; 2924 } 2925 2926 void bnx2x_vf_pci_dealloc(struct bnx2x *bp) 2927 { 2928 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, 2929 sizeof(struct bnx2x_vf_mbx_msg)); 2930 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, 2931 sizeof(union pf_vf_bulletin)); 2932 } 2933 2934 int bnx2x_vf_pci_alloc(struct bnx2x *bp) 2935 { 2936 mutex_init(&bp->vf2pf_mutex); 2937 2938 /* allocate vf2pf mailbox for vf to pf channel */ 2939 bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping, 2940 sizeof(struct bnx2x_vf_mbx_msg)); 2941 if (!bp->vf2pf_mbox) 2942 goto alloc_mem_err; 2943 2944 /* allocate pf 2 vf bulletin board */ 2945 bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping, 2946 sizeof(union pf_vf_bulletin)); 2947 if (!bp->pf2vf_bulletin) 2948 goto alloc_mem_err; 2949 2950 return 0; 2951 2952 alloc_mem_err: 2953 bnx2x_vf_pci_dealloc(bp); 2954 return -ENOMEM; 2955 } 2956 2957 void bnx2x_iov_channel_down(struct bnx2x *bp) 2958 { 2959 int vf_idx; 2960 struct pf_vf_bulletin_content *bulletin; 2961 2962 if (!IS_SRIOV(bp)) 2963 return; 2964 2965 for_each_vf(bp, vf_idx) { 2966 /* locate this VFs bulletin board and update the channel down 2967 * bit 2968 */ 2969 bulletin = BP_VF_BULLETIN(bp, vf_idx); 2970 bulletin->valid_bitmap |= 1 << CHANNEL_DOWN; 2971 2972 /* update vf bulletin board */ 2973 bnx2x_post_vf_bulletin(bp, vf_idx); 2974 } 2975 } 2976 2977 void bnx2x_iov_task(struct work_struct *work) 2978 { 2979 struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work); 2980 2981 if (!netif_running(bp->dev)) 2982 return; 2983 2984 if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR, 2985 &bp->iov_task_state)) 2986 bnx2x_vf_handle_flr_event(bp); 2987 2988 if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG, 2989 &bp->iov_task_state)) 2990 bnx2x_vf_mbx(bp); 2991 } 2992 2993 void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) 2994 { 2995 smp_mb__before_atomic(); 2996 set_bit(flag, &bp->iov_task_state); 2997 smp_mb__after_atomic(); 2998 DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); 2999 queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0); 3000 } 3001