1 /* bnx2x_sriov.c: Broadcom Everest network driver. 2 * 3 * Copyright 2009-2013 Broadcom Corporation 4 * 5 * Unless you and Broadcom execute a separate written software license 6 * agreement governing use of this software, this software is licensed to you 7 * under the terms of the GNU General Public License version 2, available 8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). 9 * 10 * Notwithstanding the above, under no circumstances may you combine this 11 * software in any way with any other Broadcom software provided under a 12 * license other than the GPL, without Broadcom's express prior written 13 * consent. 14 * 15 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 16 * Written by: Shmulik Ravid <shmulikr@broadcom.com> 17 * Ariel Elior <ariele@broadcom.com> 18 * 19 */ 20 #include "bnx2x.h" 21 #include "bnx2x_init.h" 22 #include "bnx2x_cmn.h" 23 #include "bnx2x_sp.h" 24 #include <linux/crc32.h> 25 #include <linux/if_vlan.h> 26 27 /* General service functions */ 28 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, 29 u16 pf_id) 30 { 31 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), 32 pf_id); 33 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), 34 pf_id); 35 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), 36 pf_id); 37 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), 38 pf_id); 39 } 40 41 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, 42 u8 enable) 43 { 44 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), 45 enable); 46 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), 47 enable); 48 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid), 49 enable); 50 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), 51 enable); 52 } 53 54 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) 55 { 56 int idx; 57 58 for_each_vf(bp, idx) 59 if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid) 60 break; 61 return idx; 62 } 63 64 static 65 struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) 66 { 67 u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid); 68 return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL; 69 } 70 71 static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf, 72 u8 igu_sb_id, u8 segment, u16 index, u8 op, 73 u8 update) 74 { 75 /* acking a VF sb through the PF - use the GRC */ 76 u32 ctl; 77 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 78 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 79 u32 func_encode = vf->abs_vfid; 80 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id; 81 struct igu_regular cmd_data = {0}; 82 83 cmd_data.sb_id_and_flags = 84 ((index << IGU_REGULAR_SB_INDEX_SHIFT) | 85 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | 86 (update << IGU_REGULAR_BUPDATE_SHIFT) | 87 (op << IGU_REGULAR_ENABLE_INT_SHIFT)); 88 89 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | 90 func_encode << IGU_CTRL_REG_FID_SHIFT | 91 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; 92 93 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 94 cmd_data.sb_id_and_flags, igu_addr_data); 95 REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags); 96 mmiowb(); 97 barrier(); 98 99 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 100 ctl, igu_addr_ctl); 101 REG_WR(bp, igu_addr_ctl, ctl); 102 mmiowb(); 103 barrier(); 104 } 105 106 static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp, 107 struct bnx2x_virtf *vf, 108 bool print_err) 109 { 110 if (!bnx2x_leading_vfq(vf, sp_initialized)) { 111 if (print_err) 112 BNX2X_ERR("Slowpath objects not yet initialized!\n"); 113 else 114 DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n"); 115 return false; 116 } 117 return true; 118 } 119 120 /* VFOP operations states */ 121 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, 122 struct bnx2x_queue_init_params *init_params, 123 struct bnx2x_queue_setup_params *setup_params, 124 u16 q_idx, u16 sb_idx) 125 { 126 DP(BNX2X_MSG_IOV, 127 "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d", 128 vf->abs_vfid, 129 q_idx, 130 sb_idx, 131 init_params->tx.sb_cq_index, 132 init_params->tx.hc_rate, 133 setup_params->flags, 134 setup_params->txq_params.traffic_type); 135 } 136 137 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, 138 struct bnx2x_queue_init_params *init_params, 139 struct bnx2x_queue_setup_params *setup_params, 140 u16 q_idx, u16 sb_idx) 141 { 142 struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params; 143 144 DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n" 145 "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n", 146 vf->abs_vfid, 147 q_idx, 148 sb_idx, 149 init_params->rx.sb_cq_index, 150 init_params->rx.hc_rate, 151 setup_params->gen_params.mtu, 152 rxq_params->buf_sz, 153 rxq_params->sge_buf_sz, 154 rxq_params->max_sges_pkt, 155 rxq_params->tpa_agg_sz, 156 setup_params->flags, 157 rxq_params->drop_flags, 158 rxq_params->cache_line_log); 159 } 160 161 void bnx2x_vfop_qctor_prep(struct bnx2x *bp, 162 struct bnx2x_virtf *vf, 163 struct bnx2x_vf_queue *q, 164 struct bnx2x_vf_queue_construct_params *p, 165 unsigned long q_type) 166 { 167 struct bnx2x_queue_init_params *init_p = &p->qstate.params.init; 168 struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup; 169 170 /* INIT */ 171 172 /* Enable host coalescing in the transition to INIT state */ 173 if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags)) 174 __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags); 175 176 if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags)) 177 __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags); 178 179 /* FW SB ID */ 180 init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx); 181 init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx); 182 183 /* context */ 184 init_p->cxts[0] = q->cxt; 185 186 /* SETUP */ 187 188 /* Setup-op general parameters */ 189 setup_p->gen_params.spcl_id = vf->sp_cl_id; 190 setup_p->gen_params.stat_id = vfq_stat_id(vf, q); 191 192 /* Setup-op pause params: 193 * Nothing to do, the pause thresholds are set by default to 0 which 194 * effectively turns off the feature for this queue. We don't want 195 * one queue (VF) to interfering with another queue (another VF) 196 */ 197 if (vf->cfg_flags & VF_CFG_FW_FC) 198 BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n", 199 vf->abs_vfid); 200 /* Setup-op flags: 201 * collect statistics, zero statistics, local-switching, security, 202 * OV for Flex10, RSS and MCAST for leading 203 */ 204 if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags)) 205 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags); 206 207 /* for VFs, enable tx switching, bd coherency, and mac address 208 * anti-spoofing 209 */ 210 __set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags); 211 __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags); 212 __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags); 213 214 /* Setup-op rx parameters */ 215 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) { 216 struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params; 217 218 rxq_p->cl_qzone_id = vfq_qzone_id(vf, q); 219 rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx); 220 rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid); 221 222 if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags)) 223 rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES; 224 } 225 226 /* Setup-op tx parameters */ 227 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) { 228 setup_p->txq_params.tss_leading_cl_id = vf->leading_rss; 229 setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx); 230 } 231 } 232 233 static int bnx2x_vf_queue_create(struct bnx2x *bp, 234 struct bnx2x_virtf *vf, int qid, 235 struct bnx2x_vf_queue_construct_params *qctor) 236 { 237 struct bnx2x_queue_state_params *q_params; 238 int rc = 0; 239 240 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); 241 242 /* Prepare ramrod information */ 243 q_params = &qctor->qstate; 244 q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj); 245 set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags); 246 247 if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == 248 BNX2X_Q_LOGICAL_STATE_ACTIVE) { 249 DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n"); 250 goto out; 251 } 252 253 /* Run Queue 'construction' ramrods */ 254 q_params->cmd = BNX2X_Q_CMD_INIT; 255 rc = bnx2x_queue_state_change(bp, q_params); 256 if (rc) 257 goto out; 258 259 memcpy(&q_params->params.setup, &qctor->prep_qsetup, 260 sizeof(struct bnx2x_queue_setup_params)); 261 q_params->cmd = BNX2X_Q_CMD_SETUP; 262 rc = bnx2x_queue_state_change(bp, q_params); 263 if (rc) 264 goto out; 265 266 /* enable interrupts */ 267 bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)), 268 USTORM_ID, 0, IGU_INT_ENABLE, 0); 269 out: 270 return rc; 271 } 272 273 static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf, 274 int qid) 275 { 276 enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT, 277 BNX2X_Q_CMD_TERMINATE, 278 BNX2X_Q_CMD_CFC_DEL}; 279 struct bnx2x_queue_state_params q_params; 280 int rc, i; 281 282 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 283 284 /* Prepare ramrod information */ 285 memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params)); 286 q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj); 287 set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 288 289 if (bnx2x_get_q_logical_state(bp, q_params.q_obj) == 290 BNX2X_Q_LOGICAL_STATE_STOPPED) { 291 DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n"); 292 goto out; 293 } 294 295 /* Run Queue 'destruction' ramrods */ 296 for (i = 0; i < ARRAY_SIZE(cmds); i++) { 297 q_params.cmd = cmds[i]; 298 rc = bnx2x_queue_state_change(bp, &q_params); 299 if (rc) { 300 BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]); 301 return rc; 302 } 303 } 304 out: 305 /* Clean Context */ 306 if (bnx2x_vfq(vf, qid, cxt)) { 307 bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0; 308 bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0; 309 } 310 311 return 0; 312 } 313 314 static void 315 bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid) 316 { 317 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 318 if (vf) { 319 /* the first igu entry belonging to VFs of this PF */ 320 if (!BP_VFDB(bp)->first_vf_igu_entry) 321 BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id; 322 323 /* the first igu entry belonging to this VF */ 324 if (!vf_sb_count(vf)) 325 vf->igu_base_id = igu_sb_id; 326 327 ++vf_sb_count(vf); 328 ++vf->sb_count; 329 } 330 BP_VFDB(bp)->vf_sbs_pool++; 331 } 332 333 static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp, 334 struct bnx2x_vlan_mac_obj *obj, 335 atomic_t *counter) 336 { 337 struct list_head *pos; 338 int read_lock; 339 int cnt = 0; 340 341 read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj); 342 if (read_lock) 343 DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n"); 344 345 list_for_each(pos, &obj->head) 346 cnt++; 347 348 if (!read_lock) 349 bnx2x_vlan_mac_h_read_unlock(bp, obj); 350 351 atomic_set(counter, cnt); 352 } 353 354 static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf, 355 int qid, bool drv_only, bool mac) 356 { 357 struct bnx2x_vlan_mac_ramrod_params ramrod; 358 int rc; 359 360 DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid, 361 mac ? "MACs" : "VLANs"); 362 363 /* Prepare ramrod params */ 364 memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); 365 if (mac) { 366 set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); 367 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); 368 } else { 369 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 370 &ramrod.user_req.vlan_mac_flags); 371 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); 372 } 373 ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL; 374 375 set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); 376 if (drv_only) 377 set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); 378 else 379 set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); 380 381 /* Start deleting */ 382 rc = ramrod.vlan_mac_obj->delete_all(bp, 383 ramrod.vlan_mac_obj, 384 &ramrod.user_req.vlan_mac_flags, 385 &ramrod.ramrod_flags); 386 if (rc) { 387 BNX2X_ERR("Failed to delete all %s\n", 388 mac ? "MACs" : "VLANs"); 389 return rc; 390 } 391 392 /* Clear the vlan counters */ 393 if (!mac) 394 atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0); 395 396 return 0; 397 } 398 399 static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, 400 struct bnx2x_virtf *vf, int qid, 401 struct bnx2x_vf_mac_vlan_filter *filter, 402 bool drv_only) 403 { 404 struct bnx2x_vlan_mac_ramrod_params ramrod; 405 int rc; 406 407 DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n", 408 vf->abs_vfid, filter->add ? "Adding" : "Deleting", 409 filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN"); 410 411 /* Prepare ramrod params */ 412 memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); 413 if (filter->type == BNX2X_VF_FILTER_VLAN) { 414 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 415 &ramrod.user_req.vlan_mac_flags); 416 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); 417 ramrod.user_req.u.vlan.vlan = filter->vid; 418 } else { 419 set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); 420 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); 421 memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN); 422 } 423 ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD : 424 BNX2X_VLAN_MAC_DEL; 425 426 /* Verify there are available vlan credits */ 427 if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN && 428 (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >= 429 vf_vlan_rules_cnt(vf))) { 430 BNX2X_ERR("No credits for vlan [%d >= %d]\n", 431 atomic_read(&bnx2x_vfq(vf, qid, vlan_count)), 432 vf_vlan_rules_cnt(vf)); 433 return -ENOMEM; 434 } 435 436 set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); 437 if (drv_only) 438 set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); 439 else 440 set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); 441 442 /* Add/Remove the filter */ 443 rc = bnx2x_config_vlan_mac(bp, &ramrod); 444 if (rc && rc != -EEXIST) { 445 BNX2X_ERR("Failed to %s %s\n", 446 filter->add ? "add" : "delete", 447 filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : 448 "VLAN"); 449 return rc; 450 } 451 452 /* Update the vlan counters */ 453 if (filter->type == BNX2X_VF_FILTER_VLAN) 454 bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj, 455 &bnx2x_vfq(vf, qid, vlan_count)); 456 457 return 0; 458 } 459 460 int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, 461 struct bnx2x_vf_mac_vlan_filters *filters, 462 int qid, bool drv_only) 463 { 464 int rc = 0, i; 465 466 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 467 468 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) 469 return -EINVAL; 470 471 /* Prepare ramrod params */ 472 for (i = 0; i < filters->count; i++) { 473 rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, 474 &filters->filters[i], drv_only); 475 if (rc) 476 break; 477 } 478 479 /* Rollback if needed */ 480 if (i != filters->count) { 481 BNX2X_ERR("Managed only %d/%d filters - rolling back\n", 482 i, filters->count + 1); 483 while (--i >= 0) { 484 filters->filters[i].add = !filters->filters[i].add; 485 bnx2x_vf_mac_vlan_config(bp, vf, qid, 486 &filters->filters[i], 487 drv_only); 488 } 489 } 490 491 /* It's our responsibility to free the filters */ 492 kfree(filters); 493 494 return rc; 495 } 496 497 int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, 498 struct bnx2x_vf_queue_construct_params *qctor) 499 { 500 int rc; 501 502 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); 503 504 rc = bnx2x_vf_queue_create(bp, vf, qid, qctor); 505 if (rc) 506 goto op_err; 507 508 /* Configure vlan0 for leading queue */ 509 if (!qid) { 510 struct bnx2x_vf_mac_vlan_filter filter; 511 512 memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter)); 513 filter.type = BNX2X_VF_FILTER_VLAN; 514 filter.add = true; 515 filter.vid = 0; 516 rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false); 517 if (rc) 518 goto op_err; 519 } 520 521 /* Schedule the configuration of any pending vlan filters */ 522 vf->cfg_flags |= VF_CFG_VLAN; 523 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN, 524 BNX2X_MSG_IOV); 525 return 0; 526 op_err: 527 BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); 528 return rc; 529 } 530 531 static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf, 532 int qid) 533 { 534 int rc; 535 536 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); 537 538 /* If needed, clean the filtering data base */ 539 if ((qid == LEADING_IDX) && 540 bnx2x_validate_vf_sp_objs(bp, vf, false)) { 541 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false); 542 if (rc) 543 goto op_err; 544 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true); 545 if (rc) 546 goto op_err; 547 } 548 549 /* Terminate queue */ 550 if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) { 551 struct bnx2x_queue_state_params qstate; 552 553 memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); 554 qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); 555 qstate.q_obj->state = BNX2X_Q_STATE_STOPPED; 556 qstate.cmd = BNX2X_Q_CMD_TERMINATE; 557 set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); 558 rc = bnx2x_queue_state_change(bp, &qstate); 559 if (rc) 560 goto op_err; 561 } 562 563 return 0; 564 op_err: 565 BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); 566 return rc; 567 } 568 569 int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, 570 bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only) 571 { 572 struct bnx2x_mcast_list_elem *mc = NULL; 573 struct bnx2x_mcast_ramrod_params mcast; 574 int rc, i; 575 576 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 577 578 /* Prepare Multicast command */ 579 memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params)); 580 mcast.mcast_obj = &vf->mcast_obj; 581 if (drv_only) 582 set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags); 583 else 584 set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags); 585 if (mc_num) { 586 mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem), 587 GFP_KERNEL); 588 if (!mc) { 589 BNX2X_ERR("Cannot Configure mulicasts due to lack of memory\n"); 590 return -ENOMEM; 591 } 592 } 593 594 /* clear existing mcasts */ 595 mcast.mcast_list_len = vf->mcast_list_len; 596 vf->mcast_list_len = mc_num; 597 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL); 598 if (rc) { 599 BNX2X_ERR("Failed to remove multicasts\n"); 600 if (mc) 601 kfree(mc); 602 return rc; 603 } 604 605 /* update mcast list on the ramrod params */ 606 if (mc_num) { 607 INIT_LIST_HEAD(&mcast.mcast_list); 608 for (i = 0; i < mc_num; i++) { 609 mc[i].mac = mcasts[i]; 610 list_add_tail(&mc[i].link, 611 &mcast.mcast_list); 612 } 613 614 /* add new mcasts */ 615 mcast.mcast_list_len = mc_num; 616 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD); 617 if (rc) 618 BNX2X_ERR("Faled to add multicasts\n"); 619 kfree(mc); 620 } 621 622 return rc; 623 } 624 625 static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid, 626 struct bnx2x_rx_mode_ramrod_params *ramrod, 627 struct bnx2x_virtf *vf, 628 unsigned long accept_flags) 629 { 630 struct bnx2x_vf_queue *vfq = vfq_get(vf, qid); 631 632 memset(ramrod, 0, sizeof(*ramrod)); 633 ramrod->cid = vfq->cid; 634 ramrod->cl_id = vfq_cl_id(vf, vfq); 635 ramrod->rx_mode_obj = &bp->rx_mode_obj; 636 ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid); 637 ramrod->rx_accept_flags = accept_flags; 638 ramrod->tx_accept_flags = accept_flags; 639 ramrod->pstate = &vf->filter_state; 640 ramrod->state = BNX2X_FILTER_RX_MODE_PENDING; 641 642 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); 643 set_bit(RAMROD_RX, &ramrod->ramrod_flags); 644 set_bit(RAMROD_TX, &ramrod->ramrod_flags); 645 646 ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2); 647 ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); 648 } 649 650 int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf, 651 int qid, unsigned long accept_flags) 652 { 653 struct bnx2x_rx_mode_ramrod_params ramrod; 654 655 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 656 657 bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags); 658 set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); 659 vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags; 660 return bnx2x_config_rx_mode(bp, &ramrod); 661 } 662 663 int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid) 664 { 665 int rc; 666 667 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); 668 669 /* Remove all classification configuration for leading queue */ 670 if (qid == LEADING_IDX) { 671 rc = bnx2x_vf_rxmode(bp, vf, qid, 0); 672 if (rc) 673 goto op_err; 674 675 /* Remove filtering if feasible */ 676 if (bnx2x_validate_vf_sp_objs(bp, vf, true)) { 677 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, 678 false, false); 679 if (rc) 680 goto op_err; 681 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, 682 false, true); 683 if (rc) 684 goto op_err; 685 rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false); 686 if (rc) 687 goto op_err; 688 } 689 } 690 691 /* Destroy queue */ 692 rc = bnx2x_vf_queue_destroy(bp, vf, qid); 693 if (rc) 694 goto op_err; 695 return rc; 696 op_err: 697 BNX2X_ERR("vf[%d:%d] error: rc %d\n", 698 vf->abs_vfid, qid, rc); 699 return rc; 700 } 701 702 /* VF enable primitives 703 * when pretend is required the caller is responsible 704 * for calling pretend prior to calling these routines 705 */ 706 707 /* internal vf enable - until vf is enabled internally all transactions 708 * are blocked. This routine should always be called last with pretend. 709 */ 710 static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable) 711 { 712 REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0); 713 } 714 715 /* clears vf error in all semi blocks */ 716 static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid) 717 { 718 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid); 719 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid); 720 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid); 721 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid); 722 } 723 724 static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid) 725 { 726 u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5; 727 u32 was_err_reg = 0; 728 729 switch (was_err_group) { 730 case 0: 731 was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR; 732 break; 733 case 1: 734 was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR; 735 break; 736 case 2: 737 was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR; 738 break; 739 case 3: 740 was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR; 741 break; 742 } 743 REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f)); 744 } 745 746 static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf) 747 { 748 int i; 749 u32 val; 750 751 /* Set VF masks and configuration - pretend */ 752 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); 753 754 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); 755 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); 756 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0); 757 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0); 758 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0); 759 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0); 760 761 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); 762 val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN); 763 if (vf->cfg_flags & VF_CFG_INT_SIMD) 764 val |= IGU_VF_CONF_SINGLE_ISR_EN; 765 val &= ~IGU_VF_CONF_PARENT_MASK; 766 val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT; 767 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); 768 769 DP(BNX2X_MSG_IOV, 770 "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n", 771 vf->abs_vfid, val); 772 773 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 774 775 /* iterate over all queues, clear sb consumer */ 776 for (i = 0; i < vf_sb_count(vf); i++) { 777 u8 igu_sb_id = vf_igu_sb(vf, i); 778 779 /* zero prod memory */ 780 REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0); 781 782 /* clear sb state machine */ 783 bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id, 784 false /* VF */); 785 786 /* disable + update */ 787 bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0, 788 IGU_INT_DISABLE, 1); 789 } 790 } 791 792 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid) 793 { 794 /* set the VF-PF association in the FW */ 795 storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp)); 796 storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1); 797 798 /* clear vf errors*/ 799 bnx2x_vf_semi_clear_err(bp, abs_vfid); 800 bnx2x_vf_pglue_clear_err(bp, abs_vfid); 801 802 /* internal vf-enable - pretend */ 803 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid)); 804 DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid); 805 bnx2x_vf_enable_internal(bp, true); 806 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 807 } 808 809 static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf) 810 { 811 /* Reset vf in IGU interrupts are still disabled */ 812 bnx2x_vf_igu_reset(bp, vf); 813 814 /* pretend to enable the vf with the PBF */ 815 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); 816 REG_WR(bp, PBF_REG_DISABLE_VF, 0); 817 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 818 } 819 820 static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid) 821 { 822 struct pci_dev *dev; 823 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 824 825 if (!vf) 826 return false; 827 828 dev = pci_get_bus_and_slot(vf->bus, vf->devfn); 829 if (dev) 830 return bnx2x_is_pcie_pending(dev); 831 return false; 832 } 833 834 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) 835 { 836 /* Verify no pending pci transactions */ 837 if (bnx2x_vf_is_pcie_pending(bp, abs_vfid)) 838 BNX2X_ERR("PCIE Transactions still pending\n"); 839 840 return 0; 841 } 842 843 static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp, 844 struct bnx2x_virtf *vf, 845 int new) 846 { 847 int num = vf_vlan_rules_cnt(vf); 848 int diff = new - num; 849 bool rc = true; 850 851 DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n", 852 vf->abs_vfid, new, num); 853 854 if (diff > 0) 855 rc = bp->vlans_pool.get(&bp->vlans_pool, diff); 856 else if (diff < 0) 857 rc = bp->vlans_pool.put(&bp->vlans_pool, -diff); 858 859 if (rc) 860 vf_vlan_rules_cnt(vf) = new; 861 else 862 DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n", 863 vf->abs_vfid); 864 } 865 866 /* must be called after the number of PF queues and the number of VFs are 867 * both known 868 */ 869 static void 870 bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) 871 { 872 struct vf_pf_resc_request *resc = &vf->alloc_resc; 873 u16 vlan_count = 0; 874 875 /* will be set only during VF-ACQUIRE */ 876 resc->num_rxqs = 0; 877 resc->num_txqs = 0; 878 879 /* no credit calculations for macs (just yet) */ 880 resc->num_mac_filters = 1; 881 882 /* divvy up vlan rules */ 883 bnx2x_iov_re_set_vlan_filters(bp, vf, 0); 884 vlan_count = bp->vlans_pool.check(&bp->vlans_pool); 885 vlan_count = 1 << ilog2(vlan_count); 886 bnx2x_iov_re_set_vlan_filters(bp, vf, 887 vlan_count / BNX2X_NR_VIRTFN(bp)); 888 889 /* no real limitation */ 890 resc->num_mc_filters = 0; 891 892 /* num_sbs already set */ 893 resc->num_sbs = vf->sb_count; 894 } 895 896 /* FLR routines: */ 897 static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) 898 { 899 /* reset the state variables */ 900 bnx2x_iov_static_resc(bp, vf); 901 vf->state = VF_FREE; 902 } 903 904 static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf) 905 { 906 u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp); 907 908 /* DQ usage counter */ 909 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); 910 bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT, 911 "DQ VF usage counter timed out", 912 poll_cnt); 913 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 914 915 /* FW cleanup command - poll for the results */ 916 if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid), 917 poll_cnt)) 918 BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid); 919 920 /* verify TX hw is flushed */ 921 bnx2x_tx_hw_flushed(bp, poll_cnt); 922 } 923 924 static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) 925 { 926 int rc, i; 927 928 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 929 930 /* the cleanup operations are valid if and only if the VF 931 * was first acquired. 932 */ 933 for (i = 0; i < vf_rxq_count(vf); i++) { 934 rc = bnx2x_vf_queue_flr(bp, vf, i); 935 if (rc) 936 goto out; 937 } 938 939 /* remove multicasts */ 940 bnx2x_vf_mcast(bp, vf, NULL, 0, true); 941 942 /* dispatch final cleanup and wait for HW queues to flush */ 943 bnx2x_vf_flr_clnup_hw(bp, vf); 944 945 /* release VF resources */ 946 bnx2x_vf_free_resc(bp, vf); 947 948 /* re-open the mailbox */ 949 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); 950 return; 951 out: 952 BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n", 953 vf->abs_vfid, i, rc); 954 } 955 956 static void bnx2x_vf_flr_clnup(struct bnx2x *bp) 957 { 958 struct bnx2x_virtf *vf; 959 int i; 960 961 for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) { 962 /* VF should be RESET & in FLR cleanup states */ 963 if (bnx2x_vf(bp, i, state) != VF_RESET || 964 !bnx2x_vf(bp, i, flr_clnup_stage)) 965 continue; 966 967 DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", 968 i, BNX2X_NR_VIRTFN(bp)); 969 970 vf = BP_VF(bp, i); 971 972 /* lock the vf pf channel */ 973 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); 974 975 /* invoke the VF FLR SM */ 976 bnx2x_vf_flr(bp, vf); 977 978 /* mark the VF to be ACKED and continue */ 979 vf->flr_clnup_stage = false; 980 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); 981 } 982 983 /* Acknowledge the handled VFs. 984 * we are acknowledge all the vfs which an flr was requested for, even 985 * if amongst them there are such that we never opened, since the mcp 986 * will interrupt us immediately again if we only ack some of the bits, 987 * resulting in an endless loop. This can happen for example in KVM 988 * where an 'all ones' flr request is sometimes given by hyper visor 989 */ 990 DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n", 991 bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]); 992 for (i = 0; i < FLRD_VFS_DWORDS; i++) 993 SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 994 bp->vfdb->flrd_vfs[i]); 995 996 bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0); 997 998 /* clear the acked bits - better yet if the MCP implemented 999 * write to clear semantics 1000 */ 1001 for (i = 0; i < FLRD_VFS_DWORDS; i++) 1002 SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0); 1003 } 1004 1005 void bnx2x_vf_handle_flr_event(struct bnx2x *bp) 1006 { 1007 int i; 1008 1009 /* Read FLR'd VFs */ 1010 for (i = 0; i < FLRD_VFS_DWORDS; i++) 1011 bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]); 1012 1013 DP(BNX2X_MSG_MCP, 1014 "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n", 1015 bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]); 1016 1017 for_each_vf(bp, i) { 1018 struct bnx2x_virtf *vf = BP_VF(bp, i); 1019 u32 reset = 0; 1020 1021 if (vf->abs_vfid < 32) 1022 reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid); 1023 else 1024 reset = bp->vfdb->flrd_vfs[1] & 1025 (1 << (vf->abs_vfid - 32)); 1026 1027 if (reset) { 1028 /* set as reset and ready for cleanup */ 1029 vf->state = VF_RESET; 1030 vf->flr_clnup_stage = true; 1031 1032 DP(BNX2X_MSG_IOV, 1033 "Initiating Final cleanup for VF %d\n", 1034 vf->abs_vfid); 1035 } 1036 } 1037 1038 /* do the FLR cleanup for all marked VFs*/ 1039 bnx2x_vf_flr_clnup(bp); 1040 } 1041 1042 /* IOV global initialization routines */ 1043 void bnx2x_iov_init_dq(struct bnx2x *bp) 1044 { 1045 if (!IS_SRIOV(bp)) 1046 return; 1047 1048 /* Set the DQ such that the CID reflect the abs_vfid */ 1049 REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0); 1050 REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS)); 1051 1052 /* Set VFs starting CID. If its > 0 the preceding CIDs are belong to 1053 * the PF L2 queues 1054 */ 1055 REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID); 1056 1057 /* The VF window size is the log2 of the max number of CIDs per VF */ 1058 REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND); 1059 1060 /* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match 1061 * the Pf doorbell size although the 2 are independent. 1062 */ 1063 REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3); 1064 1065 /* No security checks for now - 1066 * configure single rule (out of 16) mask = 0x1, value = 0x0, 1067 * CID range 0 - 0x1ffff 1068 */ 1069 REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1); 1070 REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0); 1071 REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); 1072 REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); 1073 1074 /* set the VF doorbell threshold */ 1075 REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4); 1076 } 1077 1078 void bnx2x_iov_init_dmae(struct bnx2x *bp) 1079 { 1080 if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) 1081 REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); 1082 } 1083 1084 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) 1085 { 1086 struct pci_dev *dev = bp->pdev; 1087 struct bnx2x_sriov *iov = &bp->vfdb->sriov; 1088 1089 return dev->bus->number + ((dev->devfn + iov->offset + 1090 iov->stride * vfid) >> 8); 1091 } 1092 1093 static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid) 1094 { 1095 struct pci_dev *dev = bp->pdev; 1096 struct bnx2x_sriov *iov = &bp->vfdb->sriov; 1097 1098 return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff; 1099 } 1100 1101 static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf) 1102 { 1103 int i, n; 1104 struct pci_dev *dev = bp->pdev; 1105 struct bnx2x_sriov *iov = &bp->vfdb->sriov; 1106 1107 for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) { 1108 u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i); 1109 u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i); 1110 1111 size /= iov->total; 1112 vf->bars[n].bar = start + size * vf->abs_vfid; 1113 vf->bars[n].size = size; 1114 } 1115 } 1116 1117 static int bnx2x_ari_enabled(struct pci_dev *dev) 1118 { 1119 return dev->bus->self && dev->bus->self->ari_enabled; 1120 } 1121 1122 static void 1123 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) 1124 { 1125 int sb_id; 1126 u32 val; 1127 u8 fid, current_pf = 0; 1128 1129 /* IGU in normal mode - read CAM */ 1130 for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) { 1131 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4); 1132 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) 1133 continue; 1134 fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); 1135 if (fid & IGU_FID_ENCODE_IS_PF) 1136 current_pf = fid & IGU_FID_PF_NUM_MASK; 1137 else if (current_pf == BP_FUNC(bp)) 1138 bnx2x_vf_set_igu_info(bp, sb_id, 1139 (fid & IGU_FID_VF_NUM_MASK)); 1140 DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", 1141 ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"), 1142 ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) : 1143 (fid & IGU_FID_VF_NUM_MASK)), sb_id, 1144 GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)); 1145 } 1146 DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool); 1147 } 1148 1149 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp) 1150 { 1151 if (bp->vfdb) { 1152 kfree(bp->vfdb->vfqs); 1153 kfree(bp->vfdb->vfs); 1154 kfree(bp->vfdb); 1155 } 1156 bp->vfdb = NULL; 1157 } 1158 1159 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov) 1160 { 1161 int pos; 1162 struct pci_dev *dev = bp->pdev; 1163 1164 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); 1165 if (!pos) { 1166 BNX2X_ERR("failed to find SRIOV capability in device\n"); 1167 return -ENODEV; 1168 } 1169 1170 iov->pos = pos; 1171 DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos); 1172 pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl); 1173 pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total); 1174 pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial); 1175 pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset); 1176 pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride); 1177 pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); 1178 pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap); 1179 pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); 1180 1181 return 0; 1182 } 1183 1184 static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov) 1185 { 1186 u32 val; 1187 1188 /* read the SRIOV capability structure 1189 * The fields can be read via configuration read or 1190 * directly from the device (starting at offset PCICFG_OFFSET) 1191 */ 1192 if (bnx2x_sriov_pci_cfg_info(bp, iov)) 1193 return -ENODEV; 1194 1195 /* get the number of SRIOV bars */ 1196 iov->nres = 0; 1197 1198 /* read the first_vfid */ 1199 val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF); 1200 iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK) 1201 * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp)); 1202 1203 DP(BNX2X_MSG_IOV, 1204 "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n", 1205 BP_FUNC(bp), 1206 iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total, 1207 iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz); 1208 1209 return 0; 1210 } 1211 1212 /* must be called after PF bars are mapped */ 1213 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, 1214 int num_vfs_param) 1215 { 1216 int err, i; 1217 struct bnx2x_sriov *iov; 1218 struct pci_dev *dev = bp->pdev; 1219 1220 bp->vfdb = NULL; 1221 1222 /* verify is pf */ 1223 if (IS_VF(bp)) 1224 return 0; 1225 1226 /* verify sriov capability is present in configuration space */ 1227 if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV)) 1228 return 0; 1229 1230 /* verify chip revision */ 1231 if (CHIP_IS_E1x(bp)) 1232 return 0; 1233 1234 /* check if SRIOV support is turned off */ 1235 if (!num_vfs_param) 1236 return 0; 1237 1238 /* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */ 1239 if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) { 1240 BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n", 1241 BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID); 1242 return 0; 1243 } 1244 1245 /* SRIOV can be enabled only with MSIX */ 1246 if (int_mode_param == BNX2X_INT_MODE_MSI || 1247 int_mode_param == BNX2X_INT_MODE_INTX) { 1248 BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n"); 1249 return 0; 1250 } 1251 1252 err = -EIO; 1253 /* verify ari is enabled */ 1254 if (!bnx2x_ari_enabled(bp->pdev)) { 1255 BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n"); 1256 return 0; 1257 } 1258 1259 /* verify igu is in normal mode */ 1260 if (CHIP_INT_MODE_IS_BC(bp)) { 1261 BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n"); 1262 return 0; 1263 } 1264 1265 /* allocate the vfs database */ 1266 bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL); 1267 if (!bp->vfdb) { 1268 BNX2X_ERR("failed to allocate vf database\n"); 1269 err = -ENOMEM; 1270 goto failed; 1271 } 1272 1273 /* get the sriov info - Linux already collected all the pertinent 1274 * information, however the sriov structure is for the private use 1275 * of the pci module. Also we want this information regardless 1276 * of the hyper-visor. 1277 */ 1278 iov = &(bp->vfdb->sriov); 1279 err = bnx2x_sriov_info(bp, iov); 1280 if (err) 1281 goto failed; 1282 1283 /* SR-IOV capability was enabled but there are no VFs*/ 1284 if (iov->total == 0) 1285 goto failed; 1286 1287 iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param); 1288 1289 DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n", 1290 num_vfs_param, iov->nr_virtfn); 1291 1292 /* allocate the vf array */ 1293 bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) * 1294 BNX2X_NR_VIRTFN(bp), GFP_KERNEL); 1295 if (!bp->vfdb->vfs) { 1296 BNX2X_ERR("failed to allocate vf array\n"); 1297 err = -ENOMEM; 1298 goto failed; 1299 } 1300 1301 /* Initial VF init - index and abs_vfid - nr_virtfn must be set */ 1302 for_each_vf(bp, i) { 1303 bnx2x_vf(bp, i, index) = i; 1304 bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; 1305 bnx2x_vf(bp, i, state) = VF_FREE; 1306 mutex_init(&bnx2x_vf(bp, i, op_mutex)); 1307 bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; 1308 } 1309 1310 /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ 1311 bnx2x_get_vf_igu_cam_info(bp); 1312 1313 /* allocate the queue arrays for all VFs */ 1314 bp->vfdb->vfqs = kzalloc( 1315 BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue), 1316 GFP_KERNEL); 1317 1318 DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs); 1319 1320 if (!bp->vfdb->vfqs) { 1321 BNX2X_ERR("failed to allocate vf queue array\n"); 1322 err = -ENOMEM; 1323 goto failed; 1324 } 1325 1326 /* Prepare the VFs event synchronization mechanism */ 1327 mutex_init(&bp->vfdb->event_mutex); 1328 1329 return 0; 1330 failed: 1331 DP(BNX2X_MSG_IOV, "Failed err=%d\n", err); 1332 __bnx2x_iov_free_vfdb(bp); 1333 return err; 1334 } 1335 1336 void bnx2x_iov_remove_one(struct bnx2x *bp) 1337 { 1338 int vf_idx; 1339 1340 /* if SRIOV is not enabled there's nothing to do */ 1341 if (!IS_SRIOV(bp)) 1342 return; 1343 1344 DP(BNX2X_MSG_IOV, "about to call disable sriov\n"); 1345 pci_disable_sriov(bp->pdev); 1346 DP(BNX2X_MSG_IOV, "sriov disabled\n"); 1347 1348 /* disable access to all VFs */ 1349 for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) { 1350 bnx2x_pretend_func(bp, 1351 HW_VF_HANDLE(bp, 1352 bp->vfdb->sriov.first_vf_in_pf + 1353 vf_idx)); 1354 DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n", 1355 bp->vfdb->sriov.first_vf_in_pf + vf_idx); 1356 bnx2x_vf_enable_internal(bp, 0); 1357 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 1358 } 1359 1360 /* free vf database */ 1361 __bnx2x_iov_free_vfdb(bp); 1362 } 1363 1364 void bnx2x_iov_free_mem(struct bnx2x *bp) 1365 { 1366 int i; 1367 1368 if (!IS_SRIOV(bp)) 1369 return; 1370 1371 /* free vfs hw contexts */ 1372 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 1373 struct hw_dma *cxt = &bp->vfdb->context[i]; 1374 BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size); 1375 } 1376 1377 BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr, 1378 BP_VFDB(bp)->sp_dma.mapping, 1379 BP_VFDB(bp)->sp_dma.size); 1380 1381 BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr, 1382 BP_VF_MBX_DMA(bp)->mapping, 1383 BP_VF_MBX_DMA(bp)->size); 1384 1385 BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr, 1386 BP_VF_BULLETIN_DMA(bp)->mapping, 1387 BP_VF_BULLETIN_DMA(bp)->size); 1388 } 1389 1390 int bnx2x_iov_alloc_mem(struct bnx2x *bp) 1391 { 1392 size_t tot_size; 1393 int i, rc = 0; 1394 1395 if (!IS_SRIOV(bp)) 1396 return rc; 1397 1398 /* allocate vfs hw contexts */ 1399 tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) * 1400 BNX2X_CIDS_PER_VF * sizeof(union cdu_context); 1401 1402 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 1403 struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i); 1404 cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ); 1405 1406 if (cxt->size) { 1407 cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size); 1408 if (!cxt->addr) 1409 goto alloc_mem_err; 1410 } else { 1411 cxt->addr = NULL; 1412 cxt->mapping = 0; 1413 } 1414 tot_size -= cxt->size; 1415 } 1416 1417 /* allocate vfs ramrods dma memory - client_init and set_mac */ 1418 tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp); 1419 BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping, 1420 tot_size); 1421 if (!BP_VFDB(bp)->sp_dma.addr) 1422 goto alloc_mem_err; 1423 BP_VFDB(bp)->sp_dma.size = tot_size; 1424 1425 /* allocate mailboxes */ 1426 tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; 1427 BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping, 1428 tot_size); 1429 if (!BP_VF_MBX_DMA(bp)->addr) 1430 goto alloc_mem_err; 1431 1432 BP_VF_MBX_DMA(bp)->size = tot_size; 1433 1434 /* allocate local bulletin boards */ 1435 tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE; 1436 BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping, 1437 tot_size); 1438 if (!BP_VF_BULLETIN_DMA(bp)->addr) 1439 goto alloc_mem_err; 1440 1441 BP_VF_BULLETIN_DMA(bp)->size = tot_size; 1442 1443 return 0; 1444 1445 alloc_mem_err: 1446 return -ENOMEM; 1447 } 1448 1449 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, 1450 struct bnx2x_vf_queue *q) 1451 { 1452 u8 cl_id = vfq_cl_id(vf, q); 1453 u8 func_id = FW_VF_HANDLE(vf->abs_vfid); 1454 unsigned long q_type = 0; 1455 1456 set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); 1457 set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); 1458 1459 /* Queue State object */ 1460 bnx2x_init_queue_obj(bp, &q->sp_obj, 1461 cl_id, &q->cid, 1, func_id, 1462 bnx2x_vf_sp(bp, vf, q_data), 1463 bnx2x_vf_sp_map(bp, vf, q_data), 1464 q_type); 1465 1466 /* sp indication is set only when vlan/mac/etc. are initialized */ 1467 q->sp_initialized = false; 1468 1469 DP(BNX2X_MSG_IOV, 1470 "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n", 1471 vf->abs_vfid, q->sp_obj.func_id, q->cid); 1472 } 1473 1474 /* called by bnx2x_nic_load */ 1475 int bnx2x_iov_nic_init(struct bnx2x *bp) 1476 { 1477 int vfid; 1478 1479 if (!IS_SRIOV(bp)) { 1480 DP(BNX2X_MSG_IOV, "vfdb was not allocated\n"); 1481 return 0; 1482 } 1483 1484 DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn); 1485 1486 /* let FLR complete ... */ 1487 msleep(100); 1488 1489 /* initialize vf database */ 1490 for_each_vf(bp, vfid) { 1491 struct bnx2x_virtf *vf = BP_VF(bp, vfid); 1492 1493 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) * 1494 BNX2X_CIDS_PER_VF; 1495 1496 union cdu_context *base_cxt = (union cdu_context *) 1497 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + 1498 (base_vf_cid & (ILT_PAGE_CIDS-1)); 1499 1500 DP(BNX2X_MSG_IOV, 1501 "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n", 1502 vf->abs_vfid, vf_sb_count(vf), base_vf_cid, 1503 BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt); 1504 1505 /* init statically provisioned resources */ 1506 bnx2x_iov_static_resc(bp, vf); 1507 1508 /* queues are initialized during VF-ACQUIRE */ 1509 vf->filter_state = 0; 1510 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); 1511 1512 /* init mcast object - This object will be re-initialized 1513 * during VF-ACQUIRE with the proper cl_id and cid. 1514 * It needs to be initialized here so that it can be safely 1515 * handled by a subsequent FLR flow. 1516 */ 1517 vf->mcast_list_len = 0; 1518 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF, 1519 0xFF, 0xFF, 0xFF, 1520 bnx2x_vf_sp(bp, vf, mcast_rdata), 1521 bnx2x_vf_sp_map(bp, vf, mcast_rdata), 1522 BNX2X_FILTER_MCAST_PENDING, 1523 &vf->filter_state, 1524 BNX2X_OBJ_TYPE_RX_TX); 1525 1526 /* set the mailbox message addresses */ 1527 BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *) 1528 (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid * 1529 MBX_MSG_ALIGNED_SIZE); 1530 1531 BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping + 1532 vfid * MBX_MSG_ALIGNED_SIZE; 1533 1534 /* Enable vf mailbox */ 1535 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); 1536 } 1537 1538 /* Final VF init */ 1539 for_each_vf(bp, vfid) { 1540 struct bnx2x_virtf *vf = BP_VF(bp, vfid); 1541 1542 /* fill in the BDF and bars */ 1543 vf->bus = bnx2x_vf_bus(bp, vfid); 1544 vf->devfn = bnx2x_vf_devfn(bp, vfid); 1545 bnx2x_vf_set_bars(bp, vf); 1546 1547 DP(BNX2X_MSG_IOV, 1548 "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n", 1549 vf->abs_vfid, vf->bus, vf->devfn, 1550 (unsigned)vf->bars[0].bar, vf->bars[0].size, 1551 (unsigned)vf->bars[1].bar, vf->bars[1].size, 1552 (unsigned)vf->bars[2].bar, vf->bars[2].size); 1553 } 1554 1555 return 0; 1556 } 1557 1558 /* called by bnx2x_chip_cleanup */ 1559 int bnx2x_iov_chip_cleanup(struct bnx2x *bp) 1560 { 1561 int i; 1562 1563 if (!IS_SRIOV(bp)) 1564 return 0; 1565 1566 /* release all the VFs */ 1567 for_each_vf(bp, i) 1568 bnx2x_vf_release(bp, BP_VF(bp, i)); 1569 1570 return 0; 1571 } 1572 1573 /* called by bnx2x_init_hw_func, returns the next ilt line */ 1574 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) 1575 { 1576 int i; 1577 struct bnx2x_ilt *ilt = BP_ILT(bp); 1578 1579 if (!IS_SRIOV(bp)) 1580 return line; 1581 1582 /* set vfs ilt lines */ 1583 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 1584 struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i); 1585 1586 ilt->lines[line+i].page = hw_cxt->addr; 1587 ilt->lines[line+i].page_mapping = hw_cxt->mapping; 1588 ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */ 1589 } 1590 return line + i; 1591 } 1592 1593 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid) 1594 { 1595 return ((cid >= BNX2X_FIRST_VF_CID) && 1596 ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS)); 1597 } 1598 1599 static 1600 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, 1601 struct bnx2x_vf_queue *vfq, 1602 union event_ring_elem *elem) 1603 { 1604 unsigned long ramrod_flags = 0; 1605 int rc = 0; 1606 1607 /* Always push next commands out, don't wait here */ 1608 set_bit(RAMROD_CONT, &ramrod_flags); 1609 1610 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { 1611 case BNX2X_FILTER_MAC_PENDING: 1612 rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, 1613 &ramrod_flags); 1614 break; 1615 case BNX2X_FILTER_VLAN_PENDING: 1616 rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem, 1617 &ramrod_flags); 1618 break; 1619 default: 1620 BNX2X_ERR("Unsupported classification command: %d\n", 1621 elem->message.data.eth_event.echo); 1622 return; 1623 } 1624 if (rc < 0) 1625 BNX2X_ERR("Failed to schedule new commands: %d\n", rc); 1626 else if (rc > 0) 1627 DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n"); 1628 } 1629 1630 static 1631 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp, 1632 struct bnx2x_virtf *vf) 1633 { 1634 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 1635 int rc; 1636 1637 rparam.mcast_obj = &vf->mcast_obj; 1638 vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw); 1639 1640 /* If there are pending mcast commands - send them */ 1641 if (vf->mcast_obj.check_pending(&vf->mcast_obj)) { 1642 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 1643 if (rc < 0) 1644 BNX2X_ERR("Failed to send pending mcast commands: %d\n", 1645 rc); 1646 } 1647 } 1648 1649 static 1650 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, 1651 struct bnx2x_virtf *vf) 1652 { 1653 smp_mb__before_clear_bit(); 1654 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); 1655 smp_mb__after_clear_bit(); 1656 } 1657 1658 static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp, 1659 struct bnx2x_virtf *vf) 1660 { 1661 vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw); 1662 } 1663 1664 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) 1665 { 1666 struct bnx2x_virtf *vf; 1667 int qidx = 0, abs_vfid; 1668 u8 opcode; 1669 u16 cid = 0xffff; 1670 1671 if (!IS_SRIOV(bp)) 1672 return 1; 1673 1674 /* first get the cid - the only events we handle here are cfc-delete 1675 * and set-mac completion 1676 */ 1677 opcode = elem->message.opcode; 1678 1679 switch (opcode) { 1680 case EVENT_RING_OPCODE_CFC_DEL: 1681 cid = SW_CID((__force __le32) 1682 elem->message.data.cfc_del_event.cid); 1683 DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid); 1684 break; 1685 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 1686 case EVENT_RING_OPCODE_MULTICAST_RULES: 1687 case EVENT_RING_OPCODE_FILTERS_RULES: 1688 case EVENT_RING_OPCODE_RSS_UPDATE_RULES: 1689 cid = (elem->message.data.eth_event.echo & 1690 BNX2X_SWCID_MASK); 1691 DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); 1692 break; 1693 case EVENT_RING_OPCODE_VF_FLR: 1694 abs_vfid = elem->message.data.vf_flr_event.vf_id; 1695 DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n", 1696 abs_vfid); 1697 goto get_vf; 1698 case EVENT_RING_OPCODE_MALICIOUS_VF: 1699 abs_vfid = elem->message.data.malicious_vf_event.vf_id; 1700 BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n", 1701 abs_vfid, 1702 elem->message.data.malicious_vf_event.err_id); 1703 goto get_vf; 1704 default: 1705 return 1; 1706 } 1707 1708 /* check if the cid is the VF range */ 1709 if (!bnx2x_iov_is_vf_cid(bp, cid)) { 1710 DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid); 1711 return 1; 1712 } 1713 1714 /* extract vf and rxq index from vf_cid - relies on the following: 1715 * 1. vfid on cid reflects the true abs_vfid 1716 * 2. The max number of VFs (per path) is 64 1717 */ 1718 qidx = cid & ((1 << BNX2X_VF_CID_WND)-1); 1719 abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 1720 get_vf: 1721 vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 1722 1723 if (!vf) { 1724 BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n", 1725 cid, abs_vfid); 1726 return 0; 1727 } 1728 1729 switch (opcode) { 1730 case EVENT_RING_OPCODE_CFC_DEL: 1731 DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n", 1732 vf->abs_vfid, qidx); 1733 vfq_get(vf, qidx)->sp_obj.complete_cmd(bp, 1734 &vfq_get(vf, 1735 qidx)->sp_obj, 1736 BNX2X_Q_CMD_CFC_DEL); 1737 break; 1738 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 1739 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n", 1740 vf->abs_vfid, qidx); 1741 bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem); 1742 break; 1743 case EVENT_RING_OPCODE_MULTICAST_RULES: 1744 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n", 1745 vf->abs_vfid, qidx); 1746 bnx2x_vf_handle_mcast_eqe(bp, vf); 1747 break; 1748 case EVENT_RING_OPCODE_FILTERS_RULES: 1749 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n", 1750 vf->abs_vfid, qidx); 1751 bnx2x_vf_handle_filters_eqe(bp, vf); 1752 break; 1753 case EVENT_RING_OPCODE_RSS_UPDATE_RULES: 1754 DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n", 1755 vf->abs_vfid, qidx); 1756 bnx2x_vf_handle_rss_update_eqe(bp, vf); 1757 case EVENT_RING_OPCODE_VF_FLR: 1758 case EVENT_RING_OPCODE_MALICIOUS_VF: 1759 /* Do nothing for now */ 1760 return 0; 1761 } 1762 1763 return 0; 1764 } 1765 1766 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) 1767 { 1768 /* extract the vf from vf_cid - relies on the following: 1769 * 1. vfid on cid reflects the true abs_vfid 1770 * 2. The max number of VFs (per path) is 64 1771 */ 1772 int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 1773 return bnx2x_vf_by_abs_fid(bp, abs_vfid); 1774 } 1775 1776 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, 1777 struct bnx2x_queue_sp_obj **q_obj) 1778 { 1779 struct bnx2x_virtf *vf; 1780 1781 if (!IS_SRIOV(bp)) 1782 return; 1783 1784 vf = bnx2x_vf_by_cid(bp, vf_cid); 1785 1786 if (vf) { 1787 /* extract queue index from vf_cid - relies on the following: 1788 * 1. vfid on cid reflects the true abs_vfid 1789 * 2. The max number of VFs (per path) is 64 1790 */ 1791 int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1); 1792 *q_obj = &bnx2x_vfq(vf, q_index, sp_obj); 1793 } else { 1794 BNX2X_ERR("No vf matching cid %d\n", vf_cid); 1795 } 1796 } 1797 1798 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) 1799 { 1800 int i; 1801 int first_queue_query_index, num_queues_req; 1802 dma_addr_t cur_data_offset; 1803 struct stats_query_entry *cur_query_entry; 1804 u8 stats_count = 0; 1805 bool is_fcoe = false; 1806 1807 if (!IS_SRIOV(bp)) 1808 return; 1809 1810 if (!NO_FCOE(bp)) 1811 is_fcoe = true; 1812 1813 /* fcoe adds one global request and one queue request */ 1814 num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe; 1815 first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1816 (is_fcoe ? 0 : 1); 1817 1818 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), 1819 "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n", 1820 BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, 1821 first_queue_query_index + num_queues_req); 1822 1823 cur_data_offset = bp->fw_stats_data_mapping + 1824 offsetof(struct bnx2x_fw_stats_data, queue_stats) + 1825 num_queues_req * sizeof(struct per_queue_stats); 1826 1827 cur_query_entry = &bp->fw_stats_req-> 1828 query[first_queue_query_index + num_queues_req]; 1829 1830 for_each_vf(bp, i) { 1831 int j; 1832 struct bnx2x_virtf *vf = BP_VF(bp, i); 1833 1834 if (vf->state != VF_ENABLED) { 1835 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), 1836 "vf %d not enabled so no stats for it\n", 1837 vf->abs_vfid); 1838 continue; 1839 } 1840 1841 DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid); 1842 for_each_vfq(vf, j) { 1843 struct bnx2x_vf_queue *rxq = vfq_get(vf, j); 1844 1845 dma_addr_t q_stats_addr = 1846 vf->fw_stat_map + j * vf->stats_stride; 1847 1848 /* collect stats fro active queues only */ 1849 if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) == 1850 BNX2X_Q_LOGICAL_STATE_STOPPED) 1851 continue; 1852 1853 /* create stats query entry for this queue */ 1854 cur_query_entry->kind = STATS_TYPE_QUEUE; 1855 cur_query_entry->index = vfq_stat_id(vf, rxq); 1856 cur_query_entry->funcID = 1857 cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid)); 1858 cur_query_entry->address.hi = 1859 cpu_to_le32(U64_HI(q_stats_addr)); 1860 cur_query_entry->address.lo = 1861 cpu_to_le32(U64_LO(q_stats_addr)); 1862 DP(BNX2X_MSG_IOV, 1863 "added address %x %x for vf %d queue %d client %d\n", 1864 cur_query_entry->address.hi, 1865 cur_query_entry->address.lo, cur_query_entry->funcID, 1866 j, cur_query_entry->index); 1867 cur_query_entry++; 1868 cur_data_offset += sizeof(struct per_queue_stats); 1869 stats_count++; 1870 1871 /* all stats are coalesced to the leading queue */ 1872 if (vf->cfg_flags & VF_CFG_STATS_COALESCE) 1873 break; 1874 } 1875 } 1876 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; 1877 } 1878 1879 static inline 1880 struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id) 1881 { 1882 int i; 1883 struct bnx2x_virtf *vf = NULL; 1884 1885 for_each_vf(bp, i) { 1886 vf = BP_VF(bp, i); 1887 if (stat_id >= vf->igu_base_id && 1888 stat_id < vf->igu_base_id + vf_sb_count(vf)) 1889 break; 1890 } 1891 return vf; 1892 } 1893 1894 /* VF API helpers */ 1895 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid, 1896 u8 enable) 1897 { 1898 u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4; 1899 u32 val = enable ? (abs_vfid | (1 << 6)) : 0; 1900 1901 REG_WR(bp, reg, val); 1902 } 1903 1904 static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf) 1905 { 1906 int i; 1907 1908 for_each_vfq(vf, i) 1909 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, 1910 vfq_qzone_id(vf, vfq_get(vf, i)), false); 1911 } 1912 1913 static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf) 1914 { 1915 u32 val; 1916 1917 /* clear the VF configuration - pretend */ 1918 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); 1919 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); 1920 val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN | 1921 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK); 1922 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); 1923 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 1924 } 1925 1926 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf) 1927 { 1928 return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF), 1929 BNX2X_VF_MAX_QUEUES); 1930 } 1931 1932 static 1933 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf, 1934 struct vf_pf_resc_request *req_resc) 1935 { 1936 u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); 1937 u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); 1938 1939 /* Save a vlan filter for the Hypervisor */ 1940 return ((req_resc->num_rxqs <= rxq_cnt) && 1941 (req_resc->num_txqs <= txq_cnt) && 1942 (req_resc->num_sbs <= vf_sb_count(vf)) && 1943 (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) && 1944 (req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf))); 1945 } 1946 1947 /* CORE VF API */ 1948 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, 1949 struct vf_pf_resc_request *resc) 1950 { 1951 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) * 1952 BNX2X_CIDS_PER_VF; 1953 1954 union cdu_context *base_cxt = (union cdu_context *) 1955 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + 1956 (base_vf_cid & (ILT_PAGE_CIDS-1)); 1957 int i; 1958 1959 /* if state is 'acquired' the VF was not released or FLR'd, in 1960 * this case the returned resources match the acquired already 1961 * acquired resources. Verify that the requested numbers do 1962 * not exceed the already acquired numbers. 1963 */ 1964 if (vf->state == VF_ACQUIRED) { 1965 DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n", 1966 vf->abs_vfid); 1967 1968 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { 1969 BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= then previously acquired numbers\n", 1970 vf->abs_vfid); 1971 return -EINVAL; 1972 } 1973 return 0; 1974 } 1975 1976 /* Otherwise vf state must be 'free' or 'reset' */ 1977 if (vf->state != VF_FREE && vf->state != VF_RESET) { 1978 BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n", 1979 vf->abs_vfid, vf->state); 1980 return -EINVAL; 1981 } 1982 1983 /* static allocation: 1984 * the global maximum number are fixed per VF. Fail the request if 1985 * requested number exceed these globals 1986 */ 1987 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { 1988 DP(BNX2X_MSG_IOV, 1989 "cannot fulfill vf resource request. Placing maximal available values in response\n"); 1990 /* set the max resource in the vf */ 1991 return -ENOMEM; 1992 } 1993 1994 /* Set resources counters - 0 request means max available */ 1995 vf_sb_count(vf) = resc->num_sbs; 1996 vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf); 1997 vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf); 1998 if (resc->num_mac_filters) 1999 vf_mac_rules_cnt(vf) = resc->num_mac_filters; 2000 /* Add an additional vlan filter credit for the hypervisor */ 2001 bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1); 2002 2003 DP(BNX2X_MSG_IOV, 2004 "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n", 2005 vf_sb_count(vf), vf_rxq_count(vf), 2006 vf_txq_count(vf), vf_mac_rules_cnt(vf), 2007 vf_vlan_rules_visible_cnt(vf)); 2008 2009 /* Initialize the queues */ 2010 if (!vf->vfqs) { 2011 DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n"); 2012 return -EINVAL; 2013 } 2014 2015 for_each_vfq(vf, i) { 2016 struct bnx2x_vf_queue *q = vfq_get(vf, i); 2017 2018 if (!q) { 2019 BNX2X_ERR("q number %d was not allocated\n", i); 2020 return -EINVAL; 2021 } 2022 2023 q->index = i; 2024 q->cxt = &((base_cxt + i)->eth); 2025 q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i; 2026 2027 DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n", 2028 vf->abs_vfid, i, q->index, q->cid, q->cxt); 2029 2030 /* init SP objects */ 2031 bnx2x_vfq_init(bp, vf, q); 2032 } 2033 vf->state = VF_ACQUIRED; 2034 return 0; 2035 } 2036 2037 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) 2038 { 2039 struct bnx2x_func_init_params func_init = {0}; 2040 u16 flags = 0; 2041 int i; 2042 2043 /* the sb resources are initialized at this point, do the 2044 * FW/HW initializations 2045 */ 2046 for_each_vf_sb(vf, i) 2047 bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true, 2048 vf_igu_sb(vf, i), vf_igu_sb(vf, i)); 2049 2050 /* Sanity checks */ 2051 if (vf->state != VF_ACQUIRED) { 2052 DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n", 2053 vf->abs_vfid, vf->state); 2054 return -EINVAL; 2055 } 2056 2057 /* let FLR complete ... */ 2058 msleep(100); 2059 2060 /* FLR cleanup epilogue */ 2061 if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid)) 2062 return -EBUSY; 2063 2064 /* reset IGU VF statistics: MSIX */ 2065 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0); 2066 2067 /* vf init */ 2068 if (vf->cfg_flags & VF_CFG_STATS) 2069 flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ); 2070 2071 if (vf->cfg_flags & VF_CFG_TPA) 2072 flags |= FUNC_FLG_TPA; 2073 2074 if (is_vf_multi(vf)) 2075 flags |= FUNC_FLG_RSS; 2076 2077 /* function setup */ 2078 func_init.func_flgs = flags; 2079 func_init.pf_id = BP_FUNC(bp); 2080 func_init.func_id = FW_VF_HANDLE(vf->abs_vfid); 2081 func_init.fw_stat_map = vf->fw_stat_map; 2082 func_init.spq_map = vf->spq_map; 2083 func_init.spq_prod = 0; 2084 bnx2x_func_init(bp, &func_init); 2085 2086 /* Enable the vf */ 2087 bnx2x_vf_enable_access(bp, vf->abs_vfid); 2088 bnx2x_vf_enable_traffic(bp, vf); 2089 2090 /* queue protection table */ 2091 for_each_vfq(vf, i) 2092 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, 2093 vfq_qzone_id(vf, vfq_get(vf, i)), true); 2094 2095 vf->state = VF_ENABLED; 2096 2097 /* update vf bulletin board */ 2098 bnx2x_post_vf_bulletin(bp, vf->index); 2099 2100 return 0; 2101 } 2102 2103 struct set_vf_state_cookie { 2104 struct bnx2x_virtf *vf; 2105 u8 state; 2106 }; 2107 2108 static void bnx2x_set_vf_state(void *cookie) 2109 { 2110 struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie; 2111 2112 p->vf->state = p->state; 2113 } 2114 2115 int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf) 2116 { 2117 int rc = 0, i; 2118 2119 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 2120 2121 /* Close all queues */ 2122 for (i = 0; i < vf_rxq_count(vf); i++) { 2123 rc = bnx2x_vf_queue_teardown(bp, vf, i); 2124 if (rc) 2125 goto op_err; 2126 } 2127 2128 /* disable the interrupts */ 2129 DP(BNX2X_MSG_IOV, "disabling igu\n"); 2130 bnx2x_vf_igu_disable(bp, vf); 2131 2132 /* disable the VF */ 2133 DP(BNX2X_MSG_IOV, "clearing qtbl\n"); 2134 bnx2x_vf_clr_qtbl(bp, vf); 2135 2136 /* need to make sure there are no outstanding stats ramrods which may 2137 * cause the device to access the VF's stats buffer which it will free 2138 * as soon as we return from the close flow. 2139 */ 2140 { 2141 struct set_vf_state_cookie cookie; 2142 2143 cookie.vf = vf; 2144 cookie.state = VF_ACQUIRED; 2145 bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); 2146 } 2147 2148 DP(BNX2X_MSG_IOV, "set state to acquired\n"); 2149 2150 return 0; 2151 op_err: 2152 BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc); 2153 return rc; 2154 } 2155 2156 /* VF release can be called either: 1. The VF was acquired but 2157 * not enabled 2. the vf was enabled or in the process of being 2158 * enabled 2159 */ 2160 int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf) 2161 { 2162 int rc; 2163 2164 DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid, 2165 vf->state == VF_FREE ? "Free" : 2166 vf->state == VF_ACQUIRED ? "Acquired" : 2167 vf->state == VF_ENABLED ? "Enabled" : 2168 vf->state == VF_RESET ? "Reset" : 2169 "Unknown"); 2170 2171 switch (vf->state) { 2172 case VF_ENABLED: 2173 rc = bnx2x_vf_close(bp, vf); 2174 if (rc) 2175 goto op_err; 2176 /* Fallthrough to release resources */ 2177 case VF_ACQUIRED: 2178 DP(BNX2X_MSG_IOV, "about to free resources\n"); 2179 bnx2x_vf_free_resc(bp, vf); 2180 break; 2181 2182 case VF_FREE: 2183 case VF_RESET: 2184 default: 2185 break; 2186 } 2187 return 0; 2188 op_err: 2189 BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc); 2190 return rc; 2191 } 2192 2193 int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, 2194 struct bnx2x_config_rss_params *rss) 2195 { 2196 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 2197 set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags); 2198 return bnx2x_config_rss(bp, rss); 2199 } 2200 2201 int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, 2202 struct vfpf_tpa_tlv *tlv, 2203 struct bnx2x_queue_update_tpa_params *params) 2204 { 2205 aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr; 2206 struct bnx2x_queue_state_params qstate; 2207 int qid, rc = 0; 2208 2209 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 2210 2211 /* Set ramrod params */ 2212 memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); 2213 memcpy(&qstate.params.update_tpa, params, 2214 sizeof(struct bnx2x_queue_update_tpa_params)); 2215 qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA; 2216 set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); 2217 2218 for (qid = 0; qid < vf_rxq_count(vf); qid++) { 2219 qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); 2220 qstate.params.update_tpa.sge_map = sge_addr[qid]; 2221 DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n", 2222 vf->abs_vfid, qid, U64_HI(sge_addr[qid]), 2223 U64_LO(sge_addr[qid])); 2224 rc = bnx2x_queue_state_change(bp, &qstate); 2225 if (rc) { 2226 BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n", 2227 U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]), 2228 vf->abs_vfid, qid); 2229 return rc; 2230 } 2231 } 2232 2233 return rc; 2234 } 2235 2236 /* VF release ~ VF close + VF release-resources 2237 * Release is the ultimate SW shutdown and is called whenever an 2238 * irrecoverable error is encountered. 2239 */ 2240 int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf) 2241 { 2242 int rc; 2243 2244 DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid); 2245 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); 2246 2247 rc = bnx2x_vf_free(bp, vf); 2248 if (rc) 2249 WARN(rc, 2250 "VF[%d] Failed to allocate resources for release op- rc=%d\n", 2251 vf->abs_vfid, rc); 2252 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); 2253 return rc; 2254 } 2255 2256 static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp, 2257 struct bnx2x_virtf *vf, u32 *sbdf) 2258 { 2259 *sbdf = vf->devfn | (vf->bus << 8); 2260 } 2261 2262 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 2263 enum channel_tlvs tlv) 2264 { 2265 /* we don't lock the channel for unsupported tlvs */ 2266 if (!bnx2x_tlv_supported(tlv)) { 2267 BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n"); 2268 return; 2269 } 2270 2271 /* lock the channel */ 2272 mutex_lock(&vf->op_mutex); 2273 2274 /* record the locking op */ 2275 vf->op_current = tlv; 2276 2277 /* log the lock */ 2278 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n", 2279 vf->abs_vfid, tlv); 2280 } 2281 2282 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 2283 enum channel_tlvs expected_tlv) 2284 { 2285 enum channel_tlvs current_tlv; 2286 2287 if (!vf) { 2288 BNX2X_ERR("VF was %p\n", vf); 2289 return; 2290 } 2291 2292 current_tlv = vf->op_current; 2293 2294 /* we don't unlock the channel for unsupported tlvs */ 2295 if (!bnx2x_tlv_supported(expected_tlv)) 2296 return; 2297 2298 WARN(expected_tlv != vf->op_current, 2299 "lock mismatch: expected %d found %d", expected_tlv, 2300 vf->op_current); 2301 2302 /* record the locking op */ 2303 vf->op_current = CHANNEL_TLV_NONE; 2304 2305 /* lock the channel */ 2306 mutex_unlock(&vf->op_mutex); 2307 2308 /* log the unlock */ 2309 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n", 2310 vf->abs_vfid, vf->op_current); 2311 } 2312 2313 static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable) 2314 { 2315 struct bnx2x_queue_state_params q_params; 2316 u32 prev_flags; 2317 int i, rc; 2318 2319 /* Verify changes are needed and record current Tx switching state */ 2320 prev_flags = bp->flags; 2321 if (enable) 2322 bp->flags |= TX_SWITCHING; 2323 else 2324 bp->flags &= ~TX_SWITCHING; 2325 if (prev_flags == bp->flags) 2326 return 0; 2327 2328 /* Verify state enables the sending of queue ramrods */ 2329 if ((bp->state != BNX2X_STATE_OPEN) || 2330 (bnx2x_get_q_logical_state(bp, 2331 &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) != 2332 BNX2X_Q_LOGICAL_STATE_ACTIVE)) 2333 return 0; 2334 2335 /* send q. update ramrod to configure Tx switching */ 2336 memset(&q_params, 0, sizeof(q_params)); 2337 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 2338 q_params.cmd = BNX2X_Q_CMD_UPDATE; 2339 __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, 2340 &q_params.params.update.update_flags); 2341 if (enable) 2342 __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING, 2343 &q_params.params.update.update_flags); 2344 else 2345 __clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING, 2346 &q_params.params.update.update_flags); 2347 2348 /* send the ramrod on all the queues of the PF */ 2349 for_each_eth_queue(bp, i) { 2350 struct bnx2x_fastpath *fp = &bp->fp[i]; 2351 2352 /* Set the appropriate Queue object */ 2353 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 2354 2355 /* Update the Queue state */ 2356 rc = bnx2x_queue_state_change(bp, &q_params); 2357 if (rc) { 2358 BNX2X_ERR("Failed to configure Tx switching\n"); 2359 return rc; 2360 } 2361 } 2362 2363 DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled"); 2364 return 0; 2365 } 2366 2367 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) 2368 { 2369 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); 2370 2371 if (!IS_SRIOV(bp)) { 2372 BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n"); 2373 return -EINVAL; 2374 } 2375 2376 DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", 2377 num_vfs_param, BNX2X_NR_VIRTFN(bp)); 2378 2379 /* HW channel is only operational when PF is up */ 2380 if (bp->state != BNX2X_STATE_OPEN) { 2381 BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n"); 2382 return -EINVAL; 2383 } 2384 2385 /* we are always bound by the total_vfs in the configuration space */ 2386 if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) { 2387 BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n", 2388 num_vfs_param, BNX2X_NR_VIRTFN(bp)); 2389 num_vfs_param = BNX2X_NR_VIRTFN(bp); 2390 } 2391 2392 bp->requested_nr_virtfn = num_vfs_param; 2393 if (num_vfs_param == 0) { 2394 bnx2x_set_pf_tx_switching(bp, false); 2395 pci_disable_sriov(dev); 2396 return 0; 2397 } else { 2398 return bnx2x_enable_sriov(bp); 2399 } 2400 } 2401 2402 #define IGU_ENTRY_SIZE 4 2403 2404 int bnx2x_enable_sriov(struct bnx2x *bp) 2405 { 2406 int rc = 0, req_vfs = bp->requested_nr_virtfn; 2407 int vf_idx, sb_idx, vfq_idx, qcount, first_vf; 2408 u32 igu_entry, address; 2409 u16 num_vf_queues; 2410 2411 if (req_vfs == 0) 2412 return 0; 2413 2414 first_vf = bp->vfdb->sriov.first_vf_in_pf; 2415 2416 /* statically distribute vf sb pool between VFs */ 2417 num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES, 2418 BP_VFDB(bp)->vf_sbs_pool / req_vfs); 2419 2420 /* zero previous values learned from igu cam */ 2421 for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) { 2422 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); 2423 2424 vf->sb_count = 0; 2425 vf_sb_count(BP_VF(bp, vf_idx)) = 0; 2426 } 2427 bp->vfdb->vf_sbs_pool = 0; 2428 2429 /* prepare IGU cam */ 2430 sb_idx = BP_VFDB(bp)->first_vf_igu_entry; 2431 address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE; 2432 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { 2433 for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) { 2434 igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT | 2435 vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT | 2436 IGU_REG_MAPPING_MEMORY_VALID; 2437 DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n", 2438 sb_idx, vf_idx); 2439 REG_WR(bp, address, igu_entry); 2440 sb_idx++; 2441 address += IGU_ENTRY_SIZE; 2442 } 2443 } 2444 2445 /* Reinitialize vf database according to igu cam */ 2446 bnx2x_get_vf_igu_cam_info(bp); 2447 2448 DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n", 2449 BP_VFDB(bp)->vf_sbs_pool, num_vf_queues); 2450 2451 qcount = 0; 2452 for_each_vf(bp, vf_idx) { 2453 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); 2454 2455 /* set local queue arrays */ 2456 vf->vfqs = &bp->vfdb->vfqs[qcount]; 2457 qcount += vf_sb_count(vf); 2458 bnx2x_iov_static_resc(bp, vf); 2459 } 2460 2461 /* prepare msix vectors in VF configuration space - the value in the 2462 * PCI configuration space should be the index of the last entry, 2463 * namely one less than the actual size of the table 2464 */ 2465 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { 2466 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); 2467 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, 2468 num_vf_queues - 1); 2469 DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n", 2470 vf_idx, num_vf_queues - 1); 2471 } 2472 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 2473 2474 /* enable sriov. This will probe all the VFs, and consequentially cause 2475 * the "acquire" messages to appear on the VF PF channel. 2476 */ 2477 DP(BNX2X_MSG_IOV, "about to call enable sriov\n"); 2478 bnx2x_disable_sriov(bp); 2479 2480 rc = bnx2x_set_pf_tx_switching(bp, true); 2481 if (rc) 2482 return rc; 2483 2484 rc = pci_enable_sriov(bp->pdev, req_vfs); 2485 if (rc) { 2486 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); 2487 return rc; 2488 } 2489 DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs); 2490 return req_vfs; 2491 } 2492 2493 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) 2494 { 2495 int vfidx; 2496 struct pf_vf_bulletin_content *bulletin; 2497 2498 DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n"); 2499 for_each_vf(bp, vfidx) { 2500 bulletin = BP_VF_BULLETIN(bp, vfidx); 2501 if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN) 2502 bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0); 2503 } 2504 } 2505 2506 void bnx2x_disable_sriov(struct bnx2x *bp) 2507 { 2508 pci_disable_sriov(bp->pdev); 2509 } 2510 2511 static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, 2512 struct bnx2x_virtf **vf, 2513 struct pf_vf_bulletin_content **bulletin) 2514 { 2515 if (bp->state != BNX2X_STATE_OPEN) { 2516 BNX2X_ERR("vf ndo called though PF is down\n"); 2517 return -EINVAL; 2518 } 2519 2520 if (!IS_SRIOV(bp)) { 2521 BNX2X_ERR("vf ndo called though sriov is disabled\n"); 2522 return -EINVAL; 2523 } 2524 2525 if (vfidx >= BNX2X_NR_VIRTFN(bp)) { 2526 BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n", 2527 vfidx, BNX2X_NR_VIRTFN(bp)); 2528 return -EINVAL; 2529 } 2530 2531 /* init members */ 2532 *vf = BP_VF(bp, vfidx); 2533 *bulletin = BP_VF_BULLETIN(bp, vfidx); 2534 2535 if (!*vf) { 2536 BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n", 2537 vfidx); 2538 return -EINVAL; 2539 } 2540 2541 if (!(*vf)->vfqs) { 2542 BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n", 2543 vfidx); 2544 return -EINVAL; 2545 } 2546 2547 if (!*bulletin) { 2548 BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n", 2549 vfidx); 2550 return -EINVAL; 2551 } 2552 2553 return 0; 2554 } 2555 2556 int bnx2x_get_vf_config(struct net_device *dev, int vfidx, 2557 struct ifla_vf_info *ivi) 2558 { 2559 struct bnx2x *bp = netdev_priv(dev); 2560 struct bnx2x_virtf *vf = NULL; 2561 struct pf_vf_bulletin_content *bulletin = NULL; 2562 struct bnx2x_vlan_mac_obj *mac_obj; 2563 struct bnx2x_vlan_mac_obj *vlan_obj; 2564 int rc; 2565 2566 /* sanity and init */ 2567 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 2568 if (rc) 2569 return rc; 2570 mac_obj = &bnx2x_leading_vfq(vf, mac_obj); 2571 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); 2572 if (!mac_obj || !vlan_obj) { 2573 BNX2X_ERR("VF partially initialized\n"); 2574 return -EINVAL; 2575 } 2576 2577 ivi->vf = vfidx; 2578 ivi->qos = 0; 2579 ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */ 2580 ivi->min_tx_rate = 0; 2581 ivi->spoofchk = 1; /*always enabled */ 2582 if (vf->state == VF_ENABLED) { 2583 /* mac and vlan are in vlan_mac objects */ 2584 if (bnx2x_validate_vf_sp_objs(bp, vf, false)) { 2585 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, 2586 0, ETH_ALEN); 2587 vlan_obj->get_n_elements(bp, vlan_obj, 1, 2588 (u8 *)&ivi->vlan, 0, 2589 VLAN_HLEN); 2590 } 2591 } else { 2592 /* mac */ 2593 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) 2594 /* mac configured by ndo so its in bulletin board */ 2595 memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); 2596 else 2597 /* function has not been loaded yet. Show mac as 0s */ 2598 memset(&ivi->mac, 0, ETH_ALEN); 2599 2600 /* vlan */ 2601 if (bulletin->valid_bitmap & (1 << VLAN_VALID)) 2602 /* vlan configured by ndo so its in bulletin board */ 2603 memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN); 2604 else 2605 /* function has not been loaded yet. Show vlans as 0s */ 2606 memset(&ivi->vlan, 0, VLAN_HLEN); 2607 } 2608 2609 return 0; 2610 } 2611 2612 /* New mac for VF. Consider these cases: 2613 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and 2614 * supply at acquire. 2615 * 2. VF has already been acquired but has not yet initialized - store in local 2616 * bulletin board. mac will be posted on VF bulletin board after VF init. VF 2617 * will configure this mac when it is ready. 2618 * 3. VF has already initialized but has not yet setup a queue - post the new 2619 * mac on VF's bulletin board right now. VF will configure this mac when it 2620 * is ready. 2621 * 4. VF has already set a queue - delete any macs already configured for this 2622 * queue and manually config the new mac. 2623 * In any event, once this function has been called refuse any attempts by the 2624 * VF to configure any mac for itself except for this mac. In case of a race 2625 * where the VF fails to see the new post on its bulletin board before sending a 2626 * mac configuration request, the PF will simply fail the request and VF can try 2627 * again after consulting its bulletin board. 2628 */ 2629 int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) 2630 { 2631 struct bnx2x *bp = netdev_priv(dev); 2632 int rc, q_logical_state; 2633 struct bnx2x_virtf *vf = NULL; 2634 struct pf_vf_bulletin_content *bulletin = NULL; 2635 2636 /* sanity and init */ 2637 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 2638 if (rc) 2639 return rc; 2640 if (!is_valid_ether_addr(mac)) { 2641 BNX2X_ERR("mac address invalid\n"); 2642 return -EINVAL; 2643 } 2644 2645 /* update PF's copy of the VF's bulletin. Will no longer accept mac 2646 * configuration requests from vf unless match this mac 2647 */ 2648 bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID; 2649 memcpy(bulletin->mac, mac, ETH_ALEN); 2650 2651 /* Post update on VF's bulletin board */ 2652 rc = bnx2x_post_vf_bulletin(bp, vfidx); 2653 if (rc) { 2654 BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx); 2655 return rc; 2656 } 2657 2658 q_logical_state = 2659 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); 2660 if (vf->state == VF_ENABLED && 2661 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { 2662 /* configure the mac in device on this vf's queue */ 2663 unsigned long ramrod_flags = 0; 2664 struct bnx2x_vlan_mac_obj *mac_obj; 2665 2666 /* User should be able to see failure reason in system logs */ 2667 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) 2668 return -EINVAL; 2669 2670 /* must lock vfpf channel to protect against vf flows */ 2671 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 2672 2673 /* remove existing eth macs */ 2674 mac_obj = &bnx2x_leading_vfq(vf, mac_obj); 2675 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); 2676 if (rc) { 2677 BNX2X_ERR("failed to delete eth macs\n"); 2678 rc = -EINVAL; 2679 goto out; 2680 } 2681 2682 /* remove existing uc list macs */ 2683 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true); 2684 if (rc) { 2685 BNX2X_ERR("failed to delete uc_list macs\n"); 2686 rc = -EINVAL; 2687 goto out; 2688 } 2689 2690 /* configure the new mac to device */ 2691 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 2692 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, 2693 BNX2X_ETH_MAC, &ramrod_flags); 2694 2695 out: 2696 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 2697 } 2698 2699 return rc; 2700 } 2701 2702 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) 2703 { 2704 struct bnx2x_queue_state_params q_params = {NULL}; 2705 struct bnx2x_vlan_mac_ramrod_params ramrod_param; 2706 struct bnx2x_queue_update_params *update_params; 2707 struct pf_vf_bulletin_content *bulletin = NULL; 2708 struct bnx2x_rx_mode_ramrod_params rx_ramrod; 2709 struct bnx2x *bp = netdev_priv(dev); 2710 struct bnx2x_vlan_mac_obj *vlan_obj; 2711 unsigned long vlan_mac_flags = 0; 2712 unsigned long ramrod_flags = 0; 2713 struct bnx2x_virtf *vf = NULL; 2714 unsigned long accept_flags; 2715 int rc; 2716 2717 /* sanity and init */ 2718 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin); 2719 if (rc) 2720 return rc; 2721 2722 if (vlan > 4095) { 2723 BNX2X_ERR("illegal vlan value %d\n", vlan); 2724 return -EINVAL; 2725 } 2726 2727 DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n", 2728 vfidx, vlan, 0); 2729 2730 /* update PF's copy of the VF's bulletin. No point in posting the vlan 2731 * to the VF since it doesn't have anything to do with it. But it useful 2732 * to store it here in case the VF is not up yet and we can only 2733 * configure the vlan later when it does. Treat vlan id 0 as remove the 2734 * Host tag. 2735 */ 2736 if (vlan > 0) 2737 bulletin->valid_bitmap |= 1 << VLAN_VALID; 2738 else 2739 bulletin->valid_bitmap &= ~(1 << VLAN_VALID); 2740 bulletin->vlan = vlan; 2741 2742 /* is vf initialized and queue set up? */ 2743 if (vf->state != VF_ENABLED || 2744 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != 2745 BNX2X_Q_LOGICAL_STATE_ACTIVE) 2746 return rc; 2747 2748 /* User should be able to see error in system logs */ 2749 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) 2750 return -EINVAL; 2751 2752 /* must lock vfpf channel to protect against vf flows */ 2753 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 2754 2755 /* remove existing vlans */ 2756 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 2757 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); 2758 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, 2759 &ramrod_flags); 2760 if (rc) { 2761 BNX2X_ERR("failed to delete vlans\n"); 2762 rc = -EINVAL; 2763 goto out; 2764 } 2765 2766 /* need to remove/add the VF's accept_any_vlan bit */ 2767 accept_flags = bnx2x_leading_vfq(vf, accept_flags); 2768 if (vlan) 2769 clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); 2770 else 2771 set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); 2772 2773 bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, 2774 accept_flags); 2775 bnx2x_leading_vfq(vf, accept_flags) = accept_flags; 2776 bnx2x_config_rx_mode(bp, &rx_ramrod); 2777 2778 /* configure the new vlan to device */ 2779 memset(&ramrod_param, 0, sizeof(ramrod_param)); 2780 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 2781 ramrod_param.vlan_mac_obj = vlan_obj; 2782 ramrod_param.ramrod_flags = ramrod_flags; 2783 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 2784 &ramrod_param.user_req.vlan_mac_flags); 2785 ramrod_param.user_req.u.vlan.vlan = vlan; 2786 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; 2787 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); 2788 if (rc) { 2789 BNX2X_ERR("failed to configure vlan\n"); 2790 rc = -EINVAL; 2791 goto out; 2792 } 2793 2794 /* send queue update ramrod to configure default vlan and silent 2795 * vlan removal 2796 */ 2797 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 2798 q_params.cmd = BNX2X_Q_CMD_UPDATE; 2799 q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); 2800 update_params = &q_params.params.update; 2801 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, 2802 &update_params->update_flags); 2803 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, 2804 &update_params->update_flags); 2805 if (vlan == 0) { 2806 /* if vlan is 0 then we want to leave the VF traffic 2807 * untagged, and leave the incoming traffic untouched 2808 * (i.e. do not remove any vlan tags). 2809 */ 2810 __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, 2811 &update_params->update_flags); 2812 __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 2813 &update_params->update_flags); 2814 } else { 2815 /* configure default vlan to vf queue and set silent 2816 * vlan removal (the vf remains unaware of this vlan). 2817 */ 2818 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, 2819 &update_params->update_flags); 2820 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 2821 &update_params->update_flags); 2822 update_params->def_vlan = vlan; 2823 update_params->silent_removal_value = 2824 vlan & VLAN_VID_MASK; 2825 update_params->silent_removal_mask = VLAN_VID_MASK; 2826 } 2827 2828 /* Update the Queue state */ 2829 rc = bnx2x_queue_state_change(bp, &q_params); 2830 if (rc) { 2831 BNX2X_ERR("Failed to configure default VLAN\n"); 2832 goto out; 2833 } 2834 2835 2836 /* clear the flag indicating that this VF needs its vlan 2837 * (will only be set if the HV configured the Vlan before vf was 2838 * up and we were called because the VF came up later 2839 */ 2840 out: 2841 vf->cfg_flags &= ~VF_CFG_VLAN; 2842 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 2843 2844 return rc; 2845 } 2846 2847 /* crc is the first field in the bulletin board. Compute the crc over the 2848 * entire bulletin board excluding the crc field itself. Use the length field 2849 * as the Bulletin Board was posted by a PF with possibly a different version 2850 * from the vf which will sample it. Therefore, the length is computed by the 2851 * PF and the used blindly by the VF. 2852 */ 2853 u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, 2854 struct pf_vf_bulletin_content *bulletin) 2855 { 2856 return crc32(BULLETIN_CRC_SEED, 2857 ((u8 *)bulletin) + sizeof(bulletin->crc), 2858 bulletin->length - sizeof(bulletin->crc)); 2859 } 2860 2861 /* Check for new posts on the bulletin board */ 2862 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) 2863 { 2864 struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content; 2865 int attempts; 2866 2867 /* bulletin board hasn't changed since last sample */ 2868 if (bp->old_bulletin.version == bulletin.version) 2869 return PFVF_BULLETIN_UNCHANGED; 2870 2871 /* validate crc of new bulletin board */ 2872 if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) { 2873 /* sampling structure in mid post may result with corrupted data 2874 * validate crc to ensure coherency. 2875 */ 2876 for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) { 2877 bulletin = bp->pf2vf_bulletin->content; 2878 if (bulletin.crc == bnx2x_crc_vf_bulletin(bp, 2879 &bulletin)) 2880 break; 2881 BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n", 2882 bulletin.crc, 2883 bnx2x_crc_vf_bulletin(bp, &bulletin)); 2884 } 2885 if (attempts >= BULLETIN_ATTEMPTS) { 2886 BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n", 2887 attempts); 2888 return PFVF_BULLETIN_CRC_ERR; 2889 } 2890 } 2891 2892 /* the mac address in bulletin board is valid and is new */ 2893 if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID && 2894 !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) { 2895 /* update new mac to net device */ 2896 memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN); 2897 } 2898 2899 /* the vlan in bulletin board is valid and is new */ 2900 if (bulletin.valid_bitmap & 1 << VLAN_VALID) 2901 memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN); 2902 2903 /* copy new bulletin board to bp */ 2904 bp->old_bulletin = bulletin; 2905 2906 return PFVF_BULLETIN_UPDATED; 2907 } 2908 2909 void bnx2x_timer_sriov(struct bnx2x *bp) 2910 { 2911 bnx2x_sample_bulletin(bp); 2912 2913 /* if channel is down we need to self destruct */ 2914 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) 2915 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, 2916 BNX2X_MSG_IOV); 2917 } 2918 2919 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) 2920 { 2921 /* vf doorbells are embedded within the regview */ 2922 return bp->regview + PXP_VF_ADDR_DB_START; 2923 } 2924 2925 void bnx2x_vf_pci_dealloc(struct bnx2x *bp) 2926 { 2927 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, 2928 sizeof(struct bnx2x_vf_mbx_msg)); 2929 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, 2930 sizeof(union pf_vf_bulletin)); 2931 } 2932 2933 int bnx2x_vf_pci_alloc(struct bnx2x *bp) 2934 { 2935 mutex_init(&bp->vf2pf_mutex); 2936 2937 /* allocate vf2pf mailbox for vf to pf channel */ 2938 bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping, 2939 sizeof(struct bnx2x_vf_mbx_msg)); 2940 if (!bp->vf2pf_mbox) 2941 goto alloc_mem_err; 2942 2943 /* allocate pf 2 vf bulletin board */ 2944 bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping, 2945 sizeof(union pf_vf_bulletin)); 2946 if (!bp->pf2vf_bulletin) 2947 goto alloc_mem_err; 2948 2949 return 0; 2950 2951 alloc_mem_err: 2952 bnx2x_vf_pci_dealloc(bp); 2953 return -ENOMEM; 2954 } 2955 2956 void bnx2x_iov_channel_down(struct bnx2x *bp) 2957 { 2958 int vf_idx; 2959 struct pf_vf_bulletin_content *bulletin; 2960 2961 if (!IS_SRIOV(bp)) 2962 return; 2963 2964 for_each_vf(bp, vf_idx) { 2965 /* locate this VFs bulletin board and update the channel down 2966 * bit 2967 */ 2968 bulletin = BP_VF_BULLETIN(bp, vf_idx); 2969 bulletin->valid_bitmap |= 1 << CHANNEL_DOWN; 2970 2971 /* update vf bulletin board */ 2972 bnx2x_post_vf_bulletin(bp, vf_idx); 2973 } 2974 } 2975 2976 void bnx2x_iov_task(struct work_struct *work) 2977 { 2978 struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work); 2979 2980 if (!netif_running(bp->dev)) 2981 return; 2982 2983 if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR, 2984 &bp->iov_task_state)) 2985 bnx2x_vf_handle_flr_event(bp); 2986 2987 if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG, 2988 &bp->iov_task_state)) 2989 bnx2x_vf_mbx(bp); 2990 } 2991 2992 void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) 2993 { 2994 smp_mb__before_clear_bit(); 2995 set_bit(flag, &bp->iov_task_state); 2996 smp_mb__after_clear_bit(); 2997 DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); 2998 queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0); 2999 } 3000