/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Shmulik Ravid
 *	       Ariel Elior <ariel.elior@qlogic.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>

static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
			    struct bnx2x_virtf **vf,
			    struct pf_vf_bulletin_content **bulletin,
			    bool test_queue);

/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}

static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
				u8 update)
{
	/* acking a VF sb through the PF - use the GRC */
	u32 ctl;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT		|
	      func_encode << IGU_CTRL_REG_FID_SHIFT		|
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
	mmiowb();
	barrier();

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();
}

static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      bool print_err)
{
	if (!bnx2x_leading_vfq(vf, sp_initialized)) {
		if (print_err)
			BNX2X_ERR("Slowpath objects not yet initialized!\n");
		else
			DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
		return false;
	}
	return true;
}

/* VFOP operations states */
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	DP(BNX2X_MSG_IOV,
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->tx.sb_cq_index,
	   init_params->tx.hc_rate,
	   setup_params->flags,
	   setup_params->txq_params.traffic_type);
}

void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->rx.sb_cq_index,
	   init_params->rx.hc_rate,
	   setup_params->gen_params.mtu,
	   rxq_params->buf_sz,
	   rxq_params->sge_buf_sz,
	   rxq_params->max_sges_pkt,
	   rxq_params->tpa_agg_sz,
	   setup_params->flags,
	   rxq_params->drop_flags,
	   rxq_params->cache_line_log);
}

void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vf_queue_construct_params *p,
			   unsigned long q_type)
{
	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

	/* INIT */

	/* Enable host coalescing in the transition to INIT state */
	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

	/* FW SB ID */
	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

	/* context */
	init_p->cxts[0] = q->cxt;

	/* SETUP */

	/* Setup-op general parameters */
	setup_p->gen_params.spcl_id = vf->sp_cl_id;
	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);

	/* Setup-op pause params:
	 * Nothing to do, the pause thresholds are set by default to 0 which
	 * effectively turns off the feature for this queue. We don't want
	 * one queue (VF) to interfere with another queue (another VF)
	 */
	if (vf->cfg_flags & VF_CFG_FW_FC)
		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
			  vf->abs_vfid);
	/* Setup-op flags:
	 * collect statistics, zero statistics, local-switching, security,
	 * OV for Flex10, RSS and MCAST for leading
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

	/* for VFs, enable tx switching, bd coherency, and mac address
	 * anti-spoofing
	 */
	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

	/* Setup-op rx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
	}

	/* Setup-op tx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	}
}

static int bnx2x_vf_queue_create(struct bnx2x *bp,
				 struct bnx2x_virtf *vf, int qid,
				 struct bnx2x_vf_queue_construct_params *qctor)
{
	struct bnx2x_queue_state_params *q_params;
	int rc = 0;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* Prepare ramrod information */
	q_params = &qctor->qstate;
	q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
	set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);

	if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
	    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
		goto out;
	}

	/* Run Queue 'construction' ramrods */
	q_params->cmd = BNX2X_Q_CMD_INIT;
	rc = bnx2x_queue_state_change(bp, q_params);
	if (rc)
		goto out;

	memcpy(&q_params->params.setup, &qctor->prep_qsetup,
	       sizeof(struct bnx2x_queue_setup_params));
	q_params->cmd = BNX2X_Q_CMD_SETUP;
	rc = bnx2x_queue_state_change(bp, q_params);
	if (rc)
		goto out;

	/* enable interrupts */
	bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
			    USTORM_ID, 0, IGU_INT_ENABLE, 0);
out:
	return rc;
}

static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  int qid)
{
	enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
				       BNX2X_Q_CMD_TERMINATE,
				       BNX2X_Q_CMD_CFC_DEL};
	struct bnx2x_queue_state_params q_params;
	int rc, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Prepare ramrod information */
	memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
	q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
	set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
	    BNX2X_Q_LOGICAL_STATE_STOPPED) {
		DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
		goto out;
	}

	/* Run Queue 'destruction' ramrods */
	for (i = 0; i < ARRAY_SIZE(cmds); i++) {
		q_params.cmd = cmds[i];
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
			return rc;
		}
	}
out:
	/* Clean Context */
	if (bnx2x_vfq(vf, qid, cxt)) {
		bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
		bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
	}

	return 0;
}

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
	if (vf) {
		/* the first igu entry belonging to VFs of this PF */
		if (!BP_VFDB(bp)->first_vf_igu_entry)
			BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;

		/* the first igu entry belonging to this VF */
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;

		++vf_sb_count(vf);
		++vf->sb_count;
	}
	BP_VFDB(bp)->vf_sbs_pool++;
}

static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
					struct bnx2x_vlan_mac_obj *obj,
					atomic_t *counter)
{
	struct list_head *pos;
	int read_lock;
	int cnt = 0;

	read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
	if (read_lock)
		DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");

	list_for_each(pos, &obj->head)
		cnt++;

	if (!read_lock)
		bnx2x_vlan_mac_h_read_unlock(bp, obj);

	atomic_set(counter, cnt);
}

static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
				   int qid, bool drv_only, bool mac)
{
	struct bnx2x_vlan_mac_ramrod_params ramrod;
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
	   mac ? "MACs" : "VLANs");
"MACs" : "VLANs"); 367 368 /* Prepare ramrod params */ 369 memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); 370 if (mac) { 371 set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); 372 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); 373 } else { 374 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 375 &ramrod.user_req.vlan_mac_flags); 376 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); 377 } 378 ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL; 379 380 set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); 381 if (drv_only) 382 set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); 383 else 384 set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); 385 386 /* Start deleting */ 387 rc = ramrod.vlan_mac_obj->delete_all(bp, 388 ramrod.vlan_mac_obj, 389 &ramrod.user_req.vlan_mac_flags, 390 &ramrod.ramrod_flags); 391 if (rc) { 392 BNX2X_ERR("Failed to delete all %s\n", 393 mac ? "MACs" : "VLANs"); 394 return rc; 395 } 396 397 /* Clear the vlan counters */ 398 if (!mac) 399 atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0); 400 401 return 0; 402 } 403 404 static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, 405 struct bnx2x_virtf *vf, int qid, 406 struct bnx2x_vf_mac_vlan_filter *filter, 407 bool drv_only) 408 { 409 struct bnx2x_vlan_mac_ramrod_params ramrod; 410 int rc; 411 412 DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n", 413 vf->abs_vfid, filter->add ? "Adding" : "Deleting", 414 filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN"); 415 416 /* Prepare ramrod params */ 417 memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); 418 if (filter->type == BNX2X_VF_FILTER_VLAN) { 419 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 420 &ramrod.user_req.vlan_mac_flags); 421 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); 422 ramrod.user_req.u.vlan.vlan = filter->vid; 423 } else { 424 set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); 425 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); 426 memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN); 427 } 428 ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD : 429 BNX2X_VLAN_MAC_DEL; 430 431 /* Verify there are available vlan credits */ 432 if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN && 433 (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >= 434 vf_vlan_rules_cnt(vf))) { 435 BNX2X_ERR("No credits for vlan [%d >= %d]\n", 436 atomic_read(&bnx2x_vfq(vf, qid, vlan_count)), 437 vf_vlan_rules_cnt(vf)); 438 return -ENOMEM; 439 } 440 441 set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); 442 if (drv_only) 443 set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); 444 else 445 set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); 446 447 /* Add/Remove the filter */ 448 rc = bnx2x_config_vlan_mac(bp, &ramrod); 449 if (rc && rc != -EEXIST) { 450 BNX2X_ERR("Failed to %s %s\n", 451 filter->add ? "add" : "delete", 452 filter->type == BNX2X_VF_FILTER_MAC ? 
"MAC" : 453 "VLAN"); 454 return rc; 455 } 456 457 /* Update the vlan counters */ 458 if (filter->type == BNX2X_VF_FILTER_VLAN) 459 bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj, 460 &bnx2x_vfq(vf, qid, vlan_count)); 461 462 return 0; 463 } 464 465 int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, 466 struct bnx2x_vf_mac_vlan_filters *filters, 467 int qid, bool drv_only) 468 { 469 int rc = 0, i; 470 471 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 472 473 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) 474 return -EINVAL; 475 476 /* Prepare ramrod params */ 477 for (i = 0; i < filters->count; i++) { 478 rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, 479 &filters->filters[i], drv_only); 480 if (rc) 481 break; 482 } 483 484 /* Rollback if needed */ 485 if (i != filters->count) { 486 BNX2X_ERR("Managed only %d/%d filters - rolling back\n", 487 i, filters->count + 1); 488 while (--i >= 0) { 489 filters->filters[i].add = !filters->filters[i].add; 490 bnx2x_vf_mac_vlan_config(bp, vf, qid, 491 &filters->filters[i], 492 drv_only); 493 } 494 } 495 496 /* It's our responsibility to free the filters */ 497 kfree(filters); 498 499 return rc; 500 } 501 502 int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, 503 struct bnx2x_vf_queue_construct_params *qctor) 504 { 505 int rc; 506 507 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); 508 509 rc = bnx2x_vf_queue_create(bp, vf, qid, qctor); 510 if (rc) 511 goto op_err; 512 513 /* Configure vlan0 for leading queue */ 514 if (!qid) { 515 struct bnx2x_vf_mac_vlan_filter filter; 516 517 memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter)); 518 filter.type = BNX2X_VF_FILTER_VLAN; 519 filter.add = true; 520 filter.vid = 0; 521 rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false); 522 if (rc) 523 goto op_err; 524 } 525 526 /* Schedule the configuration of any pending vlan filters */ 527 vf->cfg_flags |= VF_CFG_VLAN; 528 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN, 529 BNX2X_MSG_IOV); 530 return 0; 531 op_err: 532 BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); 533 return rc; 534 } 535 536 static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf, 537 int qid) 538 { 539 int rc; 540 541 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); 542 543 /* If needed, clean the filtering data base */ 544 if ((qid == LEADING_IDX) && 545 bnx2x_validate_vf_sp_objs(bp, vf, false)) { 546 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false); 547 if (rc) 548 goto op_err; 549 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true); 550 if (rc) 551 goto op_err; 552 } 553 554 /* Terminate queue */ 555 if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) { 556 struct bnx2x_queue_state_params qstate; 557 558 memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); 559 qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); 560 qstate.q_obj->state = BNX2X_Q_STATE_STOPPED; 561 qstate.cmd = BNX2X_Q_CMD_TERMINATE; 562 set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); 563 rc = bnx2x_queue_state_change(bp, &qstate); 564 if (rc) 565 goto op_err; 566 } 567 568 return 0; 569 op_err: 570 BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); 571 return rc; 572 } 573 574 int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, 575 bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only) 576 { 577 struct bnx2x_mcast_list_elem *mc = NULL; 578 struct bnx2x_mcast_ramrod_params mcast; 579 int rc, i; 580 581 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 582 583 /* Prepare Multicast command */ 584 

int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
			 struct bnx2x_vf_queue_construct_params *qctor)
{
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
	if (rc)
		goto op_err;

	/* Configure vlan0 for leading queue */
	if (!qid) {
		struct bnx2x_vf_mac_vlan_filter filter;

		memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter));
		filter.type = BNX2X_VF_FILTER_VLAN;
		filter.add = true;
		filter.vid = 0;
		rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false);
		if (rc)
			goto op_err;
	}

	/* Schedule the configuration of any pending vlan filters */
	vf->cfg_flags |= VF_CFG_VLAN;
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
			       BNX2X_MSG_IOV);
	return 0;
op_err:
	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
	return rc;
}

static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      int qid)
{
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* If needed, clean the filtering data base */
	if ((qid == LEADING_IDX) &&
	    bnx2x_validate_vf_sp_objs(bp, vf, false)) {
		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false);
		if (rc)
			goto op_err;
		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true);
		if (rc)
			goto op_err;
	}

	/* Terminate queue */
	if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
		struct bnx2x_queue_state_params qstate;

		memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
		qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
		qstate.q_obj->state = BNX2X_Q_STATE_STOPPED;
		qstate.cmd = BNX2X_Q_CMD_TERMINATE;
		set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
		rc = bnx2x_queue_state_change(bp, &qstate);
		if (rc)
			goto op_err;
	}

	return 0;
op_err:
	BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
	return rc;
}

int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
		   bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
{
	struct bnx2x_mcast_list_elem *mc = NULL;
	struct bnx2x_mcast_ramrod_params mcast;
	int rc, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Prepare Multicast command */
	memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
	mcast.mcast_obj = &vf->mcast_obj;
	if (drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
	else
		set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
	if (mc_num) {
		mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem),
			     GFP_KERNEL);
		if (!mc) {
			BNX2X_ERR("Cannot configure multicasts due to lack of memory\n");
			return -ENOMEM;
		}
	}

	/* clear existing mcasts */
	mcast.mcast_list_len = vf->mcast_list_len;
	vf->mcast_list_len = mc_num;
	rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
	if (rc) {
		BNX2X_ERR("Failed to remove multicasts\n");
		kfree(mc);
		return rc;
	}

	/* update mcast list on the ramrod params */
	if (mc_num) {
		INIT_LIST_HEAD(&mcast.mcast_list);
		for (i = 0; i < mc_num; i++) {
			mc[i].mac = mcasts[i];
			list_add_tail(&mc[i].link,
				      &mcast.mcast_list);
		}

		/* add new mcasts */
		mcast.mcast_list_len = mc_num;
		rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
		if (rc)
			BNX2X_ERR("Failed to add multicasts\n");
		kfree(mc);
	}

	return rc;
}

static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
				  struct bnx2x_rx_mode_ramrod_params *ramrod,
				  struct bnx2x_virtf *vf,
				  unsigned long accept_flags)
{
	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);

	memset(ramrod, 0, sizeof(*ramrod));
	ramrod->cid = vfq->cid;
	ramrod->cl_id = vfq_cl_id(vf, vfq);
	ramrod->rx_mode_obj = &bp->rx_mode_obj;
	ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
	ramrod->rx_accept_flags = accept_flags;
	ramrod->tx_accept_flags = accept_flags;
	ramrod->pstate = &vf->filter_state;
	ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	set_bit(RAMROD_RX, &ramrod->ramrod_flags);
	set_bit(RAMROD_TX, &ramrod->ramrod_flags);

	ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
}

int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
		    int qid, unsigned long accept_flags)
{
	struct bnx2x_rx_mode_ramrod_params ramrod;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
	set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
	vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
	return bnx2x_config_rx_mode(bp, &ramrod);
}

int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
{
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* Remove all classification configuration for leading queue */
	if (qid == LEADING_IDX) {
		rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
		if (rc)
			goto op_err;

		/* Remove filtering if feasible */
		if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
						     false, false);
			if (rc)
				goto op_err;
			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
						     false, true);
			if (rc)
				goto op_err;
			rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
			if (rc)
				goto op_err;
		}
	}

	/* Destroy queue */
	rc = bnx2x_vf_queue_destroy(bp, vf, qid);
	if (rc)
		goto op_err;
	return rc;
op_err:
	BNX2X_ERR("vf[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, rc);
	return rc;
}

/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}
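
/* Worked example (illustrative only): with BP_PATH(bp) == 0 and
 * abs_vfid == 40, was_err_group = (2 * 0 + 40) >> 5 = 1, so the write
 * above lands in PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR, and the bit cleared
 * is 40 & 0x1f = 8, i.e. VF 40's "was error" latch.
 */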

static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;
	u32 val;

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	if (vf->cfg_flags & VF_CFG_INT_SIMD)
		val |= IGU_VF_CONF_SINGLE_ISR_EN;
	val &= ~IGU_VF_CONF_PARENT_MASK;
	val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	DP(BNX2X_MSG_IOV,
	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
	   vf->abs_vfid, val);

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
				       false /* VF */);

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
				    IGU_INT_DISABLE, 1);
	}
}

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		return false;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}

static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp,
					  struct bnx2x_virtf *vf,
					  int new)
{
	int num = vf_vlan_rules_cnt(vf);
	int diff = new - num;
	bool rc = true;

	DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n",
	   vf->abs_vfid, new, num);

	if (diff > 0)
		rc = bp->vlans_pool.get(&bp->vlans_pool, diff);
	else if (diff < 0)
		rc = bp->vlans_pool.put(&bp->vlans_pool, -diff);

	if (rc)
		vf_vlan_rules_cnt(vf) = new;
	else
		DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n",
		   vf->abs_vfid);
}

/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct vf_pf_resc_request *resc = &vf->alloc_resc;
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;

	/* divvy up vlan rules */
	bnx2x_iov_re_set_vlan_filters(bp, vf, 0);
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	bnx2x_iov_re_set_vlan_filters(bp, vf,
				      vlan_count / BNX2X_NR_VIRTFN(bp));

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
	resc->num_sbs = vf->sb_count;
}
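
/* Illustrative example (not part of the driver): if the vlan credit pool
 * reports 48 free credits and 8 VFs are configured, the code above first
 * rounds the pool down to a power of two (1 << ilog2(48) == 32) and then
 * grants each VF 32 / 8 == 4 vlan filter credits via
 * bnx2x_iov_re_set_vlan_filters().
 */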

/* FLR routines: */
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* reset the state variables */
	bnx2x_iov_static_resc(bp, vf);
	vf->state = VF_FREE;
}

static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	/* DQ usage counter */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
					"DQ VF usage counter timed out",
					poll_cnt);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* FW cleanup command - poll for the results */
	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
				   poll_cnt))
		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

	/* verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);
}

static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* the cleanup operations are valid if and only if the VF
	 * was first acquired.
	 */
	for (i = 0; i < vf_rxq_count(vf); i++) {
		rc = bnx2x_vf_queue_flr(bp, vf, i);
		if (rc)
			goto out;
	}

	/* remove multicasts */
	bnx2x_vf_mcast(bp, vf, NULL, 0, true);

	/* dispatch final cleanup and wait for HW queues to flush */
	bnx2x_vf_flr_clnup_hw(bp, vf);

	/* release VF resources */
	bnx2x_vf_free_resc(bp, vf);

	/* re-open the mailbox */
	bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	return;
out:
	BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
		  vf->abs_vfid, i, rc);
}

static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
{
	struct bnx2x_virtf *vf;
	int i;

	for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
		/* VF should be RESET & in FLR cleanup states */
		if (bnx2x_vf(bp, i, state) != VF_RESET ||
		    !bnx2x_vf(bp, i, flr_clnup_stage))
			continue;

		DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
		   i, BNX2X_NR_VIRTFN(bp));

		vf = BP_VF(bp, i);

		/* lock the vf pf channel */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

		/* invoke the VF FLR SM */
		bnx2x_vf_flr(bp, vf);

		/* mark the VF to be ACKED and continue */
		vf->flr_clnup_stage = false;
		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
	}

	/* Acknowledge the handled VFs.
	 * We acknowledge all the VFs for which an FLR was requested, even
	 * those that we never opened, since the MCP will interrupt us
	 * immediately again if we only ack some of the bits, resulting in
	 * an endless loop. This can happen for example in KVM where an
	 * 'all ones' flr request is sometimes given by the hypervisor
	 */
	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
			  bp->vfdb->flrd_vfs[i]);

	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

	/* clear the acked bits - better yet if the MCP implemented
	 * write to clear semantics
	 */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}

void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
	int i;

	/* Read FLR'd VFs */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

	DP(BNX2X_MSG_MCP,
	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);
		u32 reset = 0;

		if (vf->abs_vfid < 32)
			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
		else
			reset = bp->vfdb->flrd_vfs[1] &
				(1 << (vf->abs_vfid - 32));

		if (reset) {
			/* set as reset and ready for cleanup */
			vf->state = VF_RESET;
			vf->flr_clnup_stage = true;

			DP(BNX2X_MSG_IOV,
			   "Initiating Final cleanup for VF %d\n",
			   vf->abs_vfid);
		}
	}

	/* do the FLR cleanup for all marked VFs */
	bnx2x_vf_flr_clnup(bp);
}

/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflect the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set VFs starting CID. If it is > 0, the preceding CIDs belong to
	 * the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size  0 - *B, 4 - 128B. We set it here to match
	 * the Pf doorbell size although the 2 are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the VF doorbell threshold. This threshold represents the amount
	 * of doorbells allowed in the main DORQ fifo for a specific VF.
	 */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64);
}
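
/* Illustrative note (an interpretation, not authoritative): the programming
 * above places all VF doorbell CIDs at or above BNX2X_FIRST_VF_CID and
 * gives each VF a contiguous window of (1 << BNX2X_VF_CID_WND) CIDs, one
 * per queue.  This is the layout that bnx2x_iov_eq_sp_event() relies on
 * further down when it decodes a cid back into (abs_vfid, queue index).
 */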

void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}

static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}

static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		size /= iov->total;
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}

static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}

static int
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid, current_pf = 0;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (fid & IGU_FID_ENCODE_IS_PF)
			current_pf = fid & IGU_FID_PF_NUM_MASK;
		else if (current_pf == BP_FUNC(bp))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));
		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		    (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
	DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
	return BP_VFDB(bp)->vf_sbs_pool;
}

static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}

static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}
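
/* Worked example (illustrative only): the GRC field above holds the first
 * VF number in units of 8.  If it reads 9 for a PF on path 1, then
 * first_vf_in_pf = 9 * 8 - BNX2X_MAX_NUM_OF_VFS * 1 = 72 - 64 = 8
 * (using the per-path maximum of 64 VFs noted elsewhere in this file),
 * i.e. the PF's VFs end up numbered relative to their own path.
 */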

/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify is pf */
	if (IS_VF(bp))
		return 0;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
		return 0;
	}

	err = -EIO;
	/* verify ari is enabled */
	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
		return 0;
	}

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
		return 0;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hypervisor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs */
	if (iov->total == 0)
		goto failed;

	iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);

	DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
	   num_vfs_param, iov->nr_virtfn);

	/* allocate the vf array */
	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	if (!bnx2x_get_vf_igu_cam_info(bp)) {
		BNX2X_ERR("No entries in IGU CAM for vfs\n");
		err = -EINVAL;
		goto failed;
	}

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kzalloc(
		BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
		GFP_KERNEL);

	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Prepare the VFs event synchronization mechanism */
	mutex_init(&bp->vfdb->event_mutex);

	mutex_init(&bp->vfdb->bulletin_mutex);

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}

void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	int vf_idx;

	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	bnx2x_disable_sriov(bp);

	/* disable access to all VFs */
	for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
		bnx2x_pretend_func(bp,
				   HW_VF_HANDLE(bp,
						bp->vfdb->sriov.first_vf_in_pf +
						vf_idx));
		DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
		   bp->vfdb->sriov.first_vf_in_pf + vf_idx);
		bnx2x_vf_enable_internal(bp, 0);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
	}

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}

void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];
		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);

	BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
		       BP_VF_BULLETIN_DMA(bp)->mapping,
		       BP_VF_BULLETIN_DMA(bp)->size);
}

int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	size_t tot_size;
	int i, rc = 0;

	if (!IS_SRIOV(bp))
		return rc;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

		if (cxt->size) {
			cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
			if (!cxt->addr)
				goto alloc_mem_err;
		} else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
						   tot_size);
	if (!BP_VFDB(bp)->sp_dma.addr)
		goto alloc_mem_err;
	BP_VFDB(bp)->sp_dma.size = tot_size;

	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
						  tot_size);
	if (!BP_VF_MBX_DMA(bp)->addr)
		goto alloc_mem_err;

	BP_VF_MBX_DMA(bp)->size = tot_size;

	/* allocate local bulletin boards */
	tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
	BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
						       tot_size);
	if (!BP_VF_BULLETIN_DMA(bp)->addr)
		goto alloc_mem_err;

	BP_VF_BULLETIN_DMA(bp)->size = tot_size;

	return 0;

alloc_mem_err:
	return -ENOMEM;
}

static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
	unsigned long q_type = 0;

	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

	/* Queue State object */
	bnx2x_init_queue_obj(bp, &q->sp_obj,
			     cl_id, &q->cid, 1, func_id,
			     bnx2x_vf_sp(bp, vf, q_data),
			     bnx2x_vf_sp_map(bp, vf, q_data),
			     q_type);

	/* sp indication is set only when vlan/mac/etc. are initialized */
	q->sp_initialized = false;

	DP(BNX2X_MSG_IOV,
	   "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
	   vf->abs_vfid, q->sp_obj.func_id, q->cid);
}

static int bnx2x_max_speed_cap(struct bnx2x *bp)
{
	u32 supported = bp->port.supported[bnx2x_get_link_cfg_idx(bp)];

	if (supported &
	    (SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full))
		return 20000;

	return 10000; /* assume lowest supported speed is 10G */
}

int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx)
{
	struct bnx2x_link_report_data *state = &bp->last_reported_link;
	struct pf_vf_bulletin_content *bulletin;
	struct bnx2x_virtf *vf;
	bool update = true;
	int rc = 0;

	/* sanity and init */
	rc = bnx2x_vf_op_prep(bp, idx, &vf, &bulletin, false);
	if (rc)
		return rc;

	mutex_lock(&bp->vfdb->bulletin_mutex);

	if (vf->link_cfg == IFLA_VF_LINK_STATE_AUTO) {
		bulletin->valid_bitmap |= 1 << LINK_VALID;

		bulletin->link_speed = state->line_speed;
		bulletin->link_flags = 0;
		if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			     &state->link_report_flags))
			bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
		if (test_bit(BNX2X_LINK_REPORT_FD,
			     &state->link_report_flags))
			bulletin->link_flags |= VFPF_LINK_REPORT_FULL_DUPLEX;
		if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
			     &state->link_report_flags))
			bulletin->link_flags |= VFPF_LINK_REPORT_RX_FC_ON;
		if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
			     &state->link_report_flags))
			bulletin->link_flags |= VFPF_LINK_REPORT_TX_FC_ON;
	} else if (vf->link_cfg == IFLA_VF_LINK_STATE_DISABLE &&
		   !(bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
		bulletin->valid_bitmap |= 1 << LINK_VALID;
		bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
	} else if (vf->link_cfg == IFLA_VF_LINK_STATE_ENABLE &&
		   (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
		bulletin->valid_bitmap |= 1 << LINK_VALID;
		bulletin->link_speed = bnx2x_max_speed_cap(bp);
		bulletin->link_flags &= ~VFPF_LINK_REPORT_LINK_DOWN;
	} else {
		update = false;
	}

	if (update) {
		DP(NETIF_MSG_LINK | BNX2X_MSG_IOV,
		   "vf %d mode %u speed %d flags %x\n", idx,
		   vf->link_cfg, bulletin->link_speed, bulletin->link_flags);

		/* Post update on VF's bulletin board */
		rc = bnx2x_post_vf_bulletin(bp, idx);
		if (rc) {
			BNX2X_ERR("failed to update VF[%d] bulletin\n", idx);
			goto out;
		}
	}

out:
	mutex_unlock(&bp->vfdb->bulletin_mutex);
	return rc;
}

int bnx2x_set_vf_link_state(struct net_device *dev, int idx, int link_state)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_virtf *vf = BP_VF(bp, idx);

	if (!vf)
		return -EINVAL;

	if (vf->link_cfg == link_state)
		return 0; /* nothing to do */

	vf->link_cfg = link_state;

	return bnx2x_iov_link_update_vf(bp, idx);
}
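
/* Usage note (illustrative, not driver code): bnx2x_set_vf_link_state() is
 * reached through the .ndo_set_vf_link_state callback, so an administrator
 * typically drives it from userspace with iproute2, e.g.
 *
 *	ip link set dev <pf-netdev> vf 0 state auto
 *	ip link set dev <pf-netdev> vf 0 state disable
 *
 * which maps to IFLA_VF_LINK_STATE_AUTO / _DISABLE above; the new state is
 * then propagated to the VF through its bulletin board.
 */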

void bnx2x_iov_link_update(struct bnx2x *bp)
{
	int vfid;

	if (!IS_SRIOV(bp))
		return;

	for_each_vf(bp, vfid)
		bnx2x_iov_link_update_vf(bp, vfid);
}

/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
	int vfid;

	if (!IS_SRIOV(bp)) {
		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
		return 0;
	}

	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);

	/* let FLR complete ... */
	msleep(100);

	/* initialize vf database */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
			BNX2X_CIDS_PER_VF;

		union cdu_context *base_cxt = (union cdu_context *)
			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
			(base_vf_cid & (ILT_PAGE_CIDS-1));

		DP(BNX2X_MSG_IOV,
		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);

		/* init statically provisioned resources */
		bnx2x_iov_static_resc(bp, vf);

		/* queues are initialized during VF-ACQUIRE */
		vf->filter_state = 0;
		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);

		/* init mcast object - This object will be re-initialized
		 * during VF-ACQUIRE with the proper cl_id and cid.
		 * It needs to be initialized here so that it can be safely
		 * handled by a subsequent FLR flow.
		 */
		vf->mcast_list_len = 0;
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
				     0xFF, 0xFF, 0xFF,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		/* set the mailbox message addresses */
		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
			 MBX_MSG_ALIGNED_SIZE);

		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
			vfid * MBX_MSG_ALIGNED_SIZE;

		/* Enable vf mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	}

	/* Final VF init */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		/* fill in the BDF and bars */
		vf->bus = bnx2x_vf_bus(bp, vfid);
		vf->devfn = bnx2x_vf_devfn(bp, vfid);
		bnx2x_vf_set_bars(bp, vf);

		DP(BNX2X_MSG_IOV,
		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
		   vf->abs_vfid, vf->bus, vf->devfn,
		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
	}

	return 0;
}

/* called by bnx2x_chip_cleanup */
int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return 0;

	/* release all the VFs */
	for_each_vf(bp, i)
		bnx2x_vf_release(bp, BP_VF(bp, i));

	return 0;
}

/* called by bnx2x_init_hw_func, returns the next ilt line */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
{
	int i;
	struct bnx2x_ilt *ilt = BP_ILT(bp);

	if (!IS_SRIOV(bp))
		return line;

	/* set vfs ilt lines */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);

		ilt->lines[line+i].page = hw_cxt->addr;
		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
	}
	return line + i;
}

static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
{
	return ((cid >= BNX2X_FIRST_VF_CID) &&
		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
}

static
void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
					struct bnx2x_vf_queue *vfq,
					union event_ring_elem *elem)
{
	unsigned long ramrod_flags = 0;
	int rc = 0;

	/* Always push next commands out, don't wait here */
	set_bit(RAMROD_CONT, &ramrod_flags);

	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
	case BNX2X_FILTER_MAC_PENDING:
		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
					   &ramrod_flags);
		break;
	case BNX2X_FILTER_VLAN_PENDING:
		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
					    &ramrod_flags);
		break;
	default:
		BNX2X_ERR("Unsupported classification command: %d\n",
			  elem->message.data.eth_event.echo);
		return;
	}
	if (rc < 0)
		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
	else if (rc > 0)
		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
}

static
void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
			       struct bnx2x_virtf *vf)
{
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc;

	rparam.mcast_obj = &vf->mcast_obj;
	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
		if (rc < 0)
			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
				  rc);
	}
}

static
void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
				 struct bnx2x_virtf *vf)
{
	smp_mb__before_atomic();
	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	smp_mb__after_atomic();
}

static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
					   struct bnx2x_virtf *vf)
{
	vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);
}

int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
{
	struct bnx2x_virtf *vf;
	int qidx = 0, abs_vfid;
	u8 opcode;
	u16 cid = 0xffff;

	if (!IS_SRIOV(bp))
		return 1;

	/* first get the cid - the only events we handle here are cfc-delete
	 * and set-mac completion
	 */
	opcode = elem->message.opcode;

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		cid = SW_CID((__force __le32)
			     elem->message.data.cfc_del_event.cid);
		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
	case EVENT_RING_OPCODE_MULTICAST_RULES:
	case EVENT_RING_OPCODE_FILTERS_RULES:
	case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
		cid = (elem->message.data.eth_event.echo &
		       BNX2X_SWCID_MASK);
		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		abs_vfid = elem->message.data.vf_flr_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
		BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
			  abs_vfid,
			  elem->message.data.malicious_vf_event.err_id);
		goto get_vf;
	default:
		return 1;
	}

	/* check if the cid is the VF range */
	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
		return 1;
	}

	/* extract vf and rxq index from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. The max number of VFs (per path) is 64
	 */
	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
get_vf:
	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf) {
		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
			  cid, abs_vfid);
		return 0;
	}

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
		   vf->abs_vfid, qidx);
		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
						       &vfq_get(vf,
								qidx)->sp_obj,
						       BNX2X_Q_CMD_CFC_DEL);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
		break;
	case EVENT_RING_OPCODE_MULTICAST_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_mcast_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_FILTERS_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_filters_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_rss_update_eqe(bp, vf);
		/* fall through */
	case EVENT_RING_OPCODE_VF_FLR:
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		/* Do nothing for now */
		return 0;
	}

	return 0;
}
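
/* Illustrative note (an interpretation, not authoritative): the decode in
 * bnx2x_iov_eq_sp_event() above means a VF cid is laid out, within the VF
 * CID range, as
 *
 *	cid bits [BNX2X_VF_CID_WND-1 : 0]  -> queue index within the VF
 *	next 6 bits                        -> abs_vfid (up to 64 VFs per path)
 *
 * e.g. with a window of 4 bits, cid 0x153 would decode to queue 3 of
 * abs_vfid 0x15.
 */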

static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
{
	/* extract the vf from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. The max number of VFs (per path) is 64
	 */
	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
}

void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
				struct bnx2x_queue_sp_obj **q_obj)
{
	struct bnx2x_virtf *vf;

	if (!IS_SRIOV(bp))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);

	if (vf) {
		/* extract queue index from vf_cid - relies on the following:
		 * 1. vfid on cid reflects the true abs_vfid
		 * 2. The max number of VFs (per path) is 64
		 */
		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
	} else {
		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
	}
}
Will add queries on top of that\n", 1930 BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, 1931 first_queue_query_index + num_queues_req); 1932 1933 cur_data_offset = bp->fw_stats_data_mapping + 1934 offsetof(struct bnx2x_fw_stats_data, queue_stats) + 1935 num_queues_req * sizeof(struct per_queue_stats); 1936 1937 cur_query_entry = &bp->fw_stats_req-> 1938 query[first_queue_query_index + num_queues_req]; 1939 1940 for_each_vf(bp, i) { 1941 int j; 1942 struct bnx2x_virtf *vf = BP_VF(bp, i); 1943 1944 if (vf->state != VF_ENABLED) { 1945 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), 1946 "vf %d not enabled so no stats for it\n", 1947 vf->abs_vfid); 1948 continue; 1949 } 1950 1951 DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid); 1952 for_each_vfq(vf, j) { 1953 struct bnx2x_vf_queue *rxq = vfq_get(vf, j); 1954 1955 dma_addr_t q_stats_addr = 1956 vf->fw_stat_map + j * vf->stats_stride; 1957 1958 /* collect stats fro active queues only */ 1959 if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) == 1960 BNX2X_Q_LOGICAL_STATE_STOPPED) 1961 continue; 1962 1963 /* create stats query entry for this queue */ 1964 cur_query_entry->kind = STATS_TYPE_QUEUE; 1965 cur_query_entry->index = vfq_stat_id(vf, rxq); 1966 cur_query_entry->funcID = 1967 cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid)); 1968 cur_query_entry->address.hi = 1969 cpu_to_le32(U64_HI(q_stats_addr)); 1970 cur_query_entry->address.lo = 1971 cpu_to_le32(U64_LO(q_stats_addr)); 1972 DP(BNX2X_MSG_IOV, 1973 "added address %x %x for vf %d queue %d client %d\n", 1974 cur_query_entry->address.hi, 1975 cur_query_entry->address.lo, cur_query_entry->funcID, 1976 j, cur_query_entry->index); 1977 cur_query_entry++; 1978 cur_data_offset += sizeof(struct per_queue_stats); 1979 stats_count++; 1980 1981 /* all stats are coalesced to the leading queue */ 1982 if (vf->cfg_flags & VF_CFG_STATS_COALESCE) 1983 break; 1984 } 1985 } 1986 bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; 1987 } 1988 1989 /* VF API helpers */ 1990 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid, 1991 u8 enable) 1992 { 1993 u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4; 1994 u32 val = enable ? (abs_vfid | (1 << 6)) : 0; 1995 1996 REG_WR(bp, reg, val); 1997 } 1998 1999 static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf) 2000 { 2001 int i; 2002 2003 for_each_vfq(vf, i) 2004 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, 2005 vfq_qzone_id(vf, vfq_get(vf, i)), false); 2006 } 2007 2008 static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf) 2009 { 2010 u32 val; 2011 2012 /* clear the VF configuration - pretend */ 2013 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); 2014 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); 2015 val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN | 2016 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK); 2017 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); 2018 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 2019 } 2020 2021 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf) 2022 { 2023 return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF), 2024 BNX2X_VF_MAX_QUEUES); 2025 } 2026 2027 static 2028 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf, 2029 struct vf_pf_resc_request *req_resc) 2030 { 2031 u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); 2032 u8 txq_cnt = vf_txq_count(vf) ? 
: bnx2x_vf_max_queue_cnt(bp, vf); 2033 2034 /* Save a vlan filter for the Hypervisor */ 2035 return ((req_resc->num_rxqs <= rxq_cnt) && 2036 (req_resc->num_txqs <= txq_cnt) && 2037 (req_resc->num_sbs <= vf_sb_count(vf)) && 2038 (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) && 2039 (req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf))); 2040 } 2041 2042 /* CORE VF API */ 2043 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, 2044 struct vf_pf_resc_request *resc) 2045 { 2046 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) * 2047 BNX2X_CIDS_PER_VF; 2048 2049 union cdu_context *base_cxt = (union cdu_context *) 2050 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + 2051 (base_vf_cid & (ILT_PAGE_CIDS-1)); 2052 int i; 2053 2054 /* if state is 'acquired' the VF was not released or FLR'd, in 2055 * this case the returned resources match the already 2056 * acquired resources. Verify that the requested numbers do 2057 * not exceed the already acquired numbers. 2058 */ 2059 if (vf->state == VF_ACQUIRED) { 2060 DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n", 2061 vf->abs_vfid); 2062 2063 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { 2064 BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n", 2065 vf->abs_vfid); 2066 return -EINVAL; 2067 } 2068 return 0; 2069 } 2070 2071 /* Otherwise vf state must be 'free' or 'reset' */ 2072 if (vf->state != VF_FREE && vf->state != VF_RESET) { 2073 BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n", 2074 vf->abs_vfid, vf->state); 2075 return -EINVAL; 2076 } 2077 2078 /* static allocation: 2079 * the global maximum numbers are fixed per VF. Fail the request if 2080 * the requested numbers exceed these globals 2081 */ 2082 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) { 2083 DP(BNX2X_MSG_IOV, 2084 "cannot fulfill vf resource request. Placing maximal available values in response\n"); 2085 /* set the max resource in the vf */ 2086 return -ENOMEM; 2087 } 2088 2089 /* Set resource counters - 0 request means max available */ 2090 vf_sb_count(vf) = resc->num_sbs; 2091 vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf); 2092 vf_txq_count(vf) = resc->num_txqs ?
: bnx2x_vf_max_queue_cnt(bp, vf); 2093 if (resc->num_mac_filters) 2094 vf_mac_rules_cnt(vf) = resc->num_mac_filters; 2095 /* Add an additional vlan filter credit for the hypervisor */ 2096 bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1); 2097 2098 DP(BNX2X_MSG_IOV, 2099 "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n", 2100 vf_sb_count(vf), vf_rxq_count(vf), 2101 vf_txq_count(vf), vf_mac_rules_cnt(vf), 2102 vf_vlan_rules_visible_cnt(vf)); 2103 2104 /* Initialize the queues */ 2105 if (!vf->vfqs) { 2106 DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n"); 2107 return -EINVAL; 2108 } 2109 2110 for_each_vfq(vf, i) { 2111 struct bnx2x_vf_queue *q = vfq_get(vf, i); 2112 2113 if (!q) { 2114 BNX2X_ERR("q number %d was not allocated\n", i); 2115 return -EINVAL; 2116 } 2117 2118 q->index = i; 2119 q->cxt = &((base_cxt + i)->eth); 2120 q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i; 2121 2122 DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n", 2123 vf->abs_vfid, i, q->index, q->cid, q->cxt); 2124 2125 /* init SP objects */ 2126 bnx2x_vfq_init(bp, vf, q); 2127 } 2128 vf->state = VF_ACQUIRED; 2129 return 0; 2130 } 2131 2132 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map) 2133 { 2134 struct bnx2x_func_init_params func_init = {0}; 2135 u16 flags = 0; 2136 int i; 2137 2138 /* the sb resources are initialized at this point, do the 2139 * FW/HW initializations 2140 */ 2141 for_each_vf_sb(vf, i) 2142 bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true, 2143 vf_igu_sb(vf, i), vf_igu_sb(vf, i)); 2144 2145 /* Sanity checks */ 2146 if (vf->state != VF_ACQUIRED) { 2147 DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n", 2148 vf->abs_vfid, vf->state); 2149 return -EINVAL; 2150 } 2151 2152 /* let FLR complete ... 
*/ 2153 msleep(100); 2154 2155 /* FLR cleanup epilogue */ 2156 if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid)) 2157 return -EBUSY; 2158 2159 /* reset IGU VF statistics: MSIX */ 2160 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0); 2161 2162 /* vf init */ 2163 if (vf->cfg_flags & VF_CFG_STATS) 2164 flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ); 2165 2166 if (vf->cfg_flags & VF_CFG_TPA) 2167 flags |= FUNC_FLG_TPA; 2168 2169 if (is_vf_multi(vf)) 2170 flags |= FUNC_FLG_RSS; 2171 2172 /* function setup */ 2173 func_init.func_flgs = flags; 2174 func_init.pf_id = BP_FUNC(bp); 2175 func_init.func_id = FW_VF_HANDLE(vf->abs_vfid); 2176 func_init.fw_stat_map = vf->fw_stat_map; 2177 func_init.spq_map = vf->spq_map; 2178 func_init.spq_prod = 0; 2179 bnx2x_func_init(bp, &func_init); 2180 2181 /* Enable the vf */ 2182 bnx2x_vf_enable_access(bp, vf->abs_vfid); 2183 bnx2x_vf_enable_traffic(bp, vf); 2184 2185 /* queue protection table */ 2186 for_each_vfq(vf, i) 2187 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid, 2188 vfq_qzone_id(vf, vfq_get(vf, i)), true); 2189 2190 vf->state = VF_ENABLED; 2191 2192 /* update vf bulletin board */ 2193 bnx2x_post_vf_bulletin(bp, vf->index); 2194 2195 return 0; 2196 } 2197 2198 struct set_vf_state_cookie { 2199 struct bnx2x_virtf *vf; 2200 u8 state; 2201 }; 2202 2203 static void bnx2x_set_vf_state(void *cookie) 2204 { 2205 struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie; 2206 2207 p->vf->state = p->state; 2208 } 2209 2210 int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf) 2211 { 2212 int rc = 0, i; 2213 2214 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 2215 2216 /* Close all queues */ 2217 for (i = 0; i < vf_rxq_count(vf); i++) { 2218 rc = bnx2x_vf_queue_teardown(bp, vf, i); 2219 if (rc) 2220 goto op_err; 2221 } 2222 2223 /* disable the interrupts */ 2224 DP(BNX2X_MSG_IOV, "disabling igu\n"); 2225 bnx2x_vf_igu_disable(bp, vf); 2226 2227 /* disable the VF */ 2228 DP(BNX2X_MSG_IOV, "clearing qtbl\n"); 2229 bnx2x_vf_clr_qtbl(bp, vf); 2230 2231 /* need to make sure there are no outstanding stats ramrods which may 2232 * cause the device to access the VF's stats buffer which it will free 2233 * as soon as we return from the close flow. 2234 */ 2235 { 2236 struct set_vf_state_cookie cookie; 2237 2238 cookie.vf = vf; 2239 cookie.state = VF_ACQUIRED; 2240 bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie); 2241 } 2242 2243 DP(BNX2X_MSG_IOV, "set state to acquired\n"); 2244 2245 return 0; 2246 op_err: 2247 BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc); 2248 return rc; 2249 } 2250 2251 /* VF release can be called either: 1. The VF was acquired but 2252 * not enabled 2. the vf was enabled or in the process of being 2253 * enabled 2254 */ 2255 int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf) 2256 { 2257 int rc; 2258 2259 DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid, 2260 vf->state == VF_FREE ? "Free" : 2261 vf->state == VF_ACQUIRED ? "Acquired" : 2262 vf->state == VF_ENABLED ? "Enabled" : 2263 vf->state == VF_RESET ? 
"Reset" : 2264 "Unknown"); 2265 2266 switch (vf->state) { 2267 case VF_ENABLED: 2268 rc = bnx2x_vf_close(bp, vf); 2269 if (rc) 2270 goto op_err; 2271 /* Fallthrough to release resources */ 2272 case VF_ACQUIRED: 2273 DP(BNX2X_MSG_IOV, "about to free resources\n"); 2274 bnx2x_vf_free_resc(bp, vf); 2275 break; 2276 2277 case VF_FREE: 2278 case VF_RESET: 2279 default: 2280 break; 2281 } 2282 return 0; 2283 op_err: 2284 BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc); 2285 return rc; 2286 } 2287 2288 int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, 2289 struct bnx2x_config_rss_params *rss) 2290 { 2291 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 2292 set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags); 2293 return bnx2x_config_rss(bp, rss); 2294 } 2295 2296 int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, 2297 struct vfpf_tpa_tlv *tlv, 2298 struct bnx2x_queue_update_tpa_params *params) 2299 { 2300 aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr; 2301 struct bnx2x_queue_state_params qstate; 2302 int qid, rc = 0; 2303 2304 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 2305 2306 /* Set ramrod params */ 2307 memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); 2308 memcpy(&qstate.params.update_tpa, params, 2309 sizeof(struct bnx2x_queue_update_tpa_params)); 2310 qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA; 2311 set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); 2312 2313 for (qid = 0; qid < vf_rxq_count(vf); qid++) { 2314 qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); 2315 qstate.params.update_tpa.sge_map = sge_addr[qid]; 2316 DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n", 2317 vf->abs_vfid, qid, U64_HI(sge_addr[qid]), 2318 U64_LO(sge_addr[qid])); 2319 rc = bnx2x_queue_state_change(bp, &qstate); 2320 if (rc) { 2321 BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n", 2322 U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]), 2323 vf->abs_vfid, qid); 2324 return rc; 2325 } 2326 } 2327 2328 return rc; 2329 } 2330 2331 /* VF release ~ VF close + VF release-resources 2332 * Release is the ultimate SW shutdown and is called whenever an 2333 * irrecoverable error is encountered. 2334 */ 2335 int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf) 2336 { 2337 int rc; 2338 2339 DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid); 2340 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); 2341 2342 rc = bnx2x_vf_free(bp, vf); 2343 if (rc) 2344 WARN(rc, 2345 "VF[%d] Failed to allocate resources for release op- rc=%d\n", 2346 vf->abs_vfid, rc); 2347 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); 2348 return rc; 2349 } 2350 2351 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 2352 enum channel_tlvs tlv) 2353 { 2354 /* we don't lock the channel for unsupported tlvs */ 2355 if (!bnx2x_tlv_supported(tlv)) { 2356 BNX2X_ERR("attempting to lock with unsupported tlv. 
Aborting\n"); 2357 return; 2358 } 2359 2360 /* lock the channel */ 2361 mutex_lock(&vf->op_mutex); 2362 2363 /* record the locking op */ 2364 vf->op_current = tlv; 2365 2366 /* log the lock */ 2367 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n", 2368 vf->abs_vfid, tlv); 2369 } 2370 2371 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 2372 enum channel_tlvs expected_tlv) 2373 { 2374 enum channel_tlvs current_tlv; 2375 2376 if (!vf) { 2377 BNX2X_ERR("VF was %p\n", vf); 2378 return; 2379 } 2380 2381 current_tlv = vf->op_current; 2382 2383 /* we don't unlock the channel for unsupported tlvs */ 2384 if (!bnx2x_tlv_supported(expected_tlv)) 2385 return; 2386 2387 WARN(expected_tlv != vf->op_current, 2388 "lock mismatch: expected %d found %d", expected_tlv, 2389 vf->op_current); 2390 2391 /* record the locking op */ 2392 vf->op_current = CHANNEL_TLV_NONE; 2393 2394 /* lock the channel */ 2395 mutex_unlock(&vf->op_mutex); 2396 2397 /* log the unlock */ 2398 DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n", 2399 vf->abs_vfid, current_tlv); 2400 } 2401 2402 static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable) 2403 { 2404 struct bnx2x_queue_state_params q_params; 2405 u32 prev_flags; 2406 int i, rc; 2407 2408 /* Verify changes are needed and record current Tx switching state */ 2409 prev_flags = bp->flags; 2410 if (enable) 2411 bp->flags |= TX_SWITCHING; 2412 else 2413 bp->flags &= ~TX_SWITCHING; 2414 if (prev_flags == bp->flags) 2415 return 0; 2416 2417 /* Verify state enables the sending of queue ramrods */ 2418 if ((bp->state != BNX2X_STATE_OPEN) || 2419 (bnx2x_get_q_logical_state(bp, 2420 &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) != 2421 BNX2X_Q_LOGICAL_STATE_ACTIVE)) 2422 return 0; 2423 2424 /* send q. update ramrod to configure Tx switching */ 2425 memset(&q_params, 0, sizeof(q_params)); 2426 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 2427 q_params.cmd = BNX2X_Q_CMD_UPDATE; 2428 __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, 2429 &q_params.params.update.update_flags); 2430 if (enable) 2431 __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING, 2432 &q_params.params.update.update_flags); 2433 else 2434 __clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING, 2435 &q_params.params.update.update_flags); 2436 2437 /* send the ramrod on all the queues of the PF */ 2438 for_each_eth_queue(bp, i) { 2439 struct bnx2x_fastpath *fp = &bp->fp[i]; 2440 2441 /* Set the appropriate Queue object */ 2442 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 2443 2444 /* Update the Queue state */ 2445 rc = bnx2x_queue_state_change(bp, &q_params); 2446 if (rc) { 2447 BNX2X_ERR("Failed to configure Tx switching\n"); 2448 return rc; 2449 } 2450 } 2451 2452 DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled"); 2453 return 0; 2454 } 2455 2456 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) 2457 { 2458 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev)); 2459 2460 if (!IS_SRIOV(bp)) { 2461 BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. 
Check dmesg for errors in probe stage\n"); 2462 return -EINVAL; 2463 } 2464 2465 DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n", 2466 num_vfs_param, BNX2X_NR_VIRTFN(bp)); 2467 2468 /* HW channel is only operational when PF is up */ 2469 if (bp->state != BNX2X_STATE_OPEN) { 2470 BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n"); 2471 return -EINVAL; 2472 } 2473 2474 /* we are always bound by the total_vfs in the configuration space */ 2475 if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) { 2476 BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n", 2477 num_vfs_param, BNX2X_NR_VIRTFN(bp)); 2478 num_vfs_param = BNX2X_NR_VIRTFN(bp); 2479 } 2480 2481 bp->requested_nr_virtfn = num_vfs_param; 2482 if (num_vfs_param == 0) { 2483 bnx2x_set_pf_tx_switching(bp, false); 2484 bnx2x_disable_sriov(bp); 2485 return 0; 2486 } else { 2487 return bnx2x_enable_sriov(bp); 2488 } 2489 } 2490 2491 #define IGU_ENTRY_SIZE 4 2492 2493 int bnx2x_enable_sriov(struct bnx2x *bp) 2494 { 2495 int rc = 0, req_vfs = bp->requested_nr_virtfn; 2496 int vf_idx, sb_idx, vfq_idx, qcount, first_vf; 2497 u32 igu_entry, address; 2498 u16 num_vf_queues; 2499 2500 if (req_vfs == 0) 2501 return 0; 2502 2503 first_vf = bp->vfdb->sriov.first_vf_in_pf; 2504 2505 /* statically distribute vf sb pool between VFs */ 2506 num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES, 2507 BP_VFDB(bp)->vf_sbs_pool / req_vfs); 2508 2509 /* zero previous values learned from igu cam */ 2510 for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) { 2511 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); 2512 2513 vf->sb_count = 0; 2514 vf_sb_count(BP_VF(bp, vf_idx)) = 0; 2515 } 2516 bp->vfdb->vf_sbs_pool = 0; 2517 2518 /* prepare IGU cam */ 2519 sb_idx = BP_VFDB(bp)->first_vf_igu_entry; 2520 address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE; 2521 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { 2522 for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) { 2523 igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT | 2524 vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT | 2525 IGU_REG_MAPPING_MEMORY_VALID; 2526 DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n", 2527 sb_idx, vf_idx); 2528 REG_WR(bp, address, igu_entry); 2529 sb_idx++; 2530 address += IGU_ENTRY_SIZE; 2531 } 2532 } 2533 2534 /* Reinitialize vf database according to igu cam */ 2535 bnx2x_get_vf_igu_cam_info(bp); 2536 2537 DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n", 2538 BP_VFDB(bp)->vf_sbs_pool, num_vf_queues); 2539 2540 qcount = 0; 2541 for_each_vf(bp, vf_idx) { 2542 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); 2543 2544 /* set local queue arrays */ 2545 vf->vfqs = &bp->vfdb->vfqs[qcount]; 2546 qcount += vf_sb_count(vf); 2547 bnx2x_iov_static_resc(bp, vf); 2548 } 2549 2550 /* prepare msix vectors in VF configuration space - the value in the 2551 * PCI configuration space should be the index of the last entry, 2552 * namely one less than the actual size of the table 2553 */ 2554 for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) { 2555 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx)); 2556 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL, 2557 num_vf_queues - 1); 2558 DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n", 2559 vf_idx, num_vf_queues - 1); 2560 } 2561 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 2562 2563 /* enable sriov. This will probe all the VFs, and consequentially cause 2564 * the "acquire" messages to appear on the VF PF channel. 
2565 */ 2566 DP(BNX2X_MSG_IOV, "about to call enable sriov\n"); 2567 bnx2x_disable_sriov(bp); 2568 2569 rc = bnx2x_set_pf_tx_switching(bp, true); 2570 if (rc) 2571 return rc; 2572 2573 rc = pci_enable_sriov(bp->pdev, req_vfs); 2574 if (rc) { 2575 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); 2576 return rc; 2577 } 2578 DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs); 2579 return req_vfs; 2580 } 2581 2582 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) 2583 { 2584 int vfidx; 2585 struct pf_vf_bulletin_content *bulletin; 2586 2587 DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n"); 2588 for_each_vf(bp, vfidx) { 2589 bulletin = BP_VF_BULLETIN(bp, vfidx); 2590 if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN) 2591 bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0); 2592 } 2593 } 2594 2595 void bnx2x_disable_sriov(struct bnx2x *bp) 2596 { 2597 if (pci_vfs_assigned(bp->pdev)) { 2598 DP(BNX2X_MSG_IOV, 2599 "Unloading driver while VFs are assigned - VFs will not be deallocated\n"); 2600 return; 2601 } 2602 2603 pci_disable_sriov(bp->pdev); 2604 } 2605 2606 static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx, 2607 struct bnx2x_virtf **vf, 2608 struct pf_vf_bulletin_content **bulletin, 2609 bool test_queue) 2610 { 2611 if (bp->state != BNX2X_STATE_OPEN) { 2612 BNX2X_ERR("PF is down - can't utilize iov-related functionality\n"); 2613 return -EINVAL; 2614 } 2615 2616 if (!IS_SRIOV(bp)) { 2617 BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n"); 2618 return -EINVAL; 2619 } 2620 2621 if (vfidx >= BNX2X_NR_VIRTFN(bp)) { 2622 BNX2X_ERR("VF is uninitialized - can't utilize iov-related functionality. vfidx was %d BNX2X_NR_VIRTFN was %d\n", 2623 vfidx, BNX2X_NR_VIRTFN(bp)); 2624 return -EINVAL; 2625 } 2626 2627 /* init members */ 2628 *vf = BP_VF(bp, vfidx); 2629 *bulletin = BP_VF_BULLETIN(bp, vfidx); 2630 2631 if (!*vf) { 2632 BNX2X_ERR("Unable to get VF structure for vfidx %d\n", vfidx); 2633 return -EINVAL; 2634 } 2635 2636 if (test_queue && !(*vf)->vfqs) { 2637 BNX2X_ERR("vfqs struct is null. Was this invoked before dynamically enabling SR-IOV? vfidx was %d\n", 2638 vfidx); 2639 return -EINVAL; 2640 } 2641 2642 if (!*bulletin) { 2643 BNX2X_ERR("Bulletin Board struct is null for vfidx %d\n", 2644 vfidx); 2645 return -EINVAL; 2646 } 2647 2648 return 0; 2649 } 2650 2651 int bnx2x_get_vf_config(struct net_device *dev, int vfidx, 2652 struct ifla_vf_info *ivi) 2653 { 2654 struct bnx2x *bp = netdev_priv(dev); 2655 struct bnx2x_virtf *vf = NULL; 2656 struct pf_vf_bulletin_content *bulletin = NULL; 2657 struct bnx2x_vlan_mac_obj *mac_obj; 2658 struct bnx2x_vlan_mac_obj *vlan_obj; 2659 int rc; 2660 2661 /* sanity and init */ 2662 rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true); 2663 if (rc) 2664 return rc; 2665 2666 mac_obj = &bnx2x_leading_vfq(vf, mac_obj); 2667 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); 2668 if (!mac_obj || !vlan_obj) { 2669 BNX2X_ERR("VF partially initialized\n"); 2670 return -EINVAL; 2671 } 2672 2673 ivi->vf = vfidx; 2674 ivi->qos = 0; 2675 ivi->max_tx_rate = 10000; /* always 10G. 
TBA take from link struct */ 2676 ivi->min_tx_rate = 0; 2677 ivi->spoofchk = 1; /*always enabled */ 2678 if (vf->state == VF_ENABLED) { 2679 /* mac and vlan are in vlan_mac objects */ 2680 if (bnx2x_validate_vf_sp_objs(bp, vf, false)) { 2681 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, 2682 0, ETH_ALEN); 2683 vlan_obj->get_n_elements(bp, vlan_obj, 1, 2684 (u8 *)&ivi->vlan, 0, 2685 VLAN_HLEN); 2686 } 2687 } else { 2688 mutex_lock(&bp->vfdb->bulletin_mutex); 2689 /* mac */ 2690 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) 2691 /* mac configured by ndo so its in bulletin board */ 2692 memcpy(&ivi->mac, bulletin->mac, ETH_ALEN); 2693 else 2694 /* function has not been loaded yet. Show mac as 0s */ 2695 memset(&ivi->mac, 0, ETH_ALEN); 2696 2697 /* vlan */ 2698 if (bulletin->valid_bitmap & (1 << VLAN_VALID)) 2699 /* vlan configured by ndo so its in bulletin board */ 2700 memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN); 2701 else 2702 /* function has not been loaded yet. Show vlans as 0s */ 2703 memset(&ivi->vlan, 0, VLAN_HLEN); 2704 2705 mutex_unlock(&bp->vfdb->bulletin_mutex); 2706 } 2707 2708 return 0; 2709 } 2710 2711 /* New mac for VF. Consider these cases: 2712 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and 2713 * supply at acquire. 2714 * 2. VF has already been acquired but has not yet initialized - store in local 2715 * bulletin board. mac will be posted on VF bulletin board after VF init. VF 2716 * will configure this mac when it is ready. 2717 * 3. VF has already initialized but has not yet setup a queue - post the new 2718 * mac on VF's bulletin board right now. VF will configure this mac when it 2719 * is ready. 2720 * 4. VF has already set a queue - delete any macs already configured for this 2721 * queue and manually config the new mac. 2722 * In any event, once this function has been called refuse any attempts by the 2723 * VF to configure any mac for itself except for this mac. In case of a race 2724 * where the VF fails to see the new post on its bulletin board before sending a 2725 * mac configuration request, the PF will simply fail the request and VF can try 2726 * again after consulting its bulletin board. 2727 */ 2728 int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac) 2729 { 2730 struct bnx2x *bp = netdev_priv(dev); 2731 int rc, q_logical_state; 2732 struct bnx2x_virtf *vf = NULL; 2733 struct pf_vf_bulletin_content *bulletin = NULL; 2734 2735 if (!is_valid_ether_addr(mac)) { 2736 BNX2X_ERR("mac address invalid\n"); 2737 return -EINVAL; 2738 } 2739 2740 /* sanity and init */ 2741 rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true); 2742 if (rc) 2743 return rc; 2744 2745 mutex_lock(&bp->vfdb->bulletin_mutex); 2746 2747 /* update PF's copy of the VF's bulletin. 
Will no longer accept mac 2748 * configuration requests from the vf unless they match this mac 2749 */ 2750 bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID; 2751 memcpy(bulletin->mac, mac, ETH_ALEN); 2752 2753 /* Post update on VF's bulletin board */ 2754 rc = bnx2x_post_vf_bulletin(bp, vfidx); 2755 2756 /* release lock before checking return code */ 2757 mutex_unlock(&bp->vfdb->bulletin_mutex); 2758 2759 if (rc) { 2760 BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx); 2761 return rc; 2762 } 2763 2764 q_logical_state = 2765 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)); 2766 if (vf->state == VF_ENABLED && 2767 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { 2768 /* configure the mac in device on this vf's queue */ 2769 unsigned long ramrod_flags = 0; 2770 struct bnx2x_vlan_mac_obj *mac_obj; 2771 2772 /* User should be able to see failure reason in system logs */ 2773 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) 2774 return -EINVAL; 2775 2776 /* must lock vfpf channel to protect against vf flows */ 2777 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 2778 2779 /* remove existing eth macs */ 2780 mac_obj = &bnx2x_leading_vfq(vf, mac_obj); 2781 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); 2782 if (rc) { 2783 BNX2X_ERR("failed to delete eth macs\n"); 2784 rc = -EINVAL; 2785 goto out; 2786 } 2787 2788 /* remove existing uc list macs */ 2789 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true); 2790 if (rc) { 2791 BNX2X_ERR("failed to delete uc_list macs\n"); 2792 rc = -EINVAL; 2793 goto out; 2794 } 2795 2796 /* configure the new mac to device */ 2797 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 2798 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, 2799 BNX2X_ETH_MAC, &ramrod_flags); 2800 2801 out: 2802 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 2803 } 2804 2805 return rc; 2806 } 2807 2808 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) 2809 { 2810 struct bnx2x_queue_state_params q_params = {NULL}; 2811 struct bnx2x_vlan_mac_ramrod_params ramrod_param; 2812 struct bnx2x_queue_update_params *update_params; 2813 struct pf_vf_bulletin_content *bulletin = NULL; 2814 struct bnx2x_rx_mode_ramrod_params rx_ramrod; 2815 struct bnx2x *bp = netdev_priv(dev); 2816 struct bnx2x_vlan_mac_obj *vlan_obj; 2817 unsigned long vlan_mac_flags = 0; 2818 unsigned long ramrod_flags = 0; 2819 struct bnx2x_virtf *vf = NULL; 2820 unsigned long accept_flags; 2821 int rc; 2822 2823 if (vlan > 4095) { 2824 BNX2X_ERR("illegal vlan value %d\n", vlan); 2825 return -EINVAL; 2826 } 2827 2828 DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n", 2829 vfidx, vlan, 0); 2830 2831 /* sanity and init */ 2832 rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true); 2833 if (rc) 2834 return rc; 2835 2836 /* update PF's copy of the VF's bulletin. No point in posting the vlan 2837 * to the VF since it doesn't have anything to do with it. But it is useful 2838 * to store it here in case the VF is not up yet and we can only 2839 * configure the vlan later when it does. Treat vlan id 0 as removing the 2840 * Host tag. 2841 */ 2842 mutex_lock(&bp->vfdb->bulletin_mutex); 2843 2844 if (vlan > 0) 2845 bulletin->valid_bitmap |= 1 << VLAN_VALID; 2846 else 2847 bulletin->valid_bitmap &= ~(1 << VLAN_VALID); 2848 bulletin->vlan = vlan; 2849 2850 mutex_unlock(&bp->vfdb->bulletin_mutex); 2851 2852 /* is vf initialized and queue set up?
*/ 2853 if (vf->state != VF_ENABLED || 2854 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) != 2855 BNX2X_Q_LOGICAL_STATE_ACTIVE) 2856 return rc; 2857 2858 /* User should be able to see error in system logs */ 2859 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) 2860 return -EINVAL; 2861 2862 /* must lock vfpf channel to protect against vf flows */ 2863 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 2864 2865 /* remove existing vlans */ 2866 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 2867 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); 2868 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, 2869 &ramrod_flags); 2870 if (rc) { 2871 BNX2X_ERR("failed to delete vlans\n"); 2872 rc = -EINVAL; 2873 goto out; 2874 } 2875 2876 /* need to remove/add the VF's accept_any_vlan bit */ 2877 accept_flags = bnx2x_leading_vfq(vf, accept_flags); 2878 if (vlan) 2879 clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); 2880 else 2881 set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags); 2882 2883 bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf, 2884 accept_flags); 2885 bnx2x_leading_vfq(vf, accept_flags) = accept_flags; 2886 bnx2x_config_rx_mode(bp, &rx_ramrod); 2887 2888 /* configure the new vlan to device */ 2889 memset(&ramrod_param, 0, sizeof(ramrod_param)); 2890 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 2891 ramrod_param.vlan_mac_obj = vlan_obj; 2892 ramrod_param.ramrod_flags = ramrod_flags; 2893 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 2894 &ramrod_param.user_req.vlan_mac_flags); 2895 ramrod_param.user_req.u.vlan.vlan = vlan; 2896 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD; 2897 rc = bnx2x_config_vlan_mac(bp, &ramrod_param); 2898 if (rc) { 2899 BNX2X_ERR("failed to configure vlan\n"); 2900 rc = -EINVAL; 2901 goto out; 2902 } 2903 2904 /* send queue update ramrod to configure default vlan and silent 2905 * vlan removal 2906 */ 2907 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 2908 q_params.cmd = BNX2X_Q_CMD_UPDATE; 2909 q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj); 2910 update_params = &q_params.params.update; 2911 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG, 2912 &update_params->update_flags); 2913 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, 2914 &update_params->update_flags); 2915 if (vlan == 0) { 2916 /* if vlan is 0 then we want to leave the VF traffic 2917 * untagged, and leave the incoming traffic untouched 2918 * (i.e. do not remove any vlan tags). 2919 */ 2920 __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, 2921 &update_params->update_flags); 2922 __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 2923 &update_params->update_flags); 2924 } else { 2925 /* configure default vlan to vf queue and set silent 2926 * vlan removal (the vf remains unaware of this vlan). 
2927 */ 2928 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, 2929 &update_params->update_flags); 2930 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, 2931 &update_params->update_flags); 2932 update_params->def_vlan = vlan; 2933 update_params->silent_removal_value = 2934 vlan & VLAN_VID_MASK; 2935 update_params->silent_removal_mask = VLAN_VID_MASK; 2936 } 2937 2938 /* Update the Queue state */ 2939 rc = bnx2x_queue_state_change(bp, &q_params); 2940 if (rc) { 2941 BNX2X_ERR("Failed to configure default VLAN\n"); 2942 goto out; 2943 } 2944 2945 2946 /* clear the flag indicating that this VF needs its vlan 2947 * (will only be set if the HV configured the Vlan before vf was 2948 * up and we were called because the VF came up later) 2949 */ 2950 out: 2951 vf->cfg_flags &= ~VF_CFG_VLAN; 2952 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 2953 2954 return rc; 2955 } 2956 2957 /* crc is the first field in the bulletin board. Compute the crc over the 2958 * entire bulletin board excluding the crc field itself. Use the length field 2959 * as the Bulletin Board was posted by a PF with possibly a different version 2960 * from the vf which will sample it. Therefore, the length is computed by the 2961 * PF and then used blindly by the VF. 2962 */ 2963 u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin) 2964 { 2965 return crc32(BULLETIN_CRC_SEED, 2966 ((u8 *)bulletin) + sizeof(bulletin->crc), 2967 bulletin->length - sizeof(bulletin->crc)); 2968 } 2969 2970 /* Check for new posts on the bulletin board */ 2971 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) 2972 { 2973 struct pf_vf_bulletin_content *bulletin; 2974 int attempts; 2975 2976 /* sampling the structure in mid post may result in corrupted data; 2977 * validate the crc to ensure coherency. 2978 */ 2979 for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) { 2980 u32 crc; 2981 2982 /* sample the bulletin board */ 2983 memcpy(&bp->shadow_bulletin, bp->pf2vf_bulletin, 2984 sizeof(union pf_vf_bulletin)); 2985 2986 crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content); 2987 2988 if (bp->shadow_bulletin.content.crc == crc) 2989 break; 2990 2991 BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n", 2992 bp->shadow_bulletin.content.crc, crc); 2993 } 2994 2995 if (attempts >= BULLETIN_ATTEMPTS) { 2996 BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times.
Aborting\n", 2997 attempts); 2998 return PFVF_BULLETIN_CRC_ERR; 2999 } 3000 bulletin = &bp->shadow_bulletin.content; 3001 3002 /* bulletin board hasn't changed since last sample */ 3003 if (bp->old_bulletin.version == bulletin->version) 3004 return PFVF_BULLETIN_UNCHANGED; 3005 3006 /* the mac address in bulletin board is valid and is new */ 3007 if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID && 3008 !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) { 3009 /* update new mac to net device */ 3010 memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN); 3011 } 3012 3013 if (bulletin->valid_bitmap & (1 << LINK_VALID)) { 3014 DP(BNX2X_MSG_IOV, "link update speed %d flags %x\n", 3015 bulletin->link_speed, bulletin->link_flags); 3016 3017 bp->vf_link_vars.line_speed = bulletin->link_speed; 3018 bp->vf_link_vars.link_report_flags = 0; 3019 /* Link is down */ 3020 if (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN) 3021 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN, 3022 &bp->vf_link_vars.link_report_flags); 3023 /* Full DUPLEX */ 3024 if (bulletin->link_flags & VFPF_LINK_REPORT_FULL_DUPLEX) 3025 __set_bit(BNX2X_LINK_REPORT_FD, 3026 &bp->vf_link_vars.link_report_flags); 3027 /* Rx Flow Control is ON */ 3028 if (bulletin->link_flags & VFPF_LINK_REPORT_RX_FC_ON) 3029 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, 3030 &bp->vf_link_vars.link_report_flags); 3031 /* Tx Flow Control is ON */ 3032 if (bulletin->link_flags & VFPF_LINK_REPORT_TX_FC_ON) 3033 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, 3034 &bp->vf_link_vars.link_report_flags); 3035 __bnx2x_link_report(bp); 3036 } 3037 3038 /* copy new bulletin board to bp */ 3039 memcpy(&bp->old_bulletin, bulletin, 3040 sizeof(struct pf_vf_bulletin_content)); 3041 3042 return PFVF_BULLETIN_UPDATED; 3043 } 3044 3045 void bnx2x_timer_sriov(struct bnx2x *bp) 3046 { 3047 bnx2x_sample_bulletin(bp); 3048 3049 /* if channel is down we need to self destruct */ 3050 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) 3051 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, 3052 BNX2X_MSG_IOV); 3053 } 3054 3055 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) 3056 { 3057 /* vf doorbells are embedded within the regview */ 3058 return bp->regview + PXP_VF_ADDR_DB_START; 3059 } 3060 3061 void bnx2x_vf_pci_dealloc(struct bnx2x *bp) 3062 { 3063 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, 3064 sizeof(struct bnx2x_vf_mbx_msg)); 3065 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, 3066 sizeof(union pf_vf_bulletin)); 3067 } 3068 3069 int bnx2x_vf_pci_alloc(struct bnx2x *bp) 3070 { 3071 mutex_init(&bp->vf2pf_mutex); 3072 3073 /* allocate vf2pf mailbox for vf to pf channel */ 3074 bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping, 3075 sizeof(struct bnx2x_vf_mbx_msg)); 3076 if (!bp->vf2pf_mbox) 3077 goto alloc_mem_err; 3078 3079 /* allocate pf 2 vf bulletin board */ 3080 bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping, 3081 sizeof(union pf_vf_bulletin)); 3082 if (!bp->pf2vf_bulletin) 3083 goto alloc_mem_err; 3084 3085 bnx2x_vf_bulletin_finalize(&bp->pf2vf_bulletin->content, true); 3086 3087 return 0; 3088 3089 alloc_mem_err: 3090 bnx2x_vf_pci_dealloc(bp); 3091 return -ENOMEM; 3092 } 3093 3094 void bnx2x_iov_channel_down(struct bnx2x *bp) 3095 { 3096 int vf_idx; 3097 struct pf_vf_bulletin_content *bulletin; 3098 3099 if (!IS_SRIOV(bp)) 3100 return; 3101 3102 for_each_vf(bp, vf_idx) { 3103 /* locate this VFs bulletin board and update the channel down 3104 * bit 3105 */ 3106 bulletin = BP_VF_BULLETIN(bp, vf_idx); 3107 
bulletin->valid_bitmap |= 1 << CHANNEL_DOWN; 3108 3109 /* update vf bulletin board */ 3110 bnx2x_post_vf_bulletin(bp, vf_idx); 3111 } 3112 } 3113 3114 void bnx2x_iov_task(struct work_struct *work) 3115 { 3116 struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work); 3117 3118 if (!netif_running(bp->dev)) 3119 return; 3120 3121 if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR, 3122 &bp->iov_task_state)) 3123 bnx2x_vf_handle_flr_event(bp); 3124 3125 if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG, 3126 &bp->iov_task_state)) 3127 bnx2x_vf_mbx(bp); 3128 } 3129 3130 void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) 3131 { 3132 smp_mb__before_atomic(); 3133 set_bit(flag, &bp->iov_task_state); 3134 smp_mb__after_atomic(); 3135 DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); 3136 queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0); 3137 } 3138