1 /* bnx2x_sriov.c: QLogic Everest network driver. 2 * 3 * Copyright 2009-2013 Broadcom Corporation 4 * Copyright 2014 QLogic Corporation 5 * All rights reserved 6 * 7 * Unless you and QLogic execute a separate written software license 8 * agreement governing use of this software, this software is licensed to you 9 * under the terms of the GNU General Public License version 2, available 10 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). 11 * 12 * Notwithstanding the above, under no circumstances may you combine this 13 * software in any way with any other QLogic software provided under a 14 * license other than the GPL, without QLogic's express prior written 15 * consent. 16 * 17 * Maintained by: Ariel Elior <ariel.elior@qlogic.com> 18 * Written by: Shmulik Ravid 19 * Ariel Elior <ariel.elior@qlogic.com> 20 * 21 */ 22 #include "bnx2x.h" 23 #include "bnx2x_init.h" 24 #include "bnx2x_cmn.h" 25 #include "bnx2x_sp.h" 26 #include <linux/crc32.h> 27 #include <linux/if_vlan.h> 28 29 static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx, 30 struct bnx2x_virtf **vf, 31 struct pf_vf_bulletin_content **bulletin, 32 bool test_queue); 33 34 /* General service functions */ 35 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, 36 u16 pf_id) 37 { 38 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), 39 pf_id); 40 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid), 41 pf_id); 42 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid), 43 pf_id); 44 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid), 45 pf_id); 46 } 47 48 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, 49 u8 enable) 50 { 51 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), 52 enable); 53 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid), 54 enable); 55 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid), 56 enable); 57 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid), 58 enable); 59 } 60 61 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) 62 { 63 int idx; 64 65 for_each_vf(bp, idx) 66 if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid) 67 break; 68 return idx; 69 } 70 71 static 72 struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid) 73 { 74 u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid); 75 return (idx < BNX2X_NR_VIRTFN(bp)) ? 
BP_VF(bp, idx) : NULL; 76 } 77 78 static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf, 79 u8 igu_sb_id, u8 segment, u16 index, u8 op, 80 u8 update) 81 { 82 /* acking a VF sb through the PF - use the GRC */ 83 u32 ctl; 84 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; 85 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; 86 u32 func_encode = vf->abs_vfid; 87 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id; 88 struct igu_regular cmd_data = {0}; 89 90 cmd_data.sb_id_and_flags = 91 ((index << IGU_REGULAR_SB_INDEX_SHIFT) | 92 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | 93 (update << IGU_REGULAR_BUPDATE_SHIFT) | 94 (op << IGU_REGULAR_ENABLE_INT_SHIFT)); 95 96 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT | 97 func_encode << IGU_CTRL_REG_FID_SHIFT | 98 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT; 99 100 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 101 cmd_data.sb_id_and_flags, igu_addr_data); 102 REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags); 103 mmiowb(); 104 barrier(); 105 106 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n", 107 ctl, igu_addr_ctl); 108 REG_WR(bp, igu_addr_ctl, ctl); 109 mmiowb(); 110 barrier(); 111 } 112 113 static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp, 114 struct bnx2x_virtf *vf, 115 bool print_err) 116 { 117 if (!bnx2x_leading_vfq(vf, sp_initialized)) { 118 if (print_err) 119 BNX2X_ERR("Slowpath objects not yet initialized!\n"); 120 else 121 DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n"); 122 return false; 123 } 124 return true; 125 } 126 127 /* VFOP operations states */ 128 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, 129 struct bnx2x_queue_init_params *init_params, 130 struct bnx2x_queue_setup_params *setup_params, 131 u16 q_idx, u16 sb_idx) 132 { 133 DP(BNX2X_MSG_IOV, 134 "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d", 135 vf->abs_vfid, 136 q_idx, 137 sb_idx, 138 init_params->tx.sb_cq_index, 139 init_params->tx.hc_rate, 140 setup_params->flags, 141 setup_params->txq_params.traffic_type); 142 } 143 144 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, 145 struct bnx2x_queue_init_params *init_params, 146 struct bnx2x_queue_setup_params *setup_params, 147 u16 q_idx, u16 sb_idx) 148 { 149 struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params; 150 151 DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n" 152 "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n", 153 vf->abs_vfid, 154 q_idx, 155 sb_idx, 156 init_params->rx.sb_cq_index, 157 init_params->rx.hc_rate, 158 setup_params->gen_params.mtu, 159 rxq_params->buf_sz, 160 rxq_params->sge_buf_sz, 161 rxq_params->max_sges_pkt, 162 rxq_params->tpa_agg_sz, 163 setup_params->flags, 164 rxq_params->drop_flags, 165 rxq_params->cache_line_log); 166 } 167 168 void bnx2x_vfop_qctor_prep(struct bnx2x *bp, 169 struct bnx2x_virtf *vf, 170 struct bnx2x_vf_queue *q, 171 struct bnx2x_vf_queue_construct_params *p, 172 unsigned long q_type) 173 { 174 struct bnx2x_queue_init_params *init_p = &p->qstate.params.init; 175 struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup; 176 177 /* INIT */ 178 179 /* Enable host coalescing in the transition to INIT state */ 180 if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags)) 181 __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags); 182 183 if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags)) 184 __set_bit(BNX2X_Q_FLG_HC_EN, 
&init_p->tx.flags); 185 186 /* FW SB ID */ 187 init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx); 188 init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx); 189 190 /* context */ 191 init_p->cxts[0] = q->cxt; 192 193 /* SETUP */ 194 195 /* Setup-op general parameters */ 196 setup_p->gen_params.spcl_id = vf->sp_cl_id; 197 setup_p->gen_params.stat_id = vfq_stat_id(vf, q); 198 setup_p->gen_params.fp_hsi = vf->fp_hsi; 199 200 /* Setup-op pause params: 201 * Nothing to do, the pause thresholds are set by default to 0 which 202 * effectively turns off the feature for this queue. We don't want 203 * one queue (VF) to interfere with another queue (another VF) 204 */ 205 if (vf->cfg_flags & VF_CFG_FW_FC) 206 BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n", 207 vf->abs_vfid); 208 /* Setup-op flags: 209 * collect statistics, zero statistics, local-switching, security, 210 * OV for Flex10, RSS and MCAST for leading 211 */ 212 if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags)) 213 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags); 214 215 /* for VFs, enable tx switching, bd coherency, and mac address 216 * anti-spoofing 217 */ 218 __set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags); 219 __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags); 220 __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags); 221 222 /* Setup-op rx parameters */ 223 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) { 224 struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params; 225 226 rxq_p->cl_qzone_id = vfq_qzone_id(vf, q); 227 rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx); 228 rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid); 229 230 if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags)) 231 rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES; 232 } 233 234 /* Setup-op tx parameters */ 235 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) { 236 setup_p->txq_params.tss_leading_cl_id = vf->leading_rss; 237 setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx); 238 } 239 } 240 241 static int bnx2x_vf_queue_create(struct bnx2x *bp, 242 struct bnx2x_virtf *vf, int qid, 243 struct bnx2x_vf_queue_construct_params *qctor) 244 { 245 struct bnx2x_queue_state_params *q_params; 246 int rc = 0; 247 248 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); 249 250 /* Prepare ramrod information */ 251 q_params = &qctor->qstate; 252 q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj); 253 set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags); 254 255 if (bnx2x_get_q_logical_state(bp, q_params->q_obj) == 256 BNX2X_Q_LOGICAL_STATE_ACTIVE) { 257 DP(BNX2X_MSG_IOV, "queue was already up.
Aborting gracefully\n"); 258 goto out; 259 } 260 261 /* Run Queue 'construction' ramrods */ 262 q_params->cmd = BNX2X_Q_CMD_INIT; 263 rc = bnx2x_queue_state_change(bp, q_params); 264 if (rc) 265 goto out; 266 267 memcpy(&q_params->params.setup, &qctor->prep_qsetup, 268 sizeof(struct bnx2x_queue_setup_params)); 269 q_params->cmd = BNX2X_Q_CMD_SETUP; 270 rc = bnx2x_queue_state_change(bp, q_params); 271 if (rc) 272 goto out; 273 274 /* enable interrupts */ 275 bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)), 276 USTORM_ID, 0, IGU_INT_ENABLE, 0); 277 out: 278 return rc; 279 } 280 281 static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf, 282 int qid) 283 { 284 enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT, 285 BNX2X_Q_CMD_TERMINATE, 286 BNX2X_Q_CMD_CFC_DEL}; 287 struct bnx2x_queue_state_params q_params; 288 int rc, i; 289 290 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 291 292 /* Prepare ramrod information */ 293 memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params)); 294 q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj); 295 set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); 296 297 if (bnx2x_get_q_logical_state(bp, q_params.q_obj) == 298 BNX2X_Q_LOGICAL_STATE_STOPPED) { 299 DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n"); 300 goto out; 301 } 302 303 /* Run Queue 'destruction' ramrods */ 304 for (i = 0; i < ARRAY_SIZE(cmds); i++) { 305 q_params.cmd = cmds[i]; 306 rc = bnx2x_queue_state_change(bp, &q_params); 307 if (rc) { 308 BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]); 309 return rc; 310 } 311 } 312 out: 313 /* Clean Context */ 314 if (bnx2x_vfq(vf, qid, cxt)) { 315 bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0; 316 bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0; 317 } 318 319 return 0; 320 } 321 322 static void 323 bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid) 324 { 325 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 326 if (vf) { 327 /* the first igu entry belonging to VFs of this PF */ 328 if (!BP_VFDB(bp)->first_vf_igu_entry) 329 BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id; 330 331 /* the first igu entry belonging to this VF */ 332 if (!vf_sb_count(vf)) 333 vf->igu_base_id = igu_sb_id; 334 335 ++vf_sb_count(vf); 336 ++vf->sb_count; 337 } 338 BP_VFDB(bp)->vf_sbs_pool++; 339 } 340 341 static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp, 342 struct bnx2x_vlan_mac_obj *obj, 343 atomic_t *counter) 344 { 345 struct list_head *pos; 346 int read_lock; 347 int cnt = 0; 348 349 read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj); 350 if (read_lock) 351 DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n"); 352 353 list_for_each(pos, &obj->head) 354 cnt++; 355 356 if (!read_lock) 357 bnx2x_vlan_mac_h_read_unlock(bp, obj); 358 359 atomic_set(counter, cnt); 360 } 361 362 static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf, 363 int qid, bool drv_only, bool mac) 364 { 365 struct bnx2x_vlan_mac_ramrod_params ramrod; 366 int rc; 367 368 DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid, 369 mac ? 
"MACs" : "VLANs"); 370 371 /* Prepare ramrod params */ 372 memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); 373 if (mac) { 374 set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); 375 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); 376 } else { 377 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 378 &ramrod.user_req.vlan_mac_flags); 379 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); 380 } 381 ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL; 382 383 set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); 384 if (drv_only) 385 set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); 386 else 387 set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); 388 389 /* Start deleting */ 390 rc = ramrod.vlan_mac_obj->delete_all(bp, 391 ramrod.vlan_mac_obj, 392 &ramrod.user_req.vlan_mac_flags, 393 &ramrod.ramrod_flags); 394 if (rc) { 395 BNX2X_ERR("Failed to delete all %s\n", 396 mac ? "MACs" : "VLANs"); 397 return rc; 398 } 399 400 /* Clear the vlan counters */ 401 if (!mac) 402 atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0); 403 404 return 0; 405 } 406 407 static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, 408 struct bnx2x_virtf *vf, int qid, 409 struct bnx2x_vf_mac_vlan_filter *filter, 410 bool drv_only) 411 { 412 struct bnx2x_vlan_mac_ramrod_params ramrod; 413 int rc; 414 415 DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n", 416 vf->abs_vfid, filter->add ? "Adding" : "Deleting", 417 filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN"); 418 419 /* Prepare ramrod params */ 420 memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params)); 421 if (filter->type == BNX2X_VF_FILTER_VLAN) { 422 set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, 423 &ramrod.user_req.vlan_mac_flags); 424 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); 425 ramrod.user_req.u.vlan.vlan = filter->vid; 426 } else { 427 set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags); 428 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); 429 memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN); 430 } 431 ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD : 432 BNX2X_VLAN_MAC_DEL; 433 434 /* Verify there are available vlan credits */ 435 if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN && 436 (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >= 437 vf_vlan_rules_cnt(vf))) { 438 BNX2X_ERR("No credits for vlan [%d >= %d]\n", 439 atomic_read(&bnx2x_vfq(vf, qid, vlan_count)), 440 vf_vlan_rules_cnt(vf)); 441 return -ENOMEM; 442 } 443 444 set_bit(RAMROD_EXEC, &ramrod.ramrod_flags); 445 if (drv_only) 446 set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags); 447 else 448 set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); 449 450 /* Add/Remove the filter */ 451 rc = bnx2x_config_vlan_mac(bp, &ramrod); 452 if (rc && rc != -EEXIST) { 453 BNX2X_ERR("Failed to %s %s\n", 454 filter->add ? "add" : "delete", 455 filter->type == BNX2X_VF_FILTER_MAC ? 
"MAC" : 456 "VLAN"); 457 return rc; 458 } 459 460 /* Update the vlan counters */ 461 if (filter->type == BNX2X_VF_FILTER_VLAN) 462 bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj, 463 &bnx2x_vfq(vf, qid, vlan_count)); 464 465 return 0; 466 } 467 468 int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf, 469 struct bnx2x_vf_mac_vlan_filters *filters, 470 int qid, bool drv_only) 471 { 472 int rc = 0, i; 473 474 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 475 476 if (!bnx2x_validate_vf_sp_objs(bp, vf, true)) 477 return -EINVAL; 478 479 /* Prepare ramrod params */ 480 for (i = 0; i < filters->count; i++) { 481 rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, 482 &filters->filters[i], drv_only); 483 if (rc) 484 break; 485 } 486 487 /* Rollback if needed */ 488 if (i != filters->count) { 489 BNX2X_ERR("Managed only %d/%d filters - rolling back\n", 490 i, filters->count + 1); 491 while (--i >= 0) { 492 filters->filters[i].add = !filters->filters[i].add; 493 bnx2x_vf_mac_vlan_config(bp, vf, qid, 494 &filters->filters[i], 495 drv_only); 496 } 497 } 498 499 /* It's our responsibility to free the filters */ 500 kfree(filters); 501 502 return rc; 503 } 504 505 int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid, 506 struct bnx2x_vf_queue_construct_params *qctor) 507 { 508 int rc; 509 510 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); 511 512 rc = bnx2x_vf_queue_create(bp, vf, qid, qctor); 513 if (rc) 514 goto op_err; 515 516 /* Configure vlan0 for leading queue */ 517 if (!qid) { 518 struct bnx2x_vf_mac_vlan_filter filter; 519 520 memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter)); 521 filter.type = BNX2X_VF_FILTER_VLAN; 522 filter.add = true; 523 filter.vid = 0; 524 rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false); 525 if (rc) 526 goto op_err; 527 } 528 529 /* Schedule the configuration of any pending vlan filters */ 530 vf->cfg_flags |= VF_CFG_VLAN; 531 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN, 532 BNX2X_MSG_IOV); 533 return 0; 534 op_err: 535 BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); 536 return rc; 537 } 538 539 static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf, 540 int qid) 541 { 542 int rc; 543 544 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); 545 546 /* If needed, clean the filtering data base */ 547 if ((qid == LEADING_IDX) && 548 bnx2x_validate_vf_sp_objs(bp, vf, false)) { 549 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false); 550 if (rc) 551 goto op_err; 552 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true); 553 if (rc) 554 goto op_err; 555 } 556 557 /* Terminate queue */ 558 if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) { 559 struct bnx2x_queue_state_params qstate; 560 561 memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); 562 qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); 563 qstate.q_obj->state = BNX2X_Q_STATE_STOPPED; 564 qstate.cmd = BNX2X_Q_CMD_TERMINATE; 565 set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); 566 rc = bnx2x_queue_state_change(bp, &qstate); 567 if (rc) 568 goto op_err; 569 } 570 571 return 0; 572 op_err: 573 BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc); 574 return rc; 575 } 576 577 int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, 578 bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only) 579 { 580 struct bnx2x_mcast_list_elem *mc = NULL; 581 struct bnx2x_mcast_ramrod_params mcast; 582 int rc, i; 583 584 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 585 586 /* Prepare Multicast command */ 587 
memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params)); 588 mcast.mcast_obj = &vf->mcast_obj; 589 if (drv_only) 590 set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags); 591 else 592 set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags); 593 if (mc_num) { 594 mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem), 595 GFP_KERNEL); 596 if (!mc) { 597 BNX2X_ERR("Cannot configure multicasts due to lack of memory\n"); 598 return -ENOMEM; 599 } 600 } 601 602 /* clear existing mcasts */ 603 mcast.mcast_list_len = vf->mcast_list_len; 604 vf->mcast_list_len = mc_num; 605 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL); 606 if (rc) { 607 BNX2X_ERR("Failed to remove multicasts\n"); 608 kfree(mc); 609 return rc; 610 } 611 612 /* update mcast list on the ramrod params */ 613 if (mc_num) { 614 INIT_LIST_HEAD(&mcast.mcast_list); 615 for (i = 0; i < mc_num; i++) { 616 mc[i].mac = mcasts[i]; 617 list_add_tail(&mc[i].link, 618 &mcast.mcast_list); 619 } 620 621 /* add new mcasts */ 622 mcast.mcast_list_len = mc_num; 623 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD); 624 if (rc) 625 BNX2X_ERR("Failed to add multicasts\n"); 626 kfree(mc); 627 } 628 629 return rc; 630 } 631 632 static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid, 633 struct bnx2x_rx_mode_ramrod_params *ramrod, 634 struct bnx2x_virtf *vf, 635 unsigned long accept_flags) 636 { 637 struct bnx2x_vf_queue *vfq = vfq_get(vf, qid); 638 639 memset(ramrod, 0, sizeof(*ramrod)); 640 ramrod->cid = vfq->cid; 641 ramrod->cl_id = vfq_cl_id(vf, vfq); 642 ramrod->rx_mode_obj = &bp->rx_mode_obj; 643 ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid); 644 ramrod->rx_accept_flags = accept_flags; 645 ramrod->tx_accept_flags = accept_flags; 646 ramrod->pstate = &vf->filter_state; 647 ramrod->state = BNX2X_FILTER_RX_MODE_PENDING; 648 649 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); 650 set_bit(RAMROD_RX, &ramrod->ramrod_flags); 651 set_bit(RAMROD_TX, &ramrod->ramrod_flags); 652 653 ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2); 654 ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2); 655 } 656 657 int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf, 658 int qid, unsigned long accept_flags) 659 { 660 struct bnx2x_rx_mode_ramrod_params ramrod; 661 662 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 663 664 bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags); 665 set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags); 666 vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags; 667 return bnx2x_config_rx_mode(bp, &ramrod); 668 } 669 670 int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid) 671 { 672 int rc; 673 674 DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid); 675 676 /* Remove all classification configuration for leading queue */ 677 if (qid == LEADING_IDX) { 678 rc = bnx2x_vf_rxmode(bp, vf, qid, 0); 679 if (rc) 680 goto op_err; 681 682 /* Remove filtering if feasible */ 683 if (bnx2x_validate_vf_sp_objs(bp, vf, true)) { 684 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, 685 false, false); 686 if (rc) 687 goto op_err; 688 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, 689 false, true); 690 if (rc) 691 goto op_err; 692 rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false); 693 if (rc) 694 goto op_err; 695 } 696 } 697 698 /* Destroy queue */ 699 rc = bnx2x_vf_queue_destroy(bp, vf, qid); 700 if (rc) 701 goto op_err; 702 return rc; 703 op_err: 704 BNX2X_ERR("vf[%d:%d] error: rc %d\n", 705 vf->abs_vfid, qid, rc); 706 return rc; 707 } 708 709 /* VF enable primitives 710 * when pretend is required the
caller is responsible 711 * for calling pretend prior to calling these routines 712 */ 713 714 /* internal vf enable - until vf is enabled internally all transactions 715 * are blocked. This routine should always be called last with pretend. 716 */ 717 static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable) 718 { 719 REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0); 720 } 721 722 /* clears vf error in all semi blocks */ 723 static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid) 724 { 725 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid); 726 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid); 727 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid); 728 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid); 729 } 730 731 static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid) 732 { 733 u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5; 734 u32 was_err_reg = 0; 735 736 switch (was_err_group) { 737 case 0: 738 was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR; 739 break; 740 case 1: 741 was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR; 742 break; 743 case 2: 744 was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR; 745 break; 746 case 3: 747 was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR; 748 break; 749 } 750 REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f)); 751 } 752 753 static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf) 754 { 755 int i; 756 u32 val; 757 758 /* Set VF masks and configuration - pretend */ 759 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); 760 761 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); 762 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); 763 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0); 764 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0); 765 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0); 766 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0); 767 768 val = REG_RD(bp, IGU_REG_VF_CONFIGURATION); 769 val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN); 770 if (vf->cfg_flags & VF_CFG_INT_SIMD) 771 val |= IGU_VF_CONF_SINGLE_ISR_EN; 772 val &= ~IGU_VF_CONF_PARENT_MASK; 773 val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT; 774 REG_WR(bp, IGU_REG_VF_CONFIGURATION, val); 775 776 DP(BNX2X_MSG_IOV, 777 "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n", 778 vf->abs_vfid, val); 779 780 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 781 782 /* iterate over all queues, clear sb consumer */ 783 for (i = 0; i < vf_sb_count(vf); i++) { 784 u8 igu_sb_id = vf_igu_sb(vf, i); 785 786 /* zero prod memory */ 787 REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0); 788 789 /* clear sb state machine */ 790 bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id, 791 false /* VF */); 792 793 /* disable + update */ 794 bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0, 795 IGU_INT_DISABLE, 1); 796 } 797 } 798 799 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid) 800 { 801 /* set the VF-PF association in the FW */ 802 storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp)); 803 storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1); 804 805 /* clear vf errors */ 806 bnx2x_vf_semi_clear_err(bp, abs_vfid); 807 bnx2x_vf_pglue_clear_err(bp, abs_vfid); 808 809 /* internal vf-enable - pretend */ 810 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid)); 811 DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid); 812 bnx2x_vf_enable_internal(bp, true); 813 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 814 } 815 816 static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf) 817 { 818 /* Reset vf in IGU - interrupts are still disabled */ 819
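/* Note: bnx2x_vf_igu_reset() does its own pretend/un-pretend around
 * the VF configuration writes; only the PBF write below needs the
 * explicit pretend in this routine.
 */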
bnx2x_vf_igu_reset(bp, vf); 820 821 /* pretend to enable the vf with the PBF */ 822 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); 823 REG_WR(bp, PBF_REG_DISABLE_VF, 0); 824 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 825 } 826 827 static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid) 828 { 829 struct pci_dev *dev; 830 struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 831 832 if (!vf) 833 return false; 834 835 dev = pci_get_bus_and_slot(vf->bus, vf->devfn); 836 if (dev) 837 return bnx2x_is_pcie_pending(dev); 838 return false; 839 } 840 841 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) 842 { 843 /* Verify no pending pci transactions */ 844 if (bnx2x_vf_is_pcie_pending(bp, abs_vfid)) 845 BNX2X_ERR("PCIE Transactions still pending\n"); 846 847 return 0; 848 } 849 850 static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp, 851 struct bnx2x_virtf *vf, 852 int new) 853 { 854 int num = vf_vlan_rules_cnt(vf); 855 int diff = new - num; 856 bool rc = true; 857 858 DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n", 859 vf->abs_vfid, new, num); 860 861 if (diff > 0) 862 rc = bp->vlans_pool.get(&bp->vlans_pool, diff); 863 else if (diff < 0) 864 rc = bp->vlans_pool.put(&bp->vlans_pool, -diff); 865 866 if (rc) 867 vf_vlan_rules_cnt(vf) = new; 868 else 869 DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n", 870 vf->abs_vfid); 871 } 872 873 /* must be called after the number of PF queues and the number of VFs are 874 * both known 875 */ 876 static void 877 bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) 878 { 879 struct vf_pf_resc_request *resc = &vf->alloc_resc; 880 u16 vlan_count = 0; 881 882 /* will be set only during VF-ACQUIRE */ 883 resc->num_rxqs = 0; 884 resc->num_txqs = 0; 885 886 /* no credit calculations for macs (just yet) */ 887 resc->num_mac_filters = 1; 888 889 /* divvy up vlan rules */ 890 bnx2x_iov_re_set_vlan_filters(bp, vf, 0); 891 vlan_count = bp->vlans_pool.check(&bp->vlans_pool); 892 vlan_count = 1 << ilog2(vlan_count); 893 bnx2x_iov_re_set_vlan_filters(bp, vf, 894 vlan_count / BNX2X_NR_VIRTFN(bp)); 895 896 /* no real limitation */ 897 resc->num_mc_filters = 0; 898 899 /* num_sbs already set */ 900 resc->num_sbs = vf->sb_count; 901 } 902 903 /* FLR routines: */ 904 static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) 905 { 906 /* reset the state variables */ 907 bnx2x_iov_static_resc(bp, vf); 908 vf->state = VF_FREE; 909 } 910 911 static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf) 912 { 913 u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp); 914 915 /* DQ usage counter */ 916 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid)); 917 bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT, 918 "DQ VF usage counter timed out", 919 poll_cnt); 920 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 921 922 /* FW cleanup command - poll for the results */ 923 if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid), 924 poll_cnt)) 925 BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid); 926 927 /* verify TX hw is flushed */ 928 bnx2x_tx_hw_flushed(bp, poll_cnt); 929 } 930 931 static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) 932 { 933 int rc, i; 934 935 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 936 937 /* the cleanup operations are valid if and only if the VF 938 * was first acquired. 
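 * Queues are FLR-cleaned one at a time below; multicasts are then
 * dropped and the HW is polled for completion before the VF's
 * resources are returned to the free pool.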
939 */ 940 for (i = 0; i < vf_rxq_count(vf); i++) { 941 rc = bnx2x_vf_queue_flr(bp, vf, i); 942 if (rc) 943 goto out; 944 } 945 946 /* remove multicasts */ 947 bnx2x_vf_mcast(bp, vf, NULL, 0, true); 948 949 /* dispatch final cleanup and wait for HW queues to flush */ 950 bnx2x_vf_flr_clnup_hw(bp, vf); 951 952 /* release VF resources */ 953 bnx2x_vf_free_resc(bp, vf); 954 955 /* re-open the mailbox */ 956 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); 957 return; 958 out: 959 BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n", 960 vf->abs_vfid, i, rc); 961 } 962 963 static void bnx2x_vf_flr_clnup(struct bnx2x *bp) 964 { 965 struct bnx2x_virtf *vf; 966 int i; 967 968 for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) { 969 /* VF should be RESET & in FLR cleanup states */ 970 if (bnx2x_vf(bp, i, state) != VF_RESET || 971 !bnx2x_vf(bp, i, flr_clnup_stage)) 972 continue; 973 974 DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", 975 i, BNX2X_NR_VIRTFN(bp)); 976 977 vf = BP_VF(bp, i); 978 979 /* lock the vf pf channel */ 980 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); 981 982 /* invoke the VF FLR SM */ 983 bnx2x_vf_flr(bp, vf); 984 985 /* mark the VF to be ACKED and continue */ 986 vf->flr_clnup_stage = false; 987 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR); 988 } 989 990 /* Acknowledge the handled VFs. 991 * We acknowledge all the VFs for which an FLR was requested, even 992 * those we never opened, since the MCP will interrupt us immediately 993 * again if we only ack some of the bits, resulting in an endless 994 * loop. This can happen for example in KVM where an 'all ones' FLR 995 * request is sometimes given by the hypervisor 996 */ 997 DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n", 998 bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]); 999 for (i = 0; i < FLRD_VFS_DWORDS; i++) 1000 SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 1001 bp->vfdb->flrd_vfs[i]); 1002 1003 bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0); 1004 1005 /* clear the acked bits - better yet if the MCP implemented 1006 * write to clear semantics 1007 */ 1008 for (i = 0; i < FLRD_VFS_DWORDS; i++) 1009 SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0); 1010 } 1011 1012 void bnx2x_vf_handle_flr_event(struct bnx2x *bp) 1013 { 1014 int i; 1015 1016 /* Read FLR'd VFs */ 1017 for (i = 0; i < FLRD_VFS_DWORDS; i++) 1018 bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]); 1019 1020 DP(BNX2X_MSG_MCP, 1021 "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n", 1022 bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]); 1023 1024 for_each_vf(bp, i) { 1025 struct bnx2x_virtf *vf = BP_VF(bp, i); 1026 u32 reset = 0; 1027 1028 if (vf->abs_vfid < 32) 1029 reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid); 1030 else 1031 reset = bp->vfdb->flrd_vfs[1] & 1032 (1 << (vf->abs_vfid - 32)); 1033 1034 if (reset) { 1035 /* set as reset and ready for cleanup */ 1036 vf->state = VF_RESET; 1037 vf->flr_clnup_stage = true; 1038 1039 DP(BNX2X_MSG_IOV, 1040 "Initiating Final cleanup for VF %d\n", 1041 vf->abs_vfid); 1042 } 1043 } 1044 1045 /* do the FLR cleanup for all marked VFs */ 1046 bnx2x_vf_flr_clnup(bp); 1047 } 1048 1049 /* IOV global initialization routines */ 1050 void bnx2x_iov_init_dq(struct bnx2x *bp) 1051 { 1052 if (!IS_SRIOV(bp)) 1053 return; 1054 1055 /* Set the DQ such that the CID reflect the abs_vfid */ 1056 REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0); 1057 REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS)); 1058 1059 /* Set VFs starting CID. If it's > 0, the preceding CIDs belong to 1060 * the PF L2 queues 1061 */ 1062 REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID); 1063 1064 /* The VF window size is the log2 of the max number of CIDs per VF */ 1065 REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND); 1066 1067 /* The VF doorbell size: 0 - 8B, 4 - 128B. We set it here to match 1068 * the PF doorbell size although the two are independent. 1069 */ 1070 REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3); 1071 1072 /* No security checks for now - 1073 * configure single rule (out of 16) mask = 0x1, value = 0x0, 1074 * CID range 0 - 0x1ffff 1075 */ 1076 REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1); 1077 REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0); 1078 REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); 1079 REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); 1080 1081 /* set the VF doorbell threshold. This threshold represents the amount 1082 * of doorbells allowed in the main DORQ fifo for a specific VF. 1083 */ 1084 REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64); 1085 } 1086 1087 void bnx2x_iov_init_dmae(struct bnx2x *bp) 1088 { 1089 if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) 1090 REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0); 1091 } 1092 1093 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) 1094 { 1095 struct pci_dev *dev = bp->pdev; 1096 struct bnx2x_sriov *iov = &bp->vfdb->sriov; 1097 1098 return dev->bus->number + ((dev->devfn + iov->offset + 1099 iov->stride * vfid) >> 8); 1100 } 1101 1102 static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid) 1103 { 1104 struct pci_dev *dev = bp->pdev; 1105 struct bnx2x_sriov *iov = &bp->vfdb->sriov; 1106 1107 return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff; 1108 } 1109 1110 static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf) 1111 { 1112 int i, n; 1113 struct pci_dev *dev = bp->pdev; 1114 struct bnx2x_sriov *iov = &bp->vfdb->sriov; 1115 1116 for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) { 1117 u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i); 1118 u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i); 1119 1120 size /= iov->total; 1121 vf->bars[n].bar = start + size * vf->abs_vfid; 1122 vf->bars[n].size = size; 1123 } 1124 } 1125 1126 static int bnx2x_ari_enabled(struct pci_dev *dev) 1127 { 1128 return dev->bus->self && dev->bus->self->ari_enabled; 1129 } 1130 1131 static int 1132 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) 1133 { 1134 int sb_id; 1135 u32 val; 1136 u8 fid, current_pf = 0; 1137 1138 /* IGU in normal mode - read CAM */ 1139 for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) { 1140 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4); 1141 if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) 1142 continue; 1143 fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID); 1144 if (fid & IGU_FID_ENCODE_IS_PF) 1145 current_pf = fid & IGU_FID_PF_NUM_MASK; 1146 else if (current_pf == BP_FUNC(bp)) 1147 bnx2x_vf_set_igu_info(bp, sb_id, 1148 (fid & IGU_FID_VF_NUM_MASK)); 1149 DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n", 1150 ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"), 1151 ((fid & IGU_FID_ENCODE_IS_PF) ?
(fid & IGU_FID_PF_NUM_MASK) : 1152 (fid & IGU_FID_VF_NUM_MASK)), sb_id, 1153 GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)); 1154 } 1155 DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool); 1156 return BP_VFDB(bp)->vf_sbs_pool; 1157 } 1158 1159 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp) 1160 { 1161 if (bp->vfdb) { 1162 kfree(bp->vfdb->vfqs); 1163 kfree(bp->vfdb->vfs); 1164 kfree(bp->vfdb); 1165 } 1166 bp->vfdb = NULL; 1167 } 1168 1169 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov) 1170 { 1171 int pos; 1172 struct pci_dev *dev = bp->pdev; 1173 1174 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); 1175 if (!pos) { 1176 BNX2X_ERR("failed to find SRIOV capability in device\n"); 1177 return -ENODEV; 1178 } 1179 1180 iov->pos = pos; 1181 DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos); 1182 pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl); 1183 pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total); 1184 pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial); 1185 pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset); 1186 pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride); 1187 pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); 1188 pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap); 1189 pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); 1190 1191 return 0; 1192 } 1193 1194 static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov) 1195 { 1196 u32 val; 1197 1198 /* read the SRIOV capability structure 1199 * The fields can be read via configuration read or 1200 * directly from the device (starting at offset PCICFG_OFFSET) 1201 */ 1202 if (bnx2x_sriov_pci_cfg_info(bp, iov)) 1203 return -ENODEV; 1204 1205 /* get the number of SRIOV bars */ 1206 iov->nres = 0; 1207 1208 /* read the first_vfid */ 1209 val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF); 1210 iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK) 1211 * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp)); 1212 1213 DP(BNX2X_MSG_IOV, 1214 "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n", 1215 BP_FUNC(bp), 1216 iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total, 1217 iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz); 1218 1219 return 0; 1220 } 1221 1222 /* must be called after PF bars are mapped */ 1223 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, 1224 int num_vfs_param) 1225 { 1226 int err, i; 1227 struct bnx2x_sriov *iov; 1228 struct pci_dev *dev = bp->pdev; 1229 1230 bp->vfdb = NULL; 1231 1232 /* verify this is a PF */ 1233 if (IS_VF(bp)) 1234 return 0; 1235 1236 /* verify sriov capability is present in configuration space */ 1237 if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV)) 1238 return 0; 1239 1240 /* verify chip revision */ 1241 if (CHIP_IS_E1x(bp)) 1242 return 0; 1243 1244 /* check if SRIOV support is turned off */ 1245 if (!num_vfs_param) 1246 return 0; 1247 1248 /* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */ 1249 if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) { 1250 BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n", 1251 BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID); 1252 return 0; 1253 } 1254 1255 /* SRIOV can be enabled only with MSIX */ 1256 if (int_mode_param == BNX2X_INT_MODE_MSI || 1257 int_mode_param == BNX2X_INT_MODE_INTX) { 1258 BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n"); 1259 return 0; 1260 } 1261 1262 err = -EIO; 1263 /* verify ari is enabled */ 1264 if (!bnx2x_ari_enabled(bp->pdev)) { 1265 BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n"); 1266 return 0; 1267 } 1268 1269 /* verify igu is in normal mode */ 1270 if (CHIP_INT_MODE_IS_BC(bp)) { 1271 BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n"); 1272 return 0; 1273 } 1274 1275 /* allocate the vfs database */ 1276 bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL); 1277 if (!bp->vfdb) { 1278 BNX2X_ERR("failed to allocate vf database\n"); 1279 err = -ENOMEM; 1280 goto failed; 1281 } 1282 1283 /* get the sriov info - Linux already collected all the pertinent 1284 * information, however the sriov structure is for the private use 1285 * of the pci module. Also we want this information regardless 1286 * of the hypervisor. 1287 */ 1288 iov = &(bp->vfdb->sriov); 1289 err = bnx2x_sriov_info(bp, iov); 1290 if (err) 1291 goto failed; 1292 1293 /* SR-IOV capability was enabled but there are no VFs */ 1294 if (iov->total == 0) 1295 goto failed; 1296 1297 iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param); 1298 1299 DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n", 1300 num_vfs_param, iov->nr_virtfn); 1301 1302 /* allocate the vf array */ 1303 bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) * 1304 BNX2X_NR_VIRTFN(bp), GFP_KERNEL); 1305 if (!bp->vfdb->vfs) { 1306 BNX2X_ERR("failed to allocate vf array\n"); 1307 err = -ENOMEM; 1308 goto failed; 1309 } 1310 1311 /* Initial VF init - index and abs_vfid - nr_virtfn must be set */ 1312 for_each_vf(bp, i) { 1313 bnx2x_vf(bp, i, index) = i; 1314 bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i; 1315 bnx2x_vf(bp, i, state) = VF_FREE; 1316 mutex_init(&bnx2x_vf(bp, i, op_mutex)); 1317 bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE; 1318 } 1319 1320 /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ 1321 if (!bnx2x_get_vf_igu_cam_info(bp)) { 1322 BNX2X_ERR("No entries in IGU CAM for vfs\n"); 1323 err = -EINVAL; 1324 goto failed; 1325 } 1326 1327 /* allocate the queue arrays for all VFs */ 1328 bp->vfdb->vfqs = kzalloc( 1329 BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue), 1330 GFP_KERNEL); 1331 1332 if (!bp->vfdb->vfqs) { 1333 BNX2X_ERR("failed to allocate vf queue array\n"); 1334 err = -ENOMEM; 1335 goto failed; 1336 } 1337 1338 /* Prepare the VFs event synchronization mechanism */ 1339 mutex_init(&bp->vfdb->event_mutex); 1340 1341 mutex_init(&bp->vfdb->bulletin_mutex); 1342 1343 return 0; 1344 failed: 1345 DP(BNX2X_MSG_IOV, "Failed err=%d\n", err); 1346 __bnx2x_iov_free_vfdb(bp); 1347 return err; 1348 } 1349 1350 void bnx2x_iov_remove_one(struct bnx2x *bp) 1351 { 1352 int vf_idx; 1353 1354 /* if SRIOV is not enabled there's nothing to do */ 1355 if (!IS_SRIOV(bp)) 1356 return; 1357 1358 bnx2x_disable_sriov(bp); 1359 1360 /* disable access to all VFs */ 1361 for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) { 1362 bnx2x_pretend_func(bp, 1363 HW_VF_HANDLE(bp, 1364 bp->vfdb->sriov.first_vf_in_pf + 1365 vf_idx)); 1366 DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n", 1367 bp->vfdb->sriov.first_vf_in_pf + vf_idx); 1368 bnx2x_vf_enable_internal(bp, 0); 1369
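/* un-pretend: restore the PF's own function id */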
bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)); 1370 } 1371 1372 /* free vf database */ 1373 __bnx2x_iov_free_vfdb(bp); 1374 } 1375 1376 void bnx2x_iov_free_mem(struct bnx2x *bp) 1377 { 1378 int i; 1379 1380 if (!IS_SRIOV(bp)) 1381 return; 1382 1383 /* free vfs hw contexts */ 1384 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 1385 struct hw_dma *cxt = &bp->vfdb->context[i]; 1386 BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size); 1387 } 1388 1389 BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr, 1390 BP_VFDB(bp)->sp_dma.mapping, 1391 BP_VFDB(bp)->sp_dma.size); 1392 1393 BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr, 1394 BP_VF_MBX_DMA(bp)->mapping, 1395 BP_VF_MBX_DMA(bp)->size); 1396 1397 BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr, 1398 BP_VF_BULLETIN_DMA(bp)->mapping, 1399 BP_VF_BULLETIN_DMA(bp)->size); 1400 } 1401 1402 int bnx2x_iov_alloc_mem(struct bnx2x *bp) 1403 { 1404 size_t tot_size; 1405 int i, rc = 0; 1406 1407 if (!IS_SRIOV(bp)) 1408 return rc; 1409 1410 /* allocate vfs hw contexts */ 1411 tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) * 1412 BNX2X_CIDS_PER_VF * sizeof(union cdu_context); 1413 1414 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 1415 struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i); 1416 cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ); 1417 1418 if (cxt->size) { 1419 cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size); 1420 if (!cxt->addr) 1421 goto alloc_mem_err; 1422 } else { 1423 cxt->addr = NULL; 1424 cxt->mapping = 0; 1425 } 1426 tot_size -= cxt->size; 1427 } 1428 1429 /* allocate vfs ramrods dma memory - client_init and set_mac */ 1430 tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp); 1431 BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping, 1432 tot_size); 1433 if (!BP_VFDB(bp)->sp_dma.addr) 1434 goto alloc_mem_err; 1435 BP_VFDB(bp)->sp_dma.size = tot_size; 1436 1437 /* allocate mailboxes */ 1438 tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; 1439 BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping, 1440 tot_size); 1441 if (!BP_VF_MBX_DMA(bp)->addr) 1442 goto alloc_mem_err; 1443 1444 BP_VF_MBX_DMA(bp)->size = tot_size; 1445 1446 /* allocate local bulletin boards */ 1447 tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE; 1448 BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping, 1449 tot_size); 1450 if (!BP_VF_BULLETIN_DMA(bp)->addr) 1451 goto alloc_mem_err; 1452 1453 BP_VF_BULLETIN_DMA(bp)->size = tot_size; 1454 1455 return 0; 1456 1457 alloc_mem_err: 1458 return -ENOMEM; 1459 } 1460 1461 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, 1462 struct bnx2x_vf_queue *q) 1463 { 1464 u8 cl_id = vfq_cl_id(vf, q); 1465 u8 func_id = FW_VF_HANDLE(vf->abs_vfid); 1466 unsigned long q_type = 0; 1467 1468 set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type); 1469 set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type); 1470 1471 /* Queue State object */ 1472 bnx2x_init_queue_obj(bp, &q->sp_obj, 1473 cl_id, &q->cid, 1, func_id, 1474 bnx2x_vf_sp(bp, vf, q_data), 1475 bnx2x_vf_sp_map(bp, vf, q_data), 1476 q_type); 1477 1478 /* sp indication is set only when vlan/mac/etc. are initialized */ 1479 q->sp_initialized = false; 1480 1481 DP(BNX2X_MSG_IOV, 1482 "initialized vf %d's queue object. func id set to %d. 
cid set to 0x%x\n", 1483 vf->abs_vfid, q->sp_obj.func_id, q->cid); 1484 } 1485 1486 static int bnx2x_max_speed_cap(struct bnx2x *bp) 1487 { 1488 u32 supported = bp->port.supported[bnx2x_get_link_cfg_idx(bp)]; 1489 1490 if (supported & 1491 (SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full)) 1492 return 20000; 1493 1494 return 10000; /* assume lowest supported speed is 10G */ 1495 } 1496 1497 int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx) 1498 { 1499 struct bnx2x_link_report_data *state = &bp->last_reported_link; 1500 struct pf_vf_bulletin_content *bulletin; 1501 struct bnx2x_virtf *vf; 1502 bool update = true; 1503 int rc = 0; 1504 1505 /* sanity and init */ 1506 rc = bnx2x_vf_op_prep(bp, idx, &vf, &bulletin, false); 1507 if (rc) 1508 return rc; 1509 1510 mutex_lock(&bp->vfdb->bulletin_mutex); 1511 1512 if (vf->link_cfg == IFLA_VF_LINK_STATE_AUTO) { 1513 bulletin->valid_bitmap |= 1 << LINK_VALID; 1514 1515 bulletin->link_speed = state->line_speed; 1516 bulletin->link_flags = 0; 1517 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN, 1518 &state->link_report_flags)) 1519 bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN; 1520 if (test_bit(BNX2X_LINK_REPORT_FD, 1521 &state->link_report_flags)) 1522 bulletin->link_flags |= VFPF_LINK_REPORT_FULL_DUPLEX; 1523 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON, 1524 &state->link_report_flags)) 1525 bulletin->link_flags |= VFPF_LINK_REPORT_RX_FC_ON; 1526 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON, 1527 &state->link_report_flags)) 1528 bulletin->link_flags |= VFPF_LINK_REPORT_TX_FC_ON; 1529 } else if (vf->link_cfg == IFLA_VF_LINK_STATE_DISABLE && 1530 !(bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) { 1531 bulletin->valid_bitmap |= 1 << LINK_VALID; 1532 bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN; 1533 } else if (vf->link_cfg == IFLA_VF_LINK_STATE_ENABLE && 1534 (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) { 1535 bulletin->valid_bitmap |= 1 << LINK_VALID; 1536 bulletin->link_speed = bnx2x_max_speed_cap(bp); 1537 bulletin->link_flags &= ~VFPF_LINK_REPORT_LINK_DOWN; 1538 } else { 1539 update = false; 1540 } 1541 1542 if (update) { 1543 DP(NETIF_MSG_LINK | BNX2X_MSG_IOV, 1544 "vf %d mode %u speed %d flags %x\n", idx, 1545 vf->link_cfg, bulletin->link_speed, bulletin->link_flags); 1546 1547 /* Post update on VF's bulletin board */ 1548 rc = bnx2x_post_vf_bulletin(bp, idx); 1549 if (rc) { 1550 BNX2X_ERR("failed to update VF[%d] bulletin\n", idx); 1551 goto out; 1552 } 1553 } 1554 1555 out: 1556 mutex_unlock(&bp->vfdb->bulletin_mutex); 1557 return rc; 1558 } 1559 1560 int bnx2x_set_vf_link_state(struct net_device *dev, int idx, int link_state) 1561 { 1562 struct bnx2x *bp = netdev_priv(dev); 1563 struct bnx2x_virtf *vf = BP_VF(bp, idx); 1564 1565 if (!vf) 1566 return -EINVAL; 1567 1568 if (vf->link_cfg == link_state) 1569 return 0; /* nothing to do */ 1570 1571 vf->link_cfg = link_state; 1572 1573 return bnx2x_iov_link_update_vf(bp, idx); 1574 } 1575 1576 void bnx2x_iov_link_update(struct bnx2x *bp) 1577 { 1578 int vfid; 1579 1580 if (!IS_SRIOV(bp)) 1581 return; 1582 1583 for_each_vf(bp, vfid) 1584 bnx2x_iov_link_update_vf(bp, vfid); 1585 } 1586 1587 /* called by bnx2x_nic_load */ 1588 int bnx2x_iov_nic_init(struct bnx2x *bp) 1589 { 1590 int vfid; 1591 1592 if (!IS_SRIOV(bp)) { 1593 DP(BNX2X_MSG_IOV, "vfdb was not allocated\n"); 1594 return 0; 1595 } 1596 1597 DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn); 1598 1599 /* let FLR complete ...
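 * (a short grace period before the VF database below is re-initialized)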
*/ 1600 msleep(100); 1601 1602 /* initialize vf database */ 1603 for_each_vf(bp, vfid) { 1604 struct bnx2x_virtf *vf = BP_VF(bp, vfid); 1605 1606 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) * 1607 BNX2X_CIDS_PER_VF; 1608 1609 union cdu_context *base_cxt = (union cdu_context *) 1610 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr + 1611 (base_vf_cid & (ILT_PAGE_CIDS-1)); 1612 1613 DP(BNX2X_MSG_IOV, 1614 "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n", 1615 vf->abs_vfid, vf_sb_count(vf), base_vf_cid, 1616 BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt); 1617 1618 /* init statically provisioned resources */ 1619 bnx2x_iov_static_resc(bp, vf); 1620 1621 /* queues are initialized during VF-ACQUIRE */ 1622 vf->filter_state = 0; 1623 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); 1624 1625 /* init mcast object - This object will be re-initialized 1626 * during VF-ACQUIRE with the proper cl_id and cid. 1627 * It needs to be initialized here so that it can be safely 1628 * handled by a subsequent FLR flow. 1629 */ 1630 vf->mcast_list_len = 0; 1631 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF, 1632 0xFF, 0xFF, 0xFF, 1633 bnx2x_vf_sp(bp, vf, mcast_rdata), 1634 bnx2x_vf_sp_map(bp, vf, mcast_rdata), 1635 BNX2X_FILTER_MCAST_PENDING, 1636 &vf->filter_state, 1637 BNX2X_OBJ_TYPE_RX_TX); 1638 1639 /* set the mailbox message addresses */ 1640 BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *) 1641 (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid * 1642 MBX_MSG_ALIGNED_SIZE); 1643 1644 BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping + 1645 vfid * MBX_MSG_ALIGNED_SIZE; 1646 1647 /* Enable vf mailbox */ 1648 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); 1649 } 1650 1651 /* Final VF init */ 1652 for_each_vf(bp, vfid) { 1653 struct bnx2x_virtf *vf = BP_VF(bp, vfid); 1654 1655 /* fill in the BDF and bars */ 1656 vf->bus = bnx2x_vf_bus(bp, vfid); 1657 vf->devfn = bnx2x_vf_devfn(bp, vfid); 1658 bnx2x_vf_set_bars(bp, vf); 1659 1660 DP(BNX2X_MSG_IOV, 1661 "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n", 1662 vf->abs_vfid, vf->bus, vf->devfn, 1663 (unsigned)vf->bars[0].bar, vf->bars[0].size, 1664 (unsigned)vf->bars[1].bar, vf->bars[1].size, 1665 (unsigned)vf->bars[2].bar, vf->bars[2].size); 1666 } 1667 1668 return 0; 1669 } 1670 1671 /* called by bnx2x_chip_cleanup */ 1672 int bnx2x_iov_chip_cleanup(struct bnx2x *bp) 1673 { 1674 int i; 1675 1676 if (!IS_SRIOV(bp)) 1677 return 0; 1678 1679 /* release all the VFs */ 1680 for_each_vf(bp, i) 1681 bnx2x_vf_release(bp, BP_VF(bp, i)); 1682 1683 return 0; 1684 } 1685 1686 /* called by bnx2x_init_hw_func, returns the next ilt line */ 1687 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) 1688 { 1689 int i; 1690 struct bnx2x_ilt *ilt = BP_ILT(bp); 1691 1692 if (!IS_SRIOV(bp)) 1693 return line; 1694 1695 /* set vfs ilt lines */ 1696 for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) { 1697 struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i); 1698 1699 ilt->lines[line+i].page = hw_cxt->addr; 1700 ilt->lines[line+i].page_mapping = hw_cxt->mapping; 1701 ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */ 1702 } 1703 return line + i; 1704 } 1705 1706 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid) 1707 { 1708 return ((cid >= BNX2X_FIRST_VF_CID) && 1709 ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS)); 1710 } 1711 1712 static 1713 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp, 1714 struct bnx2x_vf_queue *vfq, 1715 union event_ring_elem *elem) 1716 { 1717 unsigned long ramrod_flags = 0; 
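/* Each classification EQE completes a single pending vlan/mac command;
 * RAMROD_CONT (set below) lets the object issue any queued follow-up
 * commands immediately instead of waiting for another trigger.
 */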
1718 int rc = 0; 1719 1720 /* Always push next commands out, don't wait here */ 1721 set_bit(RAMROD_CONT, &ramrod_flags); 1722 1723 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { 1724 case BNX2X_FILTER_MAC_PENDING: 1725 rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, 1726 &ramrod_flags); 1727 break; 1728 case BNX2X_FILTER_VLAN_PENDING: 1729 rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem, 1730 &ramrod_flags); 1731 break; 1732 default: 1733 BNX2X_ERR("Unsupported classification command: %d\n", 1734 elem->message.data.eth_event.echo); 1735 return; 1736 } 1737 if (rc < 0) 1738 BNX2X_ERR("Failed to schedule new commands: %d\n", rc); 1739 else if (rc > 0) 1740 DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n"); 1741 } 1742 1743 static 1744 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp, 1745 struct bnx2x_virtf *vf) 1746 { 1747 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 1748 int rc; 1749 1750 rparam.mcast_obj = &vf->mcast_obj; 1751 vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw); 1752 1753 /* If there are pending mcast commands - send them */ 1754 if (vf->mcast_obj.check_pending(&vf->mcast_obj)) { 1755 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT); 1756 if (rc < 0) 1757 BNX2X_ERR("Failed to send pending mcast commands: %d\n", 1758 rc); 1759 } 1760 } 1761 1762 static 1763 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, 1764 struct bnx2x_virtf *vf) 1765 { 1766 smp_mb__before_atomic(); 1767 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); 1768 smp_mb__after_atomic(); 1769 } 1770 1771 static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp, 1772 struct bnx2x_virtf *vf) 1773 { 1774 vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw); 1775 } 1776 1777 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem) 1778 { 1779 struct bnx2x_virtf *vf; 1780 int qidx = 0, abs_vfid; 1781 u8 opcode; 1782 u16 cid = 0xffff; 1783 1784 if (!IS_SRIOV(bp)) 1785 return 1; 1786 1787 /* first get the cid - the only events we handle here are cfc-delete 1788 * and set-mac completion 1789 */ 1790 opcode = elem->message.opcode; 1791 1792 switch (opcode) { 1793 case EVENT_RING_OPCODE_CFC_DEL: 1794 cid = SW_CID((__force __le32) 1795 elem->message.data.cfc_del_event.cid); 1796 DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid); 1797 break; 1798 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 1799 case EVENT_RING_OPCODE_MULTICAST_RULES: 1800 case EVENT_RING_OPCODE_FILTERS_RULES: 1801 case EVENT_RING_OPCODE_RSS_UPDATE_RULES: 1802 cid = (elem->message.data.eth_event.echo & 1803 BNX2X_SWCID_MASK); 1804 DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); 1805 break; 1806 case EVENT_RING_OPCODE_VF_FLR: 1807 abs_vfid = elem->message.data.vf_flr_event.vf_id; 1808 DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n", 1809 abs_vfid); 1810 goto get_vf; 1811 case EVENT_RING_OPCODE_MALICIOUS_VF: 1812 abs_vfid = elem->message.data.malicious_vf_event.vf_id; 1813 BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n", 1814 abs_vfid, 1815 elem->message.data.malicious_vf_event.err_id); 1816 goto get_vf; 1817 default: 1818 return 1; 1819 } 1820 1821 /* check if the cid is in the VF range */ 1822 if (!bnx2x_iov_is_vf_cid(bp, cid)) { 1823 DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid); 1824 return 1; 1825 } 1826 1827 /* extract vf and rxq index from vf_cid - relies on the following: 1828 * 1. vfid on cid reflects the true abs_vfid 1829 * 2.
The max number of VFs (per path) is 64 1830 */ 1831 qidx = cid & ((1 << BNX2X_VF_CID_WND)-1); 1832 abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 1833 get_vf: 1834 vf = bnx2x_vf_by_abs_fid(bp, abs_vfid); 1835 1836 if (!vf) { 1837 BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n", 1838 cid, abs_vfid); 1839 return 0; 1840 } 1841 1842 switch (opcode) { 1843 case EVENT_RING_OPCODE_CFC_DEL: 1844 DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n", 1845 vf->abs_vfid, qidx); 1846 vfq_get(vf, qidx)->sp_obj.complete_cmd(bp, 1847 &vfq_get(vf, 1848 qidx)->sp_obj, 1849 BNX2X_Q_CMD_CFC_DEL); 1850 break; 1851 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 1852 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n", 1853 vf->abs_vfid, qidx); 1854 bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem); 1855 break; 1856 case EVENT_RING_OPCODE_MULTICAST_RULES: 1857 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n", 1858 vf->abs_vfid, qidx); 1859 bnx2x_vf_handle_mcast_eqe(bp, vf); 1860 break; 1861 case EVENT_RING_OPCODE_FILTERS_RULES: 1862 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n", 1863 vf->abs_vfid, qidx); 1864 bnx2x_vf_handle_filters_eqe(bp, vf); 1865 break; 1866 case EVENT_RING_OPCODE_RSS_UPDATE_RULES: 1867 DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n", 1868 vf->abs_vfid, qidx); 1869 bnx2x_vf_handle_rss_update_eqe(bp, vf); /* fall through */ 1870 case EVENT_RING_OPCODE_VF_FLR: 1871 case EVENT_RING_OPCODE_MALICIOUS_VF: 1872 /* Do nothing for now */ 1873 return 0; 1874 } 1875 1876 return 0; 1877 } 1878 1879 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid) 1880 { 1881 /* extract the vf from vf_cid - relies on the following: 1882 * 1. vfid on cid reflects the true abs_vfid 1883 * 2. The max number of VFs (per path) is 64 1884 */ 1885 int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1); 1886 return bnx2x_vf_by_abs_fid(bp, abs_vfid); 1887 } 1888 1889 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, 1890 struct bnx2x_queue_sp_obj **q_obj) 1891 { 1892 struct bnx2x_virtf *vf; 1893 1894 if (!IS_SRIOV(bp)) 1895 return; 1896 1897 vf = bnx2x_vf_by_cid(bp, vf_cid); 1898 1899 if (vf) { 1900 /* extract queue index from vf_cid - relies on the following: 1901 * 1. vfid on cid reflects the true abs_vfid 1902 * 2. The max number of VFs (per path) is 64 1903 */ 1904 int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1); 1905 *q_obj = &bnx2x_vfq(vf, q_index, sp_obj); 1906 } else { 1907 BNX2X_ERR("No vf matching cid %d\n", vf_cid); 1908 } 1909 } 1910 1911 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) 1912 { 1913 int i; 1914 int first_queue_query_index, num_queues_req; 1915 dma_addr_t cur_data_offset; 1916 struct stats_query_entry *cur_query_entry; 1917 u8 stats_count = 0; 1918 bool is_fcoe = false; 1919 1920 if (!IS_SRIOV(bp)) 1921 return; 1922 1923 if (!NO_FCOE(bp)) 1924 is_fcoe = true; 1925 1926 /* fcoe adds one global request and one queue request */ 1927 num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe; 1928 first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1929 (is_fcoe ? 0 : 1); 1930 1931 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), 1932 "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d.
void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
				struct bnx2x_queue_sp_obj **q_obj)
{
	struct bnx2x_virtf *vf;

	if (!IS_SRIOV(bp))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);

	if (vf) {
		/* extract queue index from vf_cid - relies on the following:
		 * 1. vfid on cid reflects the true abs_vfid
		 * 2. The max number of VFs (per path) is 64
		 */
		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
	} else {
		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
	}
}

void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index, num_queues_req;
	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;
	u8 stats_count = 0;
	bool is_fcoe = false;

	if (!IS_SRIOV(bp))
		return;

	if (!NO_FCOE(bp))
		is_fcoe = true;

	/* fcoe adds one global request and one queue request */
	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
		(is_fcoe ? 0 : 1);

	DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
	       "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
	       BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
	       first_queue_query_index + num_queues_req);

	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
		num_queues_req * sizeof(struct per_queue_stats);

	cur_query_entry = &bp->fw_stats_req->
		query[first_queue_query_index + num_queues_req];

	for_each_vf(bp, i) {
		int j;
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (vf->state != VF_ENABLED) {
			DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
			       "vf %d not enabled so no stats for it\n",
			       vf->abs_vfid);
			continue;
		}

		DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
		for_each_vfq(vf, j) {
			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);

			dma_addr_t q_stats_addr =
				vf->fw_stat_map + j * vf->stats_stride;

			/* collect stats from active queues only */
			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
			    BNX2X_Q_LOGICAL_STATE_STOPPED)
				continue;

			/* create stats query entry for this queue */
			cur_query_entry->kind = STATS_TYPE_QUEUE;
			cur_query_entry->index = vfq_stat_id(vf, rxq);
			cur_query_entry->funcID =
				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
			cur_query_entry->address.hi =
				cpu_to_le32(U64_HI(q_stats_addr));
			cur_query_entry->address.lo =
				cpu_to_le32(U64_LO(q_stats_addr));
			DP(BNX2X_MSG_IOV,
			   "added address %x %x for vf %d queue %d client %d\n",
			   cur_query_entry->address.hi,
			   cur_query_entry->address.lo, cur_query_entry->funcID,
			   j, cur_query_entry->index);
			cur_query_entry++;
			cur_data_offset += sizeof(struct per_queue_stats);
			stats_count++;

			/* all stats are coalesced to the leading queue */
			if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
				break;
		}
	}
	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
}

/* VF API helpers */
static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
				u8 enable)
{
	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
	/* vfid in the low bits; bit 6 is set only when the entry is enabled */
	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;

	REG_WR(bp, reg, val);
}

static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;

	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), false);
}

static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 val;

	/* clear the VF configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
		 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
		     BNX2X_VF_MAX_QUEUES);
}
static
int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
			    struct vf_pf_resc_request *req_resc)
{
	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);

	/* Save a vlan filter for the Hypervisor */
	return ((req_resc->num_rxqs <= rxq_cnt) &&
		(req_resc->num_txqs <= txq_cnt) &&
		(req_resc->num_sbs <= vf_sb_count(vf)) &&
		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
		(req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
}
/* CORE VF API */
int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
		     struct vf_pf_resc_request *resc)
{
	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
		BNX2X_CIDS_PER_VF;

	union cdu_context *base_cxt = (union cdu_context *)
		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
		(base_vf_cid & (ILT_PAGE_CIDS-1));
	int i;

	/* if state is 'acquired' the VF was not released or FLR'd, in
	 * this case the returned resources match the already acquired
	 * resources. Verify that the requested numbers do not exceed the
	 * already acquired numbers.
	 */
	if (vf->state == VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
		   vf->abs_vfid);

		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must not exceed the previously acquired numbers\n",
				  vf->abs_vfid);
			return -EINVAL;
		}
		return 0;
	}

	/* Otherwise vf state must be 'free' or 'reset' */
	if (vf->state != VF_FREE && vf->state != VF_RESET) {
		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
			  vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* static allocation:
	 * the global maximum numbers are fixed per VF. Fail the request if
	 * the requested numbers exceed these globals
	 */
	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
		DP(BNX2X_MSG_IOV,
		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
		/* set the max resource in the vf */
		return -ENOMEM;
	}

	/* Set resources counters - 0 request means max available */
	vf_sb_count(vf) = resc->num_sbs;
	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	if (resc->num_mac_filters)
		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
	/* Add an additional vlan filter credit for the hypervisor */
	bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);

	DP(BNX2X_MSG_IOV,
	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
	   vf_sb_count(vf), vf_rxq_count(vf),
	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
	   vf_vlan_rules_visible_cnt(vf));

	/* Initialize the queues */
	if (!vf->vfqs) {
		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
		return -EINVAL;
	}

	for_each_vfq(vf, i) {
		struct bnx2x_vf_queue *q = vfq_get(vf, i);

		if (!q) {
			BNX2X_ERR("q number %d was not allocated\n", i);
			return -EINVAL;
		}

		q->index = i;
		q->cxt = &((base_cxt + i)->eth);
		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;

		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
		   vf->abs_vfid, i, q->index, q->cid, q->cxt);

		/* init SP objects */
		bnx2x_vfq_init(bp, vf, q);
	}
	vf->state = VF_ACQUIRED;
	return 0;
}
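
/* Editorial summary (derived from the state checks in this file, not an
 * authoritative spec) of the VF state machine that the functions here
 * drive:
 *
 *	VF_FREE/VF_RESET --bnx2x_vf_acquire()--> VF_ACQUIRED
 *	VF_ACQUIRED      --bnx2x_vf_init()-----> VF_ENABLED
 *	VF_ENABLED       --bnx2x_vf_close()----> VF_ACQUIRED
 *	any of the above --bnx2x_vf_free()-----> resources released
 */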
int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
{
	struct bnx2x_func_init_params func_init = {0};
	u16 flags = 0;
	int i;

	/* the sb resources are initialized at this point, do the
	 * FW/HW initializations
	 */
	for_each_vf_sb(vf, i)
		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));

	/* Sanity checks */
	if (vf->state != VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
		   vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* let FLR complete ... */
	msleep(100);

	/* FLR cleanup epilogue */
	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
		return -EBUSY;

	/* reset IGU VF statistics: MSIX */
	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);

	/* vf init */
	if (vf->cfg_flags & VF_CFG_STATS)
		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);

	if (vf->cfg_flags & VF_CFG_TPA)
		flags |= FUNC_FLG_TPA;

	if (is_vf_multi(vf))
		flags |= FUNC_FLG_RSS;

	/* function setup */
	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
	func_init.fw_stat_map = vf->fw_stat_map;
	func_init.spq_map = vf->spq_map;
	func_init.spq_prod = 0;
	bnx2x_func_init(bp, &func_init);

	/* Enable the vf */
	bnx2x_vf_enable_access(bp, vf->abs_vfid);
	bnx2x_vf_enable_traffic(bp, vf);

	/* queue protection table */
	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), true);

	vf->state = VF_ENABLED;

	/* update vf bulletin board */
	bnx2x_post_vf_bulletin(bp, vf->index);

	return 0;
}

struct set_vf_state_cookie {
	struct bnx2x_virtf *vf;
	u8 state;
};

static void bnx2x_set_vf_state(void *cookie)
{
	struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;

	p->vf->state = p->state;
}

int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc = 0, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Close all queues */
	for (i = 0; i < vf_rxq_count(vf); i++) {
		rc = bnx2x_vf_queue_teardown(bp, vf, i);
		if (rc)
			goto op_err;
	}

	/* disable the interrupts */
	DP(BNX2X_MSG_IOV, "disabling igu\n");
	bnx2x_vf_igu_disable(bp, vf);

	/* disable the VF */
	DP(BNX2X_MSG_IOV, "clearing qtbl\n");
	bnx2x_vf_clr_qtbl(bp, vf);

	/* need to make sure there are no outstanding stats ramrods which may
	 * cause the device to access the VF's stats buffer which it will free
	 * as soon as we return from the close flow.
	 */
	{
		struct set_vf_state_cookie cookie;

		cookie.vf = vf;
		cookie.state = VF_ACQUIRED;
		rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
		if (rc)
			goto op_err;
	}

	DP(BNX2X_MSG_IOV, "set state to acquired\n");

	return 0;
op_err:
	BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
	return rc;
}
"Reset" : 2269 "Unknown"); 2270 2271 switch (vf->state) { 2272 case VF_ENABLED: 2273 rc = bnx2x_vf_close(bp, vf); 2274 if (rc) 2275 goto op_err; 2276 /* Fallthrough to release resources */ 2277 case VF_ACQUIRED: 2278 DP(BNX2X_MSG_IOV, "about to free resources\n"); 2279 bnx2x_vf_free_resc(bp, vf); 2280 break; 2281 2282 case VF_FREE: 2283 case VF_RESET: 2284 default: 2285 break; 2286 } 2287 return 0; 2288 op_err: 2289 BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc); 2290 return rc; 2291 } 2292 2293 int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf, 2294 struct bnx2x_config_rss_params *rss) 2295 { 2296 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 2297 set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags); 2298 return bnx2x_config_rss(bp, rss); 2299 } 2300 2301 int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf, 2302 struct vfpf_tpa_tlv *tlv, 2303 struct bnx2x_queue_update_tpa_params *params) 2304 { 2305 aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr; 2306 struct bnx2x_queue_state_params qstate; 2307 int qid, rc = 0; 2308 2309 DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid); 2310 2311 /* Set ramrod params */ 2312 memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params)); 2313 memcpy(&qstate.params.update_tpa, params, 2314 sizeof(struct bnx2x_queue_update_tpa_params)); 2315 qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA; 2316 set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags); 2317 2318 for (qid = 0; qid < vf_rxq_count(vf); qid++) { 2319 qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj); 2320 qstate.params.update_tpa.sge_map = sge_addr[qid]; 2321 DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n", 2322 vf->abs_vfid, qid, U64_HI(sge_addr[qid]), 2323 U64_LO(sge_addr[qid])); 2324 rc = bnx2x_queue_state_change(bp, &qstate); 2325 if (rc) { 2326 BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n", 2327 U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]), 2328 vf->abs_vfid, qid); 2329 return rc; 2330 } 2331 } 2332 2333 return rc; 2334 } 2335 2336 /* VF release ~ VF close + VF release-resources 2337 * Release is the ultimate SW shutdown and is called whenever an 2338 * irrecoverable error is encountered. 2339 */ 2340 int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf) 2341 { 2342 int rc; 2343 2344 DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid); 2345 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); 2346 2347 rc = bnx2x_vf_free(bp, vf); 2348 if (rc) 2349 WARN(rc, 2350 "VF[%d] Failed to allocate resources for release op- rc=%d\n", 2351 vf->abs_vfid, rc); 2352 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); 2353 return rc; 2354 } 2355 2356 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 2357 enum channel_tlvs tlv) 2358 { 2359 /* we don't lock the channel for unsupported tlvs */ 2360 if (!bnx2x_tlv_supported(tlv)) { 2361 BNX2X_ERR("attempting to lock with unsupported tlv. 
void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
				enum channel_tlvs expected_tlv)
{
	enum channel_tlvs current_tlv;

	if (!vf) {
		BNX2X_ERR("VF was %p\n", vf);
		return;
	}

	current_tlv = vf->op_current;

	/* we don't unlock the channel for unsupported tlvs */
	if (!bnx2x_tlv_supported(expected_tlv))
		return;

	WARN(expected_tlv != vf->op_current,
	     "lock mismatch: expected %d found %d", expected_tlv,
	     vf->op_current);

	/* clear the locking op */
	vf->op_current = CHANNEL_TLV_NONE;

	/* unlock the channel */
	mutex_unlock(&vf->op_mutex);

	/* log the unlock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
	   vf->abs_vfid, current_tlv);
}
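
/* Illustrative sketch (hypothetical wrapper; bnx2x_vf_release() above is
 * the real in-file user of this pattern): a PF-side VF operation must
 * bracket its work with the channel lock and pass the *same* TLV on
 * unlock, or the WARN in bnx2x_unlock_vf_pf_channel() fires.
 */
static inline int bnx2x_example_locked_vf_op(struct bnx2x *bp,
					     struct bnx2x_virtf *vf)
{
	int rc;

	/* serialize against VF-originated flows on the same channel */
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
	rc = bnx2x_vf_free(bp, vf);
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
	return rc;
}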
static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
{
	struct bnx2x_queue_state_params q_params;
	u32 prev_flags;
	int i, rc;

	/* Verify changes are needed and record current Tx switching state */
	prev_flags = bp->flags;
	if (enable)
		bp->flags |= TX_SWITCHING;
	else
		bp->flags &= ~TX_SWITCHING;
	if (prev_flags == bp->flags)
		return 0;

	/* Verify state enables the sending of queue ramrods */
	if ((bp->state != BNX2X_STATE_OPEN) ||
	    (bnx2x_get_q_logical_state(bp,
				       &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
	     BNX2X_Q_LOGICAL_STATE_ACTIVE))
		return 0;

	/* send q. update ramrod to configure Tx switching */
	memset(&q_params, 0, sizeof(q_params));
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
	q_params.cmd = BNX2X_Q_CMD_UPDATE;
	__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
		  &q_params.params.update.update_flags);
	if (enable)
		__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
			  &q_params.params.update.update_flags);
	else
		__clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
			    &q_params.params.update.update_flags);

	/* send the ramrod on all the queues of the PF */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* Set the appropriate Queue object */
		q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

		/* Update the Queue state */
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to configure Tx switching\n");
			return rc;
		}
	}

	DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
	return 0;
}

int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));

	if (!IS_SRIOV(bp)) {
		BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
		return -EINVAL;
	}

	DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
	   num_vfs_param, BNX2X_NR_VIRTFN(bp));

	/* HW channel is only operational when PF is up */
	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
		return -EINVAL;
	}

	/* we are always bound by the total_vfs in the configuration space */
	if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
			  num_vfs_param, BNX2X_NR_VIRTFN(bp));
		num_vfs_param = BNX2X_NR_VIRTFN(bp);
	}

	bp->requested_nr_virtfn = num_vfs_param;
	if (num_vfs_param == 0) {
		bnx2x_set_pf_tx_switching(bp, false);
		bnx2x_disable_sriov(bp);
		return 0;
	} else {
		return bnx2x_enable_sriov(bp);
	}
}
#define IGU_ENTRY_SIZE 4

int bnx2x_enable_sriov(struct bnx2x *bp)
{
	int rc = 0, req_vfs = bp->requested_nr_virtfn;
	int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
	u32 igu_entry, address;
	u16 num_vf_queues;

	if (req_vfs == 0)
		return 0;

	first_vf = bp->vfdb->sriov.first_vf_in_pf;

	/* statically distribute vf sb pool between VFs */
	num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
			      BP_VFDB(bp)->vf_sbs_pool / req_vfs);

	/* zero previous values learned from igu cam */
	for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

		vf->sb_count = 0;
		vf_sb_count(BP_VF(bp, vf_idx)) = 0;
	}
	bp->vfdb->vf_sbs_pool = 0;

	/* prepare IGU cam */
	sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
	address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
		for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
			igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
				vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
				IGU_REG_MAPPING_MEMORY_VALID;
			DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
			   sb_idx, vf_idx);
			REG_WR(bp, address, igu_entry);
			sb_idx++;
			address += IGU_ENTRY_SIZE;
		}
	}

	/* Reinitialize vf database according to igu cam */
	bnx2x_get_vf_igu_cam_info(bp);

	DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
	   BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);

	qcount = 0;
	for_each_vf(bp, vf_idx) {
		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

		/* set local queue arrays */
		vf->vfqs = &bp->vfdb->vfqs[qcount];
		qcount += vf_sb_count(vf);
		bnx2x_iov_static_resc(bp, vf);
	}

	/* prepare msix vectors in VF configuration space - the value in the
	 * PCI configuration space should be the index of the last entry,
	 * namely one less than the actual size of the table
	 */
	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
		bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
		REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
		       num_vf_queues - 1);
		DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
		   vf_idx, num_vf_queues - 1);
	}
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* enable sriov. This will probe all the VFs, and consequently cause
	 * the "acquire" messages to appear on the VF PF channel.
	 */
	DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
	bnx2x_disable_sriov(bp);

	rc = bnx2x_set_pf_tx_switching(bp, true);
	if (rc)
		return rc;

	rc = pci_enable_sriov(bp->pdev, req_vfs);
	if (rc) {
		BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
		return rc;
	}
	DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
	return req_vfs;
}

void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
{
	int vfidx;
	struct pf_vf_bulletin_content *bulletin;

	DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
	for_each_vf(bp, vfidx) {
		bulletin = BP_VF_BULLETIN(bp, vfidx);
		if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
			bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
	}
}
void bnx2x_disable_sriov(struct bnx2x *bp)
{
	if (pci_vfs_assigned(bp->pdev)) {
		DP(BNX2X_MSG_IOV,
		   "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
		return;
	}

	pci_disable_sriov(bp->pdev);
}

static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
			    struct bnx2x_virtf **vf,
			    struct pf_vf_bulletin_content **bulletin,
			    bool test_queue)
{
	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("PF is down - can't utilize iov-related functionality\n");
		return -EINVAL;
	}

	if (!IS_SRIOV(bp)) {
		BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n");
		return -EINVAL;
	}

	if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("VF is uninitialized - can't utilize iov-related functionality. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
			  vfidx, BNX2X_NR_VIRTFN(bp));
		return -EINVAL;
	}

	/* init members */
	*vf = BP_VF(bp, vfidx);
	*bulletin = BP_VF_BULLETIN(bp, vfidx);

	if (!*vf) {
		BNX2X_ERR("Unable to get VF structure for vfidx %d\n", vfidx);
		return -EINVAL;
	}

	if (test_queue && !(*vf)->vfqs) {
		BNX2X_ERR("vfqs struct is null. Was this invoked before dynamically enabling SR-IOV? vfidx was %d\n",
			  vfidx);
		return -EINVAL;
	}

	if (!*bulletin) {
		BNX2X_ERR("Bulletin Board struct is null for vfidx %d\n",
			  vfidx);
		return -EINVAL;
	}

	return 0;
}

int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
			struct ifla_vf_info *ivi)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;
	struct bnx2x_vlan_mac_obj *mac_obj;
	struct bnx2x_vlan_mac_obj *vlan_obj;
	int rc;

	/* sanity and init */
	rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
	if (rc)
		return rc;

	mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
	if (!mac_obj || !vlan_obj) {
		BNX2X_ERR("VF partially initialized\n");
		return -EINVAL;
	}

	ivi->vf = vfidx;
	ivi->qos = 0;
	ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
	ivi->min_tx_rate = 0;
	ivi->spoofchk = 1; /* always enabled */
	if (vf->state == VF_ENABLED) {
		/* mac and vlan are in vlan_mac objects */
		if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
			mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
						0, ETH_ALEN);
			vlan_obj->get_n_elements(bp, vlan_obj, 1,
						 (u8 *)&ivi->vlan, 0,
						 VLAN_HLEN);
		}
	} else {
		mutex_lock(&bp->vfdb->bulletin_mutex);
		/* mac */
		if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
			/* mac configured by ndo so it's in the bulletin board */
			memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
		else
			/* function has not been loaded yet. Show mac as 0s */
			eth_zero_addr(ivi->mac);

		/* vlan */
		if (bulletin->valid_bitmap & (1 << VLAN_VALID))
			/* vlan configured by ndo so it's in the bulletin board */
			memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
		else
			/* function has not been loaded yet. Show vlans as 0s */
			memset(&ivi->vlan, 0, VLAN_HLEN);

		mutex_unlock(&bp->vfdb->bulletin_mutex);
	}

	return 0;
}
/* New mac for VF. Consider these cases:
 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
 *    supply at acquire.
 * 2. VF has already been acquired but has not yet initialized - store in local
 *    bulletin board. mac will be posted on VF bulletin board after VF init. VF
 *    will configure this mac when it is ready.
 * 3. VF has already initialized but has not yet setup a queue - post the new
 *    mac on VF's bulletin board right now. VF will configure this mac when it
 *    is ready.
 * 4. VF has already set a queue - delete any macs already configured for this
 *    queue and manually config the new mac.
 * In any event, once this function has been called refuse any attempts by the
 * VF to configure any mac for itself except for this mac. In case of a race
 * where the VF fails to see the new post on its bulletin board before sending a
 * mac configuration request, the PF will simply fail the request and VF can try
 * again after consulting its bulletin board.
 */
int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc, q_logical_state;
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;

	if (!is_valid_ether_addr(mac)) {
		BNX2X_ERR("mac address invalid\n");
		return -EINVAL;
	}

	/* sanity and init */
	rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
	if (rc)
		return rc;

	mutex_lock(&bp->vfdb->bulletin_mutex);

	/* update PF's copy of the VF's bulletin. Will no longer accept mac
	 * configuration requests from the vf unless they match this mac
	 */
	bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
	memcpy(bulletin->mac, mac, ETH_ALEN);

	/* Post update on VF's bulletin board */
	rc = bnx2x_post_vf_bulletin(bp, vfidx);

	/* release lock before checking return code */
	mutex_unlock(&bp->vfdb->bulletin_mutex);

	if (rc) {
		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
		return rc;
	}

	q_logical_state =
		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
	if (vf->state == VF_ENABLED &&
	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		/* configure the mac in device on this vf's queue */
		unsigned long ramrod_flags = 0;
		struct bnx2x_vlan_mac_obj *mac_obj;

		/* User should be able to see failure reason in system logs */
		if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
			return -EINVAL;

		/* must lock vfpf channel to protect against vf flows */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);

		/* remove existing eth macs */
		mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete eth macs\n");
			rc = -EINVAL;
			goto out;
		}

		/* remove existing uc list macs */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete uc_list macs\n");
			rc = -EINVAL;
			goto out;
		}

		/* configure the new mac to device */
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
				  BNX2X_ETH_MAC, &ramrod_flags);

out:
		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
	}

	return rc;
}
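
/* Editorial note: bnx2x_set_vf_mac() above and bnx2x_set_vf_vlan() below
 * are wired into net_device_ops as .ndo_set_vf_mac/.ndo_set_vf_vlan and
 * are typically exercised from the hypervisor via iproute2, e.g.:
 *
 *	ip link set <pf-dev> vf <vfidx> mac aa:bb:cc:dd:ee:ff
 *	ip link set <pf-dev> vf <vfidx> vlan <vid>
 */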
int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
{
	struct bnx2x_queue_state_params q_params = {NULL};
	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
	struct bnx2x_queue_update_params *update_params;
	struct pf_vf_bulletin_content *bulletin = NULL;
	struct bnx2x_rx_mode_ramrod_params rx_ramrod;
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_vlan_mac_obj *vlan_obj;
	unsigned long vlan_mac_flags = 0;
	unsigned long ramrod_flags = 0;
	struct bnx2x_virtf *vf = NULL;
	unsigned long accept_flags;
	int rc;

	if (vlan > 4095) {
		BNX2X_ERR("illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
	   vfidx, vlan, 0);

	/* sanity and init */
	rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
	if (rc)
		return rc;

	/* update PF's copy of the VF's bulletin. No point in posting the vlan
	 * to the VF since it doesn't have anything to do with it. But it is
	 * useful to store it here in case the VF is not up yet and we can
	 * only configure the vlan later when it does. Treat vlan id 0 as
	 * remove the Host tag.
	 */
	mutex_lock(&bp->vfdb->bulletin_mutex);

	if (vlan > 0)
		bulletin->valid_bitmap |= 1 << VLAN_VALID;
	else
		bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
	bulletin->vlan = vlan;

	mutex_unlock(&bp->vfdb->bulletin_mutex);

	/* is vf initialized and queue set up? */
	if (vf->state != VF_ENABLED ||
	    bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
	    BNX2X_Q_LOGICAL_STATE_ACTIVE)
		return rc;

	/* User should be able to see error in system logs */
	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
		return -EINVAL;

	/* must lock vfpf channel to protect against vf flows */
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);

	/* remove existing vlans */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
	rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
				  &ramrod_flags);
	if (rc) {
		BNX2X_ERR("failed to delete vlans\n");
		rc = -EINVAL;
		goto out;
	}

	/* need to remove/add the VF's accept_any_vlan bit */
	accept_flags = bnx2x_leading_vfq(vf, accept_flags);
	if (vlan)
		clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
	else
		set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);

	bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
			      accept_flags);
	bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
	bnx2x_config_rx_mode(bp, &rx_ramrod);

	/* configure the new vlan to device */
	memset(&ramrod_param, 0, sizeof(ramrod_param));
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	ramrod_param.vlan_mac_obj = vlan_obj;
	ramrod_param.ramrod_flags = ramrod_flags;
	set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		&ramrod_param.user_req.vlan_mac_flags);
	ramrod_param.user_req.u.vlan.vlan = vlan;
	ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
	if (rc) {
		BNX2X_ERR("failed to configure vlan\n");
		rc = -EINVAL;
		goto out;
	}

	/* send queue update ramrod to configure default vlan and silent
	 * vlan removal
	 */
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
	q_params.cmd = BNX2X_Q_CMD_UPDATE;
	q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
	update_params = &q_params.params.update;
	__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
		  &update_params->update_flags);
	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
		  &update_params->update_flags);
	if (vlan == 0) {
		/* if vlan is 0 then we want to leave the VF traffic
		 * untagged, and leave the incoming traffic untouched
		 * (i.e. do not remove any vlan tags).
		 */
		__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
			    &update_params->update_flags);
		__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
			    &update_params->update_flags);
	} else {
		/* configure default vlan to vf queue and set silent
		 * vlan removal (the vf remains unaware of this vlan).
		 */
		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
			  &update_params->update_flags);
		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
			  &update_params->update_flags);
		update_params->def_vlan = vlan;
		update_params->silent_removal_value =
			vlan & VLAN_VID_MASK;
		update_params->silent_removal_mask = VLAN_VID_MASK;
	}

	/* Update the Queue state */
	rc = bnx2x_queue_state_change(bp, &q_params);
	if (rc) {
		BNX2X_ERR("Failed to configure default VLAN\n");
		goto out;
	}

	/* clear the flag indicating that this VF needs its vlan
	 * (will only be set if the HV configured the Vlan before vf was
	 * up and we were called because the VF came up later)
	 */
out:
	vf->cfg_flags &= ~VF_CFG_VLAN;
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);

	return rc;
}
/* crc is the first field in the bulletin board. Compute the crc over the
 * entire bulletin board excluding the crc field itself. Use the length field
 * as the Bulletin Board was posted by a PF with possibly a different version
 * from the vf which will sample it. Therefore, the length is computed by the
 * PF and then used blindly by the VF.
 */
u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin)
{
	return crc32(BULLETIN_CRC_SEED,
		     ((u8 *)bulletin) + sizeof(bulletin->crc),
		     bulletin->length - sizeof(bulletin->crc));
}
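
/* Illustrative sketch (hypothetical helper; the real posting path is
 * bnx2x_post_vf_bulletin(), with bnx2x_vf_bulletin_finalize() stamping the
 * content): the PF fills in length first, since the VF uses it blindly
 * when recomputing the crc, then seals the board so the sampling loop in
 * bnx2x_sample_bulletin() below can detect a torn read.
 */
static inline void bnx2x_example_seal_bulletin(struct pf_vf_bulletin_content *bulletin)
{
	/* assumption: BULLETIN_CONTENT_SIZE comes from bnx2x_sriov.h */
	bulletin->length = BULLETIN_CONTENT_SIZE;
	bulletin->crc = bnx2x_crc_vf_bulletin(bulletin);
}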
/* Check for new posts on the bulletin board */
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
{
	struct pf_vf_bulletin_content *bulletin;
	int attempts;

	/* sampling structure in mid post may result in corrupted data
	 * validate crc to ensure coherency.
	 */
	for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
		u32 crc;

		/* sample the bulletin board */
		memcpy(&bp->shadow_bulletin, bp->pf2vf_bulletin,
		       sizeof(union pf_vf_bulletin));

		crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content);

		if (bp->shadow_bulletin.content.crc == crc)
			break;

		BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
			  bp->shadow_bulletin.content.crc, crc);
	}

	if (attempts >= BULLETIN_ATTEMPTS) {
		BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
			  attempts);
		return PFVF_BULLETIN_CRC_ERR;
	}
	bulletin = &bp->shadow_bulletin.content;

	/* bulletin board hasn't changed since last sample */
	if (bp->old_bulletin.version == bulletin->version)
		return PFVF_BULLETIN_UNCHANGED;

	/* the mac address in bulletin board is valid and is new */
	if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID &&
	    !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) {
		/* update new mac to net device */
		memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN);
	}

	if (bulletin->valid_bitmap & (1 << LINK_VALID)) {
		DP(BNX2X_MSG_IOV, "link update speed %d flags %x\n",
		   bulletin->link_speed, bulletin->link_flags);

		bp->vf_link_vars.line_speed = bulletin->link_speed;
		bp->vf_link_vars.link_report_flags = 0;
		/* Link is down */
		if (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)
			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
				  &bp->vf_link_vars.link_report_flags);
		/* Full DUPLEX */
		if (bulletin->link_flags & VFPF_LINK_REPORT_FULL_DUPLEX)
			__set_bit(BNX2X_LINK_REPORT_FD,
				  &bp->vf_link_vars.link_report_flags);
		/* Rx Flow Control is ON */
		if (bulletin->link_flags & VFPF_LINK_REPORT_RX_FC_ON)
			__set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				  &bp->vf_link_vars.link_report_flags);
		/* Tx Flow Control is ON */
		if (bulletin->link_flags & VFPF_LINK_REPORT_TX_FC_ON)
			__set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
				  &bp->vf_link_vars.link_report_flags);
		__bnx2x_link_report(bp);
	}

	/* copy new bulletin board to bp */
	memcpy(&bp->old_bulletin, bulletin,
	       sizeof(struct pf_vf_bulletin_content));

	return PFVF_BULLETIN_UPDATED;
}

void bnx2x_timer_sriov(struct bnx2x *bp)
{
	bnx2x_sample_bulletin(bp);

	/* if channel is down we need to self destruct */
	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
				       BNX2X_MSG_IOV);
}

void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
{
	/* vf doorbells are embedded within the regview */
	return bp->regview + PXP_VF_ADDR_DB_START;
}

void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
{
	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
		       sizeof(struct bnx2x_vf_mbx_msg));
	/* free the bulletin with its own buffer/mapping pair (the original
	 * passed bp->vf2pf_mbox here, leaking the bulletin allocation)
	 */
	BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
		       sizeof(union pf_vf_bulletin));
}

int bnx2x_vf_pci_alloc(struct bnx2x *bp)
{
	mutex_init(&bp->vf2pf_mutex);

	/* allocate vf2pf mailbox for vf to pf channel */
	bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
					 sizeof(struct bnx2x_vf_mbx_msg));
	if (!bp->vf2pf_mbox)
		goto alloc_mem_err;

	/* allocate pf 2 vf bulletin board */
	bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
					     sizeof(union pf_vf_bulletin));
	if (!bp->pf2vf_bulletin)
		goto alloc_mem_err;

	bnx2x_vf_bulletin_finalize(&bp->pf2vf_bulletin->content, true);

	return 0;

alloc_mem_err:
	bnx2x_vf_pci_dealloc(bp);
	return -ENOMEM;
}

void bnx2x_iov_channel_down(struct bnx2x *bp)
{
	int vf_idx;
	struct pf_vf_bulletin_content *bulletin;

	if (!IS_SRIOV(bp))
		return;

	for_each_vf(bp, vf_idx) {
		/* locate this VFs bulletin board and update the channel down
		 * bit
		 */
		bulletin = BP_VF_BULLETIN(bp, vf_idx);
		bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;

		/* update vf bulletin board */
		bnx2x_post_vf_bulletin(bp, vf_idx);
	}
}
void bnx2x_iov_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);

	if (!netif_running(bp->dev))
		return;

	if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
			       &bp->iov_task_state))
		bnx2x_vf_handle_flr_event(bp);

	if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
			       &bp->iov_task_state))
		bnx2x_vf_mbx(bp);
}

void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
{
	smp_mb__before_atomic();
	set_bit(flag, &bp->iov_task_state);
	smp_mb__after_atomic();
	DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
	queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
}
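
/* Editorial note: producers signal the IOV worker with
 * bnx2x_schedule_iov_task() and bnx2x_iov_task() above consumes the flags
 * via test_and_clear_bit(). E.g. the slow-path flow, on seeing a VF FLR
 * indication, would do:
 *
 *	bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_FLR);
 *
 * and the VF-PF mailbox path similarly schedules BNX2X_IOV_HANDLE_VF_MSG.
 */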