1 /* bnx2x_sriov.h: Broadcom Everest network driver. 2 * 3 * Copyright 2009-2013 Broadcom Corporation 4 * 5 * Unless you and Broadcom execute a separate written software license 6 * agreement governing use of this software, this software is licensed to you 7 * under the terms of the GNU General Public License version 2, available 8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL"). 9 * 10 * Notwithstanding the above, under no circumstances may you combine this 11 * software in any way with any other Broadcom software provided under a 12 * license other than the GPL, without Broadcom's express prior written 13 * consent. 14 * 15 * Maintained by: Eilon Greenstein <eilong@broadcom.com> 16 * Written by: Shmulik Ravid <shmulikr@broadcom.com> 17 * Ariel Elior <ariele@broadcom.com> 18 */ 19 #ifndef BNX2X_SRIOV_H 20 #define BNX2X_SRIOV_H 21 22 #include "bnx2x_vfpf.h" 23 #include "bnx2x.h" 24 25 enum sample_bulletin_result { 26 PFVF_BULLETIN_UNCHANGED, 27 PFVF_BULLETIN_UPDATED, 28 PFVF_BULLETIN_CRC_ERR 29 }; 30 31 #ifdef CONFIG_BNX2X_SRIOV 32 33 /* The bnx2x device structure holds vfdb structure described below. 34 * The VF array is indexed by the relative vfid. 35 */ 36 #define BNX2X_VF_MAX_QUEUES 16 37 #define BNX2X_VF_MAX_TPA_AGG_QUEUES 8 38 39 struct bnx2x_sriov { 40 u32 first_vf_in_pf; 41 42 /* standard SRIOV capability fields, mostly for debugging */ 43 int pos; /* capability position */ 44 int nres; /* number of resources */ 45 u32 cap; /* SR-IOV Capabilities */ 46 u16 ctrl; /* SR-IOV Control */ 47 u16 total; /* total VFs associated with the PF */ 48 u16 initial; /* initial VFs associated with the PF */ 49 u16 nr_virtfn; /* number of VFs available */ 50 u16 offset; /* first VF Routing ID offset */ 51 u16 stride; /* following VF stride */ 52 u32 pgsz; /* page size for BAR alignment */ 53 u8 link; /* Function Dependency Link */ 54 }; 55 56 /* bars */ 57 struct bnx2x_vf_bar { 58 u64 bar; 59 u32 size; 60 }; 61 62 struct bnx2x_vf_bar_info { 63 struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS]; 64 u8 nr_bars; 65 }; 66 67 /* vf queue (used both for rx or tx) */ 68 struct bnx2x_vf_queue { 69 struct eth_context *cxt; 70 71 /* MACs object */ 72 struct bnx2x_vlan_mac_obj mac_obj; 73 74 /* VLANs object */ 75 struct bnx2x_vlan_mac_obj vlan_obj; 76 atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */ 77 unsigned long accept_flags; /* last accept flags configured */ 78 79 /* Queue Slow-path State object */ 80 struct bnx2x_queue_sp_obj sp_obj; 81 82 u32 cid; 83 u16 index; 84 u16 sb_idx; 85 bool is_leading; 86 }; 87 88 /* struct bnx2x_vfop_qctor_params - prepare queue construction parameters: 89 * q-init, q-setup and SB index 90 */ 91 struct bnx2x_vfop_qctor_params { 92 struct bnx2x_queue_state_params qstate; 93 struct bnx2x_queue_setup_params prep_qsetup; 94 }; 95 96 /* VFOP parameters (one copy per VF) */ 97 union bnx2x_vfop_params { 98 struct bnx2x_vlan_mac_ramrod_params vlan_mac; 99 struct bnx2x_rx_mode_ramrod_params rx_mode; 100 struct bnx2x_mcast_ramrod_params mcast; 101 struct bnx2x_config_rss_params rss; 102 struct bnx2x_vfop_qctor_params qctor; 103 }; 104 105 /* forward */ 106 struct bnx2x_virtf; 107 108 /* VFOP definitions */ 109 typedef void (*vfop_handler_t)(struct bnx2x *bp, struct bnx2x_virtf *vf); 110 111 struct bnx2x_vfop_cmd { 112 vfop_handler_t done; 113 bool block; 114 }; 115 116 /* VFOP queue filters command additional arguments */ 117 struct bnx2x_vfop_filter { 118 struct list_head link; 119 int type; 120 #define BNX2X_VFOP_FILTER_MAC 1 121 #define BNX2X_VFOP_FILTER_VLAN 2 122 123 bool add; 124 u8 *mac; 125 u16 vid; 126 }; 127 128 struct bnx2x_vfop_filters { 129 int add_cnt; 130 struct list_head head; 131 struct bnx2x_vfop_filter filters[]; 132 }; 133 134 /* transient list allocated, built and saved until its 135 * passed to the SP-VERBs layer. 136 */ 137 struct bnx2x_vfop_args_mcast { 138 int mc_num; 139 struct bnx2x_mcast_list_elem *mc; 140 }; 141 142 struct bnx2x_vfop_args_qctor { 143 int qid; 144 u16 sb_idx; 145 }; 146 147 struct bnx2x_vfop_args_qdtor { 148 int qid; 149 struct eth_context *cxt; 150 }; 151 152 struct bnx2x_vfop_args_defvlan { 153 int qid; 154 bool enable; 155 u16 vid; 156 u8 prio; 157 }; 158 159 struct bnx2x_vfop_args_qx { 160 int qid; 161 bool en_add; 162 }; 163 164 struct bnx2x_vfop_args_filters { 165 struct bnx2x_vfop_filters *multi_filter; 166 atomic_t *credit; /* non NULL means 'don't consume credit' */ 167 }; 168 169 union bnx2x_vfop_args { 170 struct bnx2x_vfop_args_mcast mc_list; 171 struct bnx2x_vfop_args_qctor qctor; 172 struct bnx2x_vfop_args_qdtor qdtor; 173 struct bnx2x_vfop_args_defvlan defvlan; 174 struct bnx2x_vfop_args_qx qx; 175 struct bnx2x_vfop_args_filters filters; 176 }; 177 178 struct bnx2x_vfop { 179 struct list_head link; 180 int rc; /* return code */ 181 int state; /* next state */ 182 union bnx2x_vfop_args args; /* extra arguments */ 183 union bnx2x_vfop_params *op_p; /* ramrod params */ 184 185 /* state machine callbacks */ 186 vfop_handler_t transition; 187 vfop_handler_t done; 188 }; 189 190 /* vf context */ 191 struct bnx2x_virtf { 192 u16 cfg_flags; 193 #define VF_CFG_STATS 0x0001 194 #define VF_CFG_FW_FC 0x0002 195 #define VF_CFG_TPA 0x0004 196 #define VF_CFG_INT_SIMD 0x0008 197 #define VF_CACHE_LINE 0x0010 198 #define VF_CFG_VLAN 0x0020 199 #define VF_CFG_STATS_COALESCE 0x0040 200 201 u8 state; 202 #define VF_FREE 0 /* VF ready to be acquired holds no resc */ 203 #define VF_ACQUIRED 1 /* VF acquired, but not initialized */ 204 #define VF_ENABLED 2 /* VF Enabled */ 205 #define VF_RESET 3 /* VF FLR'd, pending cleanup */ 206 207 /* non 0 during flr cleanup */ 208 u8 flr_clnup_stage; 209 #define VF_FLR_CLN 1 /* reclaim resources and do 'final cleanup' 210 * sans the end-wait 211 */ 212 #define VF_FLR_ACK 2 /* ACK flr notification */ 213 #define VF_FLR_EPILOG 3 /* wait for VF remnants to dissipate in the HW 214 * ~ final cleanup' end wait 215 */ 216 217 /* dma */ 218 dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */ 219 u16 stats_stride; 220 dma_addr_t spq_map; 221 dma_addr_t bulletin_map; 222 223 /* Allocated resources counters. Before the VF is acquired, the 224 * counters hold the following values: 225 * 226 * - xxq_count = 0 as the queues memory is not allocated yet. 227 * 228 * - sb_count = The number of status blocks configured for this VF in 229 * the IGU CAM. Initially read during probe. 230 * 231 * - xx_rules_count = The number of rules statically and equally 232 * allocated for each VF, during PF load. 233 */ 234 struct vf_pf_resc_request alloc_resc; 235 #define vf_rxq_count(vf) ((vf)->alloc_resc.num_rxqs) 236 #define vf_txq_count(vf) ((vf)->alloc_resc.num_txqs) 237 #define vf_sb_count(vf) ((vf)->alloc_resc.num_sbs) 238 #define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters) 239 #define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters) 240 #define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters) 241 242 u8 sb_count; /* actual number of SBs */ 243 u8 igu_base_id; /* base igu status block id */ 244 245 struct bnx2x_vf_queue *vfqs; 246 #define LEADING_IDX 0 247 #define bnx2x_vfq_is_leading(vfq) ((vfq)->index == LEADING_IDX) 248 #define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var) 249 #define bnx2x_leading_vfq(vf, var) ((vf)->vfqs[LEADING_IDX].var) 250 251 u8 index; /* index in the vf array */ 252 u8 abs_vfid; 253 u8 sp_cl_id; 254 u32 error; /* 0 means all's-well */ 255 256 /* BDF */ 257 unsigned int bus; 258 unsigned int devfn; 259 260 /* bars */ 261 struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS]; 262 263 /* set-mac ramrod state 1-pending, 0-done */ 264 unsigned long filter_state; 265 266 /* leading rss client id ~~ the client id of the first rxq, must be 267 * set for each txq. 268 */ 269 int leading_rss; 270 271 /* MCAST object */ 272 int mcast_list_len; 273 struct bnx2x_mcast_obj mcast_obj; 274 275 /* RSS configuration object */ 276 struct bnx2x_rss_config_obj rss_conf_obj; 277 278 /* slow-path operations */ 279 atomic_t op_in_progress; 280 int op_rc; 281 bool op_wait_blocking; 282 struct list_head op_list_head; 283 union bnx2x_vfop_params op_params; 284 struct mutex op_mutex; /* one vfop at a time mutex */ 285 enum channel_tlvs op_current; 286 }; 287 288 #define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn) 289 290 #define for_each_vf(bp, var) \ 291 for ((var) = 0; (var) < BNX2X_NR_VIRTFN(bp); (var)++) 292 293 #define for_each_vfq(vf, var) \ 294 for ((var) = 0; (var) < vf_rxq_count(vf); (var)++) 295 296 #define for_each_vf_sb(vf, var) \ 297 for ((var) = 0; (var) < vf_sb_count(vf); (var)++) 298 299 #define is_vf_multi(vf) (vf_rxq_count(vf) > 1) 300 301 #define HW_VF_HANDLE(bp, abs_vfid) \ 302 (u16)(BP_ABS_FUNC((bp)) | (1<<3) | ((u16)(abs_vfid) << 4)) 303 304 #define FW_PF_MAX_HANDLE 8 305 306 #define FW_VF_HANDLE(abs_vfid) \ 307 (abs_vfid + FW_PF_MAX_HANDLE) 308 309 /* locking and unlocking the channel mutex */ 310 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 311 enum channel_tlvs tlv); 312 313 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 314 enum channel_tlvs expected_tlv); 315 316 /* VF mail box (aka vf-pf channel) */ 317 318 /* a container for the bi-directional vf<-->pf messages. 319 * The actual response will be placed according to the offset parameter 320 * provided in the request 321 */ 322 323 #define MBX_MSG_ALIGN 8 324 #define MBX_MSG_ALIGNED_SIZE (roundup(sizeof(struct bnx2x_vf_mbx_msg), \ 325 MBX_MSG_ALIGN)) 326 327 struct bnx2x_vf_mbx_msg { 328 union vfpf_tlvs req; 329 union pfvf_tlvs resp; 330 }; 331 332 struct bnx2x_vf_mbx { 333 struct bnx2x_vf_mbx_msg *msg; 334 dma_addr_t msg_mapping; 335 336 /* VF GPA address */ 337 u32 vf_addr_lo; 338 u32 vf_addr_hi; 339 340 struct vfpf_first_tlv first_tlv; /* saved VF request header */ 341 342 u8 flags; 343 #define VF_MSG_INPROCESS 0x1 /* failsafe - the FW should prevent 344 * more then one pending msg 345 */ 346 }; 347 348 struct bnx2x_vf_sp { 349 union { 350 struct eth_classify_rules_ramrod_data e2; 351 } mac_rdata; 352 353 union { 354 struct eth_classify_rules_ramrod_data e2; 355 } vlan_rdata; 356 357 union { 358 struct eth_filter_rules_ramrod_data e2; 359 } rx_mode_rdata; 360 361 union { 362 struct eth_multicast_rules_ramrod_data e2; 363 } mcast_rdata; 364 365 union { 366 struct client_init_ramrod_data init_data; 367 struct client_update_ramrod_data update_data; 368 } q_data; 369 370 union { 371 struct eth_rss_update_ramrod_data e2; 372 } rss_rdata; 373 }; 374 375 struct hw_dma { 376 void *addr; 377 dma_addr_t mapping; 378 size_t size; 379 }; 380 381 struct bnx2x_vfdb { 382 #define BP_VFDB(bp) ((bp)->vfdb) 383 /* vf array */ 384 struct bnx2x_virtf *vfs; 385 #define BP_VF(bp, idx) (&((bp)->vfdb->vfs[(idx)])) 386 #define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[(idx)].var) 387 388 /* queue array - for all vfs */ 389 struct bnx2x_vf_queue *vfqs; 390 391 /* vf HW contexts */ 392 struct hw_dma context[BNX2X_VF_CIDS/ILT_PAGE_CIDS]; 393 #define BP_VF_CXT_PAGE(bp, i) (&(bp)->vfdb->context[(i)]) 394 395 /* SR-IOV information */ 396 struct bnx2x_sriov sriov; 397 struct hw_dma mbx_dma; 398 #define BP_VF_MBX_DMA(bp) (&((bp)->vfdb->mbx_dma)) 399 struct bnx2x_vf_mbx mbxs[BNX2X_MAX_NUM_OF_VFS]; 400 #define BP_VF_MBX(bp, vfid) (&((bp)->vfdb->mbxs[(vfid)])) 401 402 struct hw_dma bulletin_dma; 403 #define BP_VF_BULLETIN_DMA(bp) (&((bp)->vfdb->bulletin_dma)) 404 #define BP_VF_BULLETIN(bp, vf) \ 405 (((struct pf_vf_bulletin_content *)(BP_VF_BULLETIN_DMA(bp)->addr)) \ 406 + (vf)) 407 408 struct hw_dma sp_dma; 409 #define bnx2x_vf_sp(bp, vf, field) ((bp)->vfdb->sp_dma.addr + \ 410 (vf)->index * sizeof(struct bnx2x_vf_sp) + \ 411 offsetof(struct bnx2x_vf_sp, field)) 412 #define bnx2x_vf_sp_map(bp, vf, field) ((bp)->vfdb->sp_dma.mapping + \ 413 (vf)->index * sizeof(struct bnx2x_vf_sp) + \ 414 offsetof(struct bnx2x_vf_sp, field)) 415 416 #define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32) 417 u32 flrd_vfs[FLRD_VFS_DWORDS]; 418 419 /* the number of msix vectors belonging to this PF designated for VFs */ 420 u16 vf_sbs_pool; 421 u16 first_vf_igu_entry; 422 }; 423 424 /* queue access */ 425 static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index) 426 { 427 return &(vf->vfqs[index]); 428 } 429 430 /* FW ids */ 431 static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx) 432 { 433 return vf->igu_base_id + sb_idx; 434 } 435 436 static inline u8 vf_hc_qzone(struct bnx2x_virtf *vf, u16 sb_idx) 437 { 438 return vf_igu_sb(vf, sb_idx); 439 } 440 441 static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) 442 { 443 return vf->igu_base_id + q->index; 444 } 445 446 static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) 447 { 448 if (vf->cfg_flags & VF_CFG_STATS_COALESCE) 449 return vf->leading_rss; 450 else 451 return vfq_cl_id(vf, q); 452 } 453 454 static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) 455 { 456 return vfq_cl_id(vf, q); 457 } 458 459 /* global iov routines */ 460 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line); 461 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, int num_vfs_param); 462 void bnx2x_iov_remove_one(struct bnx2x *bp); 463 void bnx2x_iov_free_mem(struct bnx2x *bp); 464 int bnx2x_iov_alloc_mem(struct bnx2x *bp); 465 int bnx2x_iov_nic_init(struct bnx2x *bp); 466 int bnx2x_iov_chip_cleanup(struct bnx2x *bp); 467 void bnx2x_iov_init_dq(struct bnx2x *bp); 468 void bnx2x_iov_init_dmae(struct bnx2x *bp); 469 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, 470 struct bnx2x_queue_sp_obj **q_obj); 471 void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work); 472 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem); 473 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp); 474 void bnx2x_iov_storm_stats_update(struct bnx2x *bp); 475 void bnx2x_iov_sp_task(struct bnx2x *bp); 476 /* global vf mailbox routines */ 477 void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event); 478 void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid); 479 480 /* CORE VF API */ 481 typedef u8 bnx2x_mac_addr_t[ETH_ALEN]; 482 483 /* acquire */ 484 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, 485 struct vf_pf_resc_request *resc); 486 /* init */ 487 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, 488 dma_addr_t *sb_map); 489 490 /* VFOP generic helpers */ 491 #define bnx2x_vfop_default(state) do { \ 492 BNX2X_ERR("Bad state %d\n", (state)); \ 493 vfop->rc = -EINVAL; \ 494 goto op_err; \ 495 } while (0) 496 497 enum { 498 VFOP_DONE, 499 VFOP_CONT, 500 VFOP_VERIFY_PEND, 501 }; 502 503 #define bnx2x_vfop_finalize(vf, rc, next) do { \ 504 if ((rc) < 0) \ 505 goto op_err; \ 506 else if ((rc) > 0) \ 507 goto op_pending; \ 508 else if ((next) == VFOP_DONE) \ 509 goto op_done; \ 510 else if ((next) == VFOP_VERIFY_PEND) \ 511 BNX2X_ERR("expected pending\n"); \ 512 else { \ 513 DP(BNX2X_MSG_IOV, "no ramrod. Scheduling\n"); \ 514 atomic_set(&vf->op_in_progress, 1); \ 515 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); \ 516 return; \ 517 } \ 518 } while (0) 519 520 #define bnx2x_vfop_opset(first_state, trans_hndlr, done_hndlr) \ 521 do { \ 522 vfop->state = first_state; \ 523 vfop->op_p = &vf->op_params; \ 524 vfop->transition = trans_hndlr; \ 525 vfop->done = done_hndlr; \ 526 } while (0) 527 528 static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp, 529 struct bnx2x_virtf *vf) 530 { 531 WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!"); 532 WARN_ON(list_empty(&vf->op_list_head)); 533 return list_first_entry(&vf->op_list_head, struct bnx2x_vfop, link); 534 } 535 536 static inline struct bnx2x_vfop *bnx2x_vfop_add(struct bnx2x *bp, 537 struct bnx2x_virtf *vf) 538 { 539 struct bnx2x_vfop *vfop = kzalloc(sizeof(*vfop), GFP_KERNEL); 540 541 WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!"); 542 if (vfop) { 543 INIT_LIST_HEAD(&vfop->link); 544 list_add(&vfop->link, &vf->op_list_head); 545 } 546 return vfop; 547 } 548 549 static inline void bnx2x_vfop_end(struct bnx2x *bp, struct bnx2x_virtf *vf, 550 struct bnx2x_vfop *vfop) 551 { 552 /* rc < 0 - error, otherwise set to 0 */ 553 DP(BNX2X_MSG_IOV, "rc was %d\n", vfop->rc); 554 if (vfop->rc >= 0) 555 vfop->rc = 0; 556 DP(BNX2X_MSG_IOV, "rc is now %d\n", vfop->rc); 557 558 /* unlink the current op context and propagate error code 559 * must be done before invoking the 'done()' handler 560 */ 561 WARN(!mutex_is_locked(&vf->op_mutex), 562 "about to access vf op linked list but mutex was not locked!"); 563 list_del(&vfop->link); 564 565 if (list_empty(&vf->op_list_head)) { 566 DP(BNX2X_MSG_IOV, "list was empty %d\n", vfop->rc); 567 vf->op_rc = vfop->rc; 568 DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n", 569 vf->op_rc, vfop->rc); 570 } else { 571 struct bnx2x_vfop *cur_vfop; 572 573 DP(BNX2X_MSG_IOV, "list not empty %d\n", vfop->rc); 574 cur_vfop = bnx2x_vfop_cur(bp, vf); 575 cur_vfop->rc = vfop->rc; 576 DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n", 577 vf->op_rc, vfop->rc); 578 } 579 580 /* invoke done handler */ 581 if (vfop->done) { 582 DP(BNX2X_MSG_IOV, "calling done handler\n"); 583 vfop->done(bp, vf); 584 } else { 585 /* there is no done handler for the operation to unlock 586 * the mutex. Must have gotten here from PF initiated VF RELEASE 587 */ 588 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); 589 } 590 591 DP(BNX2X_MSG_IOV, "done handler complete. vf->op_rc %d, vfop->rc %d\n", 592 vf->op_rc, vfop->rc); 593 594 /* if this is the last nested op reset the wait_blocking flag 595 * to release any blocking wrappers, only after 'done()' is invoked 596 */ 597 if (list_empty(&vf->op_list_head)) { 598 DP(BNX2X_MSG_IOV, "list was empty after done %d\n", vfop->rc); 599 vf->op_wait_blocking = false; 600 } 601 602 kfree(vfop); 603 } 604 605 static inline int bnx2x_vfop_wait_blocking(struct bnx2x *bp, 606 struct bnx2x_virtf *vf) 607 { 608 /* can take a while if any port is running */ 609 int cnt = 5000; 610 611 might_sleep(); 612 while (cnt--) { 613 if (vf->op_wait_blocking == false) { 614 #ifdef BNX2X_STOP_ON_ERROR 615 DP(BNX2X_MSG_IOV, "exit (cnt %d)\n", 5000 - cnt); 616 #endif 617 return 0; 618 } 619 usleep_range(1000, 2000); 620 621 if (bp->panic) 622 return -EIO; 623 } 624 625 /* timeout! */ 626 #ifdef BNX2X_STOP_ON_ERROR 627 bnx2x_panic(); 628 #endif 629 630 return -EBUSY; 631 } 632 633 static inline int bnx2x_vfop_transition(struct bnx2x *bp, 634 struct bnx2x_virtf *vf, 635 vfop_handler_t transition, 636 bool block) 637 { 638 if (block) 639 vf->op_wait_blocking = true; 640 transition(bp, vf); 641 if (block) 642 return bnx2x_vfop_wait_blocking(bp, vf); 643 return 0; 644 } 645 646 /* VFOP queue construction helpers */ 647 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, 648 struct bnx2x_queue_init_params *init_params, 649 struct bnx2x_queue_setup_params *setup_params, 650 u16 q_idx, u16 sb_idx); 651 652 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, 653 struct bnx2x_queue_init_params *init_params, 654 struct bnx2x_queue_setup_params *setup_params, 655 u16 q_idx, u16 sb_idx); 656 657 void bnx2x_vfop_qctor_prep(struct bnx2x *bp, 658 struct bnx2x_virtf *vf, 659 struct bnx2x_vf_queue *q, 660 struct bnx2x_vfop_qctor_params *p, 661 unsigned long q_type); 662 int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp, 663 struct bnx2x_virtf *vf, 664 struct bnx2x_vfop_cmd *cmd, 665 struct bnx2x_vfop_filters *macs, 666 int qid, bool drv_only); 667 668 int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp, 669 struct bnx2x_virtf *vf, 670 struct bnx2x_vfop_cmd *cmd, 671 struct bnx2x_vfop_filters *vlans, 672 int qid, bool drv_only); 673 674 int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp, 675 struct bnx2x_virtf *vf, 676 struct bnx2x_vfop_cmd *cmd, 677 int qid); 678 679 int bnx2x_vfop_qdown_cmd(struct bnx2x *bp, 680 struct bnx2x_virtf *vf, 681 struct bnx2x_vfop_cmd *cmd, 682 int qid); 683 684 int bnx2x_vfop_mcast_cmd(struct bnx2x *bp, 685 struct bnx2x_virtf *vf, 686 struct bnx2x_vfop_cmd *cmd, 687 bnx2x_mac_addr_t *mcasts, 688 int mcast_num, bool drv_only); 689 690 int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp, 691 struct bnx2x_virtf *vf, 692 struct bnx2x_vfop_cmd *cmd, 693 int qid, unsigned long accept_flags); 694 695 int bnx2x_vfop_close_cmd(struct bnx2x *bp, 696 struct bnx2x_virtf *vf, 697 struct bnx2x_vfop_cmd *cmd); 698 699 int bnx2x_vfop_release_cmd(struct bnx2x *bp, 700 struct bnx2x_virtf *vf, 701 struct bnx2x_vfop_cmd *cmd); 702 703 int bnx2x_vfop_rss_cmd(struct bnx2x *bp, 704 struct bnx2x_virtf *vf, 705 struct bnx2x_vfop_cmd *cmd); 706 707 /* VF release ~ VF close + VF release-resources 708 * 709 * Release is the ultimate SW shutdown and is called whenever an 710 * irrecoverable error is encountered. 711 */ 712 void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block); 713 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid); 714 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf); 715 716 /* FLR routines */ 717 718 /* VF FLR helpers */ 719 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid); 720 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid); 721 722 /* Handles an FLR (or VF_DISABLE) notification form the MCP */ 723 void bnx2x_vf_handle_flr_event(struct bnx2x *bp); 724 725 bool bnx2x_tlv_supported(u16 tlvtype); 726 727 u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, 728 struct pf_vf_bulletin_content *bulletin); 729 int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf); 730 731 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); 732 733 /* VF side vfpf channel functions */ 734 int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count); 735 int bnx2x_vfpf_release(struct bnx2x *bp); 736 int bnx2x_vfpf_release(struct bnx2x *bp); 737 int bnx2x_vfpf_init(struct bnx2x *bp); 738 void bnx2x_vfpf_close_vf(struct bnx2x *bp); 739 int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, 740 bool is_leading); 741 int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set); 742 int bnx2x_vfpf_config_rss(struct bnx2x *bp, 743 struct bnx2x_config_rss_params *params); 744 int bnx2x_vfpf_set_mcast(struct net_device *dev); 745 int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp); 746 747 static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf, 748 size_t buf_len) 749 { 750 strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len); 751 } 752 753 static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp, 754 struct bnx2x_fastpath *fp) 755 { 756 return PXP_VF_ADDR_USDM_QUEUES_START + 757 bp->acquire_resp.resc.hw_qid[fp->index] * 758 sizeof(struct ustorm_queue_zone_data); 759 } 760 761 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); 762 void bnx2x_timer_sriov(struct bnx2x *bp); 763 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp); 764 int bnx2x_vf_pci_alloc(struct bnx2x *bp); 765 int bnx2x_enable_sriov(struct bnx2x *bp); 766 void bnx2x_disable_sriov(struct bnx2x *bp); 767 static inline int bnx2x_vf_headroom(struct bnx2x *bp) 768 { 769 return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF; 770 } 771 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp); 772 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs); 773 void bnx2x_iov_channel_down(struct bnx2x *bp); 774 775 #else /* CONFIG_BNX2X_SRIOV */ 776 777 static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, 778 struct bnx2x_queue_sp_obj **q_obj) {} 779 static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, 780 bool queue_work) {} 781 static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {} 782 static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp, 783 union event_ring_elem *elem) {return 1; } 784 static inline void bnx2x_iov_sp_task(struct bnx2x *bp) {} 785 static inline void bnx2x_vf_mbx(struct bnx2x *bp, 786 struct vf_pf_event_data *vfpf_event) {} 787 static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; } 788 static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {} 789 static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; } 790 static inline void bnx2x_iov_free_mem(struct bnx2x *bp) {} 791 static inline int bnx2x_iov_chip_cleanup(struct bnx2x *bp) {return 0; } 792 static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {} 793 static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, 794 int num_vfs_param) {return 0; } 795 static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {} 796 static inline int bnx2x_enable_sriov(struct bnx2x *bp) {return 0; } 797 static inline void bnx2x_disable_sriov(struct bnx2x *bp) {} 798 static inline int bnx2x_vfpf_acquire(struct bnx2x *bp, 799 u8 tx_count, u8 rx_count) {return 0; } 800 static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; } 801 static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; } 802 static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {} 803 static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; } 804 static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, 805 u8 vf_qid, bool set) {return 0; } 806 static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp, 807 struct bnx2x_config_rss_params *params) {return 0; } 808 static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; } 809 static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; } 810 static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; } 811 static inline int bnx2x_vf_headroom(struct bnx2x *bp) {return 0; } 812 static inline void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) {} 813 static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf, 814 size_t buf_len) {} 815 static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp, 816 struct bnx2x_fastpath *fp) {return 0; } 817 static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp) 818 { 819 return PFVF_BULLETIN_UNCHANGED; 820 } 821 static inline void bnx2x_timer_sriov(struct bnx2x *bp) {} 822 823 static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) 824 { 825 return NULL; 826 } 827 828 static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; } 829 static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {} 830 static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; } 831 static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {} 832 833 #endif /* CONFIG_BNX2X_SRIOV */ 834 #endif /* bnx2x_sriov.h */ 835