/* bnx2x_sriov.h: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 */
#ifndef BNX2X_SRIOV_H
#define BNX2X_SRIOV_H

#include "bnx2x_vfpf.h"
#include "bnx2x.h"

/* result of sampling the PF-to-VF bulletin board (see bnx2x_sample_bulletin) */
enum sample_bulletin_result {
	PFVF_BULLETIN_UNCHANGED,
	PFVF_BULLETIN_UPDATED,
	PFVF_BULLETIN_CRC_ERR
};

#ifdef CONFIG_BNX2X_SRIOV

/* The bnx2x device structure holds vfdb structure described below.
 * The VF array is indexed by the relative vfid.
 */
#define BNX2X_VF_MAX_QUEUES		16
#define BNX2X_VF_MAX_TPA_AGG_QUEUES	8

/* PCI SR-IOV capability data cached for this PF */
struct bnx2x_sriov {
	u32 first_vf_in_pf;

	/* standard SRIOV capability fields, mostly for debugging */
	int pos;		/* capability position */
	int nres;		/* number of resources */
	u32 cap;		/* SR-IOV Capabilities */
	u16 ctrl;		/* SR-IOV Control */
	u16 total;		/* total VFs associated with the PF */
	u16 initial;		/* initial VFs associated with the PF */
	u16 nr_virtfn;		/* number of VFs available */
	u16 offset;		/* first VF Routing ID offset */
	u16 stride;		/* following VF stride */
	u32 pgsz;		/* page size for BAR alignment */
	u8 link;		/* Function Dependency Link */
};

/* bars */
struct bnx2x_vf_bar {
	u64 bar;
	u32 size;
};

struct bnx2x_vf_bar_info {
	struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS];
	u8 nr_bars;
};

/* vf queue (used both for rx or tx) */
struct bnx2x_vf_queue {
	struct eth_context		*cxt;

	/* MACs object */
	struct bnx2x_vlan_mac_obj	mac_obj;

	/* VLANs object */
	struct bnx2x_vlan_mac_obj	vlan_obj;
	atomic_t vlan_count;		/* 0 means vlan-0 is set ~ untagged */
	unsigned long accept_flags;	/* last accept flags configured */

	/* Queue Slow-path State object */
	struct bnx2x_queue_sp_obj	sp_obj;

	u32 cid;
	u16 index;
	u16 sb_idx;
	bool is_leading;
	bool sp_initialized;
};

/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
 * q-init, q-setup and SB index
 */
struct bnx2x_vfop_qctor_params {
	struct bnx2x_queue_state_params		qstate;
	struct bnx2x_queue_setup_params		prep_qsetup;
};

/* VFOP parameters (one copy per VF) */
union bnx2x_vfop_params {
	struct bnx2x_vlan_mac_ramrod_params	vlan_mac;
	struct bnx2x_rx_mode_ramrod_params	rx_mode;
	struct bnx2x_mcast_ramrod_params	mcast;
	struct bnx2x_config_rss_params		rss;
	struct bnx2x_vfop_qctor_params		qctor;
	struct bnx2x_queue_state_params		qstate;
};
106 107 /* forward */ 108 struct bnx2x_virtf; 109 110 /* VFOP definitions */ 111 typedef void (*vfop_handler_t)(struct bnx2x *bp, struct bnx2x_virtf *vf); 112 113 struct bnx2x_vfop_cmd { 114 vfop_handler_t done; 115 bool block; 116 }; 117 118 /* VFOP queue filters command additional arguments */ 119 struct bnx2x_vfop_filter { 120 struct list_head link; 121 int type; 122 #define BNX2X_VFOP_FILTER_MAC 1 123 #define BNX2X_VFOP_FILTER_VLAN 2 124 125 bool add; 126 u8 *mac; 127 u16 vid; 128 }; 129 130 struct bnx2x_vfop_filters { 131 int add_cnt; 132 struct list_head head; 133 struct bnx2x_vfop_filter filters[]; 134 }; 135 136 /* transient list allocated, built and saved until its 137 * passed to the SP-VERBs layer. 138 */ 139 struct bnx2x_vfop_args_mcast { 140 int mc_num; 141 struct bnx2x_mcast_list_elem *mc; 142 }; 143 144 struct bnx2x_vfop_args_qctor { 145 int qid; 146 u16 sb_idx; 147 }; 148 149 struct bnx2x_vfop_args_qdtor { 150 int qid; 151 struct eth_context *cxt; 152 }; 153 154 struct bnx2x_vfop_args_defvlan { 155 int qid; 156 bool enable; 157 u16 vid; 158 u8 prio; 159 }; 160 161 struct bnx2x_vfop_args_qx { 162 int qid; 163 bool en_add; 164 }; 165 166 struct bnx2x_vfop_args_filters { 167 struct bnx2x_vfop_filters *multi_filter; 168 atomic_t *credit; /* non NULL means 'don't consume credit' */ 169 }; 170 171 struct bnx2x_vfop_args_tpa { 172 int qid; 173 dma_addr_t sge_map[PFVF_MAX_QUEUES_PER_VF]; 174 }; 175 176 union bnx2x_vfop_args { 177 struct bnx2x_vfop_args_mcast mc_list; 178 struct bnx2x_vfop_args_qctor qctor; 179 struct bnx2x_vfop_args_qdtor qdtor; 180 struct bnx2x_vfop_args_defvlan defvlan; 181 struct bnx2x_vfop_args_qx qx; 182 struct bnx2x_vfop_args_filters filters; 183 struct bnx2x_vfop_args_tpa tpa; 184 }; 185 186 struct bnx2x_vfop { 187 struct list_head link; 188 int rc; /* return code */ 189 int state; /* next state */ 190 union bnx2x_vfop_args args; /* extra arguments */ 191 union bnx2x_vfop_params *op_p; /* ramrod params */ 192 193 /* state machine 
callbacks */ 194 vfop_handler_t transition; 195 vfop_handler_t done; 196 }; 197 198 /* vf context */ 199 struct bnx2x_virtf { 200 u16 cfg_flags; 201 #define VF_CFG_STATS 0x0001 202 #define VF_CFG_FW_FC 0x0002 203 #define VF_CFG_TPA 0x0004 204 #define VF_CFG_INT_SIMD 0x0008 205 #define VF_CACHE_LINE 0x0010 206 #define VF_CFG_VLAN 0x0020 207 #define VF_CFG_STATS_COALESCE 0x0040 208 209 u8 state; 210 #define VF_FREE 0 /* VF ready to be acquired holds no resc */ 211 #define VF_ACQUIRED 1 /* VF acquired, but not initialized */ 212 #define VF_ENABLED 2 /* VF Enabled */ 213 #define VF_RESET 3 /* VF FLR'd, pending cleanup */ 214 215 /* non 0 during flr cleanup */ 216 u8 flr_clnup_stage; 217 #define VF_FLR_CLN 1 /* reclaim resources and do 'final cleanup' 218 * sans the end-wait 219 */ 220 #define VF_FLR_ACK 2 /* ACK flr notification */ 221 #define VF_FLR_EPILOG 3 /* wait for VF remnants to dissipate in the HW 222 * ~ final cleanup' end wait 223 */ 224 225 /* dma */ 226 dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */ 227 u16 stats_stride; 228 dma_addr_t spq_map; 229 dma_addr_t bulletin_map; 230 231 /* Allocated resources counters. Before the VF is acquired, the 232 * counters hold the following values: 233 * 234 * - xxq_count = 0 as the queues memory is not allocated yet. 235 * 236 * - sb_count = The number of status blocks configured for this VF in 237 * the IGU CAM. Initially read during probe. 238 * 239 * - xx_rules_count = The number of rules statically and equally 240 * allocated for each VF, during PF load. 
241 */ 242 struct vf_pf_resc_request alloc_resc; 243 #define vf_rxq_count(vf) ((vf)->alloc_resc.num_rxqs) 244 #define vf_txq_count(vf) ((vf)->alloc_resc.num_txqs) 245 #define vf_sb_count(vf) ((vf)->alloc_resc.num_sbs) 246 #define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters) 247 #define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters) 248 #define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters) 249 250 u8 sb_count; /* actual number of SBs */ 251 u8 igu_base_id; /* base igu status block id */ 252 253 struct bnx2x_vf_queue *vfqs; 254 #define LEADING_IDX 0 255 #define bnx2x_vfq_is_leading(vfq) ((vfq)->index == LEADING_IDX) 256 #define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var) 257 #define bnx2x_leading_vfq(vf, var) ((vf)->vfqs[LEADING_IDX].var) 258 259 u8 index; /* index in the vf array */ 260 u8 abs_vfid; 261 u8 sp_cl_id; 262 u32 error; /* 0 means all's-well */ 263 264 /* BDF */ 265 unsigned int bus; 266 unsigned int devfn; 267 268 /* bars */ 269 struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS]; 270 271 /* set-mac ramrod state 1-pending, 0-done */ 272 unsigned long filter_state; 273 274 /* leading rss client id ~~ the client id of the first rxq, must be 275 * set for each txq. 
276 */ 277 int leading_rss; 278 279 /* MCAST object */ 280 int mcast_list_len; 281 struct bnx2x_mcast_obj mcast_obj; 282 283 /* RSS configuration object */ 284 struct bnx2x_rss_config_obj rss_conf_obj; 285 286 /* slow-path operations */ 287 atomic_t op_in_progress; 288 int op_rc; 289 bool op_wait_blocking; 290 struct list_head op_list_head; 291 union bnx2x_vfop_params op_params; 292 struct mutex op_mutex; /* one vfop at a time mutex */ 293 enum channel_tlvs op_current; 294 }; 295 296 #define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn) 297 298 #define for_each_vf(bp, var) \ 299 for ((var) = 0; (var) < BNX2X_NR_VIRTFN(bp); (var)++) 300 301 #define for_each_vfq(vf, var) \ 302 for ((var) = 0; (var) < vf_rxq_count(vf); (var)++) 303 304 #define for_each_vf_sb(vf, var) \ 305 for ((var) = 0; (var) < vf_sb_count(vf); (var)++) 306 307 #define is_vf_multi(vf) (vf_rxq_count(vf) > 1) 308 309 #define HW_VF_HANDLE(bp, abs_vfid) \ 310 (u16)(BP_ABS_FUNC((bp)) | (1<<3) | ((u16)(abs_vfid) << 4)) 311 312 #define FW_PF_MAX_HANDLE 8 313 314 #define FW_VF_HANDLE(abs_vfid) \ 315 (abs_vfid + FW_PF_MAX_HANDLE) 316 317 /* locking and unlocking the channel mutex */ 318 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 319 enum channel_tlvs tlv); 320 321 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 322 enum channel_tlvs expected_tlv); 323 324 /* VF mail box (aka vf-pf channel) */ 325 326 /* a container for the bi-directional vf<-->pf messages. 
327 * The actual response will be placed according to the offset parameter 328 * provided in the request 329 */ 330 331 #define MBX_MSG_ALIGN 8 332 #define MBX_MSG_ALIGNED_SIZE (roundup(sizeof(struct bnx2x_vf_mbx_msg), \ 333 MBX_MSG_ALIGN)) 334 335 struct bnx2x_vf_mbx_msg { 336 union vfpf_tlvs req; 337 union pfvf_tlvs resp; 338 }; 339 340 struct bnx2x_vf_mbx { 341 struct bnx2x_vf_mbx_msg *msg; 342 dma_addr_t msg_mapping; 343 344 /* VF GPA address */ 345 u32 vf_addr_lo; 346 u32 vf_addr_hi; 347 348 struct vfpf_first_tlv first_tlv; /* saved VF request header */ 349 350 u8 flags; 351 #define VF_MSG_INPROCESS 0x1 /* failsafe - the FW should prevent 352 * more then one pending msg 353 */ 354 }; 355 356 struct bnx2x_vf_sp { 357 union { 358 struct eth_classify_rules_ramrod_data e2; 359 } mac_rdata; 360 361 union { 362 struct eth_classify_rules_ramrod_data e2; 363 } vlan_rdata; 364 365 union { 366 struct eth_filter_rules_ramrod_data e2; 367 } rx_mode_rdata; 368 369 union { 370 struct eth_multicast_rules_ramrod_data e2; 371 } mcast_rdata; 372 373 union { 374 struct client_init_ramrod_data init_data; 375 struct client_update_ramrod_data update_data; 376 } q_data; 377 378 union { 379 struct eth_rss_update_ramrod_data e2; 380 } rss_rdata; 381 }; 382 383 struct hw_dma { 384 void *addr; 385 dma_addr_t mapping; 386 size_t size; 387 }; 388 389 struct bnx2x_vfdb { 390 #define BP_VFDB(bp) ((bp)->vfdb) 391 /* vf array */ 392 struct bnx2x_virtf *vfs; 393 #define BP_VF(bp, idx) (&((bp)->vfdb->vfs[(idx)])) 394 #define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[(idx)].var) 395 396 /* queue array - for all vfs */ 397 struct bnx2x_vf_queue *vfqs; 398 399 /* vf HW contexts */ 400 struct hw_dma context[BNX2X_VF_CIDS/ILT_PAGE_CIDS]; 401 #define BP_VF_CXT_PAGE(bp, i) (&(bp)->vfdb->context[(i)]) 402 403 /* SR-IOV information */ 404 struct bnx2x_sriov sriov; 405 struct hw_dma mbx_dma; 406 #define BP_VF_MBX_DMA(bp) (&((bp)->vfdb->mbx_dma)) 407 struct bnx2x_vf_mbx mbxs[BNX2X_MAX_NUM_OF_VFS]; 408 
#define BP_VF_MBX(bp, vfid) (&((bp)->vfdb->mbxs[(vfid)])) 409 410 struct hw_dma bulletin_dma; 411 #define BP_VF_BULLETIN_DMA(bp) (&((bp)->vfdb->bulletin_dma)) 412 #define BP_VF_BULLETIN(bp, vf) \ 413 (((struct pf_vf_bulletin_content *)(BP_VF_BULLETIN_DMA(bp)->addr)) \ 414 + (vf)) 415 416 struct hw_dma sp_dma; 417 #define bnx2x_vf_sp(bp, vf, field) ((bp)->vfdb->sp_dma.addr + \ 418 (vf)->index * sizeof(struct bnx2x_vf_sp) + \ 419 offsetof(struct bnx2x_vf_sp, field)) 420 #define bnx2x_vf_sp_map(bp, vf, field) ((bp)->vfdb->sp_dma.mapping + \ 421 (vf)->index * sizeof(struct bnx2x_vf_sp) + \ 422 offsetof(struct bnx2x_vf_sp, field)) 423 424 #define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32) 425 u32 flrd_vfs[FLRD_VFS_DWORDS]; 426 427 /* the number of msix vectors belonging to this PF designated for VFs */ 428 u16 vf_sbs_pool; 429 u16 first_vf_igu_entry; 430 }; 431 432 /* queue access */ 433 static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index) 434 { 435 return &(vf->vfqs[index]); 436 } 437 438 /* FW ids */ 439 static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx) 440 { 441 return vf->igu_base_id + sb_idx; 442 } 443 444 static inline u8 vf_hc_qzone(struct bnx2x_virtf *vf, u16 sb_idx) 445 { 446 return vf_igu_sb(vf, sb_idx); 447 } 448 449 static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) 450 { 451 return vf->igu_base_id + q->index; 452 } 453 454 static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) 455 { 456 if (vf->cfg_flags & VF_CFG_STATS_COALESCE) 457 return vf->leading_rss; 458 else 459 return vfq_cl_id(vf, q); 460 } 461 462 static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q) 463 { 464 return vfq_cl_id(vf, q); 465 } 466 467 /* global iov routines */ 468 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line); 469 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, int num_vfs_param); 470 void bnx2x_iov_remove_one(struct bnx2x *bp); 471 void 
bnx2x_iov_free_mem(struct bnx2x *bp); 472 int bnx2x_iov_alloc_mem(struct bnx2x *bp); 473 int bnx2x_iov_nic_init(struct bnx2x *bp); 474 int bnx2x_iov_chip_cleanup(struct bnx2x *bp); 475 void bnx2x_iov_init_dq(struct bnx2x *bp); 476 void bnx2x_iov_init_dmae(struct bnx2x *bp); 477 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, 478 struct bnx2x_queue_sp_obj **q_obj); 479 void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work); 480 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem); 481 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp); 482 void bnx2x_iov_storm_stats_update(struct bnx2x *bp); 483 void bnx2x_iov_sp_task(struct bnx2x *bp); 484 /* global vf mailbox routines */ 485 void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event); 486 void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid); 487 488 /* CORE VF API */ 489 typedef u8 bnx2x_mac_addr_t[ETH_ALEN]; 490 491 /* acquire */ 492 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, 493 struct vf_pf_resc_request *resc); 494 /* init */ 495 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, 496 dma_addr_t *sb_map); 497 498 /* VFOP generic helpers */ 499 #define bnx2x_vfop_default(state) do { \ 500 BNX2X_ERR("Bad state %d\n", (state)); \ 501 vfop->rc = -EINVAL; \ 502 goto op_err; \ 503 } while (0) 504 505 enum { 506 VFOP_DONE, 507 VFOP_CONT, 508 VFOP_VERIFY_PEND, 509 }; 510 511 #define bnx2x_vfop_finalize(vf, rc, next) do { \ 512 if ((rc) < 0) \ 513 goto op_err; \ 514 else if ((rc) > 0) \ 515 goto op_pending; \ 516 else if ((next) == VFOP_DONE) \ 517 goto op_done; \ 518 else if ((next) == VFOP_VERIFY_PEND) \ 519 BNX2X_ERR("expected pending\n"); \ 520 else { \ 521 DP(BNX2X_MSG_IOV, "no ramrod. 
Scheduling\n"); \ 522 atomic_set(&vf->op_in_progress, 1); \ 523 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); \ 524 return; \ 525 } \ 526 } while (0) 527 528 #define bnx2x_vfop_opset(first_state, trans_hndlr, done_hndlr) \ 529 do { \ 530 vfop->state = first_state; \ 531 vfop->op_p = &vf->op_params; \ 532 vfop->transition = trans_hndlr; \ 533 vfop->done = done_hndlr; \ 534 } while (0) 535 536 static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp, 537 struct bnx2x_virtf *vf) 538 { 539 WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!"); 540 WARN_ON(list_empty(&vf->op_list_head)); 541 return list_first_entry(&vf->op_list_head, struct bnx2x_vfop, link); 542 } 543 544 static inline struct bnx2x_vfop *bnx2x_vfop_add(struct bnx2x *bp, 545 struct bnx2x_virtf *vf) 546 { 547 struct bnx2x_vfop *vfop = kzalloc(sizeof(*vfop), GFP_KERNEL); 548 549 WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!"); 550 if (vfop) { 551 INIT_LIST_HEAD(&vfop->link); 552 list_add(&vfop->link, &vf->op_list_head); 553 } 554 return vfop; 555 } 556 557 static inline void bnx2x_vfop_end(struct bnx2x *bp, struct bnx2x_virtf *vf, 558 struct bnx2x_vfop *vfop) 559 { 560 /* rc < 0 - error, otherwise set to 0 */ 561 DP(BNX2X_MSG_IOV, "rc was %d\n", vfop->rc); 562 if (vfop->rc >= 0) 563 vfop->rc = 0; 564 DP(BNX2X_MSG_IOV, "rc is now %d\n", vfop->rc); 565 566 /* unlink the current op context and propagate error code 567 * must be done before invoking the 'done()' handler 568 */ 569 WARN(!mutex_is_locked(&vf->op_mutex), 570 "about to access vf op linked list but mutex was not locked!"); 571 list_del(&vfop->link); 572 573 if (list_empty(&vf->op_list_head)) { 574 DP(BNX2X_MSG_IOV, "list was empty %d\n", vfop->rc); 575 vf->op_rc = vfop->rc; 576 DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n", 577 vf->op_rc, vfop->rc); 578 } else { 579 struct bnx2x_vfop *cur_vfop; 580 581 DP(BNX2X_MSG_IOV, "list 
not empty %d\n", vfop->rc); 582 cur_vfop = bnx2x_vfop_cur(bp, vf); 583 cur_vfop->rc = vfop->rc; 584 DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n", 585 vf->op_rc, vfop->rc); 586 } 587 588 /* invoke done handler */ 589 if (vfop->done) { 590 DP(BNX2X_MSG_IOV, "calling done handler\n"); 591 vfop->done(bp, vf); 592 } else { 593 /* there is no done handler for the operation to unlock 594 * the mutex. Must have gotten here from PF initiated VF RELEASE 595 */ 596 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF); 597 } 598 599 DP(BNX2X_MSG_IOV, "done handler complete. vf->op_rc %d, vfop->rc %d\n", 600 vf->op_rc, vfop->rc); 601 602 /* if this is the last nested op reset the wait_blocking flag 603 * to release any blocking wrappers, only after 'done()' is invoked 604 */ 605 if (list_empty(&vf->op_list_head)) { 606 DP(BNX2X_MSG_IOV, "list was empty after done %d\n", vfop->rc); 607 vf->op_wait_blocking = false; 608 } 609 610 kfree(vfop); 611 } 612 613 static inline int bnx2x_vfop_wait_blocking(struct bnx2x *bp, 614 struct bnx2x_virtf *vf) 615 { 616 /* can take a while if any port is running */ 617 int cnt = 5000; 618 619 might_sleep(); 620 while (cnt--) { 621 if (vf->op_wait_blocking == false) { 622 #ifdef BNX2X_STOP_ON_ERROR 623 DP(BNX2X_MSG_IOV, "exit (cnt %d)\n", 5000 - cnt); 624 #endif 625 return 0; 626 } 627 usleep_range(1000, 2000); 628 629 if (bp->panic) 630 return -EIO; 631 } 632 633 /* timeout! 
*/ 634 #ifdef BNX2X_STOP_ON_ERROR 635 bnx2x_panic(); 636 #endif 637 638 return -EBUSY; 639 } 640 641 static inline int bnx2x_vfop_transition(struct bnx2x *bp, 642 struct bnx2x_virtf *vf, 643 vfop_handler_t transition, 644 bool block) 645 { 646 if (block) 647 vf->op_wait_blocking = true; 648 transition(bp, vf); 649 if (block) 650 return bnx2x_vfop_wait_blocking(bp, vf); 651 return 0; 652 } 653 654 /* VFOP queue construction helpers */ 655 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, 656 struct bnx2x_queue_init_params *init_params, 657 struct bnx2x_queue_setup_params *setup_params, 658 u16 q_idx, u16 sb_idx); 659 660 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf, 661 struct bnx2x_queue_init_params *init_params, 662 struct bnx2x_queue_setup_params *setup_params, 663 u16 q_idx, u16 sb_idx); 664 665 void bnx2x_vfop_qctor_prep(struct bnx2x *bp, 666 struct bnx2x_virtf *vf, 667 struct bnx2x_vf_queue *q, 668 struct bnx2x_vfop_qctor_params *p, 669 unsigned long q_type); 670 int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp, 671 struct bnx2x_virtf *vf, 672 struct bnx2x_vfop_cmd *cmd, 673 struct bnx2x_vfop_filters *macs, 674 int qid, bool drv_only); 675 676 int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp, 677 struct bnx2x_virtf *vf, 678 struct bnx2x_vfop_cmd *cmd, 679 struct bnx2x_vfop_filters *vlans, 680 int qid, bool drv_only); 681 682 int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp, 683 struct bnx2x_virtf *vf, 684 struct bnx2x_vfop_cmd *cmd, 685 int qid); 686 687 int bnx2x_vfop_qdown_cmd(struct bnx2x *bp, 688 struct bnx2x_virtf *vf, 689 struct bnx2x_vfop_cmd *cmd, 690 int qid); 691 692 int bnx2x_vfop_mcast_cmd(struct bnx2x *bp, 693 struct bnx2x_virtf *vf, 694 struct bnx2x_vfop_cmd *cmd, 695 bnx2x_mac_addr_t *mcasts, 696 int mcast_num, bool drv_only); 697 698 int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp, 699 struct bnx2x_virtf *vf, 700 struct bnx2x_vfop_cmd *cmd, 701 int qid, unsigned long accept_flags); 702 703 int 
bnx2x_vfop_close_cmd(struct bnx2x *bp, 704 struct bnx2x_virtf *vf, 705 struct bnx2x_vfop_cmd *cmd); 706 707 int bnx2x_vfop_release_cmd(struct bnx2x *bp, 708 struct bnx2x_virtf *vf, 709 struct bnx2x_vfop_cmd *cmd); 710 711 int bnx2x_vfop_rss_cmd(struct bnx2x *bp, 712 struct bnx2x_virtf *vf, 713 struct bnx2x_vfop_cmd *cmd); 714 715 int bnx2x_vfop_tpa_cmd(struct bnx2x *bp, 716 struct bnx2x_virtf *vf, 717 struct bnx2x_vfop_cmd *cmd, 718 struct vfpf_tpa_tlv *tpa_tlv); 719 720 /* VF release ~ VF close + VF release-resources 721 * 722 * Release is the ultimate SW shutdown and is called whenever an 723 * irrecoverable error is encountered. 724 */ 725 void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block); 726 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid); 727 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf); 728 729 /* FLR routines */ 730 731 /* VF FLR helpers */ 732 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid); 733 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid); 734 735 /* Handles an FLR (or VF_DISABLE) notification form the MCP */ 736 void bnx2x_vf_handle_flr_event(struct bnx2x *bp); 737 738 bool bnx2x_tlv_supported(u16 tlvtype); 739 740 u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp, 741 struct pf_vf_bulletin_content *bulletin); 742 int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf); 743 744 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); 745 746 /* VF side vfpf channel functions */ 747 int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count); 748 int bnx2x_vfpf_release(struct bnx2x *bp); 749 int bnx2x_vfpf_release(struct bnx2x *bp); 750 int bnx2x_vfpf_init(struct bnx2x *bp); 751 void bnx2x_vfpf_close_vf(struct bnx2x *bp); 752 int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, 753 bool is_leading); 754 int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set); 755 int bnx2x_vfpf_config_rss(struct bnx2x *bp, 756 struct 
bnx2x_config_rss_params *params); 757 int bnx2x_vfpf_set_mcast(struct net_device *dev); 758 int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp); 759 760 static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf, 761 size_t buf_len) 762 { 763 strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len); 764 } 765 766 static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp, 767 struct bnx2x_fastpath *fp) 768 { 769 return PXP_VF_ADDR_USDM_QUEUES_START + 770 bp->acquire_resp.resc.hw_qid[fp->index] * 771 sizeof(struct ustorm_queue_zone_data); 772 } 773 774 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); 775 void bnx2x_timer_sriov(struct bnx2x *bp); 776 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp); 777 int bnx2x_vf_pci_alloc(struct bnx2x *bp); 778 int bnx2x_enable_sriov(struct bnx2x *bp); 779 void bnx2x_disable_sriov(struct bnx2x *bp); 780 static inline int bnx2x_vf_headroom(struct bnx2x *bp) 781 { 782 return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF; 783 } 784 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp); 785 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs); 786 void bnx2x_iov_channel_down(struct bnx2x *bp); 787 788 #else /* CONFIG_BNX2X_SRIOV */ 789 790 static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid, 791 struct bnx2x_queue_sp_obj **q_obj) {} 792 static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, 793 bool queue_work) {} 794 static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {} 795 static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp, 796 union event_ring_elem *elem) {return 1; } 797 static inline void bnx2x_iov_sp_task(struct bnx2x *bp) {} 798 static inline void bnx2x_vf_mbx(struct bnx2x *bp, 799 struct vf_pf_event_data *vfpf_event) {} 800 static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; } 801 static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {} 802 static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; } 803 
/* remaining no-op stubs: keep signatures identical to the real declarations
 * above so callers compile unchanged when SR-IOV support is configured out
 */
static inline void bnx2x_iov_free_mem(struct bnx2x *bp) {}
static inline int bnx2x_iov_chip_cleanup(struct bnx2x *bp) {return 0; }
static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {}
static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
				     int num_vfs_param) {return 0; }
static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {}
static inline int bnx2x_enable_sriov(struct bnx2x *bp) {return 0; }
static inline void bnx2x_disable_sriov(struct bnx2x *bp) {}
static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
				     u8 tx_count, u8 rx_count) {return 0; }
static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; }
static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
					u8 vf_qid, bool set) {return 0; }
static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp,
					struct bnx2x_config_rss_params *params) {return 0; }
static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; }
static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; }
static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; }
static inline int bnx2x_vf_headroom(struct bnx2x *bp) {return 0; }
static inline void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) {}
static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
					size_t buf_len) {}
static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
					       struct bnx2x_fastpath *fp) {return 0; }
static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
{
	return PFVF_BULLETIN_UNCHANGED;
}
static inline void bnx2x_timer_sriov(struct bnx2x *bp) {}

static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
{
	return NULL;
}

static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}

#endif /* CONFIG_BNX2X_SRIOV */
#endif /* bnx2x_sriov.h */