/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#ifndef _QED_VF_H
#define _QED_VF_H

#include "qed_l2.h"
#include "qed_mcp.h"

#define T_ETH_INDIRECTION_TABLE_SIZE 128
#define T_ETH_RSS_KEY_SIZE 10

struct vf_pf_resc_request {
	u8 num_rxqs;
	u8 num_txqs;
	u8 num_sbs;
	u8 num_mac_filters;
	u8 num_vlan_filters;
	u8 num_mc_filters;
	u16 padding;
};

struct hw_sb_info {
	u16 hw_sb_id;
	u8 sb_qid;
	u8 padding[5];
};

#define TLV_BUFFER_SIZE                 1024

enum {
	PFVF_STATUS_WAITING,
	PFVF_STATUS_SUCCESS,
	PFVF_STATUS_FAILURE,
	PFVF_STATUS_NOT_SUPPORTED,
	PFVF_STATUS_NO_RESOURCE,
	PFVF_STATUS_FORCED,
	PFVF_STATUS_MALICIOUS,
};

/* vf pf channel tlvs */
/* general tlv header (used for both vf->pf request and pf->vf response) */
struct channel_tlv {
	u16 type;
	u16 length;
};

/* header of first vf->pf tlv carries the offset used to calculate response
 * buffer address
 */
struct vfpf_first_tlv {
	struct channel_tlv tl;
	u32 padding;
	u64 reply_address;
};

/* header of pf->vf tlvs, carries the status of handling the request */
struct pfvf_tlv {
	struct channel_tlv tl;
	u8 status;
	u8 padding[3];
};

/* response tlv used for most tlvs */
struct pfvf_def_resp_tlv {
	struct pfvf_tlv hdr;
};

/* used to terminate and pad a tlv list */
struct channel_list_end_tlv {
	struct channel_tlv tl;
	u8 padding[4];
};
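
/* Illustrative sketch (not part of this API): a VF->PF message is a chain of
 * TLVs placed back-to-back in a single buffer. The first entry embeds
 * struct vfpf_first_tlv and the chain is terminated by a CHANNEL_TLV_LIST_END
 * entry (see the channel TLV enum below). A hypothetical walker over such a
 * buffer, assuming it was built as described, could look like this:
 *
 *	void walk_tlvs(void *buf)
 *	{
 *		struct channel_tlv *tlv = buf;
 *
 *		while (tlv->type != CHANNEL_TLV_LIST_END && tlv->length) {
 *			pr_info("tlv type %u, length %u\n",
 *				tlv->type, tlv->length);
 *			tlv = (struct channel_tlv *)((u8 *)tlv + tlv->length);
 *		}
 *	}
 */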

#define VFPF_ACQUIRE_OS_LINUX (0)
#define VFPF_ACQUIRE_OS_WINDOWS (1)
#define VFPF_ACQUIRE_OS_ESX (2)
#define VFPF_ACQUIRE_OS_SOLARIS (3)
#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)

struct vfpf_acquire_tlv {
	struct vfpf_first_tlv first_tlv;

	struct vf_pf_vfdev_info {
#define VFPF_ACQUIRE_CAP_PRE_FP_HSI     (1 << 0) /* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_100G		(1 << 1) /* VF can support 100g */
		u64 capabilities;
		u8 fw_major;
		u8 fw_minor;
		u8 fw_revision;
		u8 fw_engineering;
		u32 driver_version;
		u16 opaque_fid;	/* ME register value */
		u8 os_type;	/* VFPF_ACQUIRE_OS_* value */
		u8 eth_fp_hsi_major;
		u8 eth_fp_hsi_minor;
		u8 padding[3];
	} vfdev_info;

	struct vf_pf_resc_request resc_request;

	u64 bulletin_addr;
	u32 bulletin_size;
	u32 padding;
};

/* receive side scaling tlv */
struct vfpf_vport_update_rss_tlv {
	struct channel_tlv tl;

	u8 update_rss_flags;
#define VFPF_UPDATE_RSS_CONFIG_FLAG       BIT(0)
#define VFPF_UPDATE_RSS_CAPS_FLAG         BIT(1)
#define VFPF_UPDATE_RSS_IND_TABLE_FLAG    BIT(2)
#define VFPF_UPDATE_RSS_KEY_FLAG          BIT(3)

	u8 rss_enable;
	u8 rss_caps;
	u8 rss_table_size_log;	/* The table size is 2 ^ rss_table_size_log */
	u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
	u32 rss_key[T_ETH_RSS_KEY_SIZE];
};
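
/* Illustrative sketch (not part of this API): filling the RSS extended TLV
 * defined above. Field and flag names come from the structure; the
 * surrounding request assembly (offsets, list-end TLV) is omitted and
 * assumed to be handled by the caller, and the spread over 4 queues is an
 * arbitrary choice for the example.
 *
 *	static void fill_rss_tlv_example(struct vfpf_vport_update_rss_tlv *p_rss)
 *	{
 *		int i;
 *
 *		p_rss->update_rss_flags = VFPF_UPDATE_RSS_CONFIG_FLAG |
 *					  VFPF_UPDATE_RSS_IND_TABLE_FLAG;
 *		p_rss->rss_enable = 1;
 *		p_rss->rss_table_size_log = 7;	// 2 ^ 7 == 128 entries
 *		for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
 *			p_rss->rss_ind_table[i] = i % 4;	// spread over 4 queues
 *	}
 */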

struct pfvf_storm_stats {
	u32 address;
	u32 len;
};

struct pfvf_stats_info {
	struct pfvf_storm_stats mstats;
	struct pfvf_storm_stats pstats;
	struct pfvf_storm_stats tstats;
	struct pfvf_storm_stats ustats;
};

struct pfvf_acquire_resp_tlv {
	struct pfvf_tlv hdr;

	struct pf_vf_pfdev_info {
		u32 chip_num;
		u32 mfw_ver;

		u16 fw_major;
		u16 fw_minor;
		u16 fw_rev;
		u16 fw_eng;

		u64 capabilities;
#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED	BIT(0)
#define PFVF_ACQUIRE_CAP_100G			BIT(1)	/* If set, 100g PF */
/* There are old PF versions where the PF might mistakenly override the sanity
 * mechanism [version-based] and allow a VF that can't be supported to pass
 * the acquisition phase.
 * To overcome this, PFs now indicate that they're past that point, and newer
 * VFs will fail probe against older PFs that don't.
 */
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE	BIT(2)

		u16 db_size;
		u8 indices_per_sb;
		u8 os_type;

		/* These should match the PF's qed_dev values */
		u16 chip_rev;
		u8 dev_type;

		u8 padding;

		struct pfvf_stats_info stats_info;

		u8 port_mac[ETH_ALEN];

		/* It's possible the PF had to configure an older fastpath HSI
		 * [in case the VF is newer than the PF]. This is communicated
		 * back to the VF. It can also be used, in case of an error due
		 * to non-matching versions, to give the VF insight into the
		 * failure.
		 */
		u8 major_fp_hsi;
		u8 minor_fp_hsi;
	} pfdev_info;

	struct pf_vf_resc {
#define PFVF_MAX_QUEUES_PER_VF		16
#define PFVF_MAX_SBS_PER_VF		16
		struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
		u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
		u8 cid[PFVF_MAX_QUEUES_PER_VF];

		u8 num_rxqs;
		u8 num_txqs;
		u8 num_sbs;
		u8 num_mac_filters;
		u8 num_vlan_filters;
		u8 num_mc_filters;
		u8 padding[2];
	} resc;

	u32 bulletin_size;
	u32 padding;
};
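
/* Illustrative sketch (not part of this API): one way a VF might interpret
 * the acquire response with respect to the fastpath HSI notes above.
 * ETH_HSI_VER_MAJOR is assumed to be the VF's own compiled-in HSI major;
 * the driver's actual handling may differ.
 *
 *	static bool acquire_resp_usable(struct pfvf_acquire_resp_tlv *resp)
 *	{
 *		if (resp->hdr.status != PFVF_STATUS_SUCCESS)
 *			return false;
 *
 *		// major_fp_hsi is the fastpath HSI the PF actually configured
 *		if (resp->pfdev_info.major_fp_hsi > ETH_HSI_VER_MAJOR)
 *			return false;	// newer than this VF understands
 *
 *		return true;
 *	}
 */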

struct pfvf_start_queue_resp_tlv {
	struct pfvf_tlv hdr;
	u32 offset;		/* offset to consumer/producer of queue */
	u8 padding[4];
};

/* Setup Queue */
struct vfpf_start_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 rxq_addr;
	u64 deprecated_sge_addr;
	u64 cqe_pbl_addr;

	u16 cqe_pbl_size;
	u16 hw_sb;
	u16 rx_qid;
	u16 hc_rate;		/* desired interrupts per sec. */

	u16 bd_max_bytes;
	u16 stat_id;
	u8 sb_index;
	u8 padding[3];
};

struct vfpf_start_txq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 pbl_addr;
	u16 pbl_size;
	u16 stat_id;
	u16 tx_qid;
	u16 hw_sb;

	u32 flags;		/* VFPF_QUEUE_FLG_X flags */
	u16 hc_rate;		/* desired interrupts per sec. */
	u8 sb_index;
	u8 padding[3];
};

/* Stop RX Queue */
struct vfpf_stop_rxqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 rx_qid;
	u8 num_rxqs;
	u8 cqe_completion;
	u8 padding[4];
};

/* Stop TX Queues */
struct vfpf_stop_txqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 tx_qid;
	u8 num_txqs;
	u8 padding[5];
};

struct vfpf_update_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];

	u16 rx_qid;
	u8 num_rxqs;
	u8 flags;
#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG    BIT(0)
#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG          BIT(1)
#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG        BIT(2)

	u8 padding[4];
};

/* Set Queue Filters */
struct vfpf_q_mac_vlan_filter {
	u32 flags;
#define VFPF_Q_FILTER_DEST_MAC_VALID    0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID    0x02
#define VFPF_Q_FILTER_SET_MAC           0x100	/* set/clear */

	u8 mac[ETH_ALEN];
	u16 vlan_tag;

	u8 padding[4];
};

/* Start a vport */
struct vfpf_vport_start_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 sb_addr[PFVF_MAX_SBS_PER_VF];

	u32 tpa_mode;
	u16 dep1;
	u16 mtu;

	u8 vport_id;
	u8 inner_vlan_removal;

	u8 only_untagged;
	u8 max_buffers_per_cqe;

	u8 padding[4];
};

/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
struct vfpf_vport_update_activate_tlv {
	struct channel_tlv tl;
	u8 update_rx;
	u8 update_tx;
	u8 active_rx;
	u8 active_tx;
};

struct vfpf_vport_update_tx_switch_tlv {
	struct channel_tlv tl;
	u8 tx_switching;
	u8 padding[3];
};

struct vfpf_vport_update_vlan_strip_tlv {
	struct channel_tlv tl;
	u8 remove_vlan;
	u8 padding[3];
};

struct vfpf_vport_update_mcast_bin_tlv {
	struct channel_tlv tl;
	u8 padding[4];

	u64 bins[8];
};

struct vfpf_vport_update_accept_param_tlv {
	struct channel_tlv tl;
	u8 update_rx_mode;
	u8 update_tx_mode;
	u8 rx_accept_filter;
	u8 tx_accept_filter;
};

struct vfpf_vport_update_accept_any_vlan_tlv {
	struct channel_tlv tl;
	u8 update_accept_any_vlan_flg;
	u8 accept_any_vlan;

	u8 padding[2];
};

struct vfpf_vport_update_sge_tpa_tlv {
	struct channel_tlv tl;

	u16 sge_tpa_flags;
#define VFPF_TPA_IPV4_EN_FLAG		BIT(0)
#define VFPF_TPA_IPV6_EN_FLAG		BIT(1)
#define VFPF_TPA_PKT_SPLIT_FLAG		BIT(2)
#define VFPF_TPA_HDR_DATA_SPLIT_FLAG	BIT(3)
#define VFPF_TPA_GRO_CONSIST_FLAG	BIT(4)

	u8 update_sge_tpa_flags;
#define VFPF_UPDATE_SGE_DEPRECATED_FLAG	BIT(0)
#define VFPF_UPDATE_TPA_EN_FLAG		BIT(1)
#define VFPF_UPDATE_TPA_PARAM_FLAG	BIT(2)

	u8 max_buffers_per_cqe;

	u16 deprecated_sge_buff_size;
	u16 tpa_max_size;
	u16 tpa_min_size_to_start;
	u16 tpa_min_size_to_cont;

	u8 tpa_max_aggs_num;
	u8 padding[7];
};

/* Primary tlv; serves as a header for the various extended tlvs used
 * by the vport update ramrod.
 */
struct vfpf_vport_update_tlv {
	struct vfpf_first_tlv first_tlv;
};

struct vfpf_ucast_filter_tlv {
	struct vfpf_first_tlv first_tlv;

	u8 opcode;
	u8 type;

	u8 mac[ETH_ALEN];

	u16 vlan;
	u16 padding[3];
};

struct tlv_buffer_size {
	u8 tlv_buffer[TLV_BUFFER_SIZE];
};

union vfpf_tlvs {
	struct vfpf_first_tlv first_tlv;
	struct vfpf_acquire_tlv acquire;
	struct vfpf_start_rxq_tlv start_rxq;
	struct vfpf_start_txq_tlv start_txq;
	struct vfpf_stop_rxqs_tlv stop_rxqs;
	struct vfpf_stop_txqs_tlv stop_txqs;
	struct vfpf_update_rxq_tlv update_rxq;
	struct vfpf_vport_start_tlv start_vport;
	struct vfpf_vport_update_tlv vport_update;
	struct vfpf_ucast_filter_tlv ucast_filter;
	struct channel_list_end_tlv list_end;
	struct tlv_buffer_size tlv_buf_size;
};

union pfvf_tlvs {
	struct pfvf_def_resp_tlv default_resp;
	struct pfvf_acquire_resp_tlv acquire_resp;
	struct tlv_buffer_size tlv_buf_size;
	struct pfvf_start_queue_resp_tlv queue_start;
};

enum qed_bulletin_bit {
	/* Alert the VF that a forced MAC was set by the PF */
	MAC_ADDR_FORCED = 0,
	/* Alert the VF that a forced VLAN was set by the PF */
	VLAN_ADDR_FORCED = 2,

	/* Indicate that `default_only_untagged' contains actual data */
	VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
	VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,

	/* Alert the VF that a suggested MAC was sent by the PF.
	 * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set.
	 */
	VFPF_BULLETIN_MAC_ADDR = 5
};
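
/* Illustrative sketch (not part of this API): the bits above index into the
 * valid_bitmap of struct qed_bulletin_content (defined below), so a VF can
 * tell which bulletin fields carry meaningful data. Assuming p_bulletin
 * points at a validated bulletin copy and dev_addr is a hypothetical
 * destination buffer:
 *
 *	bool forced = !!(p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED));
 *	bool hinted = !!(p_bulletin->valid_bitmap & BIT(VFPF_BULLETIN_MAC_ADDR));
 *
 *	if (forced)
 *		ether_addr_copy(dev_addr, p_bulletin->mac); // PF-enforced MAC
 *	else if (hinted)
 *		ether_addr_copy(dev_addr, p_bulletin->mac); // PF-suggested MAC
 */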

struct qed_bulletin_content {
	/* CRC of the structure, to ensure it is not read mid-update */
	u32 crc;

	u32 version;

	/* bitmap indicating which fields hold valid values */
	u64 valid_bitmap;

	/* used for MAC_ADDR or MAC_ADDR_FORCED */
	u8 mac[ETH_ALEN];

	/* If valid, 1 => only untagged Rx if no vlan is configured */
	u8 default_only_untagged;
	u8 padding;

	/* The following is a 'copy' of qed_mcp_link_state,
	 * qed_mcp_link_params and qed_mcp_link_capabilities. Since those
	 * structs may grow in the future, we can't embed them here;
	 * instead we spell out all of their fields.
	 */
	u8 req_autoneg;
	u8 req_autoneg_pause;
	u8 req_forced_rx;
	u8 req_forced_tx;
	u8 padding2[4];

	u32 req_adv_speed;
	u32 req_forced_speed;
	u32 req_loopback;
	u32 padding3;

	u8 link_up;
	u8 full_duplex;
	u8 autoneg;
	u8 autoneg_complete;
	u8 parallel_detection;
	u8 pfc_enabled;
	u8 partner_tx_flow_ctrl_en;
	u8 partner_rx_flow_ctrl_en;
	u8 partner_adv_pause;
	u8 sfp_tx_fault;
	u8 padding4[6];

	u32 speed;
	u32 partner_adv_speed;

	u32 capability_speed;

	/* Forced vlan */
	u16 pvid;
	u16 padding5;
};
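
/* Illustrative sketch (not part of this API): a VF can detect a bulletin
 * that is mid-update by checksumming everything after the crc field and
 * comparing the result against crc. The use of crc32() here is an
 * assumption made for the sake of the example.
 *
 *	static bool bulletin_crc_ok(struct qed_bulletin_content *p_bulletin)
 *	{
 *		u32 crc = crc32(0, (u8 *)p_bulletin + sizeof(p_bulletin->crc),
 *				sizeof(*p_bulletin) - sizeof(p_bulletin->crc));
 *
 *		return crc == p_bulletin->crc;
 *	}
 */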

struct qed_bulletin {
	dma_addr_t phys;
	struct qed_bulletin_content *p_virt;
	u32 size;
};

enum {
	CHANNEL_TLV_NONE,	/* ends tlv sequence */
	CHANNEL_TLV_ACQUIRE,
	CHANNEL_TLV_VPORT_START,
	CHANNEL_TLV_VPORT_UPDATE,
	CHANNEL_TLV_VPORT_TEARDOWN,
	CHANNEL_TLV_START_RXQ,
	CHANNEL_TLV_START_TXQ,
	CHANNEL_TLV_STOP_RXQS,
	CHANNEL_TLV_STOP_TXQS,
	CHANNEL_TLV_UPDATE_RXQ,
	CHANNEL_TLV_INT_CLEANUP,
	CHANNEL_TLV_CLOSE,
	CHANNEL_TLV_RELEASE,
	CHANNEL_TLV_LIST_END,
	CHANNEL_TLV_UCAST_FILTER,
	CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
	CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
	CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
	CHANNEL_TLV_VPORT_UPDATE_MCAST,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
	CHANNEL_TLV_VPORT_UPDATE_RSS,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
	CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
	CHANNEL_TLV_MAX,

	/* Required for iterating over vport-update tlvs.
	 * Will break if the vport-update tlvs are not sequential.
	 */
	CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
};
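
/* Illustrative sketch (not part of this API): because the vport-update
 * extended TLV types are sequential, they can be handled with a simple
 * counter; handle_vport_update_tlv() below is a hypothetical per-type
 * handler.
 *
 *	int tlv;
 *
 *	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
 *	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++)
 *		handle_vport_update_tlv(tlv);
 */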

/* This data is held in the qed_hwfn structure for VFs only. */
struct qed_vf_iov {
	union vfpf_tlvs *vf2pf_request;
	dma_addr_t vf2pf_request_phys;
	union pfvf_tlvs *pf2vf_reply;
	dma_addr_t pf2vf_reply_phys;

	/* Should be taken whenever the mailbox buffers are accessed */
	struct mutex mutex;
	u8 *offset;

	/* Bulletin Board */
	struct qed_bulletin bulletin;
	struct qed_bulletin_content bulletin_shadow;

	/* we set aside a copy of the acquire response */
	struct pfvf_acquire_resp_tlv acquire_resp;

	/* In case the PF predates the fp-hsi version comparison,
	 * this has to be propagated, as it affects the fastpath.
	 */
	bool b_pre_fp_hsi;
};
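
/* Illustrative sketch (not part of this API): the locking pattern intended
 * around the mailbox buffers held in struct qed_vf_iov. This is an assumed
 * shape only; the real send/receive helpers live in qed_vf.c.
 *
 *	struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
 *
 *	mutex_lock(&p_iov->mutex);
 *	// Build the request in p_iov->vf2pf_request, hand the PF the
 *	// p_iov->vf2pf_request_phys address, then read p_iov->pf2vf_reply.
 *	mutex_unlock(&p_iov->mutex);
 */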

#ifdef CONFIG_QED_SRIOV
/**
 * @brief Read the VF bulletin and act on it if needed
 *
 * @param p_hwfn
 * @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise.
 *
 * @return enum _qed_status
 */
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);

/**
 * @brief Get link parameters for VF from qed
 *
 * @param p_hwfn
 * @param params - the link params structure to be filled for the VF
 */
void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params);

/**
 * @brief Get link state for VF from qed
 *
 * @param p_hwfn
 * @param link - the link state structure to be filled for the VF
 */
void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link);

/**
 * @brief Get link capabilities for VF from qed
 *
 * @param p_hwfn
 * @param p_link_caps - the link capabilities structure to be filled for the VF
 */
void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps);

/**
 * @brief Get number of Rx queues allocated for VF by qed
 *
 *  @param p_hwfn
 *  @param num_rxqs - allocated RX queues
 */
void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);

/**
 * @brief Get port mac address for VF
 *
 * @param p_hwfn
 * @param port_mac - destination location for port mac
 */
void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);

/**
 * @brief Get number of VLAN filters allocated for VF by qed
 *
 *  @param p_hwfn
 *  @param num_vlan_filters - allocated VLAN filters
 */
void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
				 u8 *num_vlan_filters);

/**
 * @brief Check if VF can set a MAC address
 *
 * @param p_hwfn
 * @param mac
 *
 * @return bool
 */
bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);

/**
 * @brief Set firmware version information in dev_info from the VF's acquire
 *        response tlv
 *
 * @param p_hwfn
 * @param fw_major
 * @param fw_minor
 * @param fw_rev
 * @param fw_eng
 */
void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng);

/**
 * @brief hw preparation for VF
 *      sends ACQUIRE message
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);

/**
 * @brief VF - start the RX Queue by sending a message to the PF
 * @param p_hwfn
 * @param rx_queue_id           - zero based within the VF
 * @param sb                    - VF status block for this queue
 * @param sb_index              - Index within the status block
 * @param bd_max_bytes          - maximum number of bytes per bd
 * @param bd_chain_phys_addr    - physical address of bd chain
 * @param cqe_pbl_addr          - physical address of pbl
 * @param cqe_pbl_size          - pbl size
 * @param pp_prod               - pointer to the producer to be
 *				  used in fastpath
 *
 * @return int
 */
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
			u8 rx_queue_id,
			u16 sb,
			u8 sb_index,
			u16 bd_max_bytes,
			dma_addr_t bd_chain_phys_addr,
			dma_addr_t cqe_pbl_addr,
			u16 cqe_pbl_size, void __iomem **pp_prod);
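
/* Illustrative sketch (not part of this API): a typical call shape for
 * starting a VF Rx queue, assuming the caller has already allocated the BD
 * chain and CQE PBL via the DMA API. The local variable names are
 * hypothetical.
 *
 *	void __iomem *p_prod;
 *	int rc;
 *
 *	rc = qed_vf_pf_rxq_start(p_hwfn, rx_queue_id, sb_id, sb_index,
 *				 bd_max_bytes, bd_chain_phys_addr,
 *				 cqe_pbl_addr, cqe_pbl_size, &p_prod);
 *	if (rc)
 *		return rc;	// PF rejected the request; don't use the queue
 */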

/**
 * @brief VF - start the TX queue by sending a message to the
 *        PF.
 *
 * @param p_hwfn
 * @param tx_queue_id           - zero based within the VF
 * @param sb                    - status block for this queue
 * @param sb_index              - index within the status block
 * @param pbl_addr              - physical address of the tx PBL
 * @param pbl_size              - PBL size
 * @param pp_doorbell           - pointer to the address to which
 *                      the doorbell should be written.
 *
 * @return int
 */
int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
			u16 tx_queue_id,
			u16 sb,
			u8 sb_index,
			dma_addr_t pbl_addr,
			u16 pbl_size, void __iomem **pp_doorbell);

/**
 * @brief VF - stop the RX queue by sending a message to the PF
 *
 * @param p_hwfn
 * @param rx_qid
 * @param cqe_completion
 *
 * @return int
 */
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
		       u16 rx_qid, bool cqe_completion);

/**
 * @brief VF - stop the TX queue by sending a message to the PF
 *
 * @param p_hwfn
 * @param tx_qid
 *
 * @return int
 */
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid);

/**
 * @brief VF - send a vport update command
 *
 * @param p_hwfn
 * @param params
 *
 * @return int
 */
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params);

/**
 *
 * @brief VF - send a close message to PF
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);

/**
 * @brief VF - free the VF's memories
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_release(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given
 *        sb_id. For VFs, IGU SBs don't have to be contiguous
 *
 * @param p_hwfn
 * @param sb_id
 *
 * @return u16
 */
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);

/**
 * @brief qed_vf_pf_vport_start - perform vport start for VF.
 *
 * @param p_hwfn
 * @param vport_id
 * @param mtu
 * @param inner_vlan_removal
 * @param tpa_mode
 * @param max_buffers_per_cqe
 * @param only_untagged - default behavior regarding vlan acceptance
 *
 * @return enum _qed_status
 */
int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe, u8 only_untagged);

/**
 * @brief qed_vf_pf_vport_stop - stop the VF's vport
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);

int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_param);

void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd);

/**
 * @brief qed_vf_pf_int_cleanup - clean the SB of the VF
 *
 * @param p_hwfn
 *
 * @return enum _qed_status
 */
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);

/**
 * @brief - return the link params in a given bulletin board
 *
 * @param p_hwfn
 * @param p_params - pointer to a struct to fill with link params
 * @param p_bulletin
 */
void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin);

/**
 * @brief - return the link state in a given bulletin board
 *
 * @param p_hwfn
 * @param p_link - pointer to a struct to fill with link state
 * @param p_bulletin
 */
void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin);

/**
 * @brief - return the link capabilities in a given bulletin board
 *
 * @param p_hwfn
 * @param p_link_caps - pointer to a struct to fill with link capabilities
 * @param p_bulletin
 */
void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin);

void qed_iov_vf_task(struct work_struct *work);
#else
static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
					  struct qed_mcp_link_params *params)
{
}

static inline void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
					 struct qed_mcp_link_state *link)
{
}

static inline void
qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
		     struct qed_mcp_link_capabilities *p_link_caps)
{
}

static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
}

static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
}

static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
					       u8 *num_vlan_filters)
{
}

static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	return false;
}

static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
					 u16 *fw_major, u16 *fw_minor,
					 u16 *fw_rev, u16 *fw_eng)
{
}

static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
				      u8 rx_queue_id,
				      u16 sb,
				      u8 sb_index,
				      u16 bd_max_bytes,
				      dma_addr_t bd_chain_phys_addr,
				      dma_addr_t cqe_pbl_addr,
				      u16 cqe_pbl_size, void __iomem **pp_prod)
{
	return -EINVAL;
}

static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
				      u16 tx_queue_id,
				      u16 sb,
				      u8 sb_index,
				      dma_addr_t pbl_addr,
				      u16 pbl_size, void __iomem **pp_doorbell)
{
	return -EINVAL;
}

static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
				     u16 rx_qid, bool cqe_completion)
{
	return -EINVAL;
}

static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
{
	return -EINVAL;
}

static inline int
qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
		       struct qed_sp_vport_update_params *p_params)
{
	return -EINVAL;
}

static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	return 0;
}

static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
					u8 vport_id,
					u16 mtu,
					u8 inner_vlan_removal,
					enum qed_tpa_mode tpa_mode,
					u8 max_buffers_per_cqe,
					u8 only_untagged)
{
	return -EINVAL;
}

static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
					 struct qed_filter_ucast *p_param)
{
	return -EINVAL;
}

static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
					  struct qed_filter_mcast *p_filter_cmd)
{
}

static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
					    struct qed_mcp_link_params
					    *p_params,
					    struct qed_bulletin_content
					    *p_bulletin)
{
}

static inline void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
					   struct qed_mcp_link_state *p_link,
					   struct qed_bulletin_content
					   *p_bulletin)
{
}

static inline void
__qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
		       struct qed_mcp_link_capabilities *p_link_caps,
		       struct qed_bulletin_content *p_bulletin)
{
}

static inline void qed_iov_vf_task(struct work_struct *work)
{
}
#endif

#endif