/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#ifndef _QED_VF_H
#define _QED_VF_H

#include "qed_l2.h"
#include "qed_mcp.h"

#define T_ETH_INDIRECTION_TABLE_SIZE 128
#define T_ETH_RSS_KEY_SIZE 10

struct vf_pf_resc_request {
	u8 num_rxqs;
	u8 num_txqs;
	u8 num_sbs;
	u8 num_mac_filters;
	u8 num_vlan_filters;
	u8 num_mc_filters;
	u16 padding;
};

struct hw_sb_info {
	u16 hw_sb_id;
	u8 sb_qid;
	u8 padding[5];
};

#define TLV_BUFFER_SIZE                 1024

enum {
	PFVF_STATUS_WAITING,
	PFVF_STATUS_SUCCESS,
	PFVF_STATUS_FAILURE,
	PFVF_STATUS_NOT_SUPPORTED,
	PFVF_STATUS_NO_RESOURCE,
	PFVF_STATUS_FORCED,
};

/* vf pf channel tlvs */
/* general tlv header (used for both vf->pf request and pf->vf response) */
struct channel_tlv {
	u16 type;
	u16 length;
};

/* header of first vf->pf tlv carries the offset used to calculate response
 * buffer address
 */
struct vfpf_first_tlv {
	struct channel_tlv tl;
	u32 padding;
	u64 reply_address;
};

/* header of pf->vf tlvs, carries the status of handling the request */
struct pfvf_tlv {
	struct channel_tlv tl;
	u8 status;
	u8 padding[3];
};

/* response tlv used for most tlvs */
struct pfvf_def_resp_tlv {
	struct pfvf_tlv hdr;
};

/* used to terminate and pad a tlv list */
struct channel_list_end_tlv {
	struct channel_tlv tl;
	u8 padding[4];
};
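
/* Illustrative sketch (not part of the driver): a vf->pf message is laid out
 * in the request buffer as a TLV chain - a struct vfpf_first_tlv whose
 * channel_tlv header carries the request type and length and whose
 * reply_address tells the PF where to place the response, optionally followed
 * by extended TLVs, and terminated by a channel_list_end_tlv. The helper name
 * below is hypothetical:
 *
 *	static void example_build_release_req(union vfpf_tlvs *req, u64 reply_pa)
 *	{
 *		struct vfpf_first_tlv *first = &req->first_tlv;
 *		struct channel_list_end_tlv *end;
 *
 *		first->tl.type = CHANNEL_TLV_RELEASE;
 *		first->tl.length = sizeof(*first);
 *		first->reply_address = reply_pa;
 *
 *		end = (struct channel_list_end_tlv *)(first + 1);
 *		end->tl.type = CHANNEL_TLV_LIST_END;
 *		end->tl.length = sizeof(*end);
 *	}
 */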

#define VFPF_ACQUIRE_OS_LINUX (0)
#define VFPF_ACQUIRE_OS_WINDOWS (1)
#define VFPF_ACQUIRE_OS_ESX (2)
#define VFPF_ACQUIRE_OS_SOLARIS (3)
#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)

struct vfpf_acquire_tlv {
	struct vfpf_first_tlv first_tlv;

	struct vf_pf_vfdev_info {
#define VFPF_ACQUIRE_CAP_PRE_FP_HSI     (1 << 0) /* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_100G		(1 << 1) /* VF can support 100g */
		u64 capabilities;
		u8 fw_major;
		u8 fw_minor;
		u8 fw_revision;
		u8 fw_engineering;
		u32 driver_version;
		u16 opaque_fid;	/* ME register value */
		u8 os_type;	/* VFPF_ACQUIRE_OS_* value */
		u8 eth_fp_hsi_major;
		u8 eth_fp_hsi_minor;
		u8 padding[3];
	} vfdev_info;

	struct vf_pf_resc_request resc_request;

	u64 bulletin_addr;
	u32 bulletin_size;
	u32 padding;
};

/* receive side scaling tlv */
struct vfpf_vport_update_rss_tlv {
	struct channel_tlv tl;

	u8 update_rss_flags;
#define VFPF_UPDATE_RSS_CONFIG_FLAG       BIT(0)
#define VFPF_UPDATE_RSS_CAPS_FLAG         BIT(1)
#define VFPF_UPDATE_RSS_IND_TABLE_FLAG    BIT(2)
#define VFPF_UPDATE_RSS_KEY_FLAG          BIT(3)

	u8 rss_enable;
	u8 rss_caps;
	u8 rss_table_size_log;	/* The table size is 2 ^ rss_table_size_log */
	u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
	u32 rss_key[T_ETH_RSS_KEY_SIZE];
};
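
/* Illustrative sketch (not part of the driver): filling the RSS TLV. With the
 * full T_ETH_INDIRECTION_TABLE_SIZE (128) entries, rss_table_size_log is 7
 * (table size == 2 ^ 7). The round-robin queue spreading below is only an
 * example policy, and the helper name is hypothetical:
 *
 *	static void example_fill_rss(struct vfpf_vport_update_rss_tlv *rss,
 *				     u16 num_rxqs)
 *	{
 *		int i;
 *
 *		rss->update_rss_flags = VFPF_UPDATE_RSS_CONFIG_FLAG |
 *					VFPF_UPDATE_RSS_IND_TABLE_FLAG;
 *		rss->rss_enable = 1;
 *		rss->rss_table_size_log = 7;
 *		for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
 *			rss->rss_ind_table[i] = i % num_rxqs;
 *	}
 */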

struct pfvf_storm_stats {
	u32 address;
	u32 len;
};

struct pfvf_stats_info {
	struct pfvf_storm_stats mstats;
	struct pfvf_storm_stats pstats;
	struct pfvf_storm_stats tstats;
	struct pfvf_storm_stats ustats;
};

struct pfvf_acquire_resp_tlv {
	struct pfvf_tlv hdr;

	struct pf_vf_pfdev_info {
		u32 chip_num;
		u32 mfw_ver;

		u16 fw_major;
		u16 fw_minor;
		u16 fw_rev;
		u16 fw_eng;

		u64 capabilities;
#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED	BIT(0)
#define PFVF_ACQUIRE_CAP_100G			BIT(1)	/* If set, 100g PF */
/* Old PF versions might mistakenly override the version-based sanity
 * mechanism and allow a VF that can't be supported to pass the acquisition
 * phase.
 * To overcome this, newer PFs indicate that they're past that point, and new
 * VFs fail probe on older PFs that don't set this capability.
 */
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE	BIT(2)

		u16 db_size;
		u8 indices_per_sb;
		u8 os_type;

		/* These should match the PF's qed_dev values */
		u16 chip_rev;
		u8 dev_type;

		u8 padding;

		struct pfvf_stats_info stats_info;

		u8 port_mac[ETH_ALEN];

		/* It's possible the PF had to configure an older fastpath HSI
		 * [in case the VF is newer than the PF]. This is communicated
		 * back to the VF. In case of a version-mismatch error it also
		 * helps the VF understand why the failure occurred.
		 */
		u8 major_fp_hsi;
		u8 minor_fp_hsi;
	} pfdev_info;

	struct pf_vf_resc {
#define PFVF_MAX_QUEUES_PER_VF		16
#define PFVF_MAX_SBS_PER_VF		16
		struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
		u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
		u8 cid[PFVF_MAX_QUEUES_PER_VF];

		u8 num_rxqs;
		u8 num_txqs;
		u8 num_sbs;
		u8 num_mac_filters;
		u8 num_vlan_filters;
		u8 num_mc_filters;
		u8 padding[2];
	} resc;

	u32 bulletin_size;
	u32 padding;
};
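
/* Illustrative sketch (not part of the driver): how a VF might inspect the
 * acquire response. If the PF lacks PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE or
 * reports a fastpath HSI major different from the one the VF requested, the
 * VF would typically fail probe or fall back to legacy behavior. The helper
 * name is hypothetical:
 *
 *	static bool example_acquire_resp_ok(struct pfvf_acquire_resp_tlv *resp,
 *					    u8 req_fp_hsi_major)
 *	{
 *		if (resp->hdr.status != PFVF_STATUS_SUCCESS)
 *			return false;
 *		if (!(resp->pfdev_info.capabilities &
 *		      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE))
 *			return false;
 *		return resp->pfdev_info.major_fp_hsi == req_fp_hsi_major;
 *	}
 */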

struct pfvf_start_queue_resp_tlv {
	struct pfvf_tlv hdr;
	u32 offset;		/* offset to consumer/producer of queue */
	u8 padding[4];
};

/* Setup Queue */
struct vfpf_start_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 rxq_addr;
	u64 deprecated_sge_addr;
	u64 cqe_pbl_addr;

	u16 cqe_pbl_size;
	u16 hw_sb;
	u16 rx_qid;
	u16 hc_rate;		/* desired interrupts per sec. */

	u16 bd_max_bytes;
	u16 stat_id;
	u8 sb_index;
	u8 padding[3];
};

struct vfpf_start_txq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 pbl_addr;
	u16 pbl_size;
	u16 stat_id;
	u16 tx_qid;
	u16 hw_sb;

	u32 flags;		/* VFPF_QUEUE_FLG_X flags */
	u16 hc_rate;		/* desired interrupts per sec. */
	u8 sb_index;
	u8 padding[3];
};

/* Stop RX Queue */
struct vfpf_stop_rxqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 rx_qid;
	u8 num_rxqs;
	u8 cqe_completion;
	u8 padding[4];
};

/* Stop TX Queues */
struct vfpf_stop_txqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 tx_qid;
	u8 num_txqs;
	u8 padding[5];
};

struct vfpf_update_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];

	u16 rx_qid;
	u8 num_rxqs;
	u8 flags;
#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG    BIT(0)
#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG          BIT(1)
#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG        BIT(2)

	u8 padding[4];
};

/* Set Queue Filters */
struct vfpf_q_mac_vlan_filter {
	u32 flags;
#define VFPF_Q_FILTER_DEST_MAC_VALID    0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID    0x02
#define VFPF_Q_FILTER_SET_MAC           0x100	/* set/clear */

	u8 mac[ETH_ALEN];
	u16 vlan_tag;

	u8 padding[4];
};

/* Start a vport */
struct vfpf_vport_start_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 sb_addr[PFVF_MAX_SBS_PER_VF];

	u32 tpa_mode;
	u16 dep1;
	u16 mtu;

	u8 vport_id;
	u8 inner_vlan_removal;

	u8 only_untagged;
	u8 max_buffers_per_cqe;

	u8 padding[4];
};

/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
struct vfpf_vport_update_activate_tlv {
	struct channel_tlv tl;
	u8 update_rx;
	u8 update_tx;
	u8 active_rx;
	u8 active_tx;
};

struct vfpf_vport_update_tx_switch_tlv {
	struct channel_tlv tl;
	u8 tx_switching;
	u8 padding[3];
};

struct vfpf_vport_update_vlan_strip_tlv {
	struct channel_tlv tl;
	u8 remove_vlan;
	u8 padding[3];
};

struct vfpf_vport_update_mcast_bin_tlv {
	struct channel_tlv tl;
	u8 padding[4];

	u64 bins[8];
};

struct vfpf_vport_update_accept_param_tlv {
	struct channel_tlv tl;
	u8 update_rx_mode;
	u8 update_tx_mode;
	u8 rx_accept_filter;
	u8 tx_accept_filter;
};

struct vfpf_vport_update_accept_any_vlan_tlv {
	struct channel_tlv tl;
	u8 update_accept_any_vlan_flg;
	u8 accept_any_vlan;

	u8 padding[2];
};

struct vfpf_vport_update_sge_tpa_tlv {
	struct channel_tlv tl;

	u16 sge_tpa_flags;
#define VFPF_TPA_IPV4_EN_FLAG		BIT(0)
#define VFPF_TPA_IPV6_EN_FLAG		BIT(1)
#define VFPF_TPA_PKT_SPLIT_FLAG		BIT(2)
#define VFPF_TPA_HDR_DATA_SPLIT_FLAG	BIT(3)
#define VFPF_TPA_GRO_CONSIST_FLAG	BIT(4)

	u8 update_sge_tpa_flags;
#define VFPF_UPDATE_SGE_DEPRECATED_FLAG	BIT(0)
#define VFPF_UPDATE_TPA_EN_FLAG		BIT(1)
#define VFPF_UPDATE_TPA_PARAM_FLAG	BIT(2)

	u8 max_buffers_per_cqe;

	u16 deprecated_sge_buff_size;
	u16 tpa_max_size;
	u16 tpa_min_size_to_start;
	u16 tpa_min_size_to_cont;

	u8 tpa_max_aggs_num;
	u8 padding[7];
};

/* Primary tlv, used as a header for the various extended tlvs that implement
 * the different functionalities of the vport update ramrod.
 */
struct vfpf_vport_update_tlv {
	struct vfpf_first_tlv first_tlv;
};

struct vfpf_ucast_filter_tlv {
	struct vfpf_first_tlv first_tlv;

	u8 opcode;
	u8 type;

	u8 mac[ETH_ALEN];

	u16 vlan;
	u16 padding[3];
};

struct tlv_buffer_size {
	u8 tlv_buffer[TLV_BUFFER_SIZE];
};

union vfpf_tlvs {
	struct vfpf_first_tlv first_tlv;
	struct vfpf_acquire_tlv acquire;
	struct vfpf_start_rxq_tlv start_rxq;
	struct vfpf_start_txq_tlv start_txq;
	struct vfpf_stop_rxqs_tlv stop_rxqs;
	struct vfpf_stop_txqs_tlv stop_txqs;
	struct vfpf_update_rxq_tlv update_rxq;
	struct vfpf_vport_start_tlv start_vport;
	struct vfpf_vport_update_tlv vport_update;
	struct vfpf_ucast_filter_tlv ucast_filter;
	struct channel_list_end_tlv list_end;
	struct tlv_buffer_size tlv_buf_size;
};

union pfvf_tlvs {
	struct pfvf_def_resp_tlv default_resp;
	struct pfvf_acquire_resp_tlv acquire_resp;
	struct tlv_buffer_size tlv_buf_size;
	struct pfvf_start_queue_resp_tlv queue_start;
};

enum qed_bulletin_bit {
	/* Alert the VF that a forced MAC was set by the PF */
	MAC_ADDR_FORCED = 0,
	/* Alert the VF that a forced VLAN was set by the PF */
	VLAN_ADDR_FORCED = 2,

	/* Indicate that `default_only_untagged' contains actual data */
	VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
	VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,

	/* Alert the VF that a suggested MAC address was sent by the PF.
	 * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set.
	 */
	VFPF_BULLETIN_MAC_ADDR = 5
};
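
/* Illustrative sketch (not part of the driver): the qed_bulletin_bit values
 * index bits in qed_bulletin_content.valid_bitmap. A VF-side check for a
 * forced MAC could look like this (helper name is hypothetical):
 *
 *	static bool example_mac_is_forced(struct qed_bulletin_content *bulletin)
 *	{
 *		return !!(bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED));
 *	}
 */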

struct qed_bulletin_content {
	/* crc of the structure, used to ensure it is not read mid-update */
	u32 crc;

	u32 version;

	/* bitmap indicating which fields hold valid values */
	u64 valid_bitmap;

	/* used for MAC_ADDR or MAC_ADDR_FORCED */
	u8 mac[ETH_ALEN];

	/* If valid, 1 => only untagged Rx if no vlan is configured */
	u8 default_only_untagged;
	u8 padding;

	/* The following is a 'copy' of qed_mcp_link_state, qed_mcp_link_params
	 * and qed_mcp_link_capabilities. Since those structs may grow over
	 * time, we cannot embed them here; instead we duplicate their fields.
	 */
	u8 req_autoneg;
	u8 req_autoneg_pause;
	u8 req_forced_rx;
	u8 req_forced_tx;
	u8 padding2[4];

	u32 req_adv_speed;
	u32 req_forced_speed;
	u32 req_loopback;
	u32 padding3;

	u8 link_up;
	u8 full_duplex;
	u8 autoneg;
	u8 autoneg_complete;
	u8 parallel_detection;
	u8 pfc_enabled;
	u8 partner_tx_flow_ctrl_en;
	u8 partner_rx_flow_ctrl_en;
	u8 partner_adv_pause;
	u8 sfp_tx_fault;
	u8 padding4[6];

	u32 speed;
	u32 partner_adv_speed;

	u32 capability_speed;

	/* Forced vlan */
	u16 pvid;
	u16 padding5;
};

struct qed_bulletin {
	dma_addr_t phys;
	struct qed_bulletin_content *p_virt;
	u32 size;
};
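
/* Illustrative sketch (not part of the driver): consuming the bulletin board.
 * The PF updates the DMA-mapped copy asynchronously, so the VF snapshots it
 * into a shadow copy and only accepts the snapshot when the CRC over the
 * payload matches. Computing it with crc32() over everything past the crc
 * field is an assumption here, and the helper name is hypothetical:
 *
 *	static bool example_bulletin_snapshot(struct qed_bulletin *bulletin,
 *					      struct qed_bulletin_content *shadow)
 *	{
 *		u32 crc;
 *
 *		memcpy(shadow, bulletin->p_virt, sizeof(*shadow));
 *		crc = crc32(0, (u8 *)shadow + sizeof(shadow->crc),
 *			    sizeof(*shadow) - sizeof(shadow->crc));
 *		return crc == shadow->crc;
 *	}
 */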

enum {
	CHANNEL_TLV_NONE,	/* ends tlv sequence */
	CHANNEL_TLV_ACQUIRE,
	CHANNEL_TLV_VPORT_START,
	CHANNEL_TLV_VPORT_UPDATE,
	CHANNEL_TLV_VPORT_TEARDOWN,
	CHANNEL_TLV_START_RXQ,
	CHANNEL_TLV_START_TXQ,
	CHANNEL_TLV_STOP_RXQS,
	CHANNEL_TLV_STOP_TXQS,
	CHANNEL_TLV_UPDATE_RXQ,
	CHANNEL_TLV_INT_CLEANUP,
	CHANNEL_TLV_CLOSE,
	CHANNEL_TLV_RELEASE,
	CHANNEL_TLV_LIST_END,
	CHANNEL_TLV_UCAST_FILTER,
	CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
	CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
	CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
	CHANNEL_TLV_VPORT_UPDATE_MCAST,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
	CHANNEL_TLV_VPORT_UPDATE_RSS,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
	CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
	CHANNEL_TLV_MAX,

	/* Required for iterating over vport-update tlvs.
	 * Will break in case of non-sequential vport-update tlvs.
	 */
	CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
};
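
/* Illustrative sketch (not part of the driver): because the vport-update
 * extended TLV types are sequential, code can walk them by type when scanning
 * a request or response buffer. Both helper names below are hypothetical:
 *
 *	static void example_scan_vport_update_tlvs(u8 *tlvs_list)
 *	{
 *		u16 type;
 *
 *		for (type = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
 *		     type < CHANNEL_TLV_VPORT_UPDATE_MAX; type++)
 *			example_handle_tlv(tlvs_list, type);
 *	}
 */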

/* This data is held in the qed_hwfn structure for VFs only. */
struct qed_vf_iov {
	union vfpf_tlvs *vf2pf_request;
	dma_addr_t vf2pf_request_phys;
	union pfvf_tlvs *pf2vf_reply;
	dma_addr_t pf2vf_reply_phys;

	/* Should be taken whenever the mailbox buffers are accessed */
	struct mutex mutex;
	u8 *offset;

	/* Bulletin Board */
	struct qed_bulletin bulletin;
	struct qed_bulletin_content bulletin_shadow;

	/* we set aside a copy of the acquire response */
	struct pfvf_acquire_resp_tlv acquire_resp;

	/* In case the PF predates the fp-hsi version comparison, this has to
	 * be propagated, as it affects the fastpath.
	 */
	bool b_pre_fp_hsi;
};
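
/* Illustrative sketch (not part of the driver): the mutex above serializes
 * access to the vf2pf_request / pf2vf_reply mailbox buffers, so a request
 * path would typically bracket its use of them like this (all helper names
 * are hypothetical):
 *
 *	static void example_send_request(struct qed_vf_iov *iov)
 *	{
 *		mutex_lock(&iov->mutex);
 *		example_fill_request(iov->vf2pf_request);
 *		example_kick_pf_and_poll_reply(iov);
 *		example_consume_reply(iov->pf2vf_reply);
 *		mutex_unlock(&iov->mutex);
 *	}
 */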

#ifdef CONFIG_QED_SRIOV
/**
 * @brief Read the VF bulletin and act on it if needed
 *
 * @param p_hwfn
 * @param p_change - qed fills in 1 if the bulletin board has changed, 0 otherwise.
 *
 * @return int
 */
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);

/**
 * @brief Get link parameters for VF from qed
 *
 * @param p_hwfn
 * @param params - the link params structure to be filled for the VF
 */
void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params);

/**
 * @brief Get link state for VF from qed
 *
 * @param p_hwfn
 * @param link - the link state structure to be filled for the VF
 */
void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link);

/**
 * @brief Get link capabilities for VF from qed
 *
 * @param p_hwfn
 * @param p_link_caps - the link capabilities structure to be filled for the VF
 */
void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps);

/**
 * @brief Get number of Rx queues allocated for VF by qed
 *
 *  @param p_hwfn
 *  @param num_rxqs - allocated RX queues
 */
void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);

/**
 * @brief Get port mac address for VF
 *
 * @param p_hwfn
 * @param port_mac - destination location for port mac
 */
void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);

/**
 * @brief Get number of VLAN filters allocated for VF by qed
 *
 *  @param p_hwfn
 *  @param num_vlan_filters - allocated VLAN filters
 */
void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
				 u8 *num_vlan_filters);

/**
 * @brief Check if VF can set a MAC address
 *
 * @param p_hwfn
 * @param mac
 *
 * @return bool
 */
bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);

/**
 * @brief Set firmware version information in dev_info from the VF's
 *        acquire response tlv
 *
 * @param p_hwfn
 * @param fw_major
 * @param fw_minor
 * @param fw_rev
 * @param fw_eng
 */
void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng);

/**
 * @brief hw preparation for VF
 *      sends ACQUIRE message
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);

/**
 * @brief VF - start the RX Queue by sending a message to the PF
 * @param p_hwfn
 * @param rx_queue_id           - zero based within the VF
 * @param sb                    - VF status block for this queue
 * @param sb_index              - Index within the status block
 * @param bd_max_bytes          - maximum number of bytes per bd
 * @param bd_chain_phys_addr    - physical address of bd chain
 * @param cqe_pbl_addr          - physical address of pbl
 * @param cqe_pbl_size          - pbl size
 * @param pp_prod               - pointer to the producer to be
 *				  used in fastpath
 *
 * @return int
 */
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
			u8 rx_queue_id,
			u16 sb,
			u8 sb_index,
			u16 bd_max_bytes,
			dma_addr_t bd_chain_phys_addr,
			dma_addr_t cqe_pbl_addr,
			u16 cqe_pbl_size, void __iomem **pp_prod);
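
/* Illustrative sketch (not part of the driver): a typical caller allocates
 * the RX BD chain and CQE PBL in DMA-coherent memory first, then starts the
 * queue and keeps the returned producer pointer for fastpath use. All
 * variable names below are placeholders:
 *
 *	rc = qed_vf_pf_rxq_start(p_hwfn, rxq_id, sb_id, sb_index,
 *				 rx_buf_size, bd_chain_dma_addr,
 *				 cqe_pbl_dma_addr, cqe_pbl_size,
 *				 &hw_producer_addr);
 *	if (rc)
 *		return rc;
 */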

/**
 * @brief VF - start the TX queue by sending a message to the
 *        PF.
 *
 * @param p_hwfn
 * @param tx_queue_id           - zero based within the VF
 * @param sb                    - status block for this queue
 * @param sb_index              - index within the status block
 * @param pbl_addr              - physical address of the tx chain's pbl
 * @param pbl_size              - pbl size
 * @param pp_doorbell           - pointer to the address to which to
 *                      write doorbells
 *
 * @return int
 */
int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
			u16 tx_queue_id,
			u16 sb,
			u8 sb_index,
			dma_addr_t pbl_addr,
			u16 pbl_size, void __iomem **pp_doorbell);

/**
 * @brief VF - stop the RX queue by sending a message to the PF
 *
 * @param p_hwfn
 * @param rx_qid
 * @param cqe_completion
 *
 * @return int
 */
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
		       u16 rx_qid, bool cqe_completion);

/**
 * @brief VF - stop the TX queue by sending a message to the PF
 *
 * @param p_hwfn
 * @param tx_qid
 *
 * @return int
 */
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid);

/**
 * @brief VF - send a vport update command
 *
 * @param p_hwfn
 * @param p_params
 *
 * @return int
 */
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params);

/**
 * @brief VF - send a close message to PF
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);

/**
 * @brief VF - free the VF's memories
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_vf_pf_release(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given
 *        sb_id. For VFs, IGU SBs don't have to be contiguous.
 *
 * @param p_hwfn
 * @param sb_id
 *
 * @return u16
 */
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);

/**
 * @brief qed_vf_pf_vport_start - perform vport start for VF.
 *
 * @param p_hwfn
 * @param vport_id
 * @param mtu
 * @param inner_vlan_removal
 * @param tpa_mode
 * @param max_buffers_per_cqe
 * @param only_untagged - default behavior regarding vlan acceptance
 *
 * @return int
 */
int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe, u8 only_untagged);

/**
 * @brief qed_vf_pf_vport_stop - stop the VF's vport
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);

int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_param);

void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd);

/**
 * @brief qed_vf_pf_int_cleanup - clean the SB of the VF
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);

/**
 * @brief - return the link params in a given bulletin board
 *
 * @param p_hwfn
 * @param p_params - pointer to a struct to fill with link params
 * @param p_bulletin
 */
void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin);

/**
 * @brief - return the link state in a given bulletin board
 *
 * @param p_hwfn
 * @param p_link - pointer to a struct to fill with link state
 * @param p_bulletin
 */
void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin);

/**
 * @brief - return the link capabilities in a given bulletin board
 *
 * @param p_hwfn
 * @param p_link - pointer to a struct to fill with link capabilities
 * @param p_bulletin
 */
void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin);

void qed_iov_vf_task(struct work_struct *work);
#else
static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
					  struct qed_mcp_link_params *params)
{
}

static inline void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
					 struct qed_mcp_link_state *link)
{
}

static inline void
qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
		     struct qed_mcp_link_capabilities *p_link_caps)
{
}

static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
}

static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
}

static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
					       u8 *num_vlan_filters)
{
}

static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	return false;
}

static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
					 u16 *fw_major, u16 *fw_minor,
					 u16 *fw_rev, u16 *fw_eng)
{
}

static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
				      u8 rx_queue_id,
				      u16 sb,
				      u8 sb_index,
				      u16 bd_max_bytes,
				      dma_addr_t bd_chain_phys_addr,
				      dma_addr_t cqe_pbl_addr,
				      u16 cqe_pbl_size, void __iomem **pp_prod)
{
	return -EINVAL;
}

static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
				      u16 tx_queue_id,
				      u16 sb,
				      u8 sb_index,
				      dma_addr_t pbl_addr,
				      u16 pbl_size, void __iomem **pp_doorbell)
{
	return -EINVAL;
}

static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
				     u16 rx_qid, bool cqe_completion)
{
	return -EINVAL;
}

static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
{
	return -EINVAL;
}

static inline int
qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
		       struct qed_sp_vport_update_params *p_params)
{
	return -EINVAL;
}

static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	return 0;
}

static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
					u8 vport_id,
					u16 mtu,
					u8 inner_vlan_removal,
					enum qed_tpa_mode tpa_mode,
					u8 max_buffers_per_cqe,
					u8 only_untagged)
{
	return -EINVAL;
}

static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
					 struct qed_filter_ucast *p_param)
{
	return -EINVAL;
}

static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
					  struct qed_filter_mcast *p_filter_cmd)
{
}

static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
					    struct qed_mcp_link_params
					    *p_params,
					    struct qed_bulletin_content
					    *p_bulletin)
{
}

static inline void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
					   struct qed_mcp_link_state *p_link,
					   struct qed_bulletin_content
					   *p_bulletin)
{
}

static inline void
__qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
		       struct qed_mcp_link_capabilities *p_link_caps,
		       struct qed_bulletin_content *p_bulletin)
{
}

static inline void qed_iov_vf_task(struct work_struct *work)
{
}
#endif

#endif