/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#ifndef _QED_VF_H
#define _QED_VF_H

#include "qed_l2.h"
#include "qed_mcp.h"

#define T_ETH_INDIRECTION_TABLE_SIZE 128
#define T_ETH_RSS_KEY_SIZE 10

struct vf_pf_resc_request {
	u8 num_rxqs;
	u8 num_txqs;
	u8 num_sbs;
	u8 num_mac_filters;
	u8 num_vlan_filters;
	u8 num_mc_filters;
	u16 padding;
};

struct hw_sb_info {
	u16 hw_sb_id;
	u8 sb_qid;
	u8 padding[5];
};

#define TLV_BUFFER_SIZE                 1024

enum {
	PFVF_STATUS_WAITING,
	PFVF_STATUS_SUCCESS,
	PFVF_STATUS_FAILURE,
	PFVF_STATUS_NOT_SUPPORTED,
	PFVF_STATUS_NO_RESOURCE,
	PFVF_STATUS_FORCED,
	PFVF_STATUS_MALICIOUS,
};

/* vf pf channel tlvs */
/* general tlv header (used for both vf->pf request and pf->vf response) */
struct channel_tlv {
	u16 type;
	u16 length;
};

/* header of first vf->pf tlv carries the offset used to calculate response
 * buffer address
 */
struct vfpf_first_tlv {
	struct channel_tlv tl;
	u32 padding;
	u64 reply_address;
};

/* header of pf->vf tlvs, carries the status of handling the request */
struct pfvf_tlv {
	struct channel_tlv tl;
	u8 status;
	u8 padding[3];
};

/* response tlv used for most tlvs */
struct pfvf_def_resp_tlv {
	struct pfvf_tlv hdr;
};

/* used to terminate and pad a tlv list */
struct channel_list_end_tlv {
	struct channel_tlv tl;
	u8 padding[4];
};
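
/* Illustrative sketch (not part of the driver API; the helper name is
 * hypothetical): a request is framed as a chain of tlvs, each beginning with
 * a struct channel_tlv, and is terminated by a list-end tlv (type
 * CHANNEL_TLV_LIST_END, defined further below):
 *
 *	static void example_terminate_tlv_list(u8 *next_tlv)
 *	{
 *		struct channel_list_end_tlv *p_end;
 *
 *		p_end = (struct channel_list_end_tlv *)next_tlv;
 *		p_end->tl.type = CHANNEL_TLV_LIST_END;
 *		p_end->tl.length = sizeof(*p_end);
 *	}
 */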

#define VFPF_ACQUIRE_OS_LINUX (0)
#define VFPF_ACQUIRE_OS_WINDOWS (1)
#define VFPF_ACQUIRE_OS_ESX (2)
#define VFPF_ACQUIRE_OS_SOLARIS (3)
#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)

struct vfpf_acquire_tlv {
	struct vfpf_first_tlv first_tlv;

	struct vf_pf_vfdev_info {
#define VFPF_ACQUIRE_CAP_PRE_FP_HSI     (1 << 0) /* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_100G		(1 << 1) /* VF can support 100g */
		u64 capabilities;
		u8 fw_major;
		u8 fw_minor;
		u8 fw_revision;
		u8 fw_engineering;
		u32 driver_version;
		u16 opaque_fid;	/* ME register value */
		u8 os_type;	/* VFPF_ACQUIRE_OS_* value */
		u8 eth_fp_hsi_major;
		u8 eth_fp_hsi_minor;
		u8 padding[3];
	} vfdev_info;

	struct vf_pf_resc_request resc_request;

	u64 bulletin_addr;
	u32 bulletin_size;
	u32 padding;
};

/* receive side scaling tlv */
struct vfpf_vport_update_rss_tlv {
	struct channel_tlv tl;

	u8 update_rss_flags;
#define VFPF_UPDATE_RSS_CONFIG_FLAG       BIT(0)
#define VFPF_UPDATE_RSS_CAPS_FLAG         BIT(1)
#define VFPF_UPDATE_RSS_IND_TABLE_FLAG    BIT(2)
#define VFPF_UPDATE_RSS_KEY_FLAG          BIT(3)

	u8 rss_enable;
	u8 rss_caps;
	u8 rss_table_size_log;	/* The table size is 2 ^ rss_table_size_log */
	u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
	u32 rss_key[T_ETH_RSS_KEY_SIZE];
};
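
/* Illustrative sketch (assumption, not driver code): filling the RSS tlv from
 * caller-provided data. rss_table_size_log is the log2 of the indirection
 * table size, e.g. ilog2() from <linux/log2.h> gives 7 for a full 128-entry
 * table:
 *
 *	static void example_fill_rss(struct vfpf_vport_update_rss_tlv *p_rss,
 *				     const u16 *ind_table, const u32 *key)
 *	{
 *		int i;
 *
 *		p_rss->update_rss_flags = VFPF_UPDATE_RSS_CONFIG_FLAG |
 *					  VFPF_UPDATE_RSS_IND_TABLE_FLAG |
 *					  VFPF_UPDATE_RSS_KEY_FLAG;
 *		p_rss->rss_enable = 1;
 *		p_rss->rss_table_size_log = ilog2(T_ETH_INDIRECTION_TABLE_SIZE);
 *
 *		for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
 *			p_rss->rss_ind_table[i] = ind_table[i];
 *		for (i = 0; i < T_ETH_RSS_KEY_SIZE; i++)
 *			p_rss->rss_key[i] = key[i];
 *	}
 */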

struct pfvf_storm_stats {
	u32 address;
	u32 len;
};

struct pfvf_stats_info {
	struct pfvf_storm_stats mstats;
	struct pfvf_storm_stats pstats;
	struct pfvf_storm_stats tstats;
	struct pfvf_storm_stats ustats;
};

struct pfvf_acquire_resp_tlv {
	struct pfvf_tlv hdr;

	struct pf_vf_pfdev_info {
		u32 chip_num;
		u32 mfw_ver;

		u16 fw_major;
		u16 fw_minor;
		u16 fw_rev;
		u16 fw_eng;

		u64 capabilities;
#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED	BIT(0)
#define PFVF_ACQUIRE_CAP_100G			BIT(1)	/* If set, 100g PF */
/* Old PF versions might mistakenly override the [version-based] sanity
 * mechanism and allow a VF that can't be supported to pass the acquisition
 * phase.
 * To overcome this, newer PFs indicate that they're past that point, and new
 * VFs fail probe on older PFs that don't set this bit (see the illustrative
 * check after this struct).
 */
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE	BIT(2)

		u16 db_size;
		u8 indices_per_sb;
		u8 os_type;

		/* These should match the PF's qed_dev values */
		u16 chip_rev;
		u8 dev_type;

		u8 padding;

		struct pfvf_stats_info stats_info;

		u8 port_mac[ETH_ALEN];

		/* It's possible the PF had to configure an older fastpath HSI
		 * [in case the VF is newer than the PF]. This is communicated
		 * back to the VF. It can also be used, when the versions don't
		 * match, to give the VF insight into the failure.
		 */
		u8 major_fp_hsi;
		u8 minor_fp_hsi;
	} pfdev_info;

	struct pf_vf_resc {
#define PFVF_MAX_QUEUES_PER_VF		16
#define PFVF_MAX_SBS_PER_VF		16
		struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
		u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
		u8 cid[PFVF_MAX_QUEUES_PER_VF];

		u8 num_rxqs;
		u8 num_txqs;
		u8 num_sbs;
		u8 num_mac_filters;
		u8 num_vlan_filters;
		u8 num_mc_filters;
		u8 padding[2];
	} resc;

	u32 bulletin_size;
	u32 padding;
};
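
/* Illustrative sketch (assumption, not the driver's actual acquire logic):
 * after receiving the acquire response a VF would typically verify both the
 * PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE capability and the fastpath HSI major
 * version before proceeding:
 *
 *	static bool example_acquire_resp_ok(struct pfvf_acquire_resp_tlv *resp,
 *					    u8 req_fp_hsi_major)
 *	{
 *		if (!(resp->pfdev_info.capabilities &
 *		      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE))
 *			return false;	// PF predates the sanity fix
 *
 *		// A major fastpath HSI mismatch can't be bridged at runtime
 *		return resp->pfdev_info.major_fp_hsi == req_fp_hsi_major;
 *	}
 */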

struct pfvf_start_queue_resp_tlv {
	struct pfvf_tlv hdr;
	u32 offset;		/* offset to consumer/producer of queue */
	u8 padding[4];
};

/* Setup Queue */
struct vfpf_start_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 rxq_addr;
	u64 deprecated_sge_addr;
	u64 cqe_pbl_addr;

	u16 cqe_pbl_size;
	u16 hw_sb;
	u16 rx_qid;
	u16 hc_rate;		/* desired interrupts per sec. */

	u16 bd_max_bytes;
	u16 stat_id;
	u8 sb_index;
	u8 padding[3];
};

struct vfpf_start_txq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 pbl_addr;
	u16 pbl_size;
	u16 stat_id;
	u16 tx_qid;
	u16 hw_sb;

	u32 flags;		/* VFPF_QUEUE_FLG_X flags */
	u16 hc_rate;		/* desired interrupts per sec. */
	u8 sb_index;
	u8 padding[3];
};

/* Stop RX Queue */
struct vfpf_stop_rxqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 rx_qid;
	u8 num_rxqs;
	u8 cqe_completion;
	u8 padding[4];
};

/* Stop TX Queues */
struct vfpf_stop_txqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 tx_qid;
	u8 num_txqs;
	u8 padding[5];
};

struct vfpf_update_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];

	u16 rx_qid;
	u8 num_rxqs;
	u8 flags;
#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG    BIT(0)
#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG          BIT(1)
#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG        BIT(2)

	u8 padding[4];
};

/* Set Queue Filters */
struct vfpf_q_mac_vlan_filter {
	u32 flags;
#define VFPF_Q_FILTER_DEST_MAC_VALID    0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID    0x02
#define VFPF_Q_FILTER_SET_MAC           0x100	/* set/clear */

	u8 mac[ETH_ALEN];
	u16 vlan_tag;

	u8 padding[4];
};

/* Start a vport */
struct vfpf_vport_start_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 sb_addr[PFVF_MAX_SBS_PER_VF];

	u32 tpa_mode;
	u16 dep1;
	u16 mtu;

	u8 vport_id;
	u8 inner_vlan_removal;

	u8 only_untagged;
	u8 max_buffers_per_cqe;

	u8 padding[4];
};

/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
struct vfpf_vport_update_activate_tlv {
	struct channel_tlv tl;
	u8 update_rx;
	u8 update_tx;
	u8 active_rx;
	u8 active_tx;
};

struct vfpf_vport_update_tx_switch_tlv {
	struct channel_tlv tl;
	u8 tx_switching;
	u8 padding[3];
};

struct vfpf_vport_update_vlan_strip_tlv {
	struct channel_tlv tl;
	u8 remove_vlan;
	u8 padding[3];
};

struct vfpf_vport_update_mcast_bin_tlv {
	struct channel_tlv tl;
	u8 padding[4];

	u64 bins[8];
};

struct vfpf_vport_update_accept_param_tlv {
	struct channel_tlv tl;
	u8 update_rx_mode;
	u8 update_tx_mode;
	u8 rx_accept_filter;
	u8 tx_accept_filter;
};

struct vfpf_vport_update_accept_any_vlan_tlv {
	struct channel_tlv tl;
	u8 update_accept_any_vlan_flg;
	u8 accept_any_vlan;

	u8 padding[2];
};

struct vfpf_vport_update_sge_tpa_tlv {
	struct channel_tlv tl;

	u16 sge_tpa_flags;
#define VFPF_TPA_IPV4_EN_FLAG		BIT(0)
#define VFPF_TPA_IPV6_EN_FLAG		BIT(1)
#define VFPF_TPA_PKT_SPLIT_FLAG		BIT(2)
#define VFPF_TPA_HDR_DATA_SPLIT_FLAG	BIT(3)
#define VFPF_TPA_GRO_CONSIST_FLAG	BIT(4)

	u8 update_sge_tpa_flags;
#define VFPF_UPDATE_SGE_DEPRECATED_FLAG	BIT(0)
#define VFPF_UPDATE_TPA_EN_FLAG		BIT(1)
#define VFPF_UPDATE_TPA_PARAM_FLAG	BIT(2)

	u8 max_buffers_per_cqe;

	u16 deprecated_sge_buff_size;
	u16 tpa_max_size;
	u16 tpa_min_size_to_start;
	u16 tpa_min_size_to_cont;

	u8 tpa_max_aggs_num;
	u8 padding[7];
};

/* Primary tlv as a header for various extended tlvs for
 * various functionalities in vport update ramrod.
 */
struct vfpf_vport_update_tlv {
	struct vfpf_first_tlv first_tlv;
};

struct vfpf_ucast_filter_tlv {
	struct vfpf_first_tlv first_tlv;

	u8 opcode;
	u8 type;

	u8 mac[ETH_ALEN];

	u16 vlan;
	u16 padding[3];
};

struct tlv_buffer_size {
	u8 tlv_buffer[TLV_BUFFER_SIZE];
};

union vfpf_tlvs {
	struct vfpf_first_tlv first_tlv;
	struct vfpf_acquire_tlv acquire;
	struct vfpf_start_rxq_tlv start_rxq;
	struct vfpf_start_txq_tlv start_txq;
	struct vfpf_stop_rxqs_tlv stop_rxqs;
	struct vfpf_stop_txqs_tlv stop_txqs;
	struct vfpf_update_rxq_tlv update_rxq;
	struct vfpf_vport_start_tlv start_vport;
	struct vfpf_vport_update_tlv vport_update;
	struct vfpf_ucast_filter_tlv ucast_filter;
	struct channel_list_end_tlv list_end;
	struct tlv_buffer_size tlv_buf_size;
};

union pfvf_tlvs {
	struct pfvf_def_resp_tlv default_resp;
	struct pfvf_acquire_resp_tlv acquire_resp;
	struct tlv_buffer_size tlv_buf_size;
	struct pfvf_start_queue_resp_tlv queue_start;
};

enum qed_bulletin_bit {
	/* Alert the VF that a forced MAC was set by the PF */
	MAC_ADDR_FORCED = 0,
	/* Alert the VF that a forced VLAN was set by the PF */
	VLAN_ADDR_FORCED = 2,

	/* Indicate that `default_only_untagged' contains actual data */
	VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
	VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,

	/* Alert the VF that a suggested MAC was sent by the PF.
	 * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set.
	 */
	VFPF_BULLETIN_MAC_ADDR = 5
};

struct qed_bulletin_content {
	/* crc of the structure to ensure it is not read mid-update
	 * (see the validation sketch after this struct)
	 */
	u32 crc;

	u32 version;

	/* bitmap indicating which fields hold valid values */
	u64 valid_bitmap;

	/* used for MAC_ADDR or MAC_ADDR_FORCED */
	u8 mac[ETH_ALEN];

	/* If valid, 1 => only untagged Rx if no vlan is configured */
	u8 default_only_untagged;
	u8 padding;

	/* The following is a 'copy' of qed_mcp_link_state,
	 * qed_mcp_link_params and qed_mcp_link_capabilities. Since those
	 * structs may grow over time, we cannot embed them here; instead
	 * we duplicate all of their fields.
	 */
	u8 req_autoneg;
	u8 req_autoneg_pause;
	u8 req_forced_rx;
	u8 req_forced_tx;
	u8 padding2[4];

	u32 req_adv_speed;
	u32 req_forced_speed;
	u32 req_loopback;
	u32 padding3;

	u8 link_up;
	u8 full_duplex;
	u8 autoneg;
	u8 autoneg_complete;
	u8 parallel_detection;
	u8 pfc_enabled;
	u8 partner_tx_flow_ctrl_en;
	u8 partner_rx_flow_ctrl_en;
	u8 partner_adv_pause;
	u8 sfp_tx_fault;
	u8 padding4[6];

	u32 speed;
	u32 partner_adv_speed;

	u32 capability_speed;

	/* Forced vlan */
	u16 pvid;
	u16 padding5;
};
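
/* Illustrative sketch (assumption, not the driver's exact implementation):
 * validating a snapshot of the bulletin board. The CRC is computed over the
 * structure excluding the crc field itself, so a copy taken while the PF was
 * mid-update can be detected and discarded; crc32() is from <linux/crc32.h>.
 *
 *	static bool example_bulletin_crc_ok(struct qed_bulletin_content *p_copy,
 *					    u32 size)
 *	{
 *		u32 crc = crc32(0, (u8 *)p_copy + sizeof(p_copy->crc),
 *				size - sizeof(p_copy->crc));
 *
 *		return crc == p_copy->crc;
 *	}
 *
 * A field is meaningful only if its valid_bitmap bit is set, e.g.:
 *
 *	if (p_copy->valid_bitmap & BIT(MAC_ADDR_FORCED))
 *		ether_addr_copy(dst_mac, p_copy->mac);
 */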

struct qed_bulletin {
	dma_addr_t phys;
	struct qed_bulletin_content *p_virt;
	u32 size;
};

enum {
	CHANNEL_TLV_NONE,	/* ends tlv sequence */
	CHANNEL_TLV_ACQUIRE,
	CHANNEL_TLV_VPORT_START,
	CHANNEL_TLV_VPORT_UPDATE,
	CHANNEL_TLV_VPORT_TEARDOWN,
	CHANNEL_TLV_START_RXQ,
	CHANNEL_TLV_START_TXQ,
	CHANNEL_TLV_STOP_RXQS,
	CHANNEL_TLV_STOP_TXQS,
	CHANNEL_TLV_UPDATE_RXQ,
	CHANNEL_TLV_INT_CLEANUP,
	CHANNEL_TLV_CLOSE,
	CHANNEL_TLV_RELEASE,
	CHANNEL_TLV_LIST_END,
	CHANNEL_TLV_UCAST_FILTER,
	CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
	CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
	CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
	CHANNEL_TLV_VPORT_UPDATE_MCAST,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
	CHANNEL_TLV_VPORT_UPDATE_RSS,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
	CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
	CHANNEL_TLV_MAX,

	/* Required for iterating over vport-update tlvs.
	 * Iteration will break in case the vport-update tlvs are not
	 * sequential.
	 */
	CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
};
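
/* Illustrative sketch (assumption): locating a specific extended tlv inside a
 * response buffer. The chain is walked tlv by tlv until the requested type or
 * the CHANNEL_TLV_LIST_END terminator is found, bounded by TLV_BUFFER_SIZE:
 *
 *	static void *example_find_tlv(void *tlv_list, u16 req_type)
 *	{
 *		struct channel_tlv *tlv = tlv_list;
 *		u32 len = 0;
 *
 *		while (tlv->type != CHANNEL_TLV_LIST_END) {
 *			if (!tlv->length ||
 *			    len + tlv->length > TLV_BUFFER_SIZE)
 *				return NULL;	// malformed chain
 *			if (tlv->type == req_type)
 *				return tlv;
 *
 *			len += tlv->length;
 *			tlv = (struct channel_tlv *)((u8 *)tlv + tlv->length);
 *		}
 *
 *		return NULL;
 *	}
 */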

/* This data is held in the qed_hwfn structure for VFs only. */
struct qed_vf_iov {
	union vfpf_tlvs *vf2pf_request;
	dma_addr_t vf2pf_request_phys;
	union pfvf_tlvs *pf2vf_reply;
	dma_addr_t pf2vf_reply_phys;

	/* Should be taken whenever the mailbox buffers are accessed */
	struct mutex mutex;
	u8 *offset;

	/* Bulletin Board */
	struct qed_bulletin bulletin;
	struct qed_bulletin_content bulletin_shadow;

	/* we set aside a copy of the acquire response */
	struct pfvf_acquire_resp_tlv acquire_resp;

	/* Set in case the PF originates prior to the fp-hsi version
	 * comparison; this has to be propagated, as it affects the fastpath.
	 */
	bool b_pre_fp_hsi;
};
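
/* Illustrative sketch (assumption; the helper below is only meant to show the
 * pattern): every request/response exchange over the mailbox is serialized by
 * the mutex, and the first tlv of each request carries the DMA address of the
 * reply buffer so the PF knows where to write its response:
 *
 *	static int example_send_request(struct qed_vf_iov *p_iov, u16 type)
 *	{
 *		struct vfpf_first_tlv *req;
 *		int rc;
 *
 *		mutex_lock(&p_iov->mutex);
 *
 *		memset(p_iov->vf2pf_request, 0, sizeof(*p_iov->vf2pf_request));
 *		req = &p_iov->vf2pf_request->first_tlv;
 *		req->tl.type = type;
 *		req->tl.length = sizeof(*req);
 *		req->reply_address = (u64)p_iov->pf2vf_reply_phys;
 *
 *		// ... append a list-end tlv, signal the PF, wait for reply ...
 *
 *		rc = (p_iov->pf2vf_reply->default_resp.hdr.status ==
 *		      PFVF_STATUS_SUCCESS) ? 0 : -EINVAL;
 *
 *		mutex_unlock(&p_iov->mutex);
 *		return rc;
 *	}
 */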

#ifdef CONFIG_QED_SRIOV
/**
 * @brief Read the VF bulletin and act on it if needed
 *
 * @param p_hwfn
 * @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise.
 *
 * @return int
 */
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);
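
/* Illustrative usage (assumption about the calling context): the VF side
 * would typically poll the bulletin periodically and refresh its view of the
 * link only when the board actually changed:
 *
 *	u8 change = 0;
 *
 *	if (!qed_vf_read_bulletin(p_hwfn, &change) && change) {
 *		struct qed_mcp_link_state link;
 *
 *		qed_vf_get_link_state(p_hwfn, &link);
 *		// react to link.link_up, link.speed, etc.
 *	}
 */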

/**
 * @brief Get link parameters for VF from qed
 *
 * @param p_hwfn
 * @param params - the link params structure to be filled for the VF
 */
void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params);

/**
 * @brief Get link state for VF from qed
 *
 * @param p_hwfn
 * @param link - the link state structure to be filled for the VF
 */
void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link);

/**
 * @brief Get link capabilities for VF from qed
 *
 * @param p_hwfn
 * @param p_link_caps - the link capabilities structure to be filled for the VF
 */
void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps);

/**
 * @brief Get number of Rx queues allocated for VF by qed
 *
 *  @param p_hwfn
 *  @param num_rxqs - allocated RX queues
 */
void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);

/**
 * @brief Get port mac address for VF
 *
 * @param p_hwfn
 * @param port_mac - destination location for port mac
 */
void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);

/**
 * @brief Get number of VLAN filters allocated for VF by qed
 *
 *  @param p_hwfn
 *  @param num_vlan_filters - allocated VLAN filters
 */
void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
				 u8 *num_vlan_filters);

/**
 * @brief Get number of MAC filters allocated for VF by qed
 *
 *  @param p_hwfn
 *  @param num_mac_filters - allocated MAC filters
 */
void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters);

/**
 * @brief Check if VF can set a MAC address
 *
 * @param p_hwfn
 * @param mac
 *
 * @return bool
 */
bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);

/**
 * @brief Set firmware version information in dev_info from the VF's acquire response tlv
 *
 * @param p_hwfn
 * @param fw_major
 * @param fw_minor
 * @param fw_rev
 * @param fw_eng
 */
void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
			   u16 *fw_major, u16 *fw_minor,
			   u16 *fw_rev, u16 *fw_eng);

/**
 * @brief HW preparation for the VF;
 *      sends the ACQUIRE message
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);

/**
 * @brief VF - start the RX Queue by sending a message to the PF
 * @param p_hwfn
 * @param p_cid			- Only relative fields are relevant
 * @param bd_max_bytes          - maximum number of bytes per bd
 * @param bd_chain_phys_addr    - physical address of bd chain
 * @param cqe_pbl_addr          - physical address of pbl
 * @param cqe_pbl_size          - pbl size
 * @param pp_prod               - pointer to the producer to be
 *				  used in fastpath
 *
 * @return int
 */
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
			struct qed_queue_cid *p_cid,
			u16 bd_max_bytes,
			dma_addr_t bd_chain_phys_addr,
			dma_addr_t cqe_pbl_addr,
			u16 cqe_pbl_size, void __iomem **pp_prod);

/**
 * @brief VF - start the TX queue by sending a message to the
 *        PF.
 *
 * @param p_hwfn
 * @param p_cid
 * @param pbl_addr              - physical address of the pbl
 * @param pbl_size              - pbl size
 * @param pp_doorbell           - pointer to address to which to
 *                      write the doorbell
 *
 * @return int
 */
int
qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
		    struct qed_queue_cid *p_cid,
		    dma_addr_t pbl_addr,
		    u16 pbl_size, void __iomem **pp_doorbell);

/**
 * @brief VF - stop the RX queue by sending a message to the PF
 *
 * @param p_hwfn
 * @param p_cid
 * @param cqe_completion
 *
 * @return int
 */
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
		       struct qed_queue_cid *p_cid, bool cqe_completion);

/**
 * @brief VF - stop the TX queue by sending a message to the PF
 *
 * @param p_hwfn
 * @param p_cid
 *
 * @return int
 */
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid);

/**
 * @brief VF - send a vport update command
 *
 * @param p_hwfn
 * @param p_params
 *
 * @return int
 */
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_update_params *p_params);

/**
 *
 * @brief VF - send a close message to PF
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);

/**
 * @brief VF - free the VF's memories
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_vf_pf_release(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given
 *        sb_id. For VFs, IGU SBs don't have to be contiguous.
 *
 * @param p_hwfn
 * @param sb_id
 *
 * @return u16
 */
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);

/**
 * @brief qed_vf_pf_vport_start - perform vport start for VF.
 *
 * @param p_hwfn
 * @param vport_id
 * @param mtu
 * @param inner_vlan_removal
 * @param tpa_mode
 * @param max_buffers_per_cqe
 * @param only_untagged - default behavior regarding vlan acceptance
 *
 * @return int
 */
int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
			  u8 vport_id,
			  u16 mtu,
			  u8 inner_vlan_removal,
			  enum qed_tpa_mode tpa_mode,
			  u8 max_buffers_per_cqe, u8 only_untagged);

/**
 * @brief qed_vf_pf_vport_stop - stop the VF's vport
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);

int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
			   struct qed_filter_ucast *p_param);

void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
			    struct qed_filter_mcast *p_filter_cmd);

/**
 * @brief qed_vf_pf_int_cleanup - clean the SB of the VF
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);

/**
 * @brief - return the link params in a given bulletin board
 *
 * @param p_hwfn
 * @param p_params - pointer to a struct to fill with link params
 * @param p_bulletin
 */
void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin);

/**
 * @brief - return the link state in a given bulletin board
 *
 * @param p_hwfn
 * @param p_link - pointer to a struct to fill with link state
 * @param p_bulletin
 */
void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin);

/**
 * @brief - return the link capabilities in a given bulletin board
 *
 * @param p_hwfn
 * @param p_link_caps - pointer to a struct to fill with link capabilities
 * @param p_bulletin
 */
void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin);

void qed_iov_vf_task(struct work_struct *work);
#else
static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
					  struct qed_mcp_link_params *params)
{
}

static inline void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
					 struct qed_mcp_link_state *link)
{
}

static inline void
qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
		     struct qed_mcp_link_capabilities *p_link_caps)
{
}

static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
}

static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
}

static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
					       u8 *num_vlan_filters)
{
}

static inline void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn,
					      u8 *num_mac_filters)
{
}

static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	return false;
}

static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
					 u16 *fw_major, u16 *fw_minor,
					 u16 *fw_rev, u16 *fw_eng)
{
}

static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
				      struct qed_queue_cid *p_cid,
				      u16 bd_max_bytes,
				      dma_addr_t bd_chain_phys_addr,
				      dma_addr_t cqe_pbl_addr,
				      u16 cqe_pbl_size, void __iomem **pp_prod)
{
	return -EINVAL;
}

static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
				      struct qed_queue_cid *p_cid,
				      dma_addr_t pbl_addr,
				      u16 pbl_size, void __iomem **pp_doorbell)
{
	return -EINVAL;
}

static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
				     struct qed_queue_cid *p_cid,
				     bool cqe_completion)
{
	return -EINVAL;
}

static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn,
				     struct qed_queue_cid *p_cid)
{
	return -EINVAL;
}

static inline int
qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
		       struct qed_sp_vport_update_params *p_params)
{
	return -EINVAL;
}

static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	return 0;
}

static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
					u8 vport_id,
					u16 mtu,
					u8 inner_vlan_removal,
					enum qed_tpa_mode tpa_mode,
					u8 max_buffers_per_cqe,
					u8 only_untagged)
{
	return -EINVAL;
}

static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
					 struct qed_filter_ucast *p_param)
{
	return -EINVAL;
}

static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
					  struct qed_filter_mcast *p_filter_cmd)
{
}

static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
					    struct qed_mcp_link_params
					    *p_params,
					    struct qed_bulletin_content
					    *p_bulletin)
{
}

static inline void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
					   struct qed_mcp_link_state *p_link,
					   struct qed_bulletin_content
					   *p_bulletin)
{
}

static inline void
__qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
		       struct qed_mcp_link_capabilities *p_link_caps,
		       struct qed_bulletin_content *p_bulletin)
{
}

static inline void qed_iov_vf_task(struct work_struct *work)
{
}
#endif

#endif