1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015 QLogic Corporation
3  *
4  * This software is available under the terms of the GNU General Public License
5  * (GPL) Version 2, available from the file COPYING in the main directory of
6  * this source tree.
7  */
8 
9 #ifndef _QED_VF_H
10 #define _QED_VF_H
11 
12 #include "qed_l2.h"
13 #include "qed_mcp.h"
14 
#define T_ETH_INDIRECTION_TABLE_SIZE 128
#define T_ETH_RSS_KEY_SIZE 10

/* Resource counts requested by the VF from the PF (part of the acquire
 * request). The PF answers with the amounts it actually granted in
 * struct pf_vf_resc.
 */
struct vf_pf_resc_request {
	u8 num_rxqs;
	u8 num_txqs;
	u8 num_sbs;		/* status blocks */
	u8 num_mac_filters;
	u8 num_vlan_filters;
	u8 num_mc_filters;	/* multicast filters */
	u16 padding;
};

/* Per-status-block information the PF hands to the VF in the acquire
 * response (see pf_vf_resc.hw_sbs[]).
 */
struct hw_sb_info {
	u16 hw_sb_id;
	u8 sb_qid;
	u8 padding[5];
};
33 
#define TLV_BUFFER_SIZE                 1024

/* Status of PF-side handling of a VF request; carried back to the VF in
 * pfvf_tlv.status.
 */
enum {
	PFVF_STATUS_WAITING,
	PFVF_STATUS_SUCCESS,
	PFVF_STATUS_FAILURE,
	PFVF_STATUS_NOT_SUPPORTED,
	PFVF_STATUS_NO_RESOURCE,
	PFVF_STATUS_FORCED,
};
44 
/* vf pf channel tlvs */
/* general tlv header (used for both vf->pf request and pf->vf response) */
struct channel_tlv {
	u16 type;		/* CHANNEL_TLV_* value */
	u16 length;
};

/* header of first vf->pf tlv carries the offset used to calculate response
 * buffer address
 */
struct vfpf_first_tlv {
	struct channel_tlv tl;
	u32 padding;
	u64 reply_address;	/* where the PF should place its reply */
};

/* header of pf->vf tlvs, carries the status of handling the request */
struct pfvf_tlv {
	struct channel_tlv tl;
	u8 status;		/* PFVF_STATUS_* value */
	u8 padding[3];
};

/* response tlv used for most tlvs */
struct pfvf_def_resp_tlv {
	struct pfvf_tlv hdr;
};

/* used to terminate and pad a tlv list */
struct channel_list_end_tlv {
	struct channel_tlv tl;
	u8 padding[4];
};
78 
#define VFPF_ACQUIRE_OS_LINUX (0)
#define VFPF_ACQUIRE_OS_WINDOWS (1)
#define VFPF_ACQUIRE_OS_ESX (2)
#define VFPF_ACQUIRE_OS_SOLARIS (3)
#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)

/* Acquire request: first message a VF sends to the PF; advertises the VF's
 * capabilities/versions and asks for resources (resc_request).
 */
struct vfpf_acquire_tlv {
	struct vfpf_first_tlv first_tlv;

	struct vf_pf_vfdev_info {
#define VFPF_ACQUIRE_CAP_PRE_FP_HSI     (1 << 0) /* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_100G		(1 << 1) /* VF can support 100g */
		u64 capabilities;	/* VFPF_ACQUIRE_CAP_* flags */
		u8 fw_major;
		u8 fw_minor;
		u8 fw_revision;
		u8 fw_engineering;
		u32 driver_version;
		u16 opaque_fid;	/* ME register value */
		u8 os_type;	/* VFPF_ACQUIRE_OS_* value */
		u8 eth_fp_hsi_major;	/* VF's fastpath HSI version */
		u8 eth_fp_hsi_minor;
		u8 padding[3];
	} vfdev_info;

	struct vf_pf_resc_request resc_request;

	u64 bulletin_addr;	/* address of the VF's bulletin board buffer */
	u32 bulletin_size;
	u32 padding;
};
110 
111 /* receive side scaling tlv */
struct vfpf_vport_update_rss_tlv {
	struct channel_tlv tl;

	/* Which of the fields below carry valid data */
	u8 update_rss_flags;
#define VFPF_UPDATE_RSS_CONFIG_FLAG       BIT(0)
#define VFPF_UPDATE_RSS_CAPS_FLAG         BIT(1)
#define VFPF_UPDATE_RSS_IND_TABLE_FLAG    BIT(2)
#define VFPF_UPDATE_RSS_KEY_FLAG          BIT(3)

	u8 rss_enable;
	u8 rss_caps;
	u8 rss_table_size_log;	/* The table size is 2 ^ rss_table_size_log */
	u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
	u32 rss_key[T_ETH_RSS_KEY_SIZE];
};
127 
/* Location (address + length) of one storm's statistics area */
struct pfvf_storm_stats {
	u32 address;
	u32 len;
};

/* Statistics areas for each of the four storms */
struct pfvf_stats_info {
	struct pfvf_storm_stats mstats;
	struct pfvf_storm_stats pstats;
	struct pfvf_storm_stats tstats;
	struct pfvf_storm_stats ustats;
};
139 
/* PF's response to the VF acquire request: PF/device information plus the
 * resources actually granted to the VF.
 */
struct pfvf_acquire_resp_tlv {
	struct pfvf_tlv hdr;

	struct pf_vf_pfdev_info {
		u32 chip_num;
		u32 mfw_ver;	/* management firmware version */

		/* Fastpath firmware version */
		u16 fw_major;
		u16 fw_minor;
		u16 fw_rev;
		u16 fw_eng;

		u64 capabilities;	/* PFVF_ACQUIRE_CAP_* flags */
#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED	BIT(0)
#define PFVF_ACQUIRE_CAP_100G			BIT(1)	/* If set, 100g PF */
/* There are old PF versions where the PF might mistakenly override the sanity
 * mechanism [version-based] and allow a VF that can't be supported to pass
 * the acquisition phase.
 * To overcome this, PFs now indicate that they're past that point and the new
 * VFs would fail probe on the older PFs that fail to do so.
 */
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE	BIT(2)

		u16 db_size;		/* doorbell size */
		u8 indices_per_sb;
		u8 os_type;

		/* These should match the PF's qed_dev values */
		u16 chip_rev;
		u8 dev_type;

		u8 padding;

		struct pfvf_stats_info stats_info;

		u8 port_mac[ETH_ALEN];

		/* It's possible PF had to configure an older fastpath HSI
		 * [in case VF is newer than PF]. This is communicated back
		 * to the VF. It can also be used in case of error due to
		 * non-matching versions to shed light in VF about failure.
		 */
		u8 major_fp_hsi;
		u8 minor_fp_hsi;
	} pfdev_info;

	struct pf_vf_resc {
#define PFVF_MAX_QUEUES_PER_VF		16
#define PFVF_MAX_SBS_PER_VF		16
		struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
		u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
		u8 cid[PFVF_MAX_QUEUES_PER_VF];

		u8 num_rxqs;
		u8 num_txqs;
		u8 num_sbs;
		u8 num_mac_filters;
		u8 num_vlan_filters;
		u8 num_mc_filters;
		u8 padding[2];
	} resc;

	u32 bulletin_size;
	u32 padding;
};
205 
/* PF response to a queue-start request */
struct pfvf_start_queue_resp_tlv {
	struct pfvf_tlv hdr;
	u32 offset;		/* offset to consumer/producer of queue */
	u8 padding[4];
};
211 
212 /* Setup Queue */
/* VF request to start an Rx queue */
struct vfpf_start_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 rxq_addr;
	u64 deprecated_sge_addr;
	u64 cqe_pbl_addr;

	u16 cqe_pbl_size;
	u16 hw_sb;		/* status block for this queue */
	u16 rx_qid;		/* zero based within the VF */
	u16 hc_rate;		/* desired interrupts per sec. */

	u16 bd_max_bytes;
	u16 stat_id;
	u8 sb_index;		/* index within the status block */
	u8 padding[3];
};

/* VF request to start a Tx queue */
struct vfpf_start_txq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 pbl_addr;
	u16 pbl_size;
	u16 stat_id;
	u16 tx_qid;		/* zero based within the VF */
	u16 hw_sb;		/* status block for this queue */

	u32 flags;		/* VFPF_QUEUE_FLG_X flags */
	u16 hc_rate;		/* desired interrupts per sec. */
	u8 sb_index;		/* index within the status block */
	u8 padding[3];
};
247 
248 /* Stop RX Queue */
/* Stop RX Queue */
struct vfpf_stop_rxqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 rx_qid;		/* first queue to stop */
	u8 num_rxqs;
	u8 cqe_completion;
	u8 padding[4];
};

/* Stop TX Queues */
struct vfpf_stop_txqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 tx_qid;		/* first queue to stop */
	u8 num_txqs;
	u8 padding[5];
};

/* Update one or more RX queues, starting at rx_qid */
struct vfpf_update_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];

	u16 rx_qid;
	u8 num_rxqs;
	u8 flags;
#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG    BIT(0)
#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG          BIT(1)
#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG        BIT(2)

	u8 padding[4];
};
281 
282 /* Set Queue Filters */
/* Set Queue Filters */
struct vfpf_q_mac_vlan_filter {
	u32 flags;
#define VFPF_Q_FILTER_DEST_MAC_VALID    0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID    0x02
#define VFPF_Q_FILTER_SET_MAC           0x100	/* set/clear */

	u8 mac[ETH_ALEN];
	u16 vlan_tag;

	u8 padding[4];
};

/* Start a vport */
struct vfpf_vport_start_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 sb_addr[PFVF_MAX_SBS_PER_VF];	/* status block addresses */

	u32 tpa_mode;
	u16 dep1;		/* deprecated */
	u16 mtu;

	u8 vport_id;
	u8 inner_vlan_removal;

	u8 only_untagged;	/* default behavior regarding vlan acceptance */
	u8 max_buffers_per_cqe;

	u8 padding[4];
};
313 
314 /* Extended tlvs - need to add rss, mcast, accept mode tlvs */
/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
/* Each of the following is appended to a vport-update request to configure
 * one aspect of the vport; presence of the tlv indicates the aspect should
 * be handled.
 */
struct vfpf_vport_update_activate_tlv {
	struct channel_tlv tl;
	u8 update_rx;		/* whether active_rx carries valid data */
	u8 update_tx;		/* whether active_tx carries valid data */
	u8 active_rx;
	u8 active_tx;
};

struct vfpf_vport_update_tx_switch_tlv {
	struct channel_tlv tl;
	u8 tx_switching;
	u8 padding[3];
};

struct vfpf_vport_update_vlan_strip_tlv {
	struct channel_tlv tl;
	u8 remove_vlan;
	u8 padding[3];
};

struct vfpf_vport_update_mcast_bin_tlv {
	struct channel_tlv tl;
	u8 padding[4];

	u64 bins[8];		/* multicast approximation bins */
};

struct vfpf_vport_update_accept_param_tlv {
	struct channel_tlv tl;
	u8 update_rx_mode;	/* whether rx_accept_filter is valid */
	u8 update_tx_mode;	/* whether tx_accept_filter is valid */
	u8 rx_accept_filter;
	u8 tx_accept_filter;
};

struct vfpf_vport_update_accept_any_vlan_tlv {
	struct channel_tlv tl;
	u8 update_accept_any_vlan_flg;
	u8 accept_any_vlan;

	u8 padding[2];
};

struct vfpf_vport_update_sge_tpa_tlv {
	struct channel_tlv tl;

	u16 sge_tpa_flags;
#define VFPF_TPA_IPV4_EN_FLAG		BIT(0)
#define VFPF_TPA_IPV6_EN_FLAG		BIT(1)
#define VFPF_TPA_PKT_SPLIT_FLAG		BIT(2)
#define VFPF_TPA_HDR_DATA_SPLIT_FLAG	BIT(3)
#define VFPF_TPA_GRO_CONSIST_FLAG	BIT(4)

	u8 update_sge_tpa_flags;
#define VFPF_UPDATE_SGE_DEPRECATED_FLAG	BIT(0)
#define VFPF_UPDATE_TPA_EN_FLAG		BIT(1)
#define VFPF_UPDATE_TPA_PARAM_FLAG	BIT(2)

	u8 max_buffers_per_cqe;

	u16 deprecated_sge_buff_size;
	u16 tpa_max_size;
	u16 tpa_min_size_to_start;
	u16 tpa_min_size_to_cont;

	u8 tpa_max_aggs_num;
	u8 padding[7];
};
383 
384 /* Primary tlv as a header for various extended tlvs for
385  * various functionalities in vport update ramrod.
386  */
struct vfpf_vport_update_tlv {
	struct vfpf_first_tlv first_tlv;
};

/* Unicast filter configuration request */
struct vfpf_ucast_filter_tlv {
	struct vfpf_first_tlv first_tlv;

	u8 opcode;
	u8 type;

	u8 mac[ETH_ALEN];

	u16 vlan;
	u16 padding[3];
};

/* Pads the request/response unions below to TLV_BUFFER_SIZE bytes */
struct tlv_buffer_size {
	u8 tlv_buffer[TLV_BUFFER_SIZE];
};
406 
/* Union of all VF->PF request tlvs; tlv_buf_size guarantees the size */
union vfpf_tlvs {
	struct vfpf_first_tlv first_tlv;
	struct vfpf_acquire_tlv acquire;
	struct vfpf_start_rxq_tlv start_rxq;
	struct vfpf_start_txq_tlv start_txq;
	struct vfpf_stop_rxqs_tlv stop_rxqs;
	struct vfpf_stop_txqs_tlv stop_txqs;
	struct vfpf_update_rxq_tlv update_rxq;
	struct vfpf_vport_start_tlv start_vport;
	struct vfpf_vport_update_tlv vport_update;
	struct vfpf_ucast_filter_tlv ucast_filter;
	struct channel_list_end_tlv list_end;
	struct tlv_buffer_size tlv_buf_size;
};

/* Union of all PF->VF response tlvs; tlv_buf_size guarantees the size */
union pfvf_tlvs {
	struct pfvf_def_resp_tlv default_resp;
	struct pfvf_acquire_resp_tlv acquire_resp;
	struct tlv_buffer_size tlv_buf_size;
	struct pfvf_start_queue_resp_tlv queue_start;
};
428 
/* Bit positions for qed_bulletin_content.valid_bitmap */
enum qed_bulletin_bit {
	/* Alert the VF that a forced MAC was set by the PF */
	MAC_ADDR_FORCED = 0,
	/* Alert the VF that a forced VLAN was set by the PF */
	VLAN_ADDR_FORCED = 2,

	/* Indicate that `default_only_untagged' contains actual data */
	VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
	VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,

	/* Alert the VF that suggested mac was sent by the PF.
	 * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set.
	 */
	VFPF_BULLETIN_MAC_ADDR = 5
};
444 
/* Bulletin board contents, written by the PF and polled by the VF */
struct qed_bulletin_content {
	/* crc of structure to ensure is not in mid-update */
	u32 crc;

	u32 version;

	/* bitmap indicating which fields hold valid values;
	 * bit positions are enum qed_bulletin_bit
	 */
	u64 valid_bitmap;

	/* used for MAC_ADDR or MAC_ADDR_FORCED */
	u8 mac[ETH_ALEN];

	/* If valid, 1 => only untagged Rx if no vlan is configured */
	u8 default_only_untagged;
	u8 padding;

	/* The following is a 'copy' of qed_mcp_link_state,
	 * qed_mcp_link_params and qed_mcp_link_capabilities. Since it's
	 * possible the structs will increase further along the road we cannot
	 * have it here; Instead we need to have all of its fields.
	 */
	u8 req_autoneg;
	u8 req_autoneg_pause;
	u8 req_forced_rx;
	u8 req_forced_tx;
	u8 padding2[4];

	u32 req_adv_speed;
	u32 req_forced_speed;
	u32 req_loopback;
	u32 padding3;

	u8 link_up;
	u8 full_duplex;
	u8 autoneg;
	u8 autoneg_complete;
	u8 parallel_detection;
	u8 pfc_enabled;
	u8 partner_tx_flow_ctrl_en;
	u8 partner_rx_flow_ctrl_en;
	u8 partner_adv_pause;
	u8 sfp_tx_fault;
	u8 padding4[6];

	u32 speed;
	u32 partner_adv_speed;

	u32 capability_speed;

	/* Forced vlan */
	u16 pvid;
	u16 padding5;
};

/* Bulletin board buffer descriptor */
struct qed_bulletin {
	dma_addr_t phys;			/* DMA address of the board */
	struct qed_bulletin_content *p_virt;	/* virtual address of the board */
	u32 size;
};
504 
/* Values used in channel_tlv.type to identify each vf<->pf channel tlv */
enum {
	CHANNEL_TLV_NONE,	/* ends tlv sequence */
	CHANNEL_TLV_ACQUIRE,
	CHANNEL_TLV_VPORT_START,
	CHANNEL_TLV_VPORT_UPDATE,
	CHANNEL_TLV_VPORT_TEARDOWN,
	CHANNEL_TLV_START_RXQ,
	CHANNEL_TLV_START_TXQ,
	CHANNEL_TLV_STOP_RXQS,
	CHANNEL_TLV_STOP_TXQS,
	CHANNEL_TLV_UPDATE_RXQ,
	CHANNEL_TLV_INT_CLEANUP,
	CHANNEL_TLV_CLOSE,
	CHANNEL_TLV_RELEASE,
	CHANNEL_TLV_LIST_END,
	CHANNEL_TLV_UCAST_FILTER,
	CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
	CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
	CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
	CHANNEL_TLV_VPORT_UPDATE_MCAST,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
	CHANNEL_TLV_VPORT_UPDATE_RSS,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
	CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
	CHANNEL_TLV_MAX,

	/* Required for iterating over vport-update tlvs.
	 * Will break in case non-sequential vport-update tlvs.
	 */
	CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
};
536 
/* This data is held in the qed_hwfn structure for VFs only. */
struct qed_vf_iov {
	/* Mailbox buffers used for the vf<->pf channel */
	union vfpf_tlvs *vf2pf_request;
	dma_addr_t vf2pf_request_phys;
	union pfvf_tlvs *pf2vf_reply;
	dma_addr_t pf2vf_reply_phys;

	/* Should be taken whenever the mailbox buffers are accessed */
	struct mutex mutex;
	u8 *offset;		/* current write position when composing tlvs */

	/* Bulletin Board */
	struct qed_bulletin bulletin;
	struct qed_bulletin_content bulletin_shadow;	/* last-read copy */

	/* we set aside a copy of the acquire response */
	struct pfvf_acquire_resp_tlv acquire_resp;
};
555 
556 #ifdef CONFIG_QED_SRIOV
557 /**
558  * @brief Read the VF bulletin and act on it if needed
559  *
560  * @param p_hwfn
561  * @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise.
562  *
563  * @return enum _qed_status
564  */
565 int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);
566 
567 /**
 * @brief Get link parameters for VF from qed
569  *
570  * @param p_hwfn
571  * @param params - the link params structure to be filled for the VF
572  */
573 void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
574 			    struct qed_mcp_link_params *params);
575 
576 /**
577  * @brief Get link state for VF from qed
578  *
579  * @param p_hwfn
580  * @param link - the link state structure to be filled for the VF
581  */
582 void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
583 			   struct qed_mcp_link_state *link);
584 
585 /**
586  * @brief Get link capabilities for VF from qed
587  *
588  * @param p_hwfn
589  * @param p_link_caps - the link capabilities structure to be filled for the VF
590  */
591 void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
592 			  struct qed_mcp_link_capabilities *p_link_caps);
593 
594 /**
595  * @brief Get number of Rx queues allocated for VF by qed
596  *
597  *  @param p_hwfn
598  *  @param num_rxqs - allocated RX queues
599  */
600 void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);
601 
602 /**
603  * @brief Get port mac address for VF
604  *
605  * @param p_hwfn
606  * @param port_mac - destination location for port mac
607  */
608 void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);
609 
610 /**
611  * @brief Get number of VLAN filters allocated for VF by qed
612  *
613  *  @param p_hwfn
 *  @param num_vlan_filters - allocated VLAN filters
615  */
616 void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
617 				 u8 *num_vlan_filters);
618 
619 /**
620  * @brief Check if VF can set a MAC address
621  *
622  * @param p_hwfn
623  * @param mac
624  *
625  * @return bool
626  */
627 bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);
628 
629 /**
630  * @brief Set firmware version information in dev_info from VFs acquire response tlv
631  *
632  * @param p_hwfn
633  * @param fw_major
634  * @param fw_minor
635  * @param fw_rev
636  * @param fw_eng
637  */
638 void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
639 			   u16 *fw_major, u16 *fw_minor,
640 			   u16 *fw_rev, u16 *fw_eng);
641 
642 /**
643  * @brief hw preparation for VF
644  *      sends ACQUIRE message
645  *
646  * @param p_hwfn
647  *
648  * @return int
649  */
650 int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
651 
652 /**
653  * @brief VF - start the RX Queue by sending a message to the PF
654  * @param p_hwfn
655  * @param cid                   - zero based within the VF
656  * @param rx_queue_id           - zero based within the VF
657  * @param sb                    - VF status block for this queue
658  * @param sb_index              - Index within the status block
659  * @param bd_max_bytes          - maximum number of bytes per bd
660  * @param bd_chain_phys_addr    - physical address of bd chain
661  * @param cqe_pbl_addr          - physical address of pbl
662  * @param cqe_pbl_size          - pbl size
663  * @param pp_prod               - pointer to the producer to be
664  *				  used in fastpath
665  *
666  * @return int
667  */
668 int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
669 			u8 rx_queue_id,
670 			u16 sb,
671 			u8 sb_index,
672 			u16 bd_max_bytes,
673 			dma_addr_t bd_chain_phys_addr,
674 			dma_addr_t cqe_pbl_addr,
675 			u16 cqe_pbl_size, void __iomem **pp_prod);
676 
677 /**
678  * @brief VF - start the TX queue by sending a message to the
679  *        PF.
680  *
681  * @param p_hwfn
682  * @param tx_queue_id           - zero based within the VF
683  * @param sb                    - status block for this queue
684  * @param sb_index              - index within the status block
685  * @param bd_chain_phys_addr    - physical address of tx chain
 * @param pp_doorbell           - pointer to address to which to
 *                      write the doorbell.
688  *
689  * @return int
690  */
691 int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
692 			u16 tx_queue_id,
693 			u16 sb,
694 			u8 sb_index,
695 			dma_addr_t pbl_addr,
696 			u16 pbl_size, void __iomem **pp_doorbell);
697 
698 /**
699  * @brief VF - stop the RX queue by sending a message to the PF
700  *
701  * @param p_hwfn
702  * @param rx_qid
703  * @param cqe_completion
704  *
705  * @return int
706  */
707 int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
708 		       u16 rx_qid, bool cqe_completion);
709 
710 /**
711  * @brief VF - stop the TX queue by sending a message to the PF
712  *
713  * @param p_hwfn
714  * @param tx_qid
715  *
716  * @return int
717  */
718 int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid);
719 
720 /**
721  * @brief VF - send a vport update command
722  *
723  * @param p_hwfn
724  * @param params
725  *
726  * @return int
727  */
728 int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
729 			   struct qed_sp_vport_update_params *p_params);
730 
731 /**
732  *
733  * @brief VF - send a close message to PF
734  *
735  * @param p_hwfn
736  *
737  * @return enum _qed_status
738  */
739 int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);
740 
741 /**
742  * @brief VF - free vf`s memories
743  *
744  * @param p_hwfn
745  *
746  * @return enum _qed_status
747  */
748 int qed_vf_pf_release(struct qed_hwfn *p_hwfn);
749 
750 /**
751  * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given
752  *        sb_id. For VFs igu sbs don't have to be contiguous
753  *
754  * @param p_hwfn
755  * @param sb_id
756  *
757  * @return INLINE u16
758  */
759 u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
760 
761 /**
762  * @brief qed_vf_pf_vport_start - perform vport start for VF.
763  *
764  * @param p_hwfn
765  * @param vport_id
766  * @param mtu
767  * @param inner_vlan_removal
768  * @param tpa_mode
 * @param max_buffers_per_cqe
770  * @param only_untagged - default behavior regarding vlan acceptance
771  *
772  * @return enum _qed_status
773  */
774 int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
775 			  u8 vport_id,
776 			  u16 mtu,
777 			  u8 inner_vlan_removal,
778 			  enum qed_tpa_mode tpa_mode,
779 			  u8 max_buffers_per_cqe, u8 only_untagged);
780 
781 /**
782  * @brief qed_vf_pf_vport_stop - stop the VF's vport
783  *
784  * @param p_hwfn
785  *
786  * @return enum _qed_status
787  */
788 int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);
789 
790 int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
791 			   struct qed_filter_ucast *p_param);
792 
793 void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
794 			    struct qed_filter_mcast *p_filter_cmd);
795 
796 /**
797  * @brief qed_vf_pf_int_cleanup - clean the SB of the VF
798  *
799  * @param p_hwfn
800  *
801  * @return enum _qed_status
802  */
803 int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);
804 
805 /**
806  * @brief - return the link params in a given bulletin board
807  *
808  * @param p_hwfn
809  * @param p_params - pointer to a struct to fill with link params
810  * @param p_bulletin
811  */
812 void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
813 			      struct qed_mcp_link_params *p_params,
814 			      struct qed_bulletin_content *p_bulletin);
815 
816 /**
817  * @brief - return the link state in a given bulletin board
818  *
819  * @param p_hwfn
820  * @param p_link - pointer to a struct to fill with link state
821  * @param p_bulletin
822  */
823 void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
824 			     struct qed_mcp_link_state *p_link,
825 			     struct qed_bulletin_content *p_bulletin);
826 
827 /**
828  * @brief - return the link capabilities in a given bulletin board
829  *
830  * @param p_hwfn
831  * @param p_link - pointer to a struct to fill with link capabilities
832  * @param p_bulletin
833  */
834 void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
835 			    struct qed_mcp_link_capabilities *p_link_caps,
836 			    struct qed_bulletin_content *p_bulletin);
837 
838 void qed_iov_vf_task(struct work_struct *work);
839 #else
/* !CONFIG_QED_SRIOV stubs: no-op implementations so callers build
 * unchanged when SR-IOV support is compiled out.
 */
static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
					  struct qed_mcp_link_params *params)
{
}

static inline void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
					 struct qed_mcp_link_state *link)
{
}

static inline void
qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
		     struct qed_mcp_link_capabilities *p_link_caps)
{
}

static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
}

static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
}

static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
					       u8 *num_vlan_filters)
{
}

static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
	return false;
}

static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
					 u16 *fw_major, u16 *fw_minor,
					 u16 *fw_rev, u16 *fw_eng)
{
}
879 
/* Stubs returning -EINVAL: VF operations are invalid without SR-IOV support */
static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
				      u8 rx_queue_id,
				      u16 sb,
				      u8 sb_index,
				      u16 bd_max_bytes,
				      dma_addr_t bd_chain_phys_adr,
				      dma_addr_t cqe_pbl_addr,
				      u16 cqe_pbl_size, void __iomem **pp_prod)
{
	return -EINVAL;
}

static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
				      u16 tx_queue_id,
				      u16 sb,
				      u8 sb_index,
				      dma_addr_t pbl_addr,
				      u16 pbl_size, void __iomem **pp_doorbell)
{
	return -EINVAL;
}

static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
				     u16 rx_qid, bool cqe_completion)
{
	return -EINVAL;
}

static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
{
	return -EINVAL;
}

static inline int
qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
		       struct qed_sp_vport_update_params *p_params)
{
	return -EINVAL;
}

static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	return 0;
}
939 
/* Remaining !CONFIG_QED_SRIOV stubs; see declarations above for semantics */
static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
					u8 vport_id,
					u16 mtu,
					u8 inner_vlan_removal,
					enum qed_tpa_mode tpa_mode,
					u8 max_buffers_per_cqe,
					u8 only_untagged)
{
	return -EINVAL;
}

static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
					 struct qed_filter_ucast *p_param)
{
	return -EINVAL;
}

static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
					  struct qed_filter_mcast *p_filter_cmd)
{
}

static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}

static inline void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
					    struct qed_mcp_link_params
					    *p_params,
					    struct qed_bulletin_content
					    *p_bulletin)
{
}

static inline void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
					   struct qed_mcp_link_state *p_link,
					   struct qed_bulletin_content
					   *p_bulletin)
{
}

static inline void
__qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
		       struct qed_mcp_link_capabilities *p_link_caps,
		       struct qed_bulletin_content *p_bulletin)
{
}

static inline void qed_iov_vf_task(struct work_struct *work)
{
}
997 #endif
998 
999 #endif
1000