1 /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
2 /* QLogic qed NIC Driver
3  * Copyright (c) 2015-2017  QLogic Corporation
4  */
5 
6 #ifndef _QED_VF_H
7 #define _QED_VF_H
8 
9 #include "qed_l2.h"
10 #include "qed_mcp.h"
11 
/* Fixed sizes of the RSS configuration exchanged over the VF-PF channel */
#define T_ETH_INDIRECTION_TABLE_SIZE 128
#define T_ETH_RSS_KEY_SIZE 10

/* Resources a VF asks for in its ACQUIRE request; the PF answers with the
 * actually-granted amounts in struct pf_vf_resc (see pfvf_acquire_resp_tlv).
 */
struct vf_pf_resc_request {
	u8 num_rxqs;
	u8 num_txqs;
	u8 num_sbs;
	u8 num_mac_filters;
	u8 num_vlan_filters;
	u8 num_mc_filters;
	u8 num_cids;
	u8 padding;
};

/* Per-status-block info the PF hands to the VF in pf_vf_resc.hw_sbs[] */
struct hw_sb_info {
	u16 hw_sb_id;
	u8 sb_qid;
	u8 padding[5];
};
31 
/* Size of each per-direction TLV mailbox buffer (see tlv_buffer_size and
 * unions vfpf_tlvs/pfvf_tlvs below).
 */
#define TLV_BUFFER_SIZE                 1024

/* Status codes the PF places in pfvf_tlv.status when answering a request */
enum {
	PFVF_STATUS_WAITING,
	PFVF_STATUS_SUCCESS,
	PFVF_STATUS_FAILURE,
	PFVF_STATUS_NOT_SUPPORTED,
	PFVF_STATUS_NO_RESOURCE,
	PFVF_STATUS_FORCED,
	PFVF_STATUS_MALICIOUS,
};
43 
/* vf pf channel tlvs */
/* general tlv header (used for both vf->pf request and pf->vf response) */
struct channel_tlv {
	u16 type;		/* CHANNEL_TLV_* value */
	u16 length;		/* size of this tlv */
};

/* header of first vf->pf tlv carries the offset used to calculate response
 * buffer address
 */
struct vfpf_first_tlv {
	struct channel_tlv tl;
	u32 padding;
	u64 reply_address;	/* where the PF should place its reply */
};

/* header of pf->vf tlvs, carries the status of handling the request */
struct pfvf_tlv {
	struct channel_tlv tl;
	u8 status;		/* PFVF_STATUS_* value */
	u8 padding[3];
};

/* response tlv used for most tlvs */
struct pfvf_def_resp_tlv {
	struct pfvf_tlv hdr;
};

/* used to terminate and pad a tlv list */
struct channel_list_end_tlv {
	struct channel_tlv tl;
	u8 padding[4];
};
77 
/* Values for vf_pf_vfdev_info.os_type */
#define VFPF_ACQUIRE_OS_LINUX (0)
#define VFPF_ACQUIRE_OS_WINDOWS (1)
#define VFPF_ACQUIRE_OS_ESX (2)
#define VFPF_ACQUIRE_OS_SOLARIS (3)
#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)

/* First message a VF sends over the channel: identifies itself [driver/FW
 * versions, capabilities] and requests resources. Answered with
 * struct pfvf_acquire_resp_tlv.
 */
struct vfpf_acquire_tlv {
	struct vfpf_first_tlv first_tlv;

	struct vf_pf_vfdev_info {
#define VFPF_ACQUIRE_CAP_PRE_FP_HSI     BIT(0) /* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_100G		BIT(1) /* VF can support 100g */
	/* A requirement for supporting multi-Tx queues on a single queue-zone,
	 * VF would pass qids as additional information whenever passing queue
	 * references.
	 */
#define VFPF_ACQUIRE_CAP_QUEUE_QIDS     BIT(2)

	/* The VF is using the physical bar. While this is mostly internal
	 * to the VF, might affect the number of CIDs supported assuming
	 * QUEUE_QIDS is set.
	 */
#define VFPF_ACQUIRE_CAP_PHYSICAL_BAR   BIT(3)
		u64 capabilities;	/* VFPF_ACQUIRE_CAP_* flags */
		u8 fw_major;
		u8 fw_minor;
		u8 fw_revision;
		u8 fw_engineering;
		u32 driver_version;
		u16 opaque_fid;	/* ME register value */
		u8 os_type;	/* VFPF_ACQUIRE_OS_* value */
		u8 eth_fp_hsi_major;
		u8 eth_fp_hsi_minor;
		u8 padding[3];
	} vfdev_info;

	struct vf_pf_resc_request resc_request;

	/* Location/size of the VF's bulletin board (see struct qed_bulletin) */
	u64 bulletin_addr;
	u32 bulletin_size;
	u32 padding;
};
120 
/* receive side scaling tlv */
struct vfpf_vport_update_rss_tlv {
	struct channel_tlv tl;

	/* Which of the fields below carry valid data */
	u8 update_rss_flags;
#define VFPF_UPDATE_RSS_CONFIG_FLAG       BIT(0)
#define VFPF_UPDATE_RSS_CAPS_FLAG         BIT(1)
#define VFPF_UPDATE_RSS_IND_TABLE_FLAG    BIT(2)
#define VFPF_UPDATE_RSS_KEY_FLAG          BIT(3)

	u8 rss_enable;
	u8 rss_caps;
	u8 rss_table_size_log;	/* The table size is 2 ^ rss_table_size_log */
	u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
	u32 rss_key[T_ETH_RSS_KEY_SIZE];
};
137 
/* Address/length of a single storm's statistics area, shared PF->VF */
struct pfvf_storm_stats {
	u32 address;
	u32 len;
};

/* Locations of the per-storm (m/p/t/u) statistics for the VF */
struct pfvf_stats_info {
	struct pfvf_storm_stats mstats;
	struct pfvf_storm_stats pstats;
	struct pfvf_storm_stats tstats;
	struct pfvf_storm_stats ustats;
};
149 
/* PF's reply to the VF's ACQUIRE request: device information plus the
 * resources actually granted to the VF.
 */
struct pfvf_acquire_resp_tlv {
	struct pfvf_tlv hdr;

	struct pf_vf_pfdev_info {
		u32 chip_num;
		u32 mfw_ver;

		u16 fw_major;
		u16 fw_minor;
		u16 fw_rev;
		u16 fw_eng;

		u64 capabilities;	/* PFVF_ACQUIRE_CAP_* flags */
#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED	BIT(0)
#define PFVF_ACQUIRE_CAP_100G			BIT(1)	/* If set, 100g PF */
/* There are old PF versions where the PF might mistakenly override the sanity
 * mechanism [version-based] and allow a VF that can't be supported to pass
 * the acquisition phase.
 * To overcome this, PFs now indicate that they're past that point and the new
 * VFs would fail probe on the older PFs that fail to do so.
 */
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE	BIT(2)

	/* PF expects queues to be received with additional qids */
#define PFVF_ACQUIRE_CAP_QUEUE_QIDS             BIT(3)

		u16 db_size;
		u8 indices_per_sb;
		u8 os_type;

		/* These should match the PF's qed_dev values */
		u16 chip_rev;
		u8 dev_type;

		/* Doorbell bar size configured in HW: log(size) or 0 */
		u8 bar_size;

		struct pfvf_stats_info stats_info;

		u8 port_mac[ETH_ALEN];

		/* It's possible PF had to configure an older fastpath HSI
		 * [in case VF is newer than PF]. This is communicated back
		 * to the VF. It can also be used in case of error due to
		 * non-matching versions to shed light in VF about failure.
		 */
		u8 major_fp_hsi;
		u8 minor_fp_hsi;
	} pfdev_info;

	/* Granted resources; counterpart of struct vf_pf_resc_request */
	struct pf_vf_resc {
#define PFVF_MAX_QUEUES_PER_VF		16
#define PFVF_MAX_SBS_PER_VF		16
		struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
		u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
		u8 cid[PFVF_MAX_QUEUES_PER_VF];

		u8 num_rxqs;
		u8 num_txqs;
		u8 num_sbs;
		u8 num_mac_filters;
		u8 num_vlan_filters;
		u8 num_mc_filters;
		u8 num_cids;
		u8 padding;
	} resc;

	u32 bulletin_size;
	u32 padding;
};
220 
/* PF's reply to a start-rxq/start-txq request */
struct pfvf_start_queue_resp_tlv {
	struct pfvf_tlv hdr;
	u32 offset;		/* offset to consumer/producer of queue */
	u8 padding[4];
};

/* Extended queue information - additional index for reference inside qzone.
 * If communicated between VF/PF, each TLV relating to queues should be
 * extended by one such [or have a future base TLV that already contains info].
 */
struct vfpf_qid_tlv {
	struct channel_tlv tl;
	u8 qid;
	u8 padding[3];
};
236 
/* Setup Queue */
struct vfpf_start_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 rxq_addr;
	u64 deprecated_sge_addr;
	u64 cqe_pbl_addr;

	u16 cqe_pbl_size;
	u16 hw_sb;
	u16 rx_qid;
	u16 hc_rate;		/* desired interrupts per sec. */

	u16 bd_max_bytes;
	u16 stat_id;
	u8 sb_index;
	u8 padding[3];
};

/* VF request to start a Tx queue; answered with pfvf_start_queue_resp_tlv */
struct vfpf_start_txq_tlv {
	struct vfpf_first_tlv first_tlv;

	/* physical addresses */
	u64 pbl_addr;
	u16 pbl_size;
	u16 stat_id;
	u16 tx_qid;
	u16 hw_sb;

	u32 flags;		/* VFPF_QUEUE_FLG_X flags */
	u16 hc_rate;		/* desired interrupts per sec. */
	u8 sb_index;
	u8 padding[3];
};
272 
/* Stop RX Queue */
struct vfpf_stop_rxqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 rx_qid;

	/* this field is deprecated and should *always* be set to '1' */
	u8 num_rxqs;
	u8 cqe_completion;
	u8 padding[4];
};

/* Stop TX Queues */
struct vfpf_stop_txqs_tlv {
	struct vfpf_first_tlv first_tlv;

	u16 tx_qid;

	/* this field is deprecated and should *always* be set to '1' */
	u8 num_txqs;
	u8 padding[5];
};

/* Update an already-started Rx queue */
struct vfpf_update_rxq_tlv {
	struct vfpf_first_tlv first_tlv;

	u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];

	u16 rx_qid;
	u8 num_rxqs;
	u8 flags;
#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG    BIT(0)
#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG          BIT(1)
#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG        BIT(2)

	u8 padding[4];
};
310 
/* Set Queue Filters */
struct vfpf_q_mac_vlan_filter {
	u32 flags;
#define VFPF_Q_FILTER_DEST_MAC_VALID    0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID    0x02
#define VFPF_Q_FILTER_SET_MAC           0x100	/* set/clear */

	u8 mac[ETH_ALEN];
	u16 vlan_tag;

	u8 padding[4];
};

/* Start a vport */
struct vfpf_vport_start_tlv {
	struct vfpf_first_tlv first_tlv;

	/* Physical addresses of the VF's status blocks */
	u64 sb_addr[PFVF_MAX_SBS_PER_VF];

	u32 tpa_mode;
	u16 dep1;
	u16 mtu;

	u8 vport_id;
	u8 inner_vlan_removal;

	u8 only_untagged;
	u8 max_buffers_per_cqe;

	u8 padding[4];
};
342 
/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
struct vfpf_vport_update_activate_tlv {
	struct channel_tlv tl;
	u8 update_rx;		/* whether active_rx/active_tx carry data */
	u8 update_tx;
	u8 active_rx;
	u8 active_tx;
};

struct vfpf_vport_update_tx_switch_tlv {
	struct channel_tlv tl;
	u8 tx_switching;
	u8 padding[3];
};

struct vfpf_vport_update_vlan_strip_tlv {
	struct channel_tlv tl;
	u8 remove_vlan;
	u8 padding[3];
};

struct vfpf_vport_update_mcast_bin_tlv {
	struct channel_tlv tl;
	u8 padding[4];

	/* There are only 256 approx bins, and in HSI they're divided into
	 * 32-bit values. As old VFs used to set-bit to the values on its side,
	 * the upper half of the array is never expected to contain any data.
	 */
	u64 bins[4];
	u64 obsolete_bins[4];
};

struct vfpf_vport_update_accept_param_tlv {
	struct channel_tlv tl;
	u8 update_rx_mode;	/* whether rx/tx_accept_filter carry data */
	u8 update_tx_mode;
	u8 rx_accept_filter;
	u8 tx_accept_filter;
};

struct vfpf_vport_update_accept_any_vlan_tlv {
	struct channel_tlv tl;
	u8 update_accept_any_vlan_flg;
	u8 accept_any_vlan;

	u8 padding[2];
};
391 
/* TPA/aggregation configuration for the vport */
struct vfpf_vport_update_sge_tpa_tlv {
	struct channel_tlv tl;

	u16 sge_tpa_flags;
#define VFPF_TPA_IPV4_EN_FLAG		BIT(0)
#define VFPF_TPA_IPV6_EN_FLAG		BIT(1)
#define VFPF_TPA_PKT_SPLIT_FLAG		BIT(2)
#define VFPF_TPA_HDR_DATA_SPLIT_FLAG	BIT(3)
#define VFPF_TPA_GRO_CONSIST_FLAG	BIT(4)

	u8 update_sge_tpa_flags;
#define VFPF_UPDATE_SGE_DEPRECATED_FLAG	BIT(0)
#define VFPF_UPDATE_TPA_EN_FLAG		BIT(1)
#define VFPF_UPDATE_TPA_PARAM_FLAG	BIT(2)

	u8 max_buffers_per_cqe;

	u16 deprecated_sge_buff_size;
	u16 tpa_max_size;
	u16 tpa_min_size_to_start;
	u16 tpa_min_size_to_cont;

	u8 tpa_max_aggs_num;
	u8 padding[7];
};

/* Primary tlv as a header for various extended tlvs for
 * various functionalities in vport update ramrod.
 */
struct vfpf_vport_update_tlv {
	struct vfpf_first_tlv first_tlv;
};
424 
/* Unicast MAC/VLAN filter configuration request */
struct vfpf_ucast_filter_tlv {
	struct vfpf_first_tlv first_tlv;

	u8 opcode;
	u8 type;

	u8 mac[ETH_ALEN];

	u16 vlan;
	u16 padding[3];
};

/* tunnel update param tlv */
struct vfpf_update_tunn_param_tlv {
	struct vfpf_first_tlv first_tlv;

	u8 tun_mode_update_mask;
	u8 tunn_mode;
	u8 update_tun_cls;	/* whether the *_clss fields carry data */
	u8 vxlan_clss;
	u8 l2gre_clss;
	u8 ipgre_clss;
	u8 l2geneve_clss;
	u8 ipgeneve_clss;
	u8 update_geneve_port;
	u8 update_vxlan_port;
	u16 geneve_port;
	u16 vxlan_port;
	u8 padding[2];
};
455 
/* PF's reply to a tunnel-parameter update request */
struct pfvf_update_tunn_param_tlv {
	struct pfvf_tlv hdr;

	u16 tunn_feature_mask;
	u8 vxlan_mode;
	u8 l2geneve_mode;
	u8 ipgeneve_mode;
	u8 l2gre_mode;
	u8 ipgre_mode;
	u8 vxlan_clss;
	u8 l2gre_clss;
	u8 ipgre_clss;
	u8 l2geneve_clss;
	u8 ipgeneve_clss;
	u16 vxlan_udp_port;
	u16 geneve_udp_port;
};

/* Pads the request/reply unions below to the full mailbox size */
struct tlv_buffer_size {
	u8 tlv_buffer[TLV_BUFFER_SIZE];
};
477 
/* VF request to set Rx/Tx interrupt coalescing for one of its queues */
struct vfpf_update_coalesce {
	struct vfpf_first_tlv first_tlv;
	u16 rx_coal;
	u16 tx_coal;
	u16 qid;
	u8 padding[2];
};

/* VF request to read the coalescing value of one of its queues */
struct vfpf_read_coal_req_tlv {
	struct vfpf_first_tlv first_tlv;
	u16 qid;
	u8 is_rx;		/* 1 - read Rx coalescing, else Tx */
	u8 padding[5];
};

/* PF's reply carrying the requested coalescing value */
struct pfvf_read_coal_resp_tlv {
	struct pfvf_tlv hdr;
	u16 coal;
	u8 padding[6];
};

/* VF request asking the PF to publish a MAC on its bulletin board */
struct vfpf_bulletin_update_mac_tlv {
	struct vfpf_first_tlv first_tlv;
	u8 mac[ETH_ALEN];
	u8 padding[2];
};
504 
/* All possible vf->pf request messages; sized to TLV_BUFFER_SIZE via
 * tlv_buf_size (see qed_vf_iov::vf2pf_request).
 */
union vfpf_tlvs {
	struct vfpf_first_tlv first_tlv;
	struct vfpf_acquire_tlv acquire;
	struct vfpf_start_rxq_tlv start_rxq;
	struct vfpf_start_txq_tlv start_txq;
	struct vfpf_stop_rxqs_tlv stop_rxqs;
	struct vfpf_stop_txqs_tlv stop_txqs;
	struct vfpf_update_rxq_tlv update_rxq;
	struct vfpf_vport_start_tlv start_vport;
	struct vfpf_vport_update_tlv vport_update;
	struct vfpf_ucast_filter_tlv ucast_filter;
	struct vfpf_update_tunn_param_tlv tunn_param_update;
	struct vfpf_update_coalesce update_coalesce;
	struct vfpf_read_coal_req_tlv read_coal_req;
	struct vfpf_bulletin_update_mac_tlv bulletin_update_mac;
	struct tlv_buffer_size tlv_buf_size;
};

/* All possible pf->vf reply messages; sized to TLV_BUFFER_SIZE via
 * tlv_buf_size (see qed_vf_iov::pf2vf_reply).
 */
union pfvf_tlvs {
	struct pfvf_def_resp_tlv default_resp;
	struct pfvf_acquire_resp_tlv acquire_resp;
	struct tlv_buffer_size tlv_buf_size;
	struct pfvf_start_queue_resp_tlv queue_start;
	struct pfvf_update_tunn_param_tlv tunn_param_resp;
	struct pfvf_read_coal_resp_tlv read_coal_resp;
};
531 
/* Bit positions inside qed_bulletin_content.valid_bitmap */
enum qed_bulletin_bit {
	/* Alert the VF that a forced MAC was set by the PF */
	MAC_ADDR_FORCED = 0,
	/* Alert the VF that a forced VLAN was set by the PF */
	VLAN_ADDR_FORCED = 2,

	/* Indicate that `default_only_untagged' contains actual data */
	VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
	VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,

	/* Alert the VF that suggested mac was sent by the PF.
	 * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set.
	 */
	VFPF_BULLETIN_MAC_ADDR = 5
};
547 
/* Bulletin board: data the PF publishes to the VF asynchronously (link
 * state, forced MAC/VLAN, ...). The VF polls and validates it via the CRC.
 */
struct qed_bulletin_content {
	/* crc of structure to ensure is not in mid-update */
	u32 crc;

	u32 version;

	/* bitmap indicating which fields hold valid values */
	u64 valid_bitmap;

	/* used for MAC_ADDR or MAC_ADDR_FORCED */
	u8 mac[ETH_ALEN];

	/* If valid, 1 => only untagged Rx if no vlan is configured */
	u8 default_only_untagged;
	u8 padding;

	/* The following is a 'copy' of qed_mcp_link_state,
	 * qed_mcp_link_params and qed_mcp_link_capabilities. Since it's
	 * possible the structs will increase further along the road we cannot
	 * have it here; Instead we need to have all of its fields.
	 */
	u8 req_autoneg;
	u8 req_autoneg_pause;
	u8 req_forced_rx;
	u8 req_forced_tx;
	u8 padding2[4];

	u32 req_adv_speed;
	u32 req_forced_speed;
	u32 req_loopback;
	u32 padding3;

	u8 link_up;
	u8 full_duplex;
	u8 autoneg;
	u8 autoneg_complete;
	u8 parallel_detection;
	u8 pfc_enabled;
	u8 partner_tx_flow_ctrl_en;
	u8 partner_rx_flow_ctrl_en;
	u8 partner_adv_pause;
	u8 sfp_tx_fault;
	u16 vxlan_udp_port;
	u16 geneve_udp_port;
	u8 padding4[2];

	u32 speed;
	u32 partner_adv_speed;

	u32 capability_speed;

	/* Forced vlan */
	u16 pvid;
	u16 padding5;
};

/* DMA-able bulletin buffer plus its mapping, owned by the VF */
struct qed_bulletin {
	dma_addr_t phys;
	struct qed_bulletin_content *p_virt;
	u32 size;
};
609 
/* TLV types carried in channel_tlv.type */
enum {
	CHANNEL_TLV_NONE,	/* ends tlv sequence */
	CHANNEL_TLV_ACQUIRE,
	CHANNEL_TLV_VPORT_START,
	CHANNEL_TLV_VPORT_UPDATE,
	CHANNEL_TLV_VPORT_TEARDOWN,
	CHANNEL_TLV_START_RXQ,
	CHANNEL_TLV_START_TXQ,
	CHANNEL_TLV_STOP_RXQS,
	CHANNEL_TLV_STOP_TXQS,
	CHANNEL_TLV_UPDATE_RXQ,
	CHANNEL_TLV_INT_CLEANUP,
	CHANNEL_TLV_CLOSE,
	CHANNEL_TLV_RELEASE,
	CHANNEL_TLV_LIST_END,
	CHANNEL_TLV_UCAST_FILTER,
	CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
	CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
	CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
	CHANNEL_TLV_VPORT_UPDATE_MCAST,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
	CHANNEL_TLV_VPORT_UPDATE_RSS,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
	CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
	CHANNEL_TLV_UPDATE_TUNN_PARAM,
	CHANNEL_TLV_COALESCE_UPDATE,
	CHANNEL_TLV_QID,
	CHANNEL_TLV_COALESCE_READ,
	CHANNEL_TLV_BULLETIN_UPDATE_MAC,
	CHANNEL_TLV_MAX,

	/* Required for iterating over vport-update tlvs.
	 * Will break in case non-sequential vport-update tlvs.
	 */
	CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
};

/* Default number of CIDs [total of both Rx and Tx] to be requested
 * by default, and maximum possible number.
 */
#define QED_ETH_VF_DEFAULT_NUM_CIDS (32)
#define QED_ETH_VF_MAX_NUM_CIDS (250)
652 
/* This data is held in the qed_hwfn structure for VFs only. */
struct qed_vf_iov {
	/* DMA mailbox buffers for the vf->pf request and pf->vf reply */
	union vfpf_tlvs *vf2pf_request;
	dma_addr_t vf2pf_request_phys;
	union pfvf_tlvs *pf2vf_reply;
	dma_addr_t pf2vf_reply_phys;

	/* Should be taken whenever the mailbox buffers are accessed */
	struct mutex mutex;
	u8 *offset;		/* current write position inside the request buffer */

	/* Bulletin Board */
	struct qed_bulletin bulletin;
	/* Last validated snapshot of the bulletin contents */
	struct qed_bulletin_content bulletin_shadow;

	/* we set aside a copy of the acquire response */
	struct pfvf_acquire_resp_tlv acquire_resp;

	/* In case PF originates prior to the fp-hsi version comparison,
	 * this has to be propagated as it affects the fastpath.
	 */
	bool b_pre_fp_hsi;

	/* Current day VFs are passing the SBs physical address on vport
	 * start, and as they lack an IGU mapping they need to store the
	 * addresses of previously registered SBs.
	 * Even if we were to change configuration flow, due to backward
	 * compatibility [with older PFs] we'd still need to store these.
	 */
	struct qed_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];

	/* Determines whether VF utilizes doorbells via limited register
	 * bar or via the doorbell bar.
	 */
	bool b_doorbell_bar;
};
689 
690 /**
691  * qed_vf_pf_set_coalesce(): VF - Set Rx/Tx coalesce per VF's relative queue.
692  *                                Coalesce value '0' will omit the
693  *                                configuration.
694  *
695  * @p_hwfn: HW device data.
696  * @rx_coal: coalesce value in micro second for rx queue.
697  * @tx_coal: coalesce value in micro second for tx queue.
698  * @p_cid: queue cid.
699  *
700  * Return: Int.
701  *
702  **/
703 int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
704 			   u16 rx_coal,
705 			   u16 tx_coal, struct qed_queue_cid *p_cid);
706 
707 /**
708  * qed_vf_pf_get_coalesce(): VF - Get coalesce per VF's relative queue.
709  *
710  * @p_hwfn: HW device data.
711  * @p_coal: coalesce value in micro second for VF queues.
712  * @p_cid: queue cid.
713  *
714  * Return: Int.
715  **/
716 int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
717 			   u16 *p_coal, struct qed_queue_cid *p_cid);
718 
719 #ifdef CONFIG_QED_SRIOV
720 /**
721  * qed_vf_read_bulletin(): Read the VF bulletin and act on it if needed.
722  *
723  * @p_hwfn: HW device data.
724  * @p_change: qed fills 1 iff bulletin board has changed, 0 otherwise.
725  *
726  * Return: enum _qed_status.
727  */
728 int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);
729 
730 /**
731  * qed_vf_get_link_params(): Get link parameters for VF from qed
732  *
733  * @p_hwfn: HW device data.
734  * @params: the link params structure to be filled for the VF.
735  *
736  * Return: Void.
737  */
738 void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
739 			    struct qed_mcp_link_params *params);
740 
741 /**
742  * qed_vf_get_link_state(): Get link state for VF from qed.
743  *
744  * @p_hwfn: HW device data.
745  * @link: the link state structure to be filled for the VF
746  *
747  * Return: Void.
748  */
749 void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
750 			   struct qed_mcp_link_state *link);
751 
752 /**
753  * qed_vf_get_link_caps(): Get link capabilities for VF from qed.
754  *
755  * @p_hwfn: HW device data.
756  * @p_link_caps: the link capabilities structure to be filled for the VF
757  *
758  * Return: Void.
759  */
760 void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
761 			  struct qed_mcp_link_capabilities *p_link_caps);
762 
763 /**
764  * qed_vf_get_num_rxqs(): Get number of Rx queues allocated for VF by qed
765  *
766  * @p_hwfn: HW device data.
767  * @num_rxqs: allocated RX queues
768  *
769  * Return: Void.
770  */
771 void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);
772 
773 /**
 * qed_vf_get_num_txqs(): Get number of Tx queues allocated for VF by qed
775  *
776  * @p_hwfn: HW device data.
 * @num_txqs: allocated TX queues
778  *
779  * Return: Void.
780  */
781 void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs);
782 
783 /**
784  * qed_vf_get_num_cids(): Get number of available connections
785  *                        [both Rx and Tx] for VF
786  *
787  * @p_hwfn: HW device data.
788  * @num_cids: allocated number of connections
789  *
790  * Return: Void.
791  */
792 void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids);
793 
794 /**
795  * qed_vf_get_port_mac(): Get port mac address for VF.
796  *
797  * @p_hwfn: HW device data.
798  * @port_mac: destination location for port mac
799  *
800  * Return: Void.
801  */
802 void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);
803 
804 /**
805  * qed_vf_get_num_vlan_filters(): Get number of VLAN filters allocated
806  *                                for VF by qed.
807  *
808  * @p_hwfn: HW device data.
809  * @num_vlan_filters: allocated VLAN filters
810  *
811  * Return: Void.
812  */
813 void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
814 				 u8 *num_vlan_filters);
815 
816 /**
817  * qed_vf_get_num_mac_filters(): Get number of MAC filters allocated
818  *                               for VF by qed
819  *
820  * @p_hwfn: HW device data.
821  * @num_mac_filters: allocated MAC filters
822  *
823  * Return: Void.
824  */
825 void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters);
826 
827 /**
828  * qed_vf_check_mac(): Check if VF can set a MAC address
829  *
830  * @p_hwfn: HW device data.
831  * @mac: Mac.
832  *
833  * Return: bool.
834  */
835 bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);
836 
837 /**
838  * qed_vf_get_fw_version(): Set firmware version information
839  *                          in dev_info from VFs acquire response tlv
840  *
841  * @p_hwfn: HW device data.
842  * @fw_major: FW major.
843  * @fw_minor: FW minor.
844  * @fw_rev: FW rev.
845  * @fw_eng: FW eng.
846  *
847  * Return: Void.
848  */
849 void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
850 			   u16 *fw_major, u16 *fw_minor,
851 			   u16 *fw_rev, u16 *fw_eng);
852 
853 /**
 * qed_vf_hw_prepare(): hw preparation for VF; sends ACQUIRE message
855  *
856  * @p_hwfn: HW device data.
857  *
858  * Return: Int.
859  */
860 int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
861 
862 /**
863  * qed_vf_pf_rxq_start(): start the RX Queue by sending a message to the PF
864  *
865  * @p_hwfn: HW device data.
866  * @p_cid: Only relative fields are relevant
867  * @bd_max_bytes: maximum number of bytes per bd
868  * @bd_chain_phys_addr: physical address of bd chain
869  * @cqe_pbl_addr: physical address of pbl
870  * @cqe_pbl_size: pbl size
871  * @pp_prod: pointer to the producer to be used in fastpath
872  *
873  * Return: Int.
874  */
875 int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
876 			struct qed_queue_cid *p_cid,
877 			u16 bd_max_bytes,
878 			dma_addr_t bd_chain_phys_addr,
879 			dma_addr_t cqe_pbl_addr,
880 			u16 cqe_pbl_size, void __iomem **pp_prod);
881 
882 /**
883  * qed_vf_pf_txq_start(): VF - start the TX queue by sending a message to the
884  *                        PF.
885  *
886  * @p_hwfn: HW device data.
887  * @p_cid: CID.
888  * @pbl_addr: PBL address.
889  * @pbl_size: PBL Size.
 * @pp_doorbell: pointer to address to which to write the doorbell to.
891  *
892  * Return: Int.
893  */
894 int
895 qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
896 		    struct qed_queue_cid *p_cid,
897 		    dma_addr_t pbl_addr,
898 		    u16 pbl_size, void __iomem **pp_doorbell);
899 
900 /**
901  * qed_vf_pf_rxq_stop(): VF - stop the RX queue by sending a message to the PF.
902  *
903  * @p_hwfn: HW device data.
904  * @p_cid: CID.
905  * @cqe_completion: CQE Completion.
906  *
907  * Return: Int.
908  */
909 int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
910 		       struct qed_queue_cid *p_cid, bool cqe_completion);
911 
912 /**
913  * qed_vf_pf_txq_stop(): VF - stop the TX queue by sending a message to the PF.
914  *
915  * @p_hwfn: HW device data.
916  * @p_cid: CID.
917  *
918  * Return: Int.
919  */
920 int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid);
921 
922 /**
923  * qed_vf_pf_vport_update(): VF - send a vport update command.
924  *
925  * @p_hwfn: HW device data.
926  * @p_params: Params
927  *
928  * Return: Int.
929  */
930 int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
931 			   struct qed_sp_vport_update_params *p_params);
932 
933 /**
934  * qed_vf_pf_reset(): VF - send a close message to PF.
935  *
936  * @p_hwfn: HW device data.
937  *
938  * Return: enum _qed_status
939  */
940 int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);
941 
942 /**
943  * qed_vf_pf_release(): VF - free vf`s memories.
944  *
945  * @p_hwfn: HW device data.
946  *
947  * Return: enum _qed_status
948  */
949 int qed_vf_pf_release(struct qed_hwfn *p_hwfn);
950 
951 /**
952  * qed_vf_get_igu_sb_id(): Get the IGU SB ID for a given
953  *        sb_id. For VFs igu sbs don't have to be contiguous
954  *
955  * @p_hwfn: HW device data.
956  * @sb_id: SB ID.
957  *
958  * Return: INLINE u16
959  */
960 u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
961 
962 /**
963  * qed_vf_set_sb_info(): Stores [or removes] a configured sb_info.
964  *
965  * @p_hwfn: HW device data.
966  * @sb_id: zero-based SB index [for fastpath]
967  * @p_sb:  may be NULL [during removal].
968  *
969  * Return: Void.
970  */
971 void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
972 			u16 sb_id, struct qed_sb_info *p_sb);
973 
974 /**
975  * qed_vf_pf_vport_start(): perform vport start for VF.
976  *
977  * @p_hwfn: HW device data.
978  * @vport_id: Vport ID.
979  * @mtu: MTU.
 * @inner_vlan_removal: Inner VLAN removal.
981  * @tpa_mode: TPA mode
982  * @max_buffers_per_cqe: Max buffer pre CQE.
983  * @only_untagged: default behavior regarding vlan acceptance
984  *
985  * Return: enum _qed_status
986  */
987 int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
988 			  u8 vport_id,
989 			  u16 mtu,
990 			  u8 inner_vlan_removal,
991 			  enum qed_tpa_mode tpa_mode,
992 			  u8 max_buffers_per_cqe, u8 only_untagged);
993 
994 /**
995  * qed_vf_pf_vport_stop(): stop the VF's vport
996  *
997  * @p_hwfn: HW device data.
998  *
999  * Return: enum _qed_status
1000  */
1001 int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);
1002 
1003 int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
1004 			   struct qed_filter_ucast *p_param);
1005 
1006 void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
1007 			    struct qed_filter_mcast *p_filter_cmd);
1008 
1009 /**
1010  * qed_vf_pf_int_cleanup(): clean the SB of the VF
1011  *
1012  * @p_hwfn: HW device data.
1013  *
1014  * Return: enum _qed_status
1015  */
1016 int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);
1017 
1018 /**
1019  * __qed_vf_get_link_params(): return the link params in a given bulletin board
1020  *
1021  * @p_hwfn: HW device data.
1022  * @p_params: pointer to a struct to fill with link params
1023  * @p_bulletin: Bulletin.
1024  *
1025  * Return: Void.
1026  */
1027 void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
1028 			      struct qed_mcp_link_params *p_params,
1029 			      struct qed_bulletin_content *p_bulletin);
1030 
1031 /**
1032  * __qed_vf_get_link_state(): return the link state in a given bulletin board
1033  *
1034  * @p_hwfn: HW device data.
1035  * @p_link: pointer to a struct to fill with link state
1036  * @p_bulletin: Bulletin.
1037  *
1038  * Return: Void.
1039  */
1040 void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
1041 			     struct qed_mcp_link_state *p_link,
1042 			     struct qed_bulletin_content *p_bulletin);
1043 
1044 /**
1045  * __qed_vf_get_link_caps(): return the link capabilities in a given
1046  *                           bulletin board
1047  *
1048  * @p_hwfn: HW device data.
1049  * @p_link_caps: pointer to a struct to fill with link capabilities
1050  * @p_bulletin: Bulletin.
1051  *
1052  * Return: Void.
1053  */
1054 void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
1055 			    struct qed_mcp_link_capabilities *p_link_caps,
1056 			    struct qed_bulletin_content *p_bulletin);
1057 
1058 void qed_iov_vf_task(struct work_struct *work);
1059 void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun);
1060 int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
1061 				  struct qed_tunnel_info *p_tunn);
1062 
1063 u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id);
1064 /**
 * qed_vf_pf_bulletin_update_mac(): Ask PF to update the MAC address in
 *                                  its bulletin board
1067  *
1068  * @p_hwfn: HW device data.
1069  * @p_mac: mac address to be updated in bulletin board
1070  *
1071  * Return: Int.
1072  */
1073 int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, const u8 *p_mac);
1074 
1075 #else
/* CONFIG_QED_SRIOV disabled: no-op stubs so callers compile unchanged. */
static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
					  struct qed_mcp_link_params *params)
{
}

static inline void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
					 struct qed_mcp_link_state *link)
{
}

static inline void
qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
		     struct qed_mcp_link_capabilities *p_link_caps)
{
}

static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
}

static inline void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs)
{
}

static inline void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids)
{
}

static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
}
1107 
qed_vf_get_num_vlan_filters(struct qed_hwfn * p_hwfn,u8 * num_vlan_filters)1108 static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
1109 					       u8 *num_vlan_filters)
1110 {
1111 }
1112 
qed_vf_get_num_mac_filters(struct qed_hwfn * p_hwfn,u8 * num_mac_filters)1113 static inline void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn,
1114 					      u8 *num_mac_filters)
1115 {
1116 }
1117 
qed_vf_check_mac(struct qed_hwfn * p_hwfn,u8 * mac)1118 static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
1119 {
1120 	return false;
1121 }
1122 
/* No-op stub: VF support not built in; none of the fw_* outputs are written. */
static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
					 u16 *fw_major, u16 *fw_minor,
					 u16 *fw_rev, u16 *fw_eng)
{
}
1128 
/* Stub: VF hardware preparation is impossible without VF support — always
 * fail with -EINVAL so callers bail out of the VF init path.
 */
static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}
1133 
/* Stub: no VF->PF channel in this build — always fails with -EINVAL;
 * @pp_prod is never set.
 */
static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
				      struct qed_queue_cid *p_cid,
				      u16 bd_max_bytes,
				      dma_addr_t bd_chain_phys_adr,
				      dma_addr_t cqe_pbl_addr,
				      u16 cqe_pbl_size, void __iomem **pp_prod)
{
	return -EINVAL;
}
1143 
/* Stub: no VF->PF channel in this build — always fails with -EINVAL;
 * @pp_doorbell is never set.
 */
static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
				      struct qed_queue_cid *p_cid,
				      dma_addr_t pbl_addr,
				      u16 pbl_size, void __iomem **pp_doorbell)
{
	return -EINVAL;
}
1151 
/* Stub: no VF->PF channel in this build — always fails with -EINVAL. */
static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
				     struct qed_queue_cid *p_cid,
				     bool cqe_completion)
{
	return -EINVAL;
}
1158 
/* Stub: no VF->PF channel in this build — always fails with -EINVAL. */
static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn,
				     struct qed_queue_cid *p_cid)
{
	return -EINVAL;
}
1164 
/* Stub: no VF->PF channel in this build — always fails with -EINVAL. */
static inline int
qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
		       struct qed_sp_vport_update_params *p_params)
{
	return -EINVAL;
}
1171 
/* Stub: no VF->PF channel in this build — always fails with -EINVAL. */
static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}
1176 
/* Stub: no VF->PF channel in this build — always fails with -EINVAL. */
static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}
1181 
/* Stub: no IGU mapping without VF support — always returns status block 0. */
static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	return 0;
}
1186 
/* No-op stub: VF support not built in; nothing is recorded for @sb_id. */
static inline void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn, u16 sb_id,
				      struct qed_sb_info *p_sb)
{
}
1191 
/* Stub: no VF->PF channel in this build — always fails with -EINVAL. */
static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
					u8 vport_id,
					u16 mtu,
					u8 inner_vlan_removal,
					enum qed_tpa_mode tpa_mode,
					u8 max_buffers_per_cqe,
					u8 only_untagged)
{
	return -EINVAL;
}
1202 
/* Stub: no VF->PF channel in this build — always fails with -EINVAL. */
static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}
1207 
/* Stub: no VF->PF channel in this build — always fails with -EINVAL. */
static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
					 struct qed_filter_ucast *p_param)
{
	return -EINVAL;
}
1213 
/* No-op stub: multicast filter request is silently dropped in this build. */
static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
					  struct qed_filter_mcast *p_filter_cmd)
{
}
1218 
/* Stub: no VF->PF channel in this build — always fails with -EINVAL. */
static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
	return -EINVAL;
}
1223 
/* No-op stub: bulletin-based variant; @p_params is left unmodified. */
static inline void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
					    struct qed_mcp_link_params
					    *p_params,
					    struct qed_bulletin_content
					    *p_bulletin)
{
}
1231 
/* No-op stub: bulletin-based variant; @p_link is left unmodified. */
static inline void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
					   struct qed_mcp_link_state *p_link,
					   struct qed_bulletin_content
					   *p_bulletin)
{
}
1238 
/* No-op stub: bulletin-based variant; @p_link_caps is left unmodified. */
static inline void
__qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
		       struct qed_mcp_link_capabilities *p_link_caps,
		       struct qed_bulletin_content *p_bulletin)
{
}
1245 
/* No-op stub: there is no IOV worker to run when VF support is absent. */
static inline void qed_iov_vf_task(struct work_struct *work)
{
}
1249 
/* No-op stub: @p_tun is left unmodified in builds without VF support. */
static inline void
qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
{
}
1254 
/* Stub: no VF->PF channel in this build — always fails with -EINVAL. */
static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
						struct qed_tunnel_info *p_tunn)
{
	return -EINVAL;
}
1260 
/* Stub: bulletin MAC update cannot be requested — always fails with -EINVAL. */
static inline int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
						const u8 *p_mac)
{
	return -EINVAL;
}
1266 
/* Stub: BAR sizes are unknown without VF support — always reports 0. */
static inline u32
qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn,
		   enum BAR_ID bar_id)
{
	return 0;
}
1273 #endif
1274 
1275 #endif
1276