/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 */

#ifndef _QED_SRIOV_H
#define _QED_SRIOV_H
#include <linux/types.h>
#include "qed_vf.h"

#define QED_ETH_VF_NUM_MAC_FILTERS 1
#define QED_ETH_VF_NUM_VLAN_FILTERS 2
#define QED_VF_ARRAY_LENGTH (3)

#ifdef CONFIG_QED_SRIOV
#define IS_VF(cdev)             ((cdev)->b_is_vf)
#define IS_PF(cdev)             (!((cdev)->b_is_vf))
#define IS_PF_SRIOV(p_hwfn)     (!!((p_hwfn)->cdev->p_iov_info))
#else
#define IS_VF(cdev)             (0)
#define IS_PF(cdev)             (1)
#define IS_PF_SRIOV(p_hwfn)     (0)
#endif
#define IS_PF_SRIOV_ALLOC(p_hwfn)       (!!((p_hwfn)->pf_iov_info))

#define QED_MAX_VF_CHAINS_PER_PF 16

#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS	\
	(MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)

enum qed_iov_vport_update_flag {
	QED_IOV_VP_UPDATE_ACTIVATE,
	QED_IOV_VP_UPDATE_VLAN_STRIP,
	QED_IOV_VP_UPDATE_TX_SWITCH,
	QED_IOV_VP_UPDATE_MCAST,
	QED_IOV_VP_UPDATE_ACCEPT_PARAM,
	QED_IOV_VP_UPDATE_RSS,
	QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN,
	QED_IOV_VP_UPDATE_SGE_TPA,
	QED_IOV_VP_UPDATE_MAX,
};

struct qed_public_vf_info {
	/* These copies will later be reflected in the bulletin board,
	 * but this copy should be newer.
	 */
	u8 forced_mac[ETH_ALEN];
	u16 forced_vlan;
	u8 mac[ETH_ALEN];

	/* IFLA_VF_LINK_STATE_<X> */
	int link_state;

	/* Currently configured Tx rate in MB/sec. 0 if unconfigured */
	int tx_rate;

	/* Trusted VFs can configure promiscuous mode.
	 * Also store shadow promisc configuration if needed.
	 */
	bool is_trusted_configured;
	bool is_trusted_request;
	u8 rx_accept_mode;
	u8 tx_accept_mode;
};

struct qed_iov_vf_init_params {
	u16 rel_vf_id;

	/* Number of requested queues; currently we don't support a
	 * different number of Rx and Tx queues.
	 */
	u16 num_queues;

	/* Allow the client to choose which qzones to use for Rx/Tx,
	 * and which queue_base to use for Tx queues on a per-queue basis.
	 * Note that values should be relative to the PF's resources.
	 */
	u16 req_rx_queue[QED_MAX_VF_CHAINS_PER_PF];
	u16 req_tx_queue[QED_MAX_VF_CHAINS_PER_PF];
};
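
/* Illustrative sketch of how the PF init flow might populate this structure;
 * the consuming function (qed_iov_init_hw_for_vf() in qed_sriov.c) and the
 * exact values below are assumptions, not part of this header. Leaving
 * req_rx_queue / req_tx_queue zeroed lets the implementation pick default
 * qzones.
 *
 *	struct qed_iov_vf_init_params params;
 *	int rc;
 *
 *	memset(&params, 0, sizeof(params));
 *	params.rel_vf_id = rel_vf_id;
 *	params.num_queues = 4;
 *	rc = qed_iov_init_hw_for_vf(p_hwfn, p_ptt, &params);
 */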

/* This struct is part of qed_dev and contains data relevant to all hwfns;
 * Initialized only if SR-IOV capability is exposed in PCIe config space.
 */
struct qed_hw_sriov_info {
	int pos;		/* capability position */
	int nres;		/* number of resources */
	u32 cap;		/* SR-IOV Capabilities */
	u16 ctrl;		/* SR-IOV Control */
	u16 total_vfs;		/* total VFs associated with the PF */
	u16 num_vfs;		/* number of VFs that have been started */
	u16 initial_vfs;	/* initial VFs associated with the PF */
	u16 nr_virtfn;		/* number of VFs available */
	u16 offset;		/* first VF Routing ID offset */
	u16 stride;		/* following VF stride */
	u16 vf_device_id;	/* VF device id */
	u32 pgsz;		/* page size for BAR alignment */
	u8 link;		/* Function Dependency Link */

	u32 first_vf_in_pf;
};

/* This mailbox is maintained per VF in its PF, and contains all the
 * information required for sending / receiving a message.
 */
struct qed_iov_vf_mbx {
	union vfpf_tlvs *req_virt;
	dma_addr_t req_phys;
	union pfvf_tlvs *reply_virt;
	dma_addr_t reply_phys;

	/* Address in VF where a pending message is located */
	dma_addr_t pending_req;

	/* Message from VF awaits handling */
	bool b_pending_msg;

	u8 *offset;

	/* saved VF request header */
	struct vfpf_first_tlv first_tlv;
};

#define QED_IOV_LEGACY_QID_RX (0)
#define QED_IOV_LEGACY_QID_TX (1)
#define QED_IOV_QID_INVALID (0xFE)

struct qed_vf_queue_cid {
	bool b_is_tx;
	struct qed_queue_cid *p_cid;
};

/* Describes a qzone associated with the VF */
struct qed_vf_queue {
	u16 fw_rx_qid;
	u16 fw_tx_qid;

	struct qed_vf_queue_cid cids[MAX_QUEUES_PER_QZONE];
};

enum vf_state {
	VF_FREE = 0,		/* VF ready to be acquired; holds no resources */
	VF_ACQUIRED,		/* VF, acquired, but not initialized */
	VF_ENABLED,		/* VF, Enabled */
	VF_RESET,		/* VF, FLR'd, pending cleanup */
	VF_STOPPED		/* VF, Stopped */
};

struct qed_vf_vlan_shadow {
	bool used;
	u16 vid;
};

struct qed_vf_shadow_config {
	/* Shadow copy of all guest vlans */
	struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];

	/* Shadow copy of all configured MACs; Empty if forcing MACs */
	u8 macs[QED_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
	u8 inner_vlan_removal;
};

/* PFs maintain an array of this structure, per VF */
struct qed_vf_info {
	struct qed_iov_vf_mbx vf_mbx;
	enum vf_state state;
	bool b_init;
	bool b_malicious;
	u8 to_disable;

	struct qed_bulletin bulletin;
	dma_addr_t vf_bulletin;

	/* PF saves a copy of the last VF acquire message */
	struct vfpf_acquire_tlv acquire;

	u32 concrete_fid;
	u16 opaque_fid;
	u16 mtu;

	u8 vport_id;
	u8 relative_vf_id;
	u8 abs_vf_id;
#define QED_VF_ABS_ID(p_hwfn, p_vf)	(QED_PATH_ID(p_hwfn) ?		      \
					 (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
					 (p_vf)->abs_vf_id)

	u8 vport_instance;
	u8 num_rxqs;
	u8 num_txqs;

	u16 rx_coal;
	u16 tx_coal;

	u8 num_sbs;

	u8 num_mac_filters;
	u8 num_vlan_filters;

	struct qed_vf_queue vf_queues[QED_MAX_VF_CHAINS_PER_PF];
	u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
	u8 num_active_rxqs;
	struct qed_public_vf_info p_vf_info;
	bool spoof_chk;
	bool req_spoofchk_val;

	/* Stores the configuration requested by VF */
	struct qed_vf_shadow_config shadow_config;

	/* A bitfield using bulletin's valid-map bits, used to indicate
	 * which of the bulletin board features have been configured.
	 */
	u64 configured_features;
#define QED_IOV_CONFIGURED_FEATURES_MASK        ((1 << MAC_ADDR_FORCED) | \
						 (1 << VLAN_ADDR_FORCED))
};

/* This structure is part of qed_hwfn and used only for PFs that have SR-IOV
 * capability enabled.
 */
struct qed_pf_iov {
	struct qed_vf_info vfs_array[MAX_NUM_VFS];
	u64 pending_flr[QED_VF_ARRAY_LENGTH];

	/* Allocate message address continuously and split to each VF */
	void *mbx_msg_virt_addr;
	dma_addr_t mbx_msg_phys_addr;
	u32 mbx_msg_size;
	void *mbx_reply_virt_addr;
	dma_addr_t mbx_reply_phys_addr;
	u32 mbx_reply_size;
	void *p_bulletins;
	dma_addr_t bulletins_phys;
	u32 bulletins_size;
};

enum qed_iov_wq_flag {
	QED_IOV_WQ_MSG_FLAG,
	QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
	QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
	QED_IOV_WQ_STOP_WQ_FLAG,
	QED_IOV_WQ_FLR_FLAG,
	QED_IOV_WQ_TRUST_FLAG,
	QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
};

#ifdef CONFIG_QED_SRIOV
/**
 * @brief Check if given VF ID @rel_vf_id is valid
 *        w.r.t. @b_enabled_only value;
 *        if b_enabled_only = true - only an enabled VF ID is valid,
 *        else any VF ID less than max_vfs is valid.
 *
 * @param p_hwfn
 * @param rel_vf_id - Relative VF ID
 * @param b_enabled_only - consider only enabled VFs
 * @param b_non_malicious - true iff the VF should also be validated as
 *                          non-malicious.
 *
 * @return bool - true for a valid VF ID
 */
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
			   int rel_vf_id,
			   bool b_enabled_only, bool b_non_malicious);
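
/* Usage sketch (illustrative only, not taken from this driver): guard a
 * per-VF operation so that disabled or malicious VFs are skipped.
 *
 *	if (!qed_iov_is_valid_vfid(p_hwfn, rel_vf_id, true, true))
 *		return -EINVAL;
 */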

/**
 * @brief - Given a VF index, return the index of the next (including that)
 *          active VF.
 *
 * @param p_hwfn
 * @param rel_vf_id
 *
 * @return MAX_NUM_VFS if there are no further active VFs, otherwise the index.
 */
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);

void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
				    int vfid, u16 vxlan_port, u16 geneve_port);

/**
 * @brief Read SR-IOV related information and allocate resources;
 *        reads from PCI configuration space, shmem, etc.
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_iov_hw_info(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_add_tlv - place a given TLV on the TLV buffer at the next offset
 *
 * @param p_hwfn
 * @param offset
 * @param type
 * @param length
 *
 * @return pointer to the newly placed TLV
 */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
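
/* Usage sketch (illustrative; the TLV type and struct names below come from
 * qed_vf.h and describe a typical caller as an assumption, not this header):
 * TLVs are placed back-to-back by passing the same offset pointer, and the
 * chain is terminated with a list-end TLV.
 *
 *	struct vfpf_acquire_tlv *req;
 *
 *	req = qed_add_tlv(p_hwfn, &offset, CHANNEL_TLV_ACQUIRE, sizeof(*req));
 *	...
 *	qed_add_tlv(p_hwfn, &offset, CHANNEL_TLV_LIST_END,
 *		    sizeof(struct channel_list_end_tlv));
 */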

/**
 * @brief list the types and lengths of the TLVs in the buffer
 *
 * @param p_hwfn
 * @param tlvs_list
 */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);

/**
 * @brief qed_iov_alloc - allocate SR-IOV related resources
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_iov_alloc(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_iov_setup - setup SR-IOV related resources
 *
 * @param p_hwfn
 */
void qed_iov_setup(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_iov_free - free SR-IOV related resources
 *
 * @param p_hwfn
 */
void qed_iov_free(struct qed_hwfn *p_hwfn);

/**
 * @brief free SR-IOV related memory that was allocated during hw_prepare
 *
 * @param cdev
 */
void qed_iov_free_hw_info(struct qed_dev *cdev);

/**
 * @brief Mark the structs of VFs that have been FLR-ed.
 *
 * @param p_hwfn
 * @param disabled_vfs - bitmask of all VFs on the path that were FLR-ed
 *
 * @return true if at least one of the PF's VFs was FLR-ed, false otherwise.
 */
bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
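
/* Usage sketch (illustrative; the calling context is an assumption): the
 * management-FW notification path marks the FLR-ed VFs and, if any were
 * found, defers the actual cleanup to the IOV workqueue.
 *
 *	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
 *		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
 */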

/**
 * @brief Search for extended TLVs in a request/reply buffer.
 *
 * @param p_hwfn
 * @param p_tlvs_list - Pointer to the TLVs list
 * @param req_type - Type of TLV
 *
 * @return pointer to the TLV if found, otherwise NULL.
 */
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
			       void *p_tlvs_list, u16 req_type);
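
/* Usage sketch (illustrative; the TLV type and struct names are taken from
 * qed_vf.h as an assumption): look up one extended TLV, e.g. the activate
 * sub-TLV of a vport-update message, and bail out if it is absent.
 *
 *	struct vfpf_vport_update_activate_tlv *p_act;
 *
 *	p_act = qed_iov_search_list_tlvs(p_hwfn, p_tlvs_list,
 *					 CHANNEL_TLV_VPORT_UPDATE_ACTIVATE);
 *	if (!p_act)
 *		return;
 */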

void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
int qed_iov_wq_start(struct qed_dev *cdev);

void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
void qed_vf_start_iov_wq(struct qed_dev *cdev);
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
#else
static inline bool
qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
		      int rel_vf_id, bool b_enabled_only, bool b_non_malicious)
{
	return false;
}

static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
					     u16 rel_vf_id)
{
	return MAX_NUM_VFS;
}

static inline void
qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, int vfid,
			       u16 vxlan_port, u16 geneve_port)
{
}

static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	return 0;
}

static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	return 0;
}

static inline void qed_iov_setup(struct qed_hwfn *p_hwfn)
{
}

static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
{
}

static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
{
}

static inline bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
				       u32 *disabled_vfs)
{
	return false;
}

static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
}

static inline int qed_iov_wq_start(struct qed_dev *cdev)
{
	return 0;
}

static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
				    enum qed_iov_wq_flag flag)
{
}

static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
}

static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
	return 0;
}

static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
}
#endif

#define qed_for_each_vf(_p_hwfn, _i)			  \
	for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0); \
	     _i < MAX_NUM_VFS;				  \
	     _i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1))
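
/* Usage sketch (illustrative; the loop body is an assumption): iterate over
 * all currently active VFs of a hw-function, e.g. to push the tunnel UDP
 * ports into each VF's bulletin board.
 *
 *	int i;
 *
 *	qed_for_each_vf(p_hwfn, i)
 *		qed_iov_bulletin_set_udp_ports(p_hwfn, i, vxlan_port,
 *					       geneve_port);
 */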

#endif