/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _QED_SRIOV_H
#define _QED_SRIOV_H
#include <linux/types.h>
#include "qed_vf.h"

#define QED_ETH_VF_NUM_MAC_FILTERS 1
#define QED_ETH_VF_NUM_VLAN_FILTERS 2
#define QED_VF_ARRAY_LENGTH (3)

#ifdef CONFIG_QED_SRIOV
#define IS_VF(cdev)             ((cdev)->b_is_vf)
#define IS_PF(cdev)             (!((cdev)->b_is_vf))
#define IS_PF_SRIOV(p_hwfn)     (!!((p_hwfn)->cdev->p_iov_info))
#else
#define IS_VF(cdev)             (0)
#define IS_PF(cdev)             (1)
#define IS_PF_SRIOV(p_hwfn)     (0)
#endif
#define IS_PF_SRIOV_ALLOC(p_hwfn)       (!!((p_hwfn)->pf_iov_info))
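
/* Illustrative sketch (not part of the driver API; an assumption about
 * typical use): callers usually gate PF-only SR-IOV logic on these helpers,
 * e.g.:
 *
 *	if (IS_PF(cdev) && IS_PF_SRIOV(p_hwfn) && IS_PF_SRIOV_ALLOC(p_hwfn))
 *		qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
 */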

#define QED_MAX_VF_CHAINS_PER_PF 16

#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS	\
	(MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)

enum qed_iov_vport_update_flag {
	QED_IOV_VP_UPDATE_ACTIVATE,
	QED_IOV_VP_UPDATE_VLAN_STRIP,
	QED_IOV_VP_UPDATE_TX_SWITCH,
	QED_IOV_VP_UPDATE_MCAST,
	QED_IOV_VP_UPDATE_ACCEPT_PARAM,
	QED_IOV_VP_UPDATE_RSS,
	QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN,
	QED_IOV_VP_UPDATE_SGE_TPA,
	QED_IOV_VP_UPDATE_MAX,
};
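
/* Illustrative sketch (an assumption, not mandated by this header): these
 * values are typically used as bit positions when tracking which
 * vport-update TLVs a VF request carried, e.g.:
 *
 *	unsigned long tlvs_mask = 0;
 *
 *	tlvs_mask |= BIT(QED_IOV_VP_UPDATE_RSS);
 *	if (tlvs_mask & BIT(QED_IOV_VP_UPDATE_RSS))
 *		handle_rss_update();	(hypothetical helper)
 */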

struct qed_public_vf_info {
	/* These copies will later be reflected in the bulletin board,
	 * but this copy should be newer.
	 */
	u8 forced_mac[ETH_ALEN];
	u16 forced_vlan;
	u8 mac[ETH_ALEN];

	/* IFLA_VF_LINK_STATE_<X> */
	int link_state;

	/* Currently configured Tx rate in MB/sec. 0 if unconfigured */
	int tx_rate;

	/* Trusted VFs can configure promiscuous mode.
	 * Also store shadow promisc configuration if needed.
	 */
	bool is_trusted_configured;
	bool is_trusted_request;
	u8 rx_accept_mode;
	u8 tx_accept_mode;
};

struct qed_iov_vf_init_params {
	u16 rel_vf_id;

	/* Number of requested queues; currently, different numbers of
	 * Rx and Tx queues are not supported.
	 */
	u16 num_queues;

	/* Allow the client to choose which qzones to use for Rx/Tx,
	 * and which queue_base to use for Tx queues on a per-queue basis.
	 * Note that values should be relative to the PF resources.
	 */
	u16 req_rx_queue[QED_MAX_VF_CHAINS_PER_PF];
	u16 req_tx_queue[QED_MAX_VF_CHAINS_PER_PF];
};
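
/* Illustrative sketch (values are hypothetical): a PF-side caller would fill
 * this structure before initializing a VF, e.g.:
 *
 *	struct qed_iov_vf_init_params params;
 *
 *	memset(&params, 0, sizeof(params));
 *	params.rel_vf_id = 0;
 *	params.num_queues = 2;
 *	params.req_rx_queue[0] = 4;	(PF-relative queue-zone index)
 *	params.req_rx_queue[1] = 5;
 *	params.req_tx_queue[0] = 4;
 *	params.req_tx_queue[1] = 5;
 */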

/* This struct is part of qed_dev and contains data relevant to all hwfns;
 * Initialized only if the SR-IOV capability is exposed in PCIe config space.
 */
struct qed_hw_sriov_info {
	int pos;		/* capability position */
	int nres;		/* number of resources */
	u32 cap;		/* SR-IOV Capabilities */
	u16 ctrl;		/* SR-IOV Control */
	u16 total_vfs;		/* total VFs associated with the PF */
	u16 num_vfs;		/* number of vfs that have been started */
	u16 initial_vfs;	/* initial VFs associated with the PF */
	u16 nr_virtfn;		/* number of VFs available */
	u16 offset;		/* first VF Routing ID offset */
	u16 stride;		/* following VF stride */
	u16 vf_device_id;	/* VF device id */
	u32 pgsz;		/* page size for BAR alignment */
	u8 link;		/* Function Dependency Link */

	u32 first_vf_in_pf;
};

/* This mailbox is maintained per VF in its PF and contains all information
 * required for sending / receiving a message.
 */
struct qed_iov_vf_mbx {
	union vfpf_tlvs *req_virt;
	dma_addr_t req_phys;
	union pfvf_tlvs *reply_virt;
	dma_addr_t reply_phys;

	/* Address in VF where a pending message is located */
	dma_addr_t pending_req;

	/* Message from VF awaits handling */
	bool b_pending_msg;

	u8 *offset;

	/* saved VF request header */
	struct vfpf_first_tlv first_tlv;
};

struct qed_vf_q_info {
	u16 fw_rx_qid;
	struct qed_queue_cid *p_rx_cid;
	u16 fw_tx_qid;
	struct qed_queue_cid *p_tx_cid;
	u8 fw_cid;
};

enum vf_state {
	VF_FREE = 0,		/* VF ready to be acquired; holds no resources */
	VF_ACQUIRED,		/* VF, acquired, but not initialized */
	VF_ENABLED,		/* VF, Enabled */
	VF_RESET,		/* VF, FLR'd, pending cleanup */
	VF_STOPPED		/* VF, Stopped */
};

struct qed_vf_vlan_shadow {
	bool used;
	u16 vid;
};

struct qed_vf_shadow_config {
	/* Shadow copy of all guest vlans */
	struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];

	/* Shadow copy of all configured MACs; Empty if forcing MACs */
	u8 macs[QED_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
	u8 inner_vlan_removal;
};

/* PFs maintain an array of this structure, per VF */
struct qed_vf_info {
	struct qed_iov_vf_mbx vf_mbx;
	enum vf_state state;
	bool b_init;
	bool b_malicious;
	u8 to_disable;

	struct qed_bulletin bulletin;
	dma_addr_t vf_bulletin;

	/* PF saves a copy of the last VF acquire message */
	struct vfpf_acquire_tlv acquire;

	u32 concrete_fid;
	u16 opaque_fid;
	u16 mtu;

	u8 vport_id;
	u8 relative_vf_id;
	u8 abs_vf_id;
#define QED_VF_ABS_ID(p_hwfn, p_vf)	(QED_PATH_ID(p_hwfn) ?		      \
					 (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
					 (p_vf)->abs_vf_id)

	u8 vport_instance;
	u8 num_rxqs;
	u8 num_txqs;

	u8 num_sbs;

	u8 num_mac_filters;
	u8 num_vlan_filters;
	struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF];
	u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
	u8 num_active_rxqs;
	struct qed_public_vf_info p_vf_info;
	bool spoof_chk;
	bool req_spoofchk_val;

	/* Stores the configuration requested by VF */
	struct qed_vf_shadow_config shadow_config;

	/* A bitfield using bulletin's valid-map bits, used to indicate
	 * which of the bulletin board features have been configured.
	 */
	u64 configured_features;
#define QED_IOV_CONFIGURED_FEATURES_MASK        ((1 << MAC_ADDR_FORCED) | \
						 (1 << VLAN_ADDR_FORCED))
};

/* This structure is part of qed_hwfn and used only for PFs that have sriov
 * capability enabled.
 */
struct qed_pf_iov {
	struct qed_vf_info vfs_array[MAX_NUM_VFS];
	u64 pending_flr[QED_VF_ARRAY_LENGTH];

	/* Allocate message address contiguously and split it among the VFs */
	void *mbx_msg_virt_addr;
	dma_addr_t mbx_msg_phys_addr;
	u32 mbx_msg_size;
	void *mbx_reply_virt_addr;
	dma_addr_t mbx_reply_phys_addr;
	u32 mbx_reply_size;
	void *p_bulletins;
	dma_addr_t bulletins_phys;
	u32 bulletins_size;
};

enum qed_iov_wq_flag {
	QED_IOV_WQ_MSG_FLAG,
	QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
	QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
	QED_IOV_WQ_STOP_WQ_FLAG,
	QED_IOV_WQ_FLR_FLAG,
	QED_IOV_WQ_TRUST_FLAG,
	QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
};
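
/* Illustrative sketch (an assumption about typical use): these flags select
 * which deferred work the IOV workqueue should run; a PF-side caller would
 * request it via qed_schedule_iov(), e.g.:
 *
 *	qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
 */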

#ifdef CONFIG_QED_SRIOV
/**
 * @brief - Given a VF index, return the index of the next active VF
 *          (including the given index).
 *
 * @param p_hwfn
 * @param rel_vf_id
 *
 * @return MAX_NUM_VFS if there are no further active VFs, otherwise the index.
 */
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);

void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
				    int vfid, u16 vxlan_port, u16 geneve_port);

/**
 * @brief Read sriov related information and allocate resources;
 *  reads from configuration space, shmem, etc.
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_iov_hw_info(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_add_tlv - place a given tlv on the tlv buffer at the next offset
 *
 * @param p_hwfn
 * @param offset
 * @param type
 * @param length
 *
 * @return pointer to the newly placed tlv
 */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
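
/* Illustrative sketch (based on common TLV-chain usage; treat as an
 * assumption): building a reply buffer might look like:
 *
 *	p_reply = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_ACQUIRE,
 *			      sizeof(struct pfvf_acquire_resp_tlv));
 *	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
 *		    sizeof(struct channel_list_end_tlv));
 */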

/**
 * @brief list the types and lengths of the tlvs on the buffer
 *
 * @param p_hwfn
 * @param tlvs_list
 */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);

/**
 * @brief qed_iov_alloc - allocate sriov related resources
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_iov_alloc(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_iov_setup - setup sriov related resources
 *
 * @param p_hwfn
 * @param p_ptt
 */
void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);

/**
 * @brief qed_iov_free - free sriov related resources
 *
 * @param p_hwfn
 */
void qed_iov_free(struct qed_hwfn *p_hwfn);

/**
 * @brief free sriov related memory that was allocated during hw_prepare
 *
 * @param cdev
 */
void qed_iov_free_hw_info(struct qed_dev *cdev);

/**
 * @brief qed_sriov_eqe_event - handle an async sriov event that arrived
 *        on the eqe.
 *
 * @param p_hwfn
 * @param opcode
 * @param echo
 * @param data
 */
int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
			u8 opcode, __le16 echo, union event_ring_data *data);

/**
 * @brief Mark structs of vfs that have been FLR-ed.
 *
 * @param p_hwfn
 * @param disabled_vfs - bitmask of all VFs on path that were FLRed
 *
 * @return true iff one of the PF's vfs got FLRed. false otherwise.
 */
bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);

/**
 * @brief Search extended TLVs in request/reply buffer.
 *
 * @param p_hwfn
 * @param p_tlvs_list - Pointer to tlvs list
 * @param req_type - Type of TLV
 *
 * @return pointer to tlv type if found, otherwise returns NULL.
 */
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
			       void *p_tlvs_list, u16 req_type);

void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
int qed_iov_wq_start(struct qed_dev *cdev);

void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
void qed_vf_start_iov_wq(struct qed_dev *cdev);
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
#else
static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
					     u16 rel_vf_id)
{
	return MAX_NUM_VFS;
}

static inline void
qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, int vfid,
			       u16 vxlan_port, u16 geneve_port)
{
}

static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	return 0;
}

static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	return 0;
}

static inline void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
}

static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
{
}

static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
{
}

static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
				      u8 opcode,
				      __le16 echo, union event_ring_data *data)
{
	return -EINVAL;
}

static inline bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
				       u32 *disabled_vfs)
{
	return false;
}

static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
}

static inline int qed_iov_wq_start(struct qed_dev *cdev)
{
	return 0;
}

static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
				    enum qed_iov_wq_flag flag)
{
}

static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
}

static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
	return 0;
}

static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
}
#endif

#define qed_for_each_vf(_p_hwfn, _i)			  \
	for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0); \
	     _i < MAX_NUM_VFS;				  \
	     _i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1))
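
/* Illustrative sketch (assumes the PF-side accessors used elsewhere in the
 * driver): iterating over the currently active VFs of a PF might look like:
 *
 *	int i;
 *
 *	qed_for_each_vf(p_hwfn, i) {
 *		struct qed_vf_info *p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
 *
 *		...use p_vf...
 *	}
 */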

#endif