1 /* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
2 /* Copyright 2014-2016 Freescale Semiconductor Inc.
3  * Copyright 2016 NXP
4  */
5 
6 #ifndef __DPAA2_ETH_H
7 #define __DPAA2_ETH_H
8 
9 #include <linux/netdevice.h>
10 #include <linux/if_vlan.h>
11 #include <linux/fsl/mc.h>
12 
13 #include <soc/fsl/dpaa2-io.h>
14 #include <soc/fsl/dpaa2-fd.h>
15 #include "dpni.h"
16 #include "dpni-cmd.h"
17 
18 #include "dpaa2-eth-trace.h"
19 #include "dpaa2-eth-debugfs.h"
20 
21 #define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0)
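/* Worked example: DPAA2_WRIOP_VERSION(1, 0, 0) evaluates to
 * (1 << 10) | (0 << 5) | (0 << 0) == 0x400
 */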
22 
23 #define DPAA2_ETH_STORE_SIZE		16
24 
25 /* Maximum number of scatter-gather entries in an ingress frame,
26  * considering the maximum receive frame size is 64K
27  */
28 #define DPAA2_ETH_MAX_SG_ENTRIES	((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE)
29 
30 /* Maximum acceptable MTU value. It is in direct relation with the hardware
31  * enforced Max Frame Length (currently 10k).
32  */
33 #define DPAA2_ETH_MFL			(10 * 1024)
34 #define DPAA2_ETH_MAX_MTU		(DPAA2_ETH_MFL - VLAN_ETH_HLEN)
35 /* Convert L3 MTU to L2 MFL */
36 #define DPAA2_ETH_L2_MAX_FRM(mtu)	((mtu) + VLAN_ETH_HLEN)
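/* Worked example: with DPAA2_ETH_MFL = 10240 and VLAN_ETH_HLEN = 18
 * (14B Ethernet header + 4B VLAN tag), DPAA2_ETH_MAX_MTU = 10222 and
 * DPAA2_ETH_L2_MAX_FRM(1500) = 1518.
 */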
37 
38 /* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
 * frames in the Rx queues (the length of the current frame is not
40  * taken into account when making the taildrop decision)
41  */
42 #define DPAA2_ETH_TAILDROP_THRESH	(64 * 1024)
43 
44 /* Maximum number of Tx confirmation frames to be processed
45  * in a single NAPI call
46  */
47 #define DPAA2_ETH_TXCONF_PER_NAPI	256
48 
/* Buffer quota per queue. Must be large enough that, for minimum-sized
 * frames, taildrop kicks in before the buffer pool gets depleted, so we compute
51  * how many 64B frames fit inside the taildrop threshold and add a margin
52  * to accommodate the buffer refill delay.
53  */
54 #define DPAA2_ETH_MAX_FRAMES_PER_QUEUE	(DPAA2_ETH_TAILDROP_THRESH / 64)
55 #define DPAA2_ETH_NUM_BUFS		(DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
56 #define DPAA2_ETH_REFILL_THRESH \
57 	(DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD)
58 
59 /* Maximum number of buffers that can be acquired/released through a single
60  * QBMan command
61  */
62 #define DPAA2_ETH_BUFS_PER_CMD		7
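/* Worked example of the buffer quota above: with the 64K taildrop threshold,
 * DPAA2_ETH_MAX_FRAMES_PER_QUEUE = 65536 / 64 = 1024 frames,
 * DPAA2_ETH_NUM_BUFS = 1024 + 256 = 1280 buffers and
 * DPAA2_ETH_REFILL_THRESH = 1280 - 7 = 1273, i.e. refills can always be
 * done in multiples of a full QBMan release command.
 */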
63 
64 /* Hardware requires alignment for ingress/egress buffer addresses */
65 #define DPAA2_ETH_TX_BUF_ALIGN		64
66 
67 #define DPAA2_ETH_RX_BUF_RAW_SIZE	PAGE_SIZE
68 #define DPAA2_ETH_RX_BUF_TAILROOM \
69 	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
70 #define DPAA2_ETH_RX_BUF_SIZE \
71 	(DPAA2_ETH_RX_BUF_RAW_SIZE - DPAA2_ETH_RX_BUF_TAILROOM)
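/* Rough numbers, assuming a 64-bit build with 4K pages where
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) is a few hundred bytes:
 * DPAA2_ETH_RX_BUF_SIZE comes out to roughly 3.7K, so
 * DPAA2_ETH_MAX_SG_ENTRIES above works out to about 17.
 */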
72 
73 /* Hardware annotation area in RX/TX buffers */
74 #define DPAA2_ETH_RX_HWA_SIZE		64
75 #define DPAA2_ETH_TX_HWA_SIZE		128
76 
77 /* PTP nominal frequency 1GHz */
78 #define DPAA2_PTP_CLK_PERIOD_NS		1
79 
80 /* Due to a limitation in WRIOP 1.0.0, the RX buffer data must be aligned
81  * to 256B. For newer revisions, the requirement is only for 64B alignment
82  */
83 #define DPAA2_ETH_RX_BUF_ALIGN_REV1	256
84 #define DPAA2_ETH_RX_BUF_ALIGN		64
85 
/* We are accommodating an skb backpointer and some S/G info
 * in the frame's software annotation. The hardware allows an
 * annotation size of either 0 or 64 bytes, so we choose the latter.
89  */
90 #define DPAA2_ETH_SWA_SIZE		64
91 
92 /* We store different information in the software annotation area of a Tx frame
93  * based on what type of frame it is
94  */
95 enum dpaa2_eth_swa_type {
96 	DPAA2_ETH_SWA_SINGLE,
97 	DPAA2_ETH_SWA_SG,
98 	DPAA2_ETH_SWA_XDP,
99 };
100 
101 /* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
102 struct dpaa2_eth_swa {
103 	enum dpaa2_eth_swa_type type;
104 	union {
105 		struct {
106 			struct sk_buff *skb;
107 		} single;
108 		struct {
109 			struct sk_buff *skb;
110 			struct scatterlist *scl;
111 			int num_sg;
112 			int sgt_size;
113 		} sg;
114 		struct {
115 			int dma_size;
116 			struct xdp_frame *xdpf;
117 		} xdp;
118 	};
119 };
120 
121 /* Annotation valid bits in FD FRC */
122 #define DPAA2_FD_FRC_FASV		0x8000
123 #define DPAA2_FD_FRC_FAEADV		0x4000
124 #define DPAA2_FD_FRC_FAPRV		0x2000
125 #define DPAA2_FD_FRC_FAIADV		0x1000
126 #define DPAA2_FD_FRC_FASWOV		0x0800
127 #define DPAA2_FD_FRC_FAICFDV		0x0400
128 
129 /* Error bits in FD CTRL */
130 #define DPAA2_FD_RX_ERR_MASK		(FD_CTRL_SBE | FD_CTRL_FAERR)
131 #define DPAA2_FD_TX_ERR_MASK		(FD_CTRL_UFD	| \
132 					 FD_CTRL_SBE	| \
133 					 FD_CTRL_FSE	| \
134 					 FD_CTRL_FAERR)
135 
136 /* Annotation bits in FD CTRL */
137 #define DPAA2_FD_CTRL_ASAL		0x00020000	/* ASAL = 128B */
138 
139 /* Frame annotation status */
140 struct dpaa2_fas {
141 	u8 reserved;
142 	u8 ppid;
143 	__le16 ifpid;
144 	__le32 status;
145 };
146 
147 /* Frame annotation status word is located in the first 8 bytes
 * of the buffer's hardware annotation area
149  */
150 #define DPAA2_FAS_OFFSET		0
151 #define DPAA2_FAS_SIZE			(sizeof(struct dpaa2_fas))
152 
153 /* Timestamp is located in the next 8 bytes of the buffer's
154  * hardware annotation area
155  */
156 #define DPAA2_TS_OFFSET			0x8
157 
158 /* Frame annotation egress action descriptor */
159 #define DPAA2_FAEAD_OFFSET		0x58
160 
161 struct dpaa2_faead {
162 	__le32 conf_fqid;
163 	__le32 ctrl;
164 };
165 
166 #define DPAA2_FAEAD_A2V			0x20000000
167 #define DPAA2_FAEAD_A4V			0x08000000
168 #define DPAA2_FAEAD_UPDV		0x00001000
169 #define DPAA2_FAEAD_EBDDV		0x00002000
170 #define DPAA2_FAEAD_UPD			0x00000010
171 
172 /* Accessors for the hardware annotation fields that we use */
173 static inline void *dpaa2_get_hwa(void *buf_addr, bool swa)
174 {
175 	return buf_addr + (swa ? DPAA2_ETH_SWA_SIZE : 0);
176 }
177 
178 static inline struct dpaa2_fas *dpaa2_get_fas(void *buf_addr, bool swa)
179 {
180 	return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAS_OFFSET;
181 }
182 
183 static inline __le64 *dpaa2_get_ts(void *buf_addr, bool swa)
184 {
185 	return dpaa2_get_hwa(buf_addr, swa) + DPAA2_TS_OFFSET;
186 }
187 
188 static inline struct dpaa2_faead *dpaa2_get_faead(void *buf_addr, bool swa)
189 {
190 	return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAEAD_OFFSET;
191 }
192 
193 /* Error and status bits in the frame annotation status word */
194 /* Debug frame, otherwise supposed to be discarded */
195 #define DPAA2_FAS_DISC			0x80000000
196 /* MACSEC frame */
197 #define DPAA2_FAS_MS			0x40000000
198 #define DPAA2_FAS_PTP			0x08000000
199 /* Ethernet multicast frame */
200 #define DPAA2_FAS_MC			0x04000000
201 /* Ethernet broadcast frame */
202 #define DPAA2_FAS_BC			0x02000000
203 #define DPAA2_FAS_KSE			0x00040000
204 #define DPAA2_FAS_EOFHE			0x00020000
205 #define DPAA2_FAS_MNLE			0x00010000
206 #define DPAA2_FAS_TIDE			0x00008000
207 #define DPAA2_FAS_PIEE			0x00004000
208 /* Frame length error */
209 #define DPAA2_FAS_FLE			0x00002000
210 /* Frame physical error */
211 #define DPAA2_FAS_FPE			0x00001000
212 #define DPAA2_FAS_PTE			0x00000080
213 #define DPAA2_FAS_ISP			0x00000040
214 #define DPAA2_FAS_PHE			0x00000020
215 #define DPAA2_FAS_BLE			0x00000010
216 /* L3 csum validation performed */
217 #define DPAA2_FAS_L3CV			0x00000008
218 /* L3 csum error */
219 #define DPAA2_FAS_L3CE			0x00000004
220 /* L4 csum validation performed */
221 #define DPAA2_FAS_L4CV			0x00000002
222 /* L4 csum error */
223 #define DPAA2_FAS_L4CE			0x00000001
224 /* Possible errors on the ingress path */
225 #define DPAA2_FAS_RX_ERR_MASK		(DPAA2_FAS_KSE		| \
226 					 DPAA2_FAS_EOFHE	| \
227 					 DPAA2_FAS_MNLE		| \
228 					 DPAA2_FAS_TIDE		| \
229 					 DPAA2_FAS_PIEE		| \
230 					 DPAA2_FAS_FLE		| \
231 					 DPAA2_FAS_FPE		| \
232 					 DPAA2_FAS_PTE		| \
233 					 DPAA2_FAS_ISP		| \
234 					 DPAA2_FAS_PHE		| \
235 					 DPAA2_FAS_BLE		| \
236 					 DPAA2_FAS_L3CE		| \
237 					 DPAA2_FAS_L4CE)
238 
239 /* Time in milliseconds between link state updates */
240 #define DPAA2_ETH_LINK_STATE_REFRESH	1000
241 
242 /* Number of times to retry a frame enqueue before giving up.
243  * Value determined empirically, in order to minimize the number
244  * of frames dropped on Tx
245  */
246 #define DPAA2_ETH_ENQUEUE_RETRIES	10
247 
248 /* Driver statistics, other than those in struct rtnl_link_stats64.
249  * These are usually collected per-CPU and aggregated by ethtool.
250  */
251 struct dpaa2_eth_drv_stats {
252 	__u64	tx_conf_frames;
253 	__u64	tx_conf_bytes;
254 	__u64	tx_sg_frames;
255 	__u64	tx_sg_bytes;
256 	__u64	tx_reallocs;
257 	__u64	rx_sg_frames;
258 	__u64	rx_sg_bytes;
259 	/* Enqueues retried due to portal busy */
260 	__u64	tx_portal_busy;
261 };
262 
263 /* Per-FQ statistics */
264 struct dpaa2_eth_fq_stats {
265 	/* Number of frames received on this queue */
266 	__u64 frames;
267 };
268 
269 /* Per-channel statistics */
270 struct dpaa2_eth_ch_stats {
271 	/* Volatile dequeues retried due to portal busy */
272 	__u64 dequeue_portal_busy;
273 	/* Pull errors */
274 	__u64 pull_err;
275 	/* Number of CDANs; useful to estimate avg NAPI len */
276 	__u64 cdan;
277 	/* XDP counters */
278 	__u64 xdp_drop;
279 	__u64 xdp_tx;
280 	__u64 xdp_tx_err;
281 	__u64 xdp_redirect;
282 };
283 
284 /* Maximum number of queues associated with a DPNI */
285 #define DPAA2_ETH_MAX_TCS		8
286 #define DPAA2_ETH_MAX_RX_QUEUES		16
287 #define DPAA2_ETH_MAX_TX_QUEUES		16
288 #define DPAA2_ETH_MAX_QUEUES		(DPAA2_ETH_MAX_RX_QUEUES + \
289 					DPAA2_ETH_MAX_TX_QUEUES)
290 #define DPAA2_ETH_MAX_NETDEV_QUEUES	\
291 	(DPAA2_ETH_MAX_TX_QUEUES * DPAA2_ETH_MAX_TCS)
292 
293 #define DPAA2_ETH_MAX_DPCONS		16
294 
295 enum dpaa2_eth_fq_type {
296 	DPAA2_RX_FQ = 0,
297 	DPAA2_TX_CONF_FQ,
298 };
299 
300 struct dpaa2_eth_priv;
301 
302 struct dpaa2_eth_fq {
303 	u32 fqid;
304 	u32 tx_qdbin;
305 	u32 tx_fqid[DPAA2_ETH_MAX_TCS];
306 	u16 flowid;
307 	u8 tc;
308 	int target_cpu;
309 	u32 dq_frames;
310 	u32 dq_bytes;
311 	struct dpaa2_eth_channel *channel;
312 	enum dpaa2_eth_fq_type type;
313 
314 	void (*consume)(struct dpaa2_eth_priv *priv,
315 			struct dpaa2_eth_channel *ch,
316 			const struct dpaa2_fd *fd,
317 			struct dpaa2_eth_fq *fq);
318 	struct dpaa2_eth_fq_stats stats;
319 };
320 
321 struct dpaa2_eth_ch_xdp {
322 	struct bpf_prog *prog;
323 	u64 drop_bufs[DPAA2_ETH_BUFS_PER_CMD];
324 	int drop_cnt;
325 	unsigned int res;
326 };
327 
328 struct dpaa2_eth_channel {
329 	struct dpaa2_io_notification_ctx nctx;
330 	struct fsl_mc_device *dpcon;
331 	int dpcon_id;
332 	int ch_id;
333 	struct napi_struct napi;
334 	struct dpaa2_io *dpio;
335 	struct dpaa2_io_store *store;
336 	struct dpaa2_eth_priv *priv;
337 	int buf_count;
338 	struct dpaa2_eth_ch_stats stats;
339 	struct dpaa2_eth_ch_xdp xdp;
340 	struct xdp_rxq_info xdp_rxq;
341 	struct list_head *rx_list;
342 };
343 
344 struct dpaa2_eth_dist_fields {
345 	u64 rxnfc_field;
346 	enum net_prot cls_prot;
347 	int cls_field;
348 	int size;
349 	u64 id;
350 };
351 
352 struct dpaa2_eth_cls_rule {
353 	struct ethtool_rx_flow_spec fs;
354 	u8 in_use;
355 };
356 
357 /* Driver private data */
358 struct dpaa2_eth_priv {
359 	struct net_device *net_dev;
360 
361 	u8 num_fqs;
362 	struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
363 	int (*enqueue)(struct dpaa2_eth_priv *priv,
364 		       struct dpaa2_eth_fq *fq,
365 		       struct dpaa2_fd *fd, u8 prio);
366 
367 	u8 num_channels;
368 	struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
369 
370 	struct dpni_attr dpni_attrs;
371 	u16 dpni_ver_major;
372 	u16 dpni_ver_minor;
373 	u16 tx_data_offset;
374 
375 	struct fsl_mc_device *dpbp_dev;
376 	u16 bpid;
377 	struct iommu_domain *iommu_domain;
378 
379 	bool tx_tstamp; /* Tx timestamping enabled */
380 	bool rx_tstamp; /* Rx timestamping enabled */
381 
382 	u16 tx_qdid;
383 	struct fsl_mc_io *mc_io;
384 	/* Cores which have an affine DPIO/DPCON.
385 	 * This is the cpu set on which Rx and Tx conf frames are processed
386 	 */
387 	struct cpumask dpio_cpumask;
388 
389 	/* Standard statistics */
390 	struct rtnl_link_stats64 __percpu *percpu_stats;
391 	/* Extra stats, in addition to the ones known by the kernel */
392 	struct dpaa2_eth_drv_stats __percpu *percpu_extras;
393 
394 	u16 mc_token;
395 
396 	struct dpni_link_state link_state;
397 	bool do_link_poll;
398 	struct task_struct *poll_thread;
399 
400 	/* enabled ethtool hashing bits */
401 	u64 rx_hash_fields;
402 	u64 rx_cls_fields;
403 	struct dpaa2_eth_cls_rule *cls_rules;
404 	u8 rx_cls_enabled;
405 	struct bpf_prog *xdp_prog;
406 #ifdef CONFIG_DEBUG_FS
407 	struct dpaa2_debugfs dbg;
408 #endif
409 };
410 
411 #define DPAA2_RXH_SUPPORTED	(RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
412 				| RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \
413 				| RXH_L4_B_2_3)
414 
415 /* default Rx hash options, set during probing */
416 #define DPAA2_RXH_DEFAULT	(RXH_L3_PROTO | RXH_IP_SRC | RXH_IP_DST | \
417 				 RXH_L4_B_0_1 | RXH_L4_B_2_3)
418 
419 #define dpaa2_eth_hash_enabled(priv)	\
420 	((priv)->dpni_attrs.num_queues > 1)
421 
422 /* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */
423 #define DPAA2_CLASSIFIER_DMA_SIZE 256
424 
425 extern const struct ethtool_ops dpaa2_ethtool_ops;
426 extern int dpaa2_phc_index;
427 
428 static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
429 					 u16 ver_major, u16 ver_minor)
430 {
431 	if (priv->dpni_ver_major == ver_major)
432 		return priv->dpni_ver_minor - ver_minor;
433 	return priv->dpni_ver_major - ver_major;
434 }
435 
436 /* Minimum firmware version that supports a more flexible API
437  * for configuring the Rx flow hash key
438  */
439 #define DPNI_RX_DIST_KEY_VER_MAJOR	7
440 #define DPNI_RX_DIST_KEY_VER_MINOR	5
441 
442 #define dpaa2_eth_has_legacy_dist(priv)					\
443 	(dpaa2_eth_cmp_dpni_ver((priv), DPNI_RX_DIST_KEY_VER_MAJOR,	\
444 				DPNI_RX_DIST_KEY_VER_MINOR) < 0)
445 
446 #define dpaa2_eth_fs_enabled(priv)	\
447 	(!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
448 
449 #define dpaa2_eth_fs_mask_enabled(priv)	\
450 	((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
451 
452 #define dpaa2_eth_fs_count(priv)        \
453 	((priv)->dpni_attrs.fs_entries)
454 
455 #define dpaa2_eth_tc_count(priv)	\
456 	((priv)->dpni_attrs.num_tcs)
457 
458 /* We have exactly one {Rx, Tx conf} queue per channel */
459 #define dpaa2_eth_queue_count(priv)     \
460 	((priv)->num_channels)
461 
462 enum dpaa2_eth_rx_dist {
463 	DPAA2_ETH_RX_DIST_HASH,
464 	DPAA2_ETH_RX_DIST_CLS
465 };
466 
467 /* Unique IDs for the supported Rx classification header fields */
468 #define DPAA2_ETH_DIST_ETHDST		BIT(0)
469 #define DPAA2_ETH_DIST_ETHSRC		BIT(1)
470 #define DPAA2_ETH_DIST_ETHTYPE		BIT(2)
471 #define DPAA2_ETH_DIST_VLAN		BIT(3)
472 #define DPAA2_ETH_DIST_IPSRC		BIT(4)
473 #define DPAA2_ETH_DIST_IPDST		BIT(5)
474 #define DPAA2_ETH_DIST_IPPROTO		BIT(6)
475 #define DPAA2_ETH_DIST_L4SRC		BIT(7)
476 #define DPAA2_ETH_DIST_L4DST		BIT(8)
477 #define DPAA2_ETH_DIST_ALL		(~0ULL)
478 
479 static inline
480 unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
481 				       struct sk_buff *skb)
482 {
483 	unsigned int headroom = DPAA2_ETH_SWA_SIZE;
484 
485 	/* If we don't have an skb (e.g. XDP buffer), we only need space for
486 	 * the software annotation area
487 	 */
488 	if (!skb)
489 		return headroom;
490 
491 	/* For non-linear skbs we have no headroom requirement, as we build a
492 	 * SG frame with a newly allocated SGT buffer
493 	 */
494 	if (skb_is_nonlinear(skb))
495 		return 0;
496 
497 	/* If we have Tx timestamping, need 128B hardware annotation */
498 	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
499 		headroom += DPAA2_ETH_TX_HWA_SIZE;
500 
501 	return headroom;
502 }
503 
/* Extra headroom space requested from the hardware, in order to make sure
505  * no realloc'ing in forwarding scenarios
506  */
507 static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
508 {
509 	return priv->tx_data_offset - DPAA2_ETH_RX_HWA_SIZE;
510 }
511 
512 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
513 int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key);
514 int dpaa2_eth_cls_key_size(u64 key);
515 int dpaa2_eth_cls_fld_off(int prot, int field);
516 void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields);
517 
#endif	/* __DPAA2_ETH_H */
519