/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2017-2019 NXP */

#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/phylink.h>
#include <linux/dim.h>

#include "enetc_hw.h"

#define ENETC_MAC_MAXFRM_SIZE	9600
#define ENETC_MAX_MTU		(ENETC_MAC_MAXFRM_SIZE - \
				(ETH_FCS_LEN + ETH_HLEN + VLAN_HLEN))
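
/* Worked example: with ETH_FCS_LEN = 4, ETH_HLEN = 14 and VLAN_HLEN = 4, the
 * largest supported MTU evaluates to 9600 - (4 + 14 + 4) = 9578 bytes.
 */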

struct enetc_tx_swbd {
	union {
		struct sk_buff *skb;
		struct xdp_frame *xdp_frame;
	};
	dma_addr_t dma;
	struct page *page;	/* valid only if is_xdp_tx */
	u16 page_offset;	/* valid only if is_xdp_tx */
	u16 len;
	enum dma_data_direction dir;
	u8 is_dma_page:1;
	u8 check_wb:1;
	u8 do_twostep_tstamp:1;
	u8 is_eof:1;
	u8 is_xdp_tx:1;
	u8 is_xdp_redirect:1;
};

#define ENETC_RX_MAXFRM_SIZE	ENETC_MAC_MAXFRM_SIZE
#define ENETC_RXB_TRUESIZE	2048 /* PAGE_SIZE >> 1 */
#define ENETC_RXB_PAD		NET_SKB_PAD /* add extra space if needed */
#define ENETC_RXB_DMA_SIZE	\
	(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
#define ENETC_RXB_DMA_SIZE_XDP	\
	(SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - XDP_PACKET_HEADROOM)

struct enetc_rx_swbd {
	dma_addr_t dma;
	struct page *page;
	u16 page_offset;
	enum dma_data_direction dir;
	u16 len;
};

/* ENETC overhead: optional extension BD + 1 BD gap */
#define ENETC_TXBDS_NEEDED(val)	((val) + 2)
/* max # of chained Tx BDs is 15, including head and extension BD */
#define ENETC_MAX_SKB_FRAGS	13
#define ENETC_TXBDS_MAX_NEEDED	ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
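
/* Worked example: a maximally fragmented skb uses ENETC_MAX_SKB_FRAGS + 1 = 14
 * data BDs; together with the optional extension BD that is the 15-BD chain
 * limit noted above.  ENETC_TXBDS_MAX_NEEDED = ENETC_TXBDS_NEEDED(14) = 16
 * additionally reserves the 1 BD gap kept between producer and consumer, so a
 * transmit path would need roughly this much free space before queueing
 * (sketch only, not the exact xmit code):
 *
 *	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
 *		return NETDEV_TX_BUSY;
 */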

struct enetc_ring_stats {
	unsigned int packets;
	unsigned int bytes;
	unsigned int rx_alloc_errs;
	unsigned int xdp_drops;
	unsigned int xdp_tx;
	unsigned int xdp_tx_drops;
	unsigned int xdp_redirect;
	unsigned int xdp_redirect_failures;
	unsigned int xdp_redirect_sg;
	unsigned int recycles;
	unsigned int recycle_failures;
};

struct enetc_xdp_data {
	struct xdp_rxq_info rxq;
	struct bpf_prog *prog;
	int xdp_tx_in_flight;
};

#define ENETC_RX_RING_DEFAULT_SIZE	2048
#define ENETC_TX_RING_DEFAULT_SIZE	2048
#define ENETC_DEFAULT_TX_WORK		(ENETC_TX_RING_DEFAULT_SIZE / 2)

struct enetc_bdr {
	struct device *dev; /* for DMA mapping */
	struct net_device *ndev;
	void *bd_base; /* points to Rx or Tx BD ring */
	union {
		void __iomem *tpir;
		void __iomem *rcir;
	};
	u16 index;
	int bd_count; /* # of BDs */
	int next_to_use;
	int next_to_clean;
	union {
		struct enetc_tx_swbd *tx_swbd;
		struct enetc_rx_swbd *rx_swbd;
	};
	union {
		void __iomem *tcir; /* Tx */
		int next_to_alloc; /* Rx */
	};
	void __iomem *idr; /* Interrupt Detect Register pointer */

	int buffer_offset;
	struct enetc_xdp_data xdp;

	struct enetc_ring_stats stats;

	dma_addr_t bd_dma_base;
	u8 tsd_enable; /* Time specific departure */
	bool ext_en; /* enable h/w descriptor extensions */

	/* DMA buffer for TSO headers */
	char *tso_headers;
	dma_addr_t tso_headers_dma;
} ____cacheline_aligned_in_smp;

static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i)
{
	if (unlikely(++*i == bdr->bd_count))
		*i = 0;
}

static inline int enetc_bd_unused(struct enetc_bdr *bdr)
{
	if (bdr->next_to_clean > bdr->next_to_use)
		return bdr->next_to_clean - bdr->next_to_use - 1;

	return bdr->bd_count + bdr->next_to_clean - bdr->next_to_use - 1;
}

static inline int enetc_swbd_unused(struct enetc_bdr *bdr)
{
	if (bdr->next_to_clean > bdr->next_to_alloc)
		return bdr->next_to_clean - bdr->next_to_alloc - 1;

	return bdr->bd_count + bdr->next_to_clean - bdr->next_to_alloc - 1;
}
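
/* Example: both helpers deliberately leave one slot unused so that a full ring
 * is distinguishable from an empty one.  On an 8-entry ring with
 * next_to_clean = 2 and next_to_use = 6, enetc_bd_unused() returns
 * 8 + 2 - 6 - 1 = 3 free BDs; with next_to_clean = 6 and next_to_use = 3 it
 * returns 6 - 3 - 1 = 2.
 */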

/* Control BD ring */
#define ENETC_CBDR_DEFAULT_SIZE	64
struct enetc_cbdr {
	void *bd_base; /* points to the control BD ring */
	void __iomem *pir;
	void __iomem *cir;
	void __iomem *mr; /* mode register */

	int bd_count; /* # of BDs */
	int next_to_use;
	int next_to_clean;

	dma_addr_t bd_dma_base;
	struct device *dma_dev;
};

#define ENETC_TXBD(BDR, i) (&(((union enetc_tx_bd *)((BDR).bd_base))[i]))

static inline union enetc_rx_bd *enetc_rxbd(struct enetc_bdr *rx_ring, int i)
{
	int hw_idx = i;

#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
	if (rx_ring->ext_en)
		hw_idx = 2 * i;
#endif
	return &(((union enetc_rx_bd *)rx_ring->bd_base)[hw_idx]);
}

static inline void enetc_rxbd_next(struct enetc_bdr *rx_ring,
				   union enetc_rx_bd **old_rxbd, int *old_index)
{
	union enetc_rx_bd *new_rxbd = *old_rxbd;
	int new_index = *old_index;

	new_rxbd++;

#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
	if (rx_ring->ext_en)
		new_rxbd++;
#endif

	if (unlikely(++new_index == rx_ring->bd_count)) {
		new_rxbd = rx_ring->bd_base;
		new_index = 0;
	}

	*old_rxbd = new_rxbd;
	*old_index = new_index;
}

static inline union enetc_rx_bd *enetc_rxbd_ext(union enetc_rx_bd *rxbd)
{
	return ++rxbd;
}
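
/* Usage sketch (illustrative, not the exact Rx path): when extended
 * descriptors are enabled (rx_ring->ext_en), each logical Rx BD occupies two
 * hardware BDs, so enetc_rxbd() maps logical index i to hardware index 2 * i
 * and enetc_rxbd_ext() returns the extension BD that immediately follows the
 * base one.  A consumer would advance the BD pointer and the logical index
 * together, letting enetc_rxbd_next() handle the ring wrap:
 *
 *	int i = rx_ring->next_to_clean;
 *	union enetc_rx_bd *rxbd = enetc_rxbd(rx_ring, i);
 *
 *	... consume rxbd, and enetc_rxbd_ext(rxbd) if rx_ring->ext_en ...
 *	enetc_rxbd_next(rx_ring, &rxbd, &i);
 */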

struct enetc_msg_swbd {
	void *vaddr;
	dma_addr_t dma;
	int size;
};

#define ENETC_REV1	0x1
enum enetc_errata {
	ENETC_ERR_VLAN_ISOL	= BIT(0),
	ENETC_ERR_UCMCSWP	= BIT(1),
};

#define ENETC_SI_F_QBV BIT(0)
#define ENETC_SI_F_PSFP BIT(1)

/* PCI IEP device data */
struct enetc_si {
	struct pci_dev *pdev;
	struct enetc_hw hw;
	enum enetc_errata errata;

	struct net_device *ndev; /* back ref. */

	struct enetc_cbdr cbd_ring;

	int num_rx_rings; /* how many rings are available in the SI */
	int num_tx_rings;
	int num_fs_entries;
	int num_rss; /* number of RSS buckets */
	unsigned short pad;
	int hw_features;
};

#define ENETC_SI_ALIGN	32

static inline void *enetc_si_priv(const struct enetc_si *si)
{
	return (char *)si + ALIGN(sizeof(struct enetc_si), ENETC_SI_ALIGN);
}
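
/* Layout note (sketch, not the exact probe-path code): the SI struct and the
 * caller's private data are assumed to share a single allocation; the private
 * area starts at the first ENETC_SI_ALIGN-aligned offset past struct enetc_si,
 * which is the address enetc_si_priv() recovers:
 *
 *	size_t sz = ALIGN(sizeof(struct enetc_si), ENETC_SI_ALIGN) + sizeof_priv;
 *	struct enetc_si *si = kzalloc(sz, GFP_KERNEL);
 *	struct enetc_ndev_priv *priv = enetc_si_priv(si);
 */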

static inline bool enetc_si_is_pf(struct enetc_si *si)
{
	return !!(si->hw.port);
}

static inline int enetc_pf_to_port(struct pci_dev *pf_pdev)
{
	switch (pf_pdev->devfn) {
	case 0:
		return 0;
	case 1:
		return 1;
	case 2:
		return 2;
	case 6:
		return 3;
	default:
		return -1;
	}
}

#define ENETC_MAX_NUM_TXQS	8
#define ENETC_INT_NAME_MAX	(IFNAMSIZ + 8)

struct enetc_int_vector {
	void __iomem *rbier;
	void __iomem *tbier_base;
	void __iomem *ricr1;
	unsigned long tx_rings_map;
	int count_tx_rings;
	u32 rx_ictt;
	u16 comp_cnt;
	bool rx_dim_en, rx_napi_work;
	struct napi_struct napi ____cacheline_aligned_in_smp;
	struct dim rx_dim ____cacheline_aligned_in_smp;
	char name[ENETC_INT_NAME_MAX];

	struct enetc_bdr rx_ring;
	struct enetc_bdr tx_ring[];
} ____cacheline_aligned_in_smp;

struct enetc_cls_rule {
	struct ethtool_rx_flow_spec fs;
	int used;
};

#define ENETC_MAX_BDR_INT	2 /* fixed to max # of available cpus */
struct psfp_cap {
	u32 max_streamid;
	u32 max_psfp_filter;
	u32 max_psfp_gate;
	u32 max_psfp_gatelist;
	u32 max_psfp_meter;
};

#define ENETC_F_TX_TSTAMP_MASK	0xff
/* TODO: more hardware offloads */
enum enetc_active_offloads {
	/* 8 bits reserved for TX timestamp types (hwtstamp_tx_types) */
	ENETC_F_TX_TSTAMP		= BIT(0),
	ENETC_F_TX_ONESTEP_SYNC_TSTAMP	= BIT(1),

	ENETC_F_RX_TSTAMP		= BIT(8),
	ENETC_F_QBV			= BIT(9),
	ENETC_F_QCI			= BIT(10),
};

enum enetc_flags_bit {
	ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS = 0,
};

/* interrupt coalescing modes */
enum enetc_ic_mode {
	/* one interrupt per frame */
	ENETC_IC_NONE = 0,
	/* activated when int coalescing time is set to a non-0 value */
	ENETC_IC_RX_MANUAL = BIT(0),
	ENETC_IC_TX_MANUAL = BIT(1),
	/* use dynamic interrupt moderation */
	ENETC_IC_RX_ADAPTIVE = BIT(2),
};

#define ENETC_RXIC_PKTTHR	min_t(u32, 256, ENETC_RX_RING_DEFAULT_SIZE / 2)
#define ENETC_TXIC_PKTTHR	min_t(u32, 128, ENETC_TX_RING_DEFAULT_SIZE / 2)
#define ENETC_TXIC_TIMETHR	enetc_usecs_to_cycles(600)
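
/* With the default ring sizes above these thresholds evaluate to 256 (Rx) and
 * 128 (Tx) packets; the min_t() clamps would only take effect if
 * ENETC_RX_RING_DEFAULT_SIZE or ENETC_TX_RING_DEFAULT_SIZE were ever lowered
 * below 512 and 256 entries respectively.
 */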

struct enetc_ndev_priv {
	struct net_device *ndev;
	struct device *dev; /* dma-mapping device */
	struct enetc_si *si;

	int bdr_int_num; /* number of Rx/Tx ring interrupts */
	struct enetc_int_vector *int_vector[ENETC_MAX_BDR_INT];
	u16 num_rx_rings, num_tx_rings;
	u16 rx_bd_count, tx_bd_count;

	u16 msg_enable;
	enum enetc_active_offloads active_offloads;

	u32 speed; /* cached link speed, used to detect pspeed changes */

	struct enetc_bdr **xdp_tx_ring;
	struct enetc_bdr *tx_ring[16];
	struct enetc_bdr *rx_ring[16];

	struct enetc_cls_rule *cls_rules;

	struct psfp_cap psfp_cap;

	struct phylink *phylink;
	int ic_mode;
	u32 tx_ictt;

	struct bpf_prog *xdp_prog;

	unsigned long flags;

	struct work_struct	tx_onestep_tstamp;
	struct sk_buff_head	tx_skbs;
};

/* Messaging */

/* VF-PF set primary MAC address message format */
struct enetc_msg_cmd_set_primary_mac {
	struct enetc_msg_cmd_header header;
	struct sockaddr mac;
};

#define ENETC_CBD(R, i)	(&(((struct enetc_cbd *)((R).bd_base))[i]))

#define ENETC_CBDR_TIMEOUT	1000 /* usecs */

/* PTP driver exports */
extern int enetc_phc_index;

/* SI common */
int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv);
void enetc_pci_remove(struct pci_dev *pdev);
int enetc_alloc_msix(struct enetc_ndev_priv *priv);
void enetc_free_msix(struct enetc_ndev_priv *priv);
void enetc_get_si_caps(struct enetc_si *si);
void enetc_init_si_rings_params(struct enetc_ndev_priv *priv);
int enetc_alloc_si_resources(struct enetc_ndev_priv *priv);
void enetc_free_si_resources(struct enetc_ndev_priv *priv);
int enetc_configure_si(struct enetc_ndev_priv *priv);

int enetc_open(struct net_device *ndev);
int enetc_close(struct net_device *ndev);
void enetc_start(struct net_device *ndev);
void enetc_stop(struct net_device *ndev);
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
struct net_device_stats *enetc_get_stats(struct net_device *ndev);
int enetc_set_features(struct net_device *ndev,
		       netdev_features_t features);
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
		   void *type_data);
int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp);
int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
		   struct xdp_frame **frames, u32 flags);

/* ethtool */
void enetc_set_ethtool_ops(struct net_device *ndev);

/* control buffer descriptor ring (CBDR) */
int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
		     struct enetc_cbdr *cbdr);
void enetc_teardown_cbdr(struct enetc_cbdr *cbdr);
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
			    char *mac_addr, int si_map);
int enetc_clear_mac_flt_entry(struct enetc_si *si, int index);
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
		       int index);
void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes);
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count);
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count);
int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd);

#ifdef CONFIG_FSL_ENETC_QOS
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed);
int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data);
int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data);
int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv);
int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data);
int enetc_psfp_init(struct enetc_ndev_priv *priv);
int enetc_psfp_clean(struct enetc_ndev_priv *priv);

static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
{
	u32 reg;

	reg = enetc_port_rd(&priv->si->hw, ENETC_PSIDCAPR);
	priv->psfp_cap.max_streamid = reg & ENETC_PSIDCAPR_MSK;
	/* Port stream filter capability */
	reg = enetc_port_rd(&priv->si->hw, ENETC_PSFCAPR);
	priv->psfp_cap.max_psfp_filter = reg & ENETC_PSFCAPR_MSK;
	/* Port stream gate capability */
	reg = enetc_port_rd(&priv->si->hw, ENETC_PSGCAPR);
	priv->psfp_cap.max_psfp_gate = (reg & ENETC_PSGCAPR_SGIT_MSK);
	priv->psfp_cap.max_psfp_gatelist = (reg & ENETC_PSGCAPR_GCL_MSK) >> 16;
	/* Port flow meter capability */
	reg = enetc_port_rd(&priv->si->hw, ENETC_PFMCAPR);
	priv->psfp_cap.max_psfp_meter = reg & ENETC_PFMCAPR_MSK;
}

static inline int enetc_psfp_enable(struct enetc_ndev_priv *priv)
{
	struct enetc_hw *hw = &priv->si->hw;
	int err;

	enetc_get_max_cap(priv);

	err = enetc_psfp_init(priv);
	if (err)
		return err;

	enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR) |
		 ENETC_PPSFPMR_PSFPEN | ENETC_PPSFPMR_VS |
		 ENETC_PPSFPMR_PVC | ENETC_PPSFPMR_PVZC);

	return 0;
}

static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
{
	struct enetc_hw *hw = &priv->si->hw;
	int err;

	err = enetc_psfp_clean(priv);
	if (err)
		return err;

	enetc_wr(hw, ENETC_PPSFPMR, enetc_rd(hw, ENETC_PPSFPMR) &
		 ~ENETC_PPSFPMR_PSFPEN & ~ENETC_PPSFPMR_VS &
		 ~ENETC_PPSFPMR_PVC & ~ENETC_PPSFPMR_PVZC);

	memset(&priv->psfp_cap, 0, sizeof(struct psfp_cap));

	return 0;
}

#else
#define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP
#define enetc_sched_speed_set(priv, speed) (void)0
#define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
#define enetc_setup_tc_txtime(ndev, type_data) -EOPNOTSUPP
#define enetc_setup_tc_psfp(ndev, type_data) -EOPNOTSUPP
#define enetc_setup_tc_block_cb NULL

#define enetc_get_max_cap(p)		\
	memset(&((p)->psfp_cap), 0, sizeof(struct psfp_cap))

static inline int enetc_psfp_enable(struct enetc_ndev_priv *priv)
{
	return 0;
}

static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
{
	return 0;
}
#endif