1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
2 /*
3  * Copyright 2008 - 2015 Freescale Semiconductor Inc.
4  */
5 
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 
8 #include "fman_dtsec.h"
9 #include "fman.h"
10 #include "mac.h"
11 
12 #include <linux/slab.h>
13 #include <linux/bitrev.h>
14 #include <linux/io.h>
15 #include <linux/delay.h>
16 #include <linux/phy.h>
17 #include <linux/crc32.h>
18 #include <linux/of_mdio.h>
19 #include <linux/mii.h>
20 #include <linux/netdevice.h>
21 
22 /* TBI register addresses */
23 #define MII_TBICON		0x11
24 
25 /* TBICON register bit fields */
26 #define TBICON_SOFT_RESET	0x8000	/* Soft reset */
27 #define TBICON_DISABLE_RX_DIS	0x2000	/* Disable receive disparity */
28 #define TBICON_DISABLE_TX_DIS	0x1000	/* Disable transmit disparity */
29 #define TBICON_AN_SENSE		0x0100	/* Auto-negotiation sense enable */
30 #define TBICON_CLK_SELECT	0x0020	/* Clock select */
31 #define TBICON_MI_MODE		0x0010	/* GMII mode (TBI if not set) */
32 
33 /* Interrupt Mask Register (IMASK) */
34 #define DTSEC_IMASK_BREN	0x80000000
35 #define DTSEC_IMASK_RXCEN	0x40000000
36 #define DTSEC_IMASK_MSROEN	0x04000000
37 #define DTSEC_IMASK_GTSCEN	0x02000000
38 #define DTSEC_IMASK_BTEN	0x01000000
39 #define DTSEC_IMASK_TXCEN	0x00800000
40 #define DTSEC_IMASK_TXEEN	0x00400000
41 #define DTSEC_IMASK_LCEN	0x00040000
42 #define DTSEC_IMASK_CRLEN	0x00020000
43 #define DTSEC_IMASK_XFUNEN	0x00010000
44 #define DTSEC_IMASK_ABRTEN	0x00008000
45 #define DTSEC_IMASK_IFERREN	0x00004000
46 #define DTSEC_IMASK_MAGEN	0x00000800
47 #define DTSEC_IMASK_MMRDEN	0x00000400
48 #define DTSEC_IMASK_MMWREN	0x00000200
49 #define DTSEC_IMASK_GRSCEN	0x00000100
50 #define DTSEC_IMASK_TDPEEN	0x00000002
51 #define DTSEC_IMASK_RDPEEN	0x00000001
52 
53 #define DTSEC_EVENTS_MASK		\
54 	 ((u32)(DTSEC_IMASK_BREN    |	\
55 		DTSEC_IMASK_RXCEN   |	\
56 		DTSEC_IMASK_BTEN    |	\
57 		DTSEC_IMASK_TXCEN   |	\
58 		DTSEC_IMASK_TXEEN   |	\
59 		DTSEC_IMASK_ABRTEN  |	\
60 		DTSEC_IMASK_LCEN    |	\
61 		DTSEC_IMASK_CRLEN   |	\
62 		DTSEC_IMASK_XFUNEN  |	\
63 		DTSEC_IMASK_IFERREN |	\
64 		DTSEC_IMASK_MAGEN   |	\
65 		DTSEC_IMASK_TDPEEN  |	\
66 		DTSEC_IMASK_RDPEEN))
67 
68 /* dtsec timestamp event bits */
69 #define TMR_PEMASK_TSREEN	0x00010000
70 #define TMR_PEVENT_TSRE		0x00010000
71 
72 /* Group address bit indication */
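/* In the u64 packing used by ENET_ADDR_TO_UINT64() this is bit 40, i.e. the
 * I/G (multicast) bit carried in the least significant bit of the first octet.
 */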
73 #define MAC_GROUP_ADDRESS	0x0000010000000000ULL
74 
75 /* Defaults */
76 #define DEFAULT_HALFDUP_RETRANSMIT		0xf
77 #define DEFAULT_HALFDUP_COLL_WINDOW		0x37
78 #define DEFAULT_TX_PAUSE_TIME			0xf000
79 #define DEFAULT_RX_PREPEND			0
80 #define DEFAULT_PREAMBLE_LEN			7
81 #define DEFAULT_TX_PAUSE_TIME_EXTD		0
82 #define DEFAULT_NON_BACK_TO_BACK_IPG1		0x40
83 #define DEFAULT_NON_BACK_TO_BACK_IPG2		0x60
84 #define DEFAULT_MIN_IFG_ENFORCEMENT		0x50
85 #define DEFAULT_BACK_TO_BACK_IPG		0x60
86 #define DEFAULT_MAXIMUM_FRAME			0x600
87 
88 /* register related defines (bits, field offsets..) */
89 #define DTSEC_ID2_INT_REDUCED_OFF	0x00010000
90 
91 #define DTSEC_ECNTRL_GMIIM		0x00000040
92 #define DTSEC_ECNTRL_TBIM		0x00000020
93 #define DTSEC_ECNTRL_RPM		0x00000010
94 #define DTSEC_ECNTRL_R100M		0x00000008
95 #define DTSEC_ECNTRL_RMM		0x00000004
96 #define DTSEC_ECNTRL_SGMIIM		0x00000002
97 #define DTSEC_ECNTRL_QSGMIIM		0x00000001
98 
99 #define TCTRL_TTSE			0x00000040
100 #define TCTRL_GTS			0x00000020
101 
102 #define RCTRL_PAL_MASK			0x001f0000
103 #define RCTRL_PAL_SHIFT			16
104 #define RCTRL_GHTX			0x00000400
105 #define RCTRL_RTSE			0x00000040
106 #define RCTRL_GRS			0x00000020
107 #define RCTRL_MPROM			0x00000008
108 #define RCTRL_RSF			0x00000004
109 #define RCTRL_UPROM			0x00000001
110 
111 #define MACCFG1_SOFT_RESET		0x80000000
112 #define MACCFG1_RX_FLOW			0x00000020
113 #define MACCFG1_TX_FLOW			0x00000010
114 #define MACCFG1_TX_EN			0x00000001
115 #define MACCFG1_RX_EN			0x00000004
116 
117 #define MACCFG2_NIBBLE_MODE		0x00000100
118 #define MACCFG2_BYTE_MODE		0x00000200
119 #define MACCFG2_PAD_CRC_EN		0x00000004
120 #define MACCFG2_FULL_DUPLEX		0x00000001
121 #define MACCFG2_PREAMBLE_LENGTH_MASK	0x0000f000
122 #define MACCFG2_PREAMBLE_LENGTH_SHIFT	12
123 
124 #define IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT	24
125 #define IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT	16
126 #define IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT	8
127 
128 #define IPGIFG_NON_BACK_TO_BACK_IPG_1	0x7F000000
129 #define IPGIFG_NON_BACK_TO_BACK_IPG_2	0x007F0000
130 #define IPGIFG_MIN_IFG_ENFORCEMENT	0x0000FF00
131 #define IPGIFG_BACK_TO_BACK_IPG	0x0000007F
132 
133 #define HAFDUP_EXCESS_DEFER			0x00010000
134 #define HAFDUP_COLLISION_WINDOW		0x000003ff
135 #define HAFDUP_RETRANSMISSION_MAX_SHIFT	12
136 #define HAFDUP_RETRANSMISSION_MAX		0x0000f000
137 
138 #define NUM_OF_HASH_REGS	8	/* Number of hash table registers */
139 
140 #define PTV_PTE_MASK		0xffff0000
141 #define PTV_PT_MASK		0x0000ffff
142 #define PTV_PTE_SHIFT		16
143 
144 #define MAX_PACKET_ALIGNMENT		31
145 #define MAX_INTER_PACKET_GAP		0x7f
146 #define MAX_RETRANSMISSION		0x0f
147 #define MAX_COLLISION_WINDOW		0x03ff
148 
149 /* Hash table size (32 bits*8 regs) */
150 #define DTSEC_HASH_TABLE_SIZE		256
151 /* Extended Hash table size (32 bits*16 regs) */
152 #define EXTENDED_HASH_TABLE_SIZE	512
153 
154 /* dTSEC Memory Map registers */
155 struct dtsec_regs {
156 	/* dTSEC General Control and Status Registers */
157 	u32 tsec_id;		/* 0x000 ETSEC_ID register */
158 	u32 tsec_id2;		/* 0x004 ETSEC_ID2 register */
159 	u32 ievent;		/* 0x008 Interrupt event register */
160 	u32 imask;		/* 0x00C Interrupt mask register */
161 	u32 reserved0010[1];
162 	u32 ecntrl;		/* 0x014 E control register */
163 	u32 ptv;		/* 0x018 Pause time value register */
164 	u32 tbipa;		/* 0x01C TBI PHY address register */
165 	u32 tmr_ctrl;		/* 0x020 Time-stamp Control register */
166 	u32 tmr_pevent;		/* 0x024 Time-stamp event register */
167 	u32 tmr_pemask;		/* 0x028 Timer event mask register */
168 	u32 reserved002c[5];
169 	u32 tctrl;		/* 0x040 Transmit control register */
170 	u32 reserved0044[3];
171 	u32 rctrl;		/* 0x050 Receive control register */
172 	u32 reserved0054[11];
173 	u32 igaddr[8];		/* 0x080-0x09C Individual/group address */
174 	u32 gaddr[8];		/* 0x0A0-0x0BC Group address registers 0-7 */
175 	u32 reserved00c0[16];
176 	u32 maccfg1;		/* 0x100 MAC configuration #1 */
177 	u32 maccfg2;		/* 0x104 MAC configuration #2 */
178 	u32 ipgifg;		/* 0x108 IPG/IFG */
179 	u32 hafdup;		/* 0x10C Half-duplex */
180 	u32 maxfrm;		/* 0x110 Maximum frame */
181 	u32 reserved0114[10];
182 	u32 ifstat;		/* 0x13C Interface status */
183 	u32 macstnaddr1;	/* 0x140 Station Address,part 1 */
184 	u32 macstnaddr2;	/* 0x144 Station Address,part 2 */
185 	struct {
186 		u32 exact_match1;	/* octets 1-4 */
187 		u32 exact_match2;	/* octets 5-6 */
188 	} macaddr[15];		/* 0x148-0x1BC mac exact match addresses 1-15 */
189 	u32 reserved01c0[16];
190 	u32 tr64;	/* 0x200 Tx and Rx 64 byte frame counter */
191 	u32 tr127;	/* 0x204 Tx and Rx 65 to 127 byte frame counter */
192 	u32 tr255;	/* 0x208 Tx and Rx 128 to 255 byte frame counter */
193 	u32 tr511;	/* 0x20C Tx and Rx 256 to 511 byte frame counter */
194 	u32 tr1k;	/* 0x210 Tx and Rx 512 to 1023 byte frame counter */
195 	u32 trmax;	/* 0x214 Tx and Rx 1024 to 1518 byte frame counter */
196 	u32 trmgv;
197 	/* 0x218 Tx and Rx 1519 to 1522 byte good VLAN frame count */
198 	u32 rbyt;	/* 0x21C receive byte counter */
199 	u32 rpkt;	/* 0x220 receive packet counter */
200 	u32 rfcs;	/* 0x224 receive FCS error counter */
201 	u32 rmca;	/* 0x228 RMCA Rx multicast packet counter */
202 	u32 rbca;	/* 0x22C Rx broadcast packet counter */
203 	u32 rxcf;	/* 0x230 Rx control frame packet counter */
204 	u32 rxpf;	/* 0x234 Rx pause frame packet counter */
205 	u32 rxuo;	/* 0x238 Rx unknown OP code counter */
206 	u32 raln;	/* 0x23C Rx alignment error counter */
207 	u32 rflr;	/* 0x240 Rx frame length error counter */
208 	u32 rcde;	/* 0x244 Rx code error counter */
209 	u32 rcse;	/* 0x248 Rx carrier sense error counter */
210 	u32 rund;	/* 0x24C Rx undersize packet counter */
211 	u32 rovr;	/* 0x250 Rx oversize packet counter */
212 	u32 rfrg;	/* 0x254 Rx fragments counter */
213 	u32 rjbr;	/* 0x258 Rx jabber counter */
214 	u32 rdrp;	/* 0x25C Rx drop */
215 	u32 tbyt;	/* 0x260 Tx byte counter */
216 	u32 tpkt;	/* 0x264 Tx packet counter */
217 	u32 tmca;	/* 0x268 Tx multicast packet counter */
218 	u32 tbca;	/* 0x26C Tx broadcast packet counter */
219 	u32 txpf;	/* 0x270 Tx pause control frame counter */
220 	u32 tdfr;	/* 0x274 Tx deferral packet counter */
221 	u32 tedf;	/* 0x278 Tx excessive deferral packet counter */
222 	u32 tscl;	/* 0x27C Tx single collision packet counter */
223 	u32 tmcl;	/* 0x280 Tx multiple collision packet counter */
224 	u32 tlcl;	/* 0x284 Tx late collision packet counter */
225 	u32 txcl;	/* 0x288 Tx excessive collision packet counter */
226 	u32 tncl;	/* 0x28C Tx total collision counter */
227 	u32 reserved0290[1];
228 	u32 tdrp;	/* 0x294 Tx drop frame counter */
229 	u32 tjbr;	/* 0x298 Tx jabber frame counter */
230 	u32 tfcs;	/* 0x29C Tx FCS error counter */
231 	u32 txcf;	/* 0x2A0 Tx control frame counter */
232 	u32 tovr;	/* 0x2A4 Tx oversize frame counter */
233 	u32 tund;	/* 0x2A8 Tx undersize frame counter */
234 	u32 tfrg;	/* 0x2AC Tx fragments frame counter */
235 	u32 car1;	/* 0x2B0 carry register one */
236 	u32 car2;	/* 0x2B4 carry register two */
237 	u32 cam1;	/* 0x2B8 carry register one mask register */
238 	u32 cam2;	/* 0x2BC carry register two mask register */
239 	u32 reserved02c0[848];
240 };
241 
242 /* struct dtsec_cfg - dTSEC configuration
243  * Transmit half-duplex flow control, under software control for 10/100-Mbps
244  * half-duplex media. If set, back pressure is applied to media by raising
245  * carrier.
246  * halfdup_retransmit:
247  * Number of retransmission attempts following a collision.
248  * If this is exceeded dTSEC aborts transmission due to excessive collisions.
249  * The standard specifies the attempt limit to be 15.
250  * halfdup_coll_window:
251  * The number of bytes of the frame during which collisions may occur.
252  * The default value of 55 corresponds to the frame byte at the end of the
253  * standard 512-bit slot time window. If collisions are detected after this
254  * byte, the late collision event is asserted and transmission of current
255  * frame is aborted.
256  * tx_pad_crc:
257  * Pad and append CRC. If set, the MAC pads all transmitted short frames and
258  * appends a CRC to every frame regardless of padding requirement.
259  * tx_pause_time:
260  * Transmit pause time value. This pause value is used as part of the pause
261  * frame to be sent when a transmit pause frame is initiated.
262  * If set to 0 this disables transmission of pause frames.
263  * preamble_len:
264  * Length, in bytes, of the preamble field preceding each Ethernet
265  * start-of-frame delimiter byte. The default value of 0x7 should be used in
266  * order to guarantee reliable operation with IEEE 802.3 compliant hardware.
267  * rx_prepend:
268  * Packet alignment padding length. The specified number of bytes (1-31)
269  * of zero padding are inserted before the start of each received frame.
270  * For Ethernet, where optional preamble extraction is enabled, the padding
271  * appears before the preamble, otherwise the padding precedes the
272  * layer 2 header.
273  *
274  * This structure contains basic dTSEC configuration and must be passed to
275  * init() function. A default set of configuration values can be
276  * obtained by calling set_dflts().
277  */
278 struct dtsec_cfg {
279 	u16 halfdup_retransmit;
280 	u16 halfdup_coll_window;
281 	bool tx_pad_crc;
282 	u16 tx_pause_time;
283 	bool ptp_tsu_en;
284 	bool ptp_exception_en;
285 	u32 preamble_len;
286 	u32 rx_prepend;
287 	u16 tx_pause_time_extd;
288 	u16 maximum_frame;
289 	u32 non_back_to_back_ipg1;
290 	u32 non_back_to_back_ipg2;
291 	u32 min_ifg_enforcement;
292 	u32 back_to_back_ipg;
293 };
294 
295 struct fman_mac {
296 	/* pointer to dTSEC memory mapped registers */
297 	struct dtsec_regs __iomem *regs;
298 	/* MAC address of device */
299 	u64 addr;
300 	/* Ethernet physical interface */
301 	phy_interface_t phy_if;
302 	u16 max_speed;
303 	struct mac_device *dev_id; /* device cookie used by the exception cbs */
304 	fman_mac_exception_cb *exception_cb;
305 	fman_mac_exception_cb *event_cb;
306 	/* Number of individual addresses in registers for this station */
307 	u8 num_of_ind_addr_in_regs;
308 	/* pointer to driver's global address hash table */
309 	struct eth_hash_t *multicast_addr_hash;
310 	/* pointer to driver's individual address hash table */
311 	struct eth_hash_t *unicast_addr_hash;
312 	u8 mac_id;
313 	u32 exceptions;
314 	bool ptp_tsu_enabled;
315 	bool en_tsu_err_exception;
316 	struct dtsec_cfg *dtsec_drv_param;
317 	void *fm;
318 	struct fman_rev_info fm_rev_info;
319 	bool basex_if;
320 	struct mdio_device *tbidev;
321 	struct phylink_pcs pcs;
322 };
323 
324 static void set_dflts(struct dtsec_cfg *cfg)
325 {
326 	cfg->halfdup_retransmit = DEFAULT_HALFDUP_RETRANSMIT;
327 	cfg->halfdup_coll_window = DEFAULT_HALFDUP_COLL_WINDOW;
328 	cfg->tx_pad_crc = true;
329 	cfg->tx_pause_time = DEFAULT_TX_PAUSE_TIME;
330 	/* PHY address 0 is reserved (DPAA RM) */
331 	cfg->rx_prepend = DEFAULT_RX_PREPEND;
332 	cfg->ptp_tsu_en = true;
333 	cfg->ptp_exception_en = true;
334 	cfg->preamble_len = DEFAULT_PREAMBLE_LEN;
335 	cfg->tx_pause_time_extd = DEFAULT_TX_PAUSE_TIME_EXTD;
336 	cfg->non_back_to_back_ipg1 = DEFAULT_NON_BACK_TO_BACK_IPG1;
337 	cfg->non_back_to_back_ipg2 = DEFAULT_NON_BACK_TO_BACK_IPG2;
338 	cfg->min_ifg_enforcement = DEFAULT_MIN_IFG_ENFORCEMENT;
339 	cfg->back_to_back_ipg = DEFAULT_BACK_TO_BACK_IPG;
340 	cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
341 }
342 
343 static void set_mac_address(struct dtsec_regs __iomem *regs, const u8 *adr)
344 {
345 	u32 tmp;
346 
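	/* The station address registers hold the address in reversed byte
	 * order: MACSTNADDR1 carries octets 6..3 and MACSTNADDR2 carries
	 * octets 2..1 in its two most significant bytes.
	 */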
347 	tmp = (u32)((adr[5] << 24) |
348 		    (adr[4] << 16) | (adr[3] << 8) | adr[2]);
349 	iowrite32be(tmp, &regs->macstnaddr1);
350 
351 	tmp = (u32)((adr[1] << 24) | (adr[0] << 16));
352 	iowrite32be(tmp, &regs->macstnaddr2);
353 }
354 
355 static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
356 		phy_interface_t iface, u16 iface_speed, u64 addr,
357 		u32 exception_mask, u8 tbi_addr)
358 {
359 	enet_addr_t eth_addr;
360 	u32 tmp = 0;
361 	int i;
362 
363 	/* Soft reset */
364 	iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
365 	iowrite32be(0, &regs->maccfg1);
366 
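	/* PTV: pause time value (PT) in the low 16 bits, extended pause time
	 * (PTE) in the high 16 bits
	 */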
367 	if (cfg->tx_pause_time)
368 		tmp |= cfg->tx_pause_time;
369 	if (cfg->tx_pause_time_extd)
370 		tmp |= cfg->tx_pause_time_extd << PTV_PTE_SHIFT;
371 	iowrite32be(tmp, &regs->ptv);
372 
373 	tmp = 0;
374 	tmp |= (cfg->rx_prepend << RCTRL_PAL_SHIFT) & RCTRL_PAL_MASK;
375 	/* Accept short frames */
376 	tmp |= RCTRL_RSF;
377 
378 	iowrite32be(tmp, &regs->rctrl);
379 
380 	/* Assign a Phy Address to the TBI (TBIPA).
381 	 * Done also in cases where TBI is not selected to avoid conflict with
382 	 * the external PHY's Physical address
383 	 */
384 	iowrite32be(tbi_addr, &regs->tbipa);
385 
386 	iowrite32be(0, &regs->tmr_ctrl);
387 
388 	if (cfg->ptp_tsu_en) {
389 		tmp = 0;
390 		tmp |= TMR_PEVENT_TSRE;
391 		iowrite32be(tmp, &regs->tmr_pevent);
392 
393 		if (cfg->ptp_exception_en) {
394 			tmp = 0;
395 			tmp |= TMR_PEMASK_TSREEN;
396 			iowrite32be(tmp, &regs->tmr_pemask);
397 		}
398 	}
399 
400 	tmp = 0;
401 	tmp |= MACCFG1_RX_FLOW;
402 	tmp |= MACCFG1_TX_FLOW;
403 	iowrite32be(tmp, &regs->maccfg1);
404 
405 	tmp = 0;
406 
407 	tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
408 		MACCFG2_PREAMBLE_LENGTH_MASK;
409 	if (cfg->tx_pad_crc)
410 		tmp |= MACCFG2_PAD_CRC_EN;
411 	iowrite32be(tmp, &regs->maccfg2);
412 
413 	tmp = (((cfg->non_back_to_back_ipg1 <<
414 		 IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT)
415 		& IPGIFG_NON_BACK_TO_BACK_IPG_1)
416 	       | ((cfg->non_back_to_back_ipg2 <<
417 		   IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT)
418 		 & IPGIFG_NON_BACK_TO_BACK_IPG_2)
419 	       | ((cfg->min_ifg_enforcement << IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT)
420 		 & IPGIFG_MIN_IFG_ENFORCEMENT)
421 	       | (cfg->back_to_back_ipg & IPGIFG_BACK_TO_BACK_IPG));
422 	iowrite32be(tmp, &regs->ipgifg);
423 
424 	tmp = 0;
425 	tmp |= HAFDUP_EXCESS_DEFER;
426 	tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT)
427 		& HAFDUP_RETRANSMISSION_MAX);
428 	tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW);
429 
430 	iowrite32be(tmp, &regs->hafdup);
431 
432 	/* Initialize Maximum frame length */
433 	iowrite32be(cfg->maximum_frame, &regs->maxfrm);
434 
435 	iowrite32be(0xffffffff, &regs->cam1);
436 	iowrite32be(0xffffffff, &regs->cam2);
437 
438 	iowrite32be(exception_mask, &regs->imask);
439 
440 	iowrite32be(0xffffffff, &regs->ievent);
441 
442 	if (addr) {
443 		MAKE_ENET_ADDR_FROM_UINT64(addr, eth_addr);
444 		set_mac_address(regs, (const u8 *)eth_addr);
445 	}
446 
447 	/* HASH */
448 	for (i = 0; i < NUM_OF_HASH_REGS; i++) {
449 		/* Initialize IADDRx */
450 		iowrite32be(0, &regs->igaddr[i]);
451 		/* Initialize GADDRx */
452 		iowrite32be(0, &regs->gaddr[i]);
453 	}
454 
455 	return 0;
456 }
457 
458 static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,
459 		       bool enable)
460 {
461 	int reg_idx = (bucket >> 5) & 0xf;
462 	int bit_idx = bucket & 0x1f;
463 	u32 bit_mask = 0x80000000 >> bit_idx;
464 	u32 __iomem *reg;
465 
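	/* Buckets 0-255 map to the IGADDR registers, buckets 256-511 to the
	 * GADDR registers. Example: bucket 0x123 -> gaddr[1], bit index 3
	 * (mask 0x10000000).
	 */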
466 	if (reg_idx > 7)
467 		reg = &regs->gaddr[reg_idx - 8];
468 	else
469 		reg = &regs->igaddr[reg_idx];
470 
471 	if (enable)
472 		iowrite32be(ioread32be(reg) | bit_mask, reg);
473 	else
474 		iowrite32be(ioread32be(reg) & (~bit_mask), reg);
475 }
476 
477 static int check_init_parameters(struct fman_mac *dtsec)
478 {
479 	if ((dtsec->dtsec_drv_param)->rx_prepend >
480 	    MAX_PACKET_ALIGNMENT) {
481 		pr_err("packetAlignmentPadding can't be greater than %d\n",
482 		       MAX_PACKET_ALIGNMENT);
483 		return -EINVAL;
484 	}
485 	if (((dtsec->dtsec_drv_param)->non_back_to_back_ipg1 >
486 	     MAX_INTER_PACKET_GAP) ||
487 	    ((dtsec->dtsec_drv_param)->non_back_to_back_ipg2 >
488 	     MAX_INTER_PACKET_GAP) ||
489 	     ((dtsec->dtsec_drv_param)->back_to_back_ipg >
490 	      MAX_INTER_PACKET_GAP)) {
491 		pr_err("Inter packet gap can't be greater than %d\n",
492 		       MAX_INTER_PACKET_GAP);
493 		return -EINVAL;
494 	}
495 	if ((dtsec->dtsec_drv_param)->halfdup_retransmit >
496 	    MAX_RETRANSMISSION) {
497 		pr_err("maxRetransmission can't be greater than %d\n",
498 		       MAX_RETRANSMISSION);
499 		return -EINVAL;
500 	}
501 	if ((dtsec->dtsec_drv_param)->halfdup_coll_window >
502 	    MAX_COLLISION_WINDOW) {
503 		pr_err("collisionWindow can't be greater than %d\n",
504 		       MAX_COLLISION_WINDOW);
505 		return -EINVAL;
506 	}
507 	/* If the auto-negotiation process is disabled, the PHY needs to be
508 	 * set up using the MII Management Interface.
509 	 */
510 	if (!dtsec->exception_cb) {
511 		pr_err("uninitialized exception_cb\n");
512 		return -EINVAL;
513 	}
514 	if (!dtsec->event_cb) {
515 		pr_err("uninitialized event_cb\n");
516 		return -EINVAL;
517 	}
518 
519 	return 0;
520 }
521 
522 static int get_exception_flag(enum fman_mac_exceptions exception)
523 {
524 	u32 bit_mask;
525 
526 	switch (exception) {
527 	case FM_MAC_EX_1G_BAB_RX:
528 		bit_mask = DTSEC_IMASK_BREN;
529 		break;
530 	case FM_MAC_EX_1G_RX_CTL:
531 		bit_mask = DTSEC_IMASK_RXCEN;
532 		break;
533 	case FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET:
534 		bit_mask = DTSEC_IMASK_GTSCEN;
535 		break;
536 	case FM_MAC_EX_1G_BAB_TX:
537 		bit_mask = DTSEC_IMASK_BTEN;
538 		break;
539 	case FM_MAC_EX_1G_TX_CTL:
540 		bit_mask = DTSEC_IMASK_TXCEN;
541 		break;
542 	case FM_MAC_EX_1G_TX_ERR:
543 		bit_mask = DTSEC_IMASK_TXEEN;
544 		break;
545 	case FM_MAC_EX_1G_LATE_COL:
546 		bit_mask = DTSEC_IMASK_LCEN;
547 		break;
548 	case FM_MAC_EX_1G_COL_RET_LMT:
549 		bit_mask = DTSEC_IMASK_CRLEN;
550 		break;
551 	case FM_MAC_EX_1G_TX_FIFO_UNDRN:
552 		bit_mask = DTSEC_IMASK_XFUNEN;
553 		break;
554 	case FM_MAC_EX_1G_MAG_PCKT:
555 		bit_mask = DTSEC_IMASK_MAGEN;
556 		break;
557 	case FM_MAC_EX_1G_MII_MNG_RD_COMPLET:
558 		bit_mask = DTSEC_IMASK_MMRDEN;
559 		break;
560 	case FM_MAC_EX_1G_MII_MNG_WR_COMPLET:
561 		bit_mask = DTSEC_IMASK_MMWREN;
562 		break;
563 	case FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET:
564 		bit_mask = DTSEC_IMASK_GRSCEN;
565 		break;
566 	case FM_MAC_EX_1G_DATA_ERR:
567 		bit_mask = DTSEC_IMASK_TDPEEN;
568 		break;
569 	case FM_MAC_EX_1G_RX_MIB_CNT_OVFL:
570 		bit_mask = DTSEC_IMASK_MSROEN;
571 		break;
572 	default:
573 		bit_mask = 0;
574 		break;
575 	}
576 
577 	return bit_mask;
578 }
579 
580 static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec)
581 {
582 	struct dtsec_regs __iomem *regs = dtsec->regs;
583 
584 	return (u16)ioread32be(&regs->maxfrm);
585 }
586 
587 static void dtsec_isr(void *handle)
588 {
589 	struct fman_mac *dtsec = (struct fman_mac *)handle;
590 	struct dtsec_regs __iomem *regs = dtsec->regs;
591 	u32 event;
592 
593 	/* do not handle MDIO events */
594 	event = ioread32be(&regs->ievent) &
595 		(u32)(~(DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN));
596 
597 	event &= ioread32be(&regs->imask);
598 
599 	iowrite32be(event, &regs->ievent);
600 
601 	if (event & DTSEC_IMASK_BREN)
602 		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_RX);
603 	if (event & DTSEC_IMASK_RXCEN)
604 		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_RX_CTL);
605 	if (event & DTSEC_IMASK_GTSCEN)
606 		dtsec->exception_cb(dtsec->dev_id,
607 				    FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET);
608 	if (event & DTSEC_IMASK_BTEN)
609 		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_TX);
610 	if (event & DTSEC_IMASK_TXCEN)
611 		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_CTL);
612 	if (event & DTSEC_IMASK_TXEEN)
613 		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_ERR);
614 	if (event & DTSEC_IMASK_LCEN)
615 		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_LATE_COL);
616 	if (event & DTSEC_IMASK_CRLEN)
617 		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT);
618 	if (event & DTSEC_IMASK_XFUNEN) {
619 		/* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */
620 		/* FIXME: This races with the rest of the driver! */
621 		if (dtsec->fm_rev_info.major == 2) {
622 			u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i;
623 			/* a. Write 0x00E0_0C00 to DTSEC_ID
624 			 *	This is a read only register
625 			 * b. Read and save the value of TPKT
626 			 */
627 			tpkt1 = ioread32be(&regs->tpkt);
628 
629 			/* c. Read the register at dTSEC address offset 0x32C */
630 			tmp_reg1 = ioread32be(&regs->reserved02c0[27]);
631 
632 			/* d. Compare bits [9:15] to bits [25:31] of the
633 			 * register at address offset 0x32C.
634 			 */
635 			if ((tmp_reg1 & 0x007F0000) !=
636 				(tmp_reg1 & 0x0000007F)) {
637 				/* If they are not equal, save the value of
638 				 * this register and wait for at least
639 				 * MAXFRM*16 ns
640 				 */
641 				usleep_range((u32)(min
642 					(dtsec_get_max_frame_length(dtsec) *
643 					16 / 1000, 1)), (u32)
644 					(min(dtsec_get_max_frame_length
645 					(dtsec) * 16 / 1000, 1) + 1));
646 			}
647 
648 			/* e. Read and save TPKT again and read the register
649 			 * at dTSEC address offset 0x32C again
650 			 */
651 			tpkt2 = ioread32be(&regs->tpkt);
652 			tmp_reg2 = ioread32be(&regs->reserved02c0[27]);
653 
654 			/* f. Compare the value of TPKT saved in step b to
655 			 * value read in step e. Also compare bits [9:15] of
656 			 * the register at offset 0x32C saved in step d to the
657 			 * value of bits [9:15] saved in step e. If the two
658 			 * registers values are unchanged, then the transmit
659 			 * portion of the dTSEC controller is locked up and
660 			 * the user should proceed to the recover sequence.
661 			 */
662 			if ((tpkt1 == tpkt2) && ((tmp_reg1 & 0x007F0000) ==
663 				(tmp_reg2 & 0x007F0000))) {
664 				/* recover sequence */
665 
666 				/* a.Write a 1 to RCTRL[GRS] */
667 
668 				iowrite32be(ioread32be(&regs->rctrl) |
669 					    RCTRL_GRS, &regs->rctrl);
670 
671 				/* b.Wait until IEVENT[GRSC]=1, or at least
672 				 * 100 us has elapsed.
673 				 */
674 				for (i = 0; i < 100; i++) {
675 					if (ioread32be(&regs->ievent) &
676 					    DTSEC_IMASK_GRSCEN)
677 						break;
678 					udelay(1);
679 				}
680 				if (ioread32be(&regs->ievent) &
681 				    DTSEC_IMASK_GRSCEN)
682 					iowrite32be(DTSEC_IMASK_GRSCEN,
683 						    &regs->ievent);
684 				else
685 					pr_debug("Rx lockup due to Tx lockup\n");
686 
687 				/* c.Write a 1 to bit n of FM_RSTC
688 				 * (offset 0x0CC of FPM)
689 				 */
690 				fman_reset_mac(dtsec->fm, dtsec->mac_id);
691 
692 				/* d.Wait 4 Tx clocks (32 ns) */
693 				udelay(1);
694 
695 				/* e.Write a 0 to bit n of FM_RSTC. */
696 				/* cleared by FMAN
697 				 */
698 			}
699 		}
700 
701 		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_FIFO_UNDRN);
702 	}
703 	if (event & DTSEC_IMASK_MAGEN)
704 		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_MAG_PCKT);
705 	if (event & DTSEC_IMASK_GRSCEN)
706 		dtsec->exception_cb(dtsec->dev_id,
707 				    FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET);
708 	if (event & DTSEC_IMASK_TDPEEN)
709 		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_DATA_ERR);
710 	if (event & DTSEC_IMASK_RDPEEN)
711 		dtsec->exception_cb(dtsec->dev_id, FM_MAC_1G_RX_DATA_ERR);
712 
713 	/* masked interrupts */
714 	WARN_ON(event & DTSEC_IMASK_ABRTEN);
715 	WARN_ON(event & DTSEC_IMASK_IFERREN);
716 }
717 
718 static void dtsec_1588_isr(void *handle)
719 {
720 	struct fman_mac *dtsec = (struct fman_mac *)handle;
721 	struct dtsec_regs __iomem *regs = dtsec->regs;
722 	u32 event;
723 
724 	if (dtsec->ptp_tsu_enabled) {
725 		event = ioread32be(&regs->tmr_pevent);
726 		event &= ioread32be(&regs->tmr_pemask);
727 
728 		if (event) {
729 			iowrite32be(event, &regs->tmr_pevent);
730 			WARN_ON(event & TMR_PEVENT_TSRE);
731 			dtsec->exception_cb(dtsec->dev_id,
732 					    FM_MAC_EX_1G_1588_TS_RX_ERR);
733 		}
734 	}
735 }
736 
737 static void free_init_resources(struct fman_mac *dtsec)
738 {
739 	fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
740 			     FMAN_INTR_TYPE_ERR);
741 	fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
742 			     FMAN_INTR_TYPE_NORMAL);
743 
744 	/* release the driver's group hash table */
745 	free_hash_table(dtsec->multicast_addr_hash);
746 	dtsec->multicast_addr_hash = NULL;
747 
748 	/* release the driver's individual hash table */
749 	free_hash_table(dtsec->unicast_addr_hash);
750 	dtsec->unicast_addr_hash = NULL;
751 }
752 
753 static struct fman_mac *pcs_to_dtsec(struct phylink_pcs *pcs)
754 {
755 	return container_of(pcs, struct fman_mac, pcs);
756 }
757 
758 static void dtsec_pcs_get_state(struct phylink_pcs *pcs,
759 				struct phylink_link_state *state)
760 {
761 	struct fman_mac *dtsec = pcs_to_dtsec(pcs);
762 
763 	phylink_mii_c22_pcs_get_state(dtsec->tbidev, state);
764 }
765 
766 static int dtsec_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
767 			    phy_interface_t interface,
768 			    const unsigned long *advertising,
769 			    bool permit_pause_to_mac)
770 {
771 	struct fman_mac *dtsec = pcs_to_dtsec(pcs);
772 
773 	return phylink_mii_c22_pcs_config(dtsec->tbidev, interface,
774 					  advertising, neg_mode);
775 }
776 
777 static void dtsec_pcs_an_restart(struct phylink_pcs *pcs)
778 {
779 	struct fman_mac *dtsec = pcs_to_dtsec(pcs);
780 
781 	phylink_mii_c22_pcs_an_restart(dtsec->tbidev);
782 }
783 
784 static const struct phylink_pcs_ops dtsec_pcs_ops = {
785 	.pcs_get_state = dtsec_pcs_get_state,
786 	.pcs_config = dtsec_pcs_config,
787 	.pcs_an_restart = dtsec_pcs_an_restart,
788 };
789 
790 static void graceful_start(struct fman_mac *dtsec)
791 {
792 	struct dtsec_regs __iomem *regs = dtsec->regs;
793 
794 	iowrite32be(ioread32be(&regs->tctrl) & ~TCTRL_GTS, &regs->tctrl);
795 	iowrite32be(ioread32be(&regs->rctrl) & ~RCTRL_GRS, &regs->rctrl);
796 }
797 
798 static void graceful_stop(struct fman_mac *dtsec)
799 {
800 	struct dtsec_regs __iomem *regs = dtsec->regs;
801 	u32 tmp;
802 
803 	/* Graceful stop - Assert the graceful Rx stop bit */
804 	tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
805 	iowrite32be(tmp, &regs->rctrl);
806 
807 	if (dtsec->fm_rev_info.major == 2) {
808 		/* Workaround for dTSEC Errata A002 */
809 		usleep_range(100, 200);
810 	} else {
811 		/* Workaround for dTSEC Errata A004839 */
812 		usleep_range(10, 50);
813 	}
814 
815 	/* Graceful stop - Assert the graceful Tx stop bit */
816 	if (dtsec->fm_rev_info.major == 2) {
817 		/* dTSEC Errata A004: Do not use TCTRL[GTS]=1 */
818 		pr_debug("GTS not supported due to DTSEC_A004 Errata.\n");
819 	} else {
820 		tmp = ioread32be(&regs->tctrl) | TCTRL_GTS;
821 		iowrite32be(tmp, &regs->tctrl);
822 
823 		/* Workaround for dTSEC Errata A0012, A0014 */
824 		usleep_range(10, 50);
825 	}
826 }
827 
828 static int dtsec_enable(struct fman_mac *dtsec)
829 {
830 	return 0;
831 }
832 
833 static void dtsec_disable(struct fman_mac *dtsec)
834 {
835 }
836 
837 static int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
838 				     u8 __maybe_unused priority,
839 				     u16 pause_time,
840 				     u16 __maybe_unused thresh_time)
841 {
842 	struct dtsec_regs __iomem *regs = dtsec->regs;
843 	u32 ptv = 0;
844 
845 	if (pause_time) {
846 		/* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
847 		if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) {
848 			pr_warn("pause-time: %d illegal. Should be > 320\n",
849 				pause_time);
850 			return -EINVAL;
851 		}
852 
853 		ptv = ioread32be(&regs->ptv);
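		/* Preserve the extended pause time (PTE) field and update
		 * only the pause time (PT) field
		 */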
854 		ptv &= PTV_PTE_MASK;
855 		ptv |= pause_time & PTV_PT_MASK;
856 		iowrite32be(ptv, &regs->ptv);
857 
858 		/* trigger the transmission of a flow-control pause frame */
859 		iowrite32be(ioread32be(&regs->maccfg1) | MACCFG1_TX_FLOW,
860 			    &regs->maccfg1);
861 	} else
862 		iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
863 			    &regs->maccfg1);
864 
865 	return 0;
866 }
867 
868 static int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
869 {
870 	struct dtsec_regs __iomem *regs = dtsec->regs;
871 	u32 tmp;
872 
873 	tmp = ioread32be(&regs->maccfg1);
874 	if (en)
875 		tmp |= MACCFG1_RX_FLOW;
876 	else
877 		tmp &= ~MACCFG1_RX_FLOW;
878 	iowrite32be(tmp, &regs->maccfg1);
879 
880 	return 0;
881 }
882 
883 static struct phylink_pcs *dtsec_select_pcs(struct phylink_config *config,
884 					    phy_interface_t iface)
885 {
886 	struct fman_mac *dtsec = fman_config_to_mac(config)->fman_mac;
887 
888 	switch (iface) {
889 	case PHY_INTERFACE_MODE_SGMII:
890 	case PHY_INTERFACE_MODE_1000BASEX:
891 	case PHY_INTERFACE_MODE_2500BASEX:
892 		return &dtsec->pcs;
893 	default:
894 		return NULL;
895 	}
896 }
897 
898 static void dtsec_mac_config(struct phylink_config *config, unsigned int mode,
899 			     const struct phylink_link_state *state)
900 {
901 	struct mac_device *mac_dev = fman_config_to_mac(config);
902 	struct dtsec_regs __iomem *regs = mac_dev->fman_mac->regs;
903 	u32 tmp;
904 
905 	switch (state->interface) {
906 	case PHY_INTERFACE_MODE_RMII:
907 		tmp = DTSEC_ECNTRL_RMM;
908 		break;
909 	case PHY_INTERFACE_MODE_RGMII:
910 	case PHY_INTERFACE_MODE_RGMII_ID:
911 	case PHY_INTERFACE_MODE_RGMII_RXID:
912 	case PHY_INTERFACE_MODE_RGMII_TXID:
913 		tmp = DTSEC_ECNTRL_GMIIM | DTSEC_ECNTRL_RPM;
914 		break;
915 	case PHY_INTERFACE_MODE_SGMII:
916 	case PHY_INTERFACE_MODE_1000BASEX:
917 	case PHY_INTERFACE_MODE_2500BASEX:
918 		tmp = DTSEC_ECNTRL_TBIM | DTSEC_ECNTRL_SGMIIM;
919 		break;
920 	default:
921 		dev_warn(mac_dev->dev, "cannot configure dTSEC for %s\n",
922 			 phy_modes(state->interface));
923 		return;
924 	}
925 
926 	iowrite32be(tmp, &regs->ecntrl);
927 }
928 
929 static void dtsec_link_up(struct phylink_config *config, struct phy_device *phy,
930 			  unsigned int mode, phy_interface_t interface,
931 			  int speed, int duplex, bool tx_pause, bool rx_pause)
932 {
933 	struct mac_device *mac_dev = fman_config_to_mac(config);
934 	struct fman_mac *dtsec = mac_dev->fman_mac;
935 	struct dtsec_regs __iomem *regs = dtsec->regs;
936 	u16 pause_time = tx_pause ? FSL_FM_PAUSE_TIME_ENABLE :
937 			 FSL_FM_PAUSE_TIME_DISABLE;
938 	u32 tmp;
939 
940 	dtsec_set_tx_pause_frames(dtsec, 0, pause_time, 0);
941 	dtsec_accept_rx_pause_frames(dtsec, rx_pause);
942 
943 	tmp = ioread32be(&regs->ecntrl);
944 	if (speed == SPEED_100)
945 		tmp |= DTSEC_ECNTRL_R100M;
946 	else
947 		tmp &= ~DTSEC_ECNTRL_R100M;
948 	iowrite32be(tmp, &regs->ecntrl);
949 
950 	tmp = ioread32be(&regs->maccfg2);
951 	tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE | MACCFG2_FULL_DUPLEX);
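	/* Byte (GMII/TBI) mode for gigabit speeds, nibble (MII) mode for
	 * 10/100
	 */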
952 	if (speed >= SPEED_1000)
953 		tmp |= MACCFG2_BYTE_MODE;
954 	else
955 		tmp |= MACCFG2_NIBBLE_MODE;
956 
957 	if (duplex == DUPLEX_FULL)
958 		tmp |= MACCFG2_FULL_DUPLEX;
959 
960 	iowrite32be(tmp, &regs->maccfg2);
961 
962 	mac_dev->update_speed(mac_dev, speed);
963 
964 	/* Enable */
965 	tmp = ioread32be(&regs->maccfg1);
966 	tmp |= MACCFG1_RX_EN | MACCFG1_TX_EN;
967 	iowrite32be(tmp, &regs->maccfg1);
968 
969 	/* Graceful start - clear the graceful Rx/Tx stop bit */
970 	graceful_start(dtsec);
971 }
972 
973 static void dtsec_link_down(struct phylink_config *config, unsigned int mode,
974 			    phy_interface_t interface)
975 {
976 	struct fman_mac *dtsec = fman_config_to_mac(config)->fman_mac;
977 	struct dtsec_regs __iomem *regs = dtsec->regs;
978 	u32 tmp;
979 
980 	/* Graceful stop - Assert the graceful Rx/Tx stop bit */
981 	graceful_stop(dtsec);
982 
983 	tmp = ioread32be(&regs->maccfg1);
984 	tmp &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
985 	iowrite32be(tmp, &regs->maccfg1);
986 }
987 
988 static const struct phylink_mac_ops dtsec_mac_ops = {
989 	.mac_select_pcs = dtsec_select_pcs,
990 	.mac_config = dtsec_mac_config,
991 	.mac_link_up = dtsec_link_up,
992 	.mac_link_down = dtsec_link_down,
993 };
994 
995 static int dtsec_modify_mac_address(struct fman_mac *dtsec,
996 				    const enet_addr_t *enet_addr)
997 {
998 	graceful_stop(dtsec);
999 
1000 	/* Initialize MAC Station Address registers (1 & 2)
1001 	 * The station address has to be byte-swapped (big endian to little endian).
1002 	 */
1003 	dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
1004 	set_mac_address(dtsec->regs, (const u8 *)(*enet_addr));
1005 
1006 	graceful_start(dtsec);
1007 
1008 	return 0;
1009 }
1010 
1011 static int dtsec_add_hash_mac_address(struct fman_mac *dtsec,
1012 				      enet_addr_t *eth_addr)
1013 {
1014 	struct dtsec_regs __iomem *regs = dtsec->regs;
1015 	struct eth_hash_entry *hash_entry;
1016 	u64 addr;
1017 	s32 bucket;
1018 	u32 crc = 0xFFFFFFFF;
1019 	bool mcast, ghtx;
1020 
1021 	addr = ENET_ADDR_TO_UINT64(*eth_addr);
1022 
1023 	ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
1024 	mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);
1025 
1026 	/* Cannot handle unicast mac addr when GHTX is on */
1027 	if (ghtx && !mcast) {
1028 		pr_err("Could not compute hash bucket\n");
1029 		return -EINVAL;
1030 	}
1031 	crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
1032 	crc = bitrev32(crc);
1033 
1034 	/* Considering the 9 highest-order bits of the CRC, H[8:0]:
1035 	 * if ghtx = 0, H[8:6] (highest-order 3 bits) identify the hash register
1036 	 * and H[5:1] (next 5 bits) identify the hash bit;
1037 	 * if ghtx = 1, H[8:5] (highest-order 4 bits) identify the hash register
1038 	 * and H[4:0] (next 5 bits) identify the hash bit.
1039 	 *
1040 	 * In the resulting bucket index, the low 5 bits identify the hash
1041 	 * register bit, while the higher 4 bits identify the hash register.
1042 	 */
1043 
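	/* With ghtx = 0, multicast entries are offset by 0x100 so that
	 * set_bucket() programs the GADDR registers rather than IGADDR;
	 * e.g. a reversed CRC with top byte 0x80 gives bucket 0x180, which
	 * maps to gaddr[4], bit index 0 (mask 0x80000000).
	 */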
1044 	if (ghtx) {
1045 		bucket = (s32)((crc >> 23) & 0x1ff);
1046 	} else {
1047 		bucket = (s32)((crc >> 24) & 0xff);
1048 		/* if !ghtx and mcast the bit must be set in gaddr instead of
1049 		 *igaddr.
1050 		 */
1051 		if (mcast)
1052 			bucket += 0x100;
1053 	}
1054 
1055 	set_bucket(dtsec->regs, bucket, true);
1056 
1057 	/* Create element to be added to the driver hash table */
1058 	hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
1059 	if (!hash_entry)
1060 		return -ENOMEM;
1061 	hash_entry->addr = addr;
1062 	INIT_LIST_HEAD(&hash_entry->node);
1063 
1064 	if (addr & MAC_GROUP_ADDRESS)
1065 		/* Group Address */
1066 		list_add_tail(&hash_entry->node,
1067 			      &dtsec->multicast_addr_hash->lsts[bucket]);
1068 	else
1069 		list_add_tail(&hash_entry->node,
1070 			      &dtsec->unicast_addr_hash->lsts[bucket]);
1071 
1072 	return 0;
1073 }
1074 
1075 static int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
1076 {
1077 	u32 tmp;
1078 	struct dtsec_regs __iomem *regs = dtsec->regs;
1079 
1080 	tmp = ioread32be(&regs->rctrl);
1081 	if (enable)
1082 		tmp |= RCTRL_MPROM;
1083 	else
1084 		tmp &= ~RCTRL_MPROM;
1085 
1086 	iowrite32be(tmp, &regs->rctrl);
1087 
1088 	return 0;
1089 }
1090 
1091 static int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
1092 {
1093 	struct dtsec_regs __iomem *regs = dtsec->regs;
1094 	u32 rctrl, tctrl;
1095 
1096 	rctrl = ioread32be(&regs->rctrl);
1097 	tctrl = ioread32be(&regs->tctrl);
1098 
1099 	if (enable) {
1100 		rctrl |= RCTRL_RTSE;
1101 		tctrl |= TCTRL_TTSE;
1102 	} else {
1103 		rctrl &= ~RCTRL_RTSE;
1104 		tctrl &= ~TCTRL_TTSE;
1105 	}
1106 
1107 	iowrite32be(rctrl, &regs->rctrl);
1108 	iowrite32be(tctrl, &regs->tctrl);
1109 
1110 	return 0;
1111 }
1112 
1113 static int dtsec_del_hash_mac_address(struct fman_mac *dtsec,
1114 				      enet_addr_t *eth_addr)
1115 {
1116 	struct dtsec_regs __iomem *regs = dtsec->regs;
1117 	struct list_head *pos;
1118 	struct eth_hash_entry *hash_entry = NULL;
1119 	u64 addr;
1120 	s32 bucket;
1121 	u32 crc = 0xFFFFFFFF;
1122 	bool mcast, ghtx;
1123 
1124 	addr = ENET_ADDR_TO_UINT64(*eth_addr);
1125 
1126 	ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
1127 	mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);
1128 
1129 	/* Cannot handle unicast mac addr when GHTX is on */
1130 	if (ghtx && !mcast) {
1131 		pr_err("Could not compute hash bucket\n");
1132 		return -EINVAL;
1133 	}
1134 	crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
1135 	crc = bitrev32(crc);
1136 
1137 	if (ghtx) {
1138 		bucket = (s32)((crc >> 23) & 0x1ff);
1139 	} else {
1140 		bucket = (s32)((crc >> 24) & 0xff);
1141 		/* if !ghtx and mcast the bit must be set
1142 		 * in gaddr instead of igaddr.
1143 		 */
1144 		if (mcast)
1145 			bucket += 0x100;
1146 	}
1147 
1148 	if (addr & MAC_GROUP_ADDRESS) {
1149 		/* Group Address */
1150 		list_for_each(pos,
1151 			      &dtsec->multicast_addr_hash->lsts[bucket]) {
1152 			hash_entry = ETH_HASH_ENTRY_OBJ(pos);
1153 			if (hash_entry && hash_entry->addr == addr) {
1154 				list_del_init(&hash_entry->node);
1155 				kfree(hash_entry);
1156 				break;
1157 			}
1158 		}
1159 		if (list_empty(&dtsec->multicast_addr_hash->lsts[bucket]))
1160 			set_bucket(dtsec->regs, bucket, false);
1161 	} else {
1162 		/* Individual Address */
1163 		list_for_each(pos,
1164 			      &dtsec->unicast_addr_hash->lsts[bucket]) {
1165 			hash_entry = ETH_HASH_ENTRY_OBJ(pos);
1166 			if (hash_entry && hash_entry->addr == addr) {
1167 				list_del_init(&hash_entry->node);
1168 				kfree(hash_entry);
1169 				break;
1170 			}
1171 		}
1172 		if (list_empty(&dtsec->unicast_addr_hash->lsts[bucket]))
1173 			set_bucket(dtsec->regs, bucket, false);
1174 	}
1175 
1176 	/* address does not exist */
1177 	WARN_ON(!hash_entry);
1178 
1179 	return 0;
1180 }
1181 
1182 static int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
1183 {
1184 	struct dtsec_regs __iomem *regs = dtsec->regs;
1185 	u32 tmp;
1186 
1187 	/* Set unicast promiscuous */
1188 	tmp = ioread32be(&regs->rctrl);
1189 	if (new_val)
1190 		tmp |= RCTRL_UPROM;
1191 	else
1192 		tmp &= ~RCTRL_UPROM;
1193 
1194 	iowrite32be(tmp, &regs->rctrl);
1195 
1196 	/* Set multicast promiscuous */
1197 	tmp = ioread32be(&regs->rctrl);
1198 	if (new_val)
1199 		tmp |= RCTRL_MPROM;
1200 	else
1201 		tmp &= ~RCTRL_MPROM;
1202 
1203 	iowrite32be(tmp, &regs->rctrl);
1204 
1205 	return 0;
1206 }
1207 
1208 static int dtsec_set_exception(struct fman_mac *dtsec,
1209 			       enum fman_mac_exceptions exception, bool enable)
1210 {
1211 	struct dtsec_regs __iomem *regs = dtsec->regs;
1212 	u32 bit_mask = 0;
1213 
1214 	if (exception != FM_MAC_EX_1G_1588_TS_RX_ERR) {
1215 		bit_mask = get_exception_flag(exception);
1216 		if (bit_mask) {
1217 			if (enable)
1218 				dtsec->exceptions |= bit_mask;
1219 			else
1220 				dtsec->exceptions &= ~bit_mask;
1221 		} else {
1222 			pr_err("Undefined exception\n");
1223 			return -EINVAL;
1224 		}
1225 		if (enable)
1226 			iowrite32be(ioread32be(&regs->imask) | bit_mask,
1227 				    &regs->imask);
1228 		else
1229 			iowrite32be(ioread32be(&regs->imask) & ~bit_mask,
1230 				    &regs->imask);
1231 	} else {
1232 		if (!dtsec->ptp_tsu_enabled) {
1233 			pr_err("Exception valid for 1588 only\n");
1234 			return -EINVAL;
1235 		}
1236 		switch (exception) {
1237 		case FM_MAC_EX_1G_1588_TS_RX_ERR:
1238 			if (enable) {
1239 				dtsec->en_tsu_err_exception = true;
1240 				iowrite32be(ioread32be(&regs->tmr_pemask) |
1241 					    TMR_PEMASK_TSREEN,
1242 					    &regs->tmr_pemask);
1243 			} else {
1244 				dtsec->en_tsu_err_exception = false;
1245 				iowrite32be(ioread32be(&regs->tmr_pemask) &
1246 					    ~TMR_PEMASK_TSREEN,
1247 					    &regs->tmr_pemask);
1248 			}
1249 			break;
1250 		default:
1251 			pr_err("Undefined exception\n");
1252 			return -EINVAL;
1253 		}
1254 	}
1255 
1256 	return 0;
1257 }
1258 
1259 static int dtsec_init(struct fman_mac *dtsec)
1260 {
1261 	struct dtsec_regs __iomem *regs = dtsec->regs;
1262 	struct dtsec_cfg *dtsec_drv_param;
1263 	u16 max_frm_ln, tbicon;
1264 	int err;
1265 
1266 	if (DEFAULT_RESET_ON_INIT &&
1267 	    (fman_reset_mac(dtsec->fm, dtsec->mac_id) != 0)) {
1268 		pr_err("Can't reset MAC!\n");
1269 		return -EINVAL;
1270 	}
1271 
1272 	err = check_init_parameters(dtsec);
1273 	if (err)
1274 		return err;
1275 
1276 	dtsec_drv_param = dtsec->dtsec_drv_param;
1277 
1278 	err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
1279 		   dtsec->max_speed, dtsec->addr, dtsec->exceptions,
1280 		   dtsec->tbidev->addr);
1281 	if (err) {
1282 		free_init_resources(dtsec);
1283 		pr_err("DTSEC version doesn't support this i/f mode\n");
1284 		return err;
1285 	}
1286 
1287 	/* Configure the TBI PHY Control Register */
1288 	tbicon = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
1289 	mdiodev_write(dtsec->tbidev, MII_TBICON, tbicon);
1290 
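	/* Release the soft reset, keeping the TBI clock select bit set */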
1291 	tbicon = TBICON_CLK_SELECT;
1292 	mdiodev_write(dtsec->tbidev, MII_TBICON, tbicon);
1293 
1294 	/* Max Frame Length */
1295 	max_frm_ln = (u16)ioread32be(&regs->maxfrm);
1296 	err = fman_set_mac_max_frame(dtsec->fm, dtsec->mac_id, max_frm_ln);
1297 	if (err) {
1298 		pr_err("Setting max frame length failed\n");
1299 		free_init_resources(dtsec);
1300 		return -EINVAL;
1301 	}
1302 
1303 	dtsec->multicast_addr_hash =
1304 	alloc_hash_table(EXTENDED_HASH_TABLE_SIZE);
1305 	if (!dtsec->multicast_addr_hash) {
1306 		free_init_resources(dtsec);
1307 		pr_err("MC hash table allocation failed\n");
1308 		return -ENOMEM;
1309 	}
1310 
1311 	dtsec->unicast_addr_hash = alloc_hash_table(DTSEC_HASH_TABLE_SIZE);
1312 	if (!dtsec->unicast_addr_hash) {
1313 		free_init_resources(dtsec);
1314 		pr_err("UC hash table allocation failed\n");
1315 		return -ENOMEM;
1316 	}
1317 
1318 	/* register err intr handler for dtsec to FPM (err) */
1319 	fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
1320 			   FMAN_INTR_TYPE_ERR, dtsec_isr, dtsec);
1321 	/* register 1588 intr handler for TMR to FPM (normal) */
1322 	fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
1323 			   FMAN_INTR_TYPE_NORMAL, dtsec_1588_isr, dtsec);
1324 
1325 	kfree(dtsec_drv_param);
1326 	dtsec->dtsec_drv_param = NULL;
1327 
1328 	return 0;
1329 }
1330 
1331 static int dtsec_free(struct fman_mac *dtsec)
1332 {
1333 	free_init_resources(dtsec);
1334 
1335 	kfree(dtsec->dtsec_drv_param);
1336 	dtsec->dtsec_drv_param = NULL;
1337 	if (!IS_ERR_OR_NULL(dtsec->tbidev))
1338 		put_device(&dtsec->tbidev->dev);
1339 	kfree(dtsec);
1340 
1341 	return 0;
1342 }
1343 
1344 static struct fman_mac *dtsec_config(struct mac_device *mac_dev,
1345 				     struct fman_mac_params *params)
1346 {
1347 	struct fman_mac *dtsec;
1348 	struct dtsec_cfg *dtsec_drv_param;
1349 
1350 	/* allocate memory for the dTSEC data structure. */
1351 	dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL);
1352 	if (!dtsec)
1353 		return NULL;
1354 
1355 	/* allocate memory for the dTSEC driver parameters data structure. */
1356 	dtsec_drv_param = kzalloc(sizeof(*dtsec_drv_param), GFP_KERNEL);
1357 	if (!dtsec_drv_param)
1358 		goto err_dtsec;
1359 
1360 	/* Plant parameter structure pointer */
1361 	dtsec->dtsec_drv_param = dtsec_drv_param;
1362 
1363 	set_dflts(dtsec_drv_param);
1364 
1365 	dtsec->regs = mac_dev->vaddr;
1366 	dtsec->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
1367 	dtsec->phy_if = mac_dev->phy_if;
1368 	dtsec->mac_id = params->mac_id;
1369 	dtsec->exceptions = (DTSEC_IMASK_BREN	|
1370 			     DTSEC_IMASK_RXCEN	|
1371 			     DTSEC_IMASK_BTEN	|
1372 			     DTSEC_IMASK_TXCEN	|
1373 			     DTSEC_IMASK_TXEEN	|
1374 			     DTSEC_IMASK_ABRTEN	|
1375 			     DTSEC_IMASK_LCEN	|
1376 			     DTSEC_IMASK_CRLEN	|
1377 			     DTSEC_IMASK_XFUNEN	|
1378 			     DTSEC_IMASK_IFERREN |
1379 			     DTSEC_IMASK_MAGEN	|
1380 			     DTSEC_IMASK_TDPEEN	|
1381 			     DTSEC_IMASK_RDPEEN);
1382 	dtsec->exception_cb = params->exception_cb;
1383 	dtsec->event_cb = params->event_cb;
1384 	dtsec->dev_id = mac_dev;
1385 	dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
1386 	dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en;
1387 
1388 	dtsec->fm = params->fm;
1389 
1390 	/* Save FMan revision */
1391 	fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);
1392 
1393 	return dtsec;
1394 
1395 err_dtsec:
1396 	kfree(dtsec);
1397 	return NULL;
1398 }
1399 
1400 int dtsec_initialization(struct mac_device *mac_dev,
1401 			 struct device_node *mac_node,
1402 			 struct fman_mac_params *params)
1403 {
1404 	int			err;
1405 	struct fman_mac		*dtsec;
1406 	struct device_node	*phy_node;
1407 	unsigned long		 capabilities;
1408 	unsigned long		*supported;
1409 
1410 	mac_dev->phylink_ops		= &dtsec_mac_ops;
1411 	mac_dev->set_promisc		= dtsec_set_promiscuous;
1412 	mac_dev->change_addr		= dtsec_modify_mac_address;
1413 	mac_dev->add_hash_mac_addr	= dtsec_add_hash_mac_address;
1414 	mac_dev->remove_hash_mac_addr	= dtsec_del_hash_mac_address;
1415 	mac_dev->set_exception		= dtsec_set_exception;
1416 	mac_dev->set_allmulti		= dtsec_set_allmulti;
1417 	mac_dev->set_tstamp		= dtsec_set_tstamp;
1418 	mac_dev->set_multi		= fman_set_multi;
1419 	mac_dev->enable			= dtsec_enable;
1420 	mac_dev->disable		= dtsec_disable;
1421 
1422 	mac_dev->fman_mac = dtsec_config(mac_dev, params);
1423 	if (!mac_dev->fman_mac) {
1424 		err = -EINVAL;
1425 		goto _return;
1426 	}
1427 
1428 	dtsec = mac_dev->fman_mac;
1429 	dtsec->dtsec_drv_param->maximum_frame = fman_get_max_frm();
1430 	dtsec->dtsec_drv_param->tx_pad_crc = true;
1431 
1432 	phy_node = of_parse_phandle(mac_node, "tbi-handle", 0);
1433 	if (!phy_node || !of_device_is_available(phy_node)) {
1434 		of_node_put(phy_node);
1435 		err = -EINVAL;
1436 		dev_err_probe(mac_dev->dev, err,
1437 			      "TBI PCS node is not available\n");
1438 		goto _return_fm_mac_free;
1439 	}
1440 
1441 	dtsec->tbidev = of_mdio_find_device(phy_node);
1442 	of_node_put(phy_node);
1443 	if (!dtsec->tbidev) {
1444 		err = -EPROBE_DEFER;
1445 		dev_err_probe(mac_dev->dev, err,
1446 			      "could not find mdiodev for PCS\n");
1447 		goto _return_fm_mac_free;
1448 	}
1449 	dtsec->pcs.ops = &dtsec_pcs_ops;
1450 	dtsec->pcs.neg_mode = true;
1451 	dtsec->pcs.poll = true;
1452 
1453 	supported = mac_dev->phylink_config.supported_interfaces;
1454 
1455 	/* FIXME: Can we use DTSEC_ID2_INT_FULL_OFF to determine if these are
1456 	 * supported? If not, we can determine support via the phy if SerDes
1457 	 * support is added.
1458 	 */
1459 	if (mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII ||
1460 	    mac_dev->phy_if == PHY_INTERFACE_MODE_1000BASEX) {
1461 		__set_bit(PHY_INTERFACE_MODE_SGMII, supported);
1462 		__set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
1463 	} else if (mac_dev->phy_if == PHY_INTERFACE_MODE_2500BASEX) {
1464 		__set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
1465 	}
1466 
1467 	if (!(ioread32be(&dtsec->regs->tsec_id2) & DTSEC_ID2_INT_REDUCED_OFF)) {
1468 		phy_interface_set_rgmii(supported);
1469 
1470 		/* DTSEC_ID2_INT_REDUCED_OFF indicates that the dTSEC supports
1471 		 * RMII and RGMII. However, the only SoCs which support RMII
1472 		 * are the P1017 and P1023. Avoid advertising this mode on
1473 		 * other SoCs. This is a bit of a moot point, since there's no
1474 		 * in-tree support for ethernet on these platforms...
1475 		 */
1476 		if (of_machine_is_compatible("fsl,P1023") ||
1477 		    of_machine_is_compatible("fsl,P1023RDB"))
1478 			__set_bit(PHY_INTERFACE_MODE_RMII, supported);
1479 	}
1480 
1481 	capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
1482 	capabilities |= MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
1483 	mac_dev->phylink_config.mac_capabilities = capabilities;
1484 
1485 	err = dtsec_init(dtsec);
1486 	if (err < 0)
1487 		goto _return_fm_mac_free;
1488 
1489 	/* For the 1G MAC, disable the MIB counters overflow interrupt by default */
1490 	err = dtsec_set_exception(dtsec, FM_MAC_EX_1G_RX_MIB_CNT_OVFL, false);
1491 	if (err < 0)
1492 		goto _return_fm_mac_free;
1493 
1494 	dev_info(mac_dev->dev, "FMan dTSEC version: 0x%08x\n",
1495 		 ioread32be(&dtsec->regs->tsec_id));
1496 
1497 	goto _return;
1498 
1499 _return_fm_mac_free:
1500 	dtsec_free(dtsec);
1501 
1502 _return:
1503 	return err;
1504 }
1505