// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
 * Copyright 2008 - 2015 Freescale Semiconductor Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "fman_dtsec.h"
#include "fman.h"

#include <linux/slab.h>
#include <linux/bitrev.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/phy.h>
#include <linux/crc32.h>
#include <linux/of_mdio.h>
#include <linux/mii.h>

/* TBI register addresses */
#define MII_TBICON		0x11

/* TBICON register bit fields */
#define TBICON_SOFT_RESET	0x8000	/* Soft reset */
#define TBICON_DISABLE_RX_DIS	0x2000	/* Disable receive disparity */
#define TBICON_DISABLE_TX_DIS	0x1000	/* Disable transmit disparity */
#define TBICON_AN_SENSE		0x0100	/* Auto-negotiation sense enable */
#define TBICON_CLK_SELECT	0x0020	/* Clock select */
#define TBICON_MI_MODE		0x0010	/* GMII mode (TBI if not set) */

#define TBIANA_SGMII		0x4001
#define TBIANA_1000X		0x01a0

/* Interrupt Mask Register (IMASK) */
#define DTSEC_IMASK_BREN	0x80000000
#define DTSEC_IMASK_RXCEN	0x40000000
#define DTSEC_IMASK_MSROEN	0x04000000
#define DTSEC_IMASK_GTSCEN	0x02000000
#define DTSEC_IMASK_BTEN	0x01000000
#define DTSEC_IMASK_TXCEN	0x00800000
#define DTSEC_IMASK_TXEEN	0x00400000
#define DTSEC_IMASK_LCEN	0x00040000
#define DTSEC_IMASK_CRLEN	0x00020000
#define DTSEC_IMASK_XFUNEN	0x00010000
#define DTSEC_IMASK_ABRTEN	0x00008000
#define DTSEC_IMASK_IFERREN	0x00004000
#define DTSEC_IMASK_MAGEN	0x00000800
#define DTSEC_IMASK_MMRDEN	0x00000400
#define DTSEC_IMASK_MMWREN	0x00000200
#define DTSEC_IMASK_GRSCEN	0x00000100
#define DTSEC_IMASK_TDPEEN	0x00000002
#define DTSEC_IMASK_RDPEEN	0x00000001

#define DTSEC_EVENTS_MASK		\
	 ((u32)(DTSEC_IMASK_BREN    |	\
		DTSEC_IMASK_RXCEN   |	\
		DTSEC_IMASK_BTEN    |	\
		DTSEC_IMASK_TXCEN   |	\
		DTSEC_IMASK_TXEEN   |	\
		DTSEC_IMASK_ABRTEN  |	\
		DTSEC_IMASK_LCEN    |	\
		DTSEC_IMASK_CRLEN   |	\
		DTSEC_IMASK_XFUNEN  |	\
		DTSEC_IMASK_IFERREN |	\
		DTSEC_IMASK_MAGEN   |	\
		DTSEC_IMASK_TDPEEN  |	\
		DTSEC_IMASK_RDPEEN))

/* dtsec timestamp event bits */
#define TMR_PEMASK_TSREEN	0x00010000
#define TMR_PEVENT_TSRE		0x00010000

/* Group address bit indication */
#define MAC_GROUP_ADDRESS	0x0000010000000000ULL
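
/* Note: this driver packs a MAC address into a u64 with octet 0 in bits
 * 47:40 (see how set_mac_address() and ENET_ADDR_TO_UINT64() are used
 * below), so MAC_GROUP_ADDRESS selects the I/G (multicast) bit of the first
 * octet. For example, 01:00:5e:00:00:01 has this bit set, while
 * 00:04:9f:00:00:01 does not.
 */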

/* Defaults */
#define DEFAULT_HALFDUP_RETRANSMIT		0xf
#define DEFAULT_HALFDUP_COLL_WINDOW		0x37
#define DEFAULT_TX_PAUSE_TIME			0xf000
#define DEFAULT_RX_PREPEND			0
#define DEFAULT_PREAMBLE_LEN			7
#define DEFAULT_TX_PAUSE_TIME_EXTD		0
#define DEFAULT_NON_BACK_TO_BACK_IPG1		0x40
#define DEFAULT_NON_BACK_TO_BACK_IPG2		0x60
#define DEFAULT_MIN_IFG_ENFORCEMENT		0x50
#define DEFAULT_BACK_TO_BACK_IPG		0x60
#define DEFAULT_MAXIMUM_FRAME			0x600

/* register related defines (bits, field offsets..) */
#define DTSEC_ID2_INT_REDUCED_OFF	0x00010000

#define DTSEC_ECNTRL_GMIIM		0x00000040
#define DTSEC_ECNTRL_TBIM		0x00000020
#define DTSEC_ECNTRL_SGMIIM		0x00000002
#define DTSEC_ECNTRL_RPM		0x00000010
#define DTSEC_ECNTRL_R100M		0x00000008
#define DTSEC_ECNTRL_QSGMIIM		0x00000001

#define TCTRL_TTSE			0x00000040
#define TCTRL_GTS			0x00000020

#define RCTRL_PAL_MASK			0x001f0000
#define RCTRL_PAL_SHIFT			16
#define RCTRL_GHTX			0x00000400
#define RCTRL_RTSE			0x00000040
#define RCTRL_GRS			0x00000020
#define RCTRL_MPROM			0x00000008
#define RCTRL_RSF			0x00000004
#define RCTRL_UPROM			0x00000001

#define MACCFG1_SOFT_RESET		0x80000000
#define MACCFG1_RX_FLOW			0x00000020
#define MACCFG1_TX_FLOW			0x00000010
#define MACCFG1_TX_EN			0x00000001
#define MACCFG1_RX_EN			0x00000004

#define MACCFG2_NIBBLE_MODE		0x00000100
#define MACCFG2_BYTE_MODE		0x00000200
#define MACCFG2_PAD_CRC_EN		0x00000004
#define MACCFG2_FULL_DUPLEX		0x00000001
#define MACCFG2_PREAMBLE_LENGTH_MASK	0x0000f000
#define MACCFG2_PREAMBLE_LENGTH_SHIFT	12

#define IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT	24
#define IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT	16
#define IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT	8

#define IPGIFG_NON_BACK_TO_BACK_IPG_1	0x7F000000
#define IPGIFG_NON_BACK_TO_BACK_IPG_2	0x007F0000
#define IPGIFG_MIN_IFG_ENFORCEMENT	0x0000FF00
#define IPGIFG_BACK_TO_BACK_IPG	0x0000007F

#define HAFDUP_EXCESS_DEFER			0x00010000
#define HAFDUP_COLLISION_WINDOW		0x000003ff
#define HAFDUP_RETRANSMISSION_MAX_SHIFT	12
#define HAFDUP_RETRANSMISSION_MAX		0x0000f000

#define NUM_OF_HASH_REGS	8	/* Number of hash table registers */

#define PTV_PTE_MASK		0xffff0000
#define PTV_PT_MASK		0x0000ffff
#define PTV_PTE_SHIFT		16

#define MAX_PACKET_ALIGNMENT		31
#define MAX_INTER_PACKET_GAP		0x7f
#define MAX_RETRANSMISSION		0x0f
#define MAX_COLLISION_WINDOW		0x03ff

/* Hash table size (32 bits*8 regs) */
#define DTSEC_HASH_TABLE_SIZE		256
/* Extended Hash table size (32 bits*16 regs) */
#define EXTENDED_HASH_TABLE_SIZE	512

/* dTSEC Memory Map registers */
struct dtsec_regs {
	/* dTSEC General Control and Status Registers */
	u32 tsec_id;		/* 0x000 ETSEC_ID register */
	u32 tsec_id2;		/* 0x004 ETSEC_ID2 register */
	u32 ievent;		/* 0x008 Interrupt event register */
	u32 imask;		/* 0x00C Interrupt mask register */
	u32 reserved0010[1];
	u32 ecntrl;		/* 0x014 E control register */
	u32 ptv;		/* 0x018 Pause time value register */
	u32 tbipa;		/* 0x01C TBI PHY address register */
	u32 tmr_ctrl;		/* 0x020 Time-stamp Control register */
	u32 tmr_pevent;		/* 0x024 Time-stamp event register */
	u32 tmr_pemask;		/* 0x028 Timer event mask register */
	u32 reserved002c[5];
	u32 tctrl;		/* 0x040 Transmit control register */
	u32 reserved0044[3];
	u32 rctrl;		/* 0x050 Receive control register */
	u32 reserved0054[11];
	u32 igaddr[8];		/* 0x080-0x09C Individual/group address */
	u32 gaddr[8];		/* 0x0A0-0x0BC Group address registers 0-7 */
	u32 reserved00c0[16];
	u32 maccfg1;		/* 0x100 MAC configuration #1 */
	u32 maccfg2;		/* 0x104 MAC configuration #2 */
	u32 ipgifg;		/* 0x108 IPG/IFG */
	u32 hafdup;		/* 0x10C Half-duplex */
	u32 maxfrm;		/* 0x110 Maximum frame */
	u32 reserved0114[10];
	u32 ifstat;		/* 0x13C Interface status */
	u32 macstnaddr1;	/* 0x140 Station Address,part 1 */
	u32 macstnaddr2;	/* 0x144 Station Address,part 2 */
	struct {
		u32 exact_match1;	/* octets 1-4 */
		u32 exact_match2;	/* octets 5-6 */
	} macaddr[15];		/* 0x148-0x1BC mac exact match addresses 1-15 */
	u32 reserved01c0[16];
	u32 tr64;	/* 0x200 Tx and Rx 64 byte frame counter */
	u32 tr127;	/* 0x204 Tx and Rx 65 to 127 byte frame counter */
	u32 tr255;	/* 0x208 Tx and Rx 128 to 255 byte frame counter */
	u32 tr511;	/* 0x20C Tx and Rx 256 to 511 byte frame counter */
	u32 tr1k;	/* 0x210 Tx and Rx 512 to 1023 byte frame counter */
	u32 trmax;	/* 0x214 Tx and Rx 1024 to 1518 byte frame counter */
	u32 trmgv;
	/* 0x218 Tx and Rx 1519 to 1522 byte good VLAN frame count */
	u32 rbyt;	/* 0x21C receive byte counter */
	u32 rpkt;	/* 0x220 receive packet counter */
	u32 rfcs;	/* 0x224 receive FCS error counter */
	u32 rmca;	/* 0x228 RMCA Rx multicast packet counter */
	u32 rbca;	/* 0x22C Rx broadcast packet counter */
	u32 rxcf;	/* 0x230 Rx control frame packet counter */
	u32 rxpf;	/* 0x234 Rx pause frame packet counter */
	u32 rxuo;	/* 0x238 Rx unknown OP code counter */
	u32 raln;	/* 0x23C Rx alignment error counter */
	u32 rflr;	/* 0x240 Rx frame length error counter */
	u32 rcde;	/* 0x244 Rx code error counter */
	u32 rcse;	/* 0x248 Rx carrier sense error counter */
	u32 rund;	/* 0x24C Rx undersize packet counter */
	u32 rovr;	/* 0x250 Rx oversize packet counter */
	u32 rfrg;	/* 0x254 Rx fragments counter */
	u32 rjbr;	/* 0x258 Rx jabber counter */
	u32 rdrp;	/* 0x25C Rx drop */
	u32 tbyt;	/* 0x260 Tx byte counter */
	u32 tpkt;	/* 0x264 Tx packet counter */
	u32 tmca;	/* 0x268 Tx multicast packet counter */
	u32 tbca;	/* 0x26C Tx broadcast packet counter */
	u32 txpf;	/* 0x270 Tx pause control frame counter */
	u32 tdfr;	/* 0x274 Tx deferral packet counter */
	u32 tedf;	/* 0x278 Tx excessive deferral packet counter */
	u32 tscl;	/* 0x27C Tx single collision packet counter */
	u32 tmcl;	/* 0x280 Tx multiple collision packet counter */
	u32 tlcl;	/* 0x284 Tx late collision packet counter */
	u32 txcl;	/* 0x288 Tx excessive collision packet counter */
	u32 tncl;	/* 0x28C Tx total collision counter */
	u32 reserved0290[1];
	u32 tdrp;	/* 0x294 Tx drop frame counter */
	u32 tjbr;	/* 0x298 Tx jabber frame counter */
	u32 tfcs;	/* 0x29C Tx FCS error counter */
	u32 txcf;	/* 0x2A0 Tx control frame counter */
	u32 tovr;	/* 0x2A4 Tx oversize frame counter */
	u32 tund;	/* 0x2A8 Tx undersize frame counter */
	u32 tfrg;	/* 0x2AC Tx fragments frame counter */
	u32 car1;	/* 0x2B0 Carry register one */
	u32 car2;	/* 0x2B4 Carry register two */
	u32 cam1;	/* 0x2B8 Carry register one mask register */
	u32 cam2;	/* 0x2BC Carry register two mask register */
	u32 reserved02c0[848];
};

/* struct dtsec_cfg - dTSEC configuration
 * Transmit half-duplex flow control, under software control for 10/100-Mbps
 * half-duplex media. If set, back pressure is applied to media by raising
 * carrier.
 * halfdup_retransmit:
 * Number of retransmission attempts following a collision.
 * If this is exceeded dTSEC aborts transmission due to excessive collisions.
 * The standard specifies the attempt limit to be 15.
 * halfdup_coll_window:
 * The number of bytes of the frame during which collisions may occur.
 * The default value of 55 corresponds to the frame byte at the end of the
 * standard 512-bit slot time window. If collisions are detected after this
 * byte, the late collision event is asserted and transmission of current
 * frame is aborted.
 * tx_pad_crc:
 * Pad and append CRC. If set, the MAC pads all transmitted short frames and
 * appends a CRC to every frame regardless of padding requirement.
 * tx_pause_time:
 * Transmit pause time value. This pause value is used as part of the pause
 * frame to be sent when a transmit pause frame is initiated.
 * If set to 0 this disables transmission of pause frames.
 * preamble_len:
 * Length, in bytes, of the preamble field preceding each Ethernet
 * start-of-frame delimiter byte. The default value of 0x7 should be used in
 * order to guarantee reliable operation with IEEE 802.3 compliant hardware.
 * rx_prepend:
 * Packet alignment padding length. The specified number of bytes (1-31)
 * of zero padding are inserted before the start of each received frame.
 * For Ethernet, where optional preamble extraction is enabled, the padding
 * appears before the preamble, otherwise the padding precedes the
 * layer 2 header.
 *
 * This structure contains basic dTSEC configuration and must be passed to
 * the init() function. A default set of configuration values can be
 * obtained by calling set_dflts().
 */
struct dtsec_cfg {
	u16 halfdup_retransmit;
	u16 halfdup_coll_window;
	bool tx_pad_crc;
	u16 tx_pause_time;
	bool ptp_tsu_en;
	bool ptp_exception_en;
	u32 preamble_len;
	u32 rx_prepend;
	u16 tx_pause_time_extd;
	u16 maximum_frame;
	u32 non_back_to_back_ipg1;
	u32 non_back_to_back_ipg2;
	u32 min_ifg_enforcement;
	u32 back_to_back_ipg;
};
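
/* Illustrative usage sketch (not part of the driver): a caller holding a
 * dtsec_cfg could take the defaults and then override individual fields
 * before the structure is consumed by init(), e.g.
 *
 *	struct dtsec_cfg cfg;
 *
 *	set_dflts(&cfg);
 *	cfg.maximum_frame = 0x5ee;	// hypothetical 1518-byte limit
 *	cfg.tx_pad_crc = false;		// hypothetical override
 *
 * In this file the same pattern is driven by dtsec_config() (defaults) and
 * the dtsec_cfg_*() helpers (overrides) before dtsec_init() calls init().
 */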

struct fman_mac {
	/* pointer to dTSEC memory mapped registers */
	struct dtsec_regs __iomem *regs;
	/* MAC address of device */
	u64 addr;
	/* Ethernet physical interface */
	phy_interface_t phy_if;
	u16 max_speed;
	void *dev_id; /* device cookie used by the exception cbs */
	fman_mac_exception_cb *exception_cb;
	fman_mac_exception_cb *event_cb;
	/* Number of individual addresses in registers for this station */
	u8 num_of_ind_addr_in_regs;
	/* pointer to driver's global address hash table */
	struct eth_hash_t *multicast_addr_hash;
	/* pointer to driver's individual address hash table */
	struct eth_hash_t *unicast_addr_hash;
	u8 mac_id;
	u32 exceptions;
	bool ptp_tsu_enabled;
	bool en_tsu_err_exception;
	struct dtsec_cfg *dtsec_drv_param;
	void *fm;
	struct fman_rev_info fm_rev_info;
	bool basex_if;
	struct phy_device *tbiphy;
};

static void set_dflts(struct dtsec_cfg *cfg)
{
	cfg->halfdup_retransmit = DEFAULT_HALFDUP_RETRANSMIT;
	cfg->halfdup_coll_window = DEFAULT_HALFDUP_COLL_WINDOW;
	cfg->tx_pad_crc = true;
	cfg->tx_pause_time = DEFAULT_TX_PAUSE_TIME;
	/* PHY address 0 is reserved (DPAA RM) */
	cfg->rx_prepend = DEFAULT_RX_PREPEND;
	cfg->ptp_tsu_en = true;
	cfg->ptp_exception_en = true;
	cfg->preamble_len = DEFAULT_PREAMBLE_LEN;
	cfg->tx_pause_time_extd = DEFAULT_TX_PAUSE_TIME_EXTD;
	cfg->non_back_to_back_ipg1 = DEFAULT_NON_BACK_TO_BACK_IPG1;
	cfg->non_back_to_back_ipg2 = DEFAULT_NON_BACK_TO_BACK_IPG2;
	cfg->min_ifg_enforcement = DEFAULT_MIN_IFG_ENFORCEMENT;
	cfg->back_to_back_ipg = DEFAULT_BACK_TO_BACK_IPG;
	cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
}

static void set_mac_address(struct dtsec_regs __iomem *regs, const u8 *adr)
{
	u32 tmp;

	tmp = (u32)((adr[5] << 24) |
		    (adr[4] << 16) | (adr[3] << 8) | adr[2]);
	iowrite32be(tmp, &regs->macstnaddr1);

	tmp = (u32)((adr[1] << 24) | (adr[0] << 16));
	iowrite32be(tmp, &regs->macstnaddr2);
}
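
/* Worked example (illustrative): for the address 00:04:9f:01:02:03,
 * adr[] = {0x00, 0x04, 0x9f, 0x01, 0x02, 0x03}, so the writes above yield
 *
 *	macstnaddr1 = 0x0302019f	(octets 6,5,4,3 of the address)
 *	macstnaddr2 = 0x04000000	(octets 2,1 in the two upper bytes)
 *
 * i.e. the station address is stored byte-reversed, as noted in
 * dtsec_modify_mac_address() below.
 */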

static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
		phy_interface_t iface, u16 iface_speed, u64 addr,
		u32 exception_mask, u8 tbi_addr)
{
	bool is_rgmii, is_sgmii, is_qsgmii;
	enet_addr_t eth_addr;
	u32 tmp;
	int i;

	/* Soft reset */
	iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
	iowrite32be(0, &regs->maccfg1);

	/* dtsec_id2 */
	tmp = ioread32be(&regs->tsec_id2);

	/* check RGMII support */
	if (iface == PHY_INTERFACE_MODE_RGMII ||
	    iface == PHY_INTERFACE_MODE_RGMII_ID ||
	    iface == PHY_INTERFACE_MODE_RGMII_RXID ||
	    iface == PHY_INTERFACE_MODE_RGMII_TXID ||
	    iface == PHY_INTERFACE_MODE_RMII)
		if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
			return -EINVAL;

	if (iface == PHY_INTERFACE_MODE_SGMII ||
	    iface == PHY_INTERFACE_MODE_MII)
		if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
			return -EINVAL;

	is_rgmii = iface == PHY_INTERFACE_MODE_RGMII ||
		   iface == PHY_INTERFACE_MODE_RGMII_ID ||
		   iface == PHY_INTERFACE_MODE_RGMII_RXID ||
		   iface == PHY_INTERFACE_MODE_RGMII_TXID;
	is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
	is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;

	tmp = 0;
	if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII)
		tmp |= DTSEC_ECNTRL_GMIIM;
	if (is_sgmii)
		tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM);
	if (is_qsgmii)
		tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM |
			DTSEC_ECNTRL_QSGMIIM);
	if (is_rgmii)
		tmp |= DTSEC_ECNTRL_RPM;
	if (iface_speed == SPEED_100)
		tmp |= DTSEC_ECNTRL_R100M;

	iowrite32be(tmp, &regs->ecntrl);

	tmp = 0;

	if (cfg->tx_pause_time)
		tmp |= cfg->tx_pause_time;
	if (cfg->tx_pause_time_extd)
		tmp |= cfg->tx_pause_time_extd << PTV_PTE_SHIFT;
	iowrite32be(tmp, &regs->ptv);

	tmp = 0;
	tmp |= (cfg->rx_prepend << RCTRL_PAL_SHIFT) & RCTRL_PAL_MASK;
	/* Accept short frames */
	tmp |= RCTRL_RSF;

	iowrite32be(tmp, &regs->rctrl);

	/* Assign a Phy Address to the TBI (TBIPA).
	 * Done also in cases where TBI is not selected to avoid conflict with
	 * the external PHY's Physical address
	 */
	iowrite32be(tbi_addr, &regs->tbipa);

	iowrite32be(0, &regs->tmr_ctrl);

	if (cfg->ptp_tsu_en) {
		tmp = 0;
		tmp |= TMR_PEVENT_TSRE;
		iowrite32be(tmp, &regs->tmr_pevent);

		if (cfg->ptp_exception_en) {
			tmp = 0;
			tmp |= TMR_PEMASK_TSREEN;
			iowrite32be(tmp, &regs->tmr_pemask);
		}
	}

	tmp = 0;
	tmp |= MACCFG1_RX_FLOW;
	tmp |= MACCFG1_TX_FLOW;
	iowrite32be(tmp, &regs->maccfg1);

	tmp = 0;

	if (iface_speed < SPEED_1000)
		tmp |= MACCFG2_NIBBLE_MODE;
	else if (iface_speed == SPEED_1000)
		tmp |= MACCFG2_BYTE_MODE;

	tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
		MACCFG2_PREAMBLE_LENGTH_MASK;
	if (cfg->tx_pad_crc)
		tmp |= MACCFG2_PAD_CRC_EN;
	/* Full Duplex */
	tmp |= MACCFG2_FULL_DUPLEX;
	iowrite32be(tmp, &regs->maccfg2);

	tmp = (((cfg->non_back_to_back_ipg1 <<
		 IPGIFG_NON_BACK_TO_BACK_IPG_1_SHIFT)
		& IPGIFG_NON_BACK_TO_BACK_IPG_1)
	       | ((cfg->non_back_to_back_ipg2 <<
		   IPGIFG_NON_BACK_TO_BACK_IPG_2_SHIFT)
		 & IPGIFG_NON_BACK_TO_BACK_IPG_2)
	       | ((cfg->min_ifg_enforcement << IPGIFG_MIN_IFG_ENFORCEMENT_SHIFT)
		 & IPGIFG_MIN_IFG_ENFORCEMENT)
	       | (cfg->back_to_back_ipg & IPGIFG_BACK_TO_BACK_IPG));
	iowrite32be(tmp, &regs->ipgifg);

	tmp = 0;
	tmp |= HAFDUP_EXCESS_DEFER;
	tmp |= ((cfg->halfdup_retransmit << HAFDUP_RETRANSMISSION_MAX_SHIFT)
		& HAFDUP_RETRANSMISSION_MAX);
	tmp |= (cfg->halfdup_coll_window & HAFDUP_COLLISION_WINDOW);

	iowrite32be(tmp, &regs->hafdup);

	/* Initialize Maximum frame length */
	iowrite32be(cfg->maximum_frame, &regs->maxfrm);

	iowrite32be(0xffffffff, &regs->cam1);
	iowrite32be(0xffffffff, &regs->cam2);

	iowrite32be(exception_mask, &regs->imask);

	iowrite32be(0xffffffff, &regs->ievent);

	if (addr) {
		MAKE_ENET_ADDR_FROM_UINT64(addr, eth_addr);
		set_mac_address(regs, (const u8 *)eth_addr);
	}

	/* HASH */
	for (i = 0; i < NUM_OF_HASH_REGS; i++) {
		/* Initialize IADDRx */
		iowrite32be(0, &regs->igaddr[i]);
		/* Initialize GADDRx */
		iowrite32be(0, &regs->gaddr[i]);
	}

	return 0;
}

static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,
		       bool enable)
{
	int reg_idx = (bucket >> 5) & 0xf;
	int bit_idx = bucket & 0x1f;
	u32 bit_mask = 0x80000000 >> bit_idx;
	u32 __iomem *reg;

	if (reg_idx > 7)
		reg = &regs->gaddr[reg_idx - 8];
	else
		reg = &regs->igaddr[reg_idx];

	if (enable)
		iowrite32be(ioread32be(reg) | bit_mask, reg);
	else
		iowrite32be(ioread32be(reg) & (~bit_mask), reg);
}
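
/* Worked example (illustrative): for bucket 0x1b4 (436), reg_idx is
 * (436 >> 5) & 0xf = 13, so the bit lives in gaddr[13 - 8] = gaddr[5], and
 * bit_idx is 436 & 0x1f = 20, giving bit_mask = 0x80000000 >> 20 =
 * 0x00000800. Buckets 0-255 therefore map to igaddr[0..7] and buckets
 * 256-511 to gaddr[0..7].
 */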

static int check_init_parameters(struct fman_mac *dtsec)
{
	if (dtsec->max_speed >= SPEED_10000) {
		pr_err("1G MAC driver supports 1G or lower speeds\n");
		return -EINVAL;
	}
	if ((dtsec->dtsec_drv_param)->rx_prepend >
	    MAX_PACKET_ALIGNMENT) {
		pr_err("packetAlignmentPadding can't be greater than %d\n",
		       MAX_PACKET_ALIGNMENT);
		return -EINVAL;
	}
	if (((dtsec->dtsec_drv_param)->non_back_to_back_ipg1 >
	     MAX_INTER_PACKET_GAP) ||
	    ((dtsec->dtsec_drv_param)->non_back_to_back_ipg2 >
	     MAX_INTER_PACKET_GAP) ||
	     ((dtsec->dtsec_drv_param)->back_to_back_ipg >
	      MAX_INTER_PACKET_GAP)) {
		pr_err("Inter packet gap can't be greater than %d\n",
		       MAX_INTER_PACKET_GAP);
		return -EINVAL;
	}
	if ((dtsec->dtsec_drv_param)->halfdup_retransmit >
	    MAX_RETRANSMISSION) {
		pr_err("maxRetransmission can't be greater than %d\n",
		       MAX_RETRANSMISSION);
		return -EINVAL;
	}
	if ((dtsec->dtsec_drv_param)->halfdup_coll_window >
	    MAX_COLLISION_WINDOW) {
		pr_err("collisionWindow can't be greater than %d\n",
		       MAX_COLLISION_WINDOW);
		return -EINVAL;
	}
	/* If Auto negotiation process is disabled, need to set up the PHY
	 * using the MII Management Interface
	 */
	if (!dtsec->exception_cb) {
		pr_err("uninitialized exception_cb\n");
		return -EINVAL;
	}
	if (!dtsec->event_cb) {
		pr_err("uninitialized event_cb\n");
		return -EINVAL;
	}

	return 0;
}

static int get_exception_flag(enum fman_mac_exceptions exception)
{
	u32 bit_mask;

	switch (exception) {
	case FM_MAC_EX_1G_BAB_RX:
		bit_mask = DTSEC_IMASK_BREN;
		break;
	case FM_MAC_EX_1G_RX_CTL:
		bit_mask = DTSEC_IMASK_RXCEN;
		break;
	case FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET:
		bit_mask = DTSEC_IMASK_GTSCEN;
		break;
	case FM_MAC_EX_1G_BAB_TX:
		bit_mask = DTSEC_IMASK_BTEN;
		break;
	case FM_MAC_EX_1G_TX_CTL:
		bit_mask = DTSEC_IMASK_TXCEN;
		break;
	case FM_MAC_EX_1G_TX_ERR:
		bit_mask = DTSEC_IMASK_TXEEN;
		break;
	case FM_MAC_EX_1G_LATE_COL:
		bit_mask = DTSEC_IMASK_LCEN;
		break;
	case FM_MAC_EX_1G_COL_RET_LMT:
		bit_mask = DTSEC_IMASK_CRLEN;
		break;
	case FM_MAC_EX_1G_TX_FIFO_UNDRN:
		bit_mask = DTSEC_IMASK_XFUNEN;
		break;
	case FM_MAC_EX_1G_MAG_PCKT:
		bit_mask = DTSEC_IMASK_MAGEN;
		break;
	case FM_MAC_EX_1G_MII_MNG_RD_COMPLET:
		bit_mask = DTSEC_IMASK_MMRDEN;
		break;
	case FM_MAC_EX_1G_MII_MNG_WR_COMPLET:
		bit_mask = DTSEC_IMASK_MMWREN;
		break;
	case FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET:
		bit_mask = DTSEC_IMASK_GRSCEN;
		break;
	case FM_MAC_EX_1G_DATA_ERR:
		bit_mask = DTSEC_IMASK_TDPEEN;
		break;
	case FM_MAC_EX_1G_RX_MIB_CNT_OVFL:
		bit_mask = DTSEC_IMASK_MSROEN;
		break;
	default:
		bit_mask = 0;
		break;
	}

	return bit_mask;
}

static bool is_init_done(struct dtsec_cfg *dtsec_drv_params)
{
	/* Init is considered done once the driver parameters have been
	 * consumed and freed by dtsec_init(), i.e. the pointer is NULL.
	 */
	if (!dtsec_drv_params)
		return true;

	return false;
}

static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return 0;

	return (u16)ioread32be(&regs->maxfrm);
}

static void dtsec_isr(void *handle)
{
	struct fman_mac *dtsec = (struct fman_mac *)handle;
	struct dtsec_regs __iomem *regs = dtsec->regs;
	u32 event;

	/* do not handle MDIO events */
	event = ioread32be(&regs->ievent) &
		(u32)(~(DTSEC_IMASK_MMRDEN | DTSEC_IMASK_MMWREN));

	event &= ioread32be(&regs->imask);

	iowrite32be(event, &regs->ievent);

	if (event & DTSEC_IMASK_BREN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_RX);
	if (event & DTSEC_IMASK_RXCEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_RX_CTL);
	if (event & DTSEC_IMASK_GTSCEN)
		dtsec->exception_cb(dtsec->dev_id,
				    FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET);
	if (event & DTSEC_IMASK_BTEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_BAB_TX);
	if (event & DTSEC_IMASK_TXCEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_CTL);
	if (event & DTSEC_IMASK_TXEEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_ERR);
	if (event & DTSEC_IMASK_LCEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_LATE_COL);
	if (event & DTSEC_IMASK_CRLEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT);
	if (event & DTSEC_IMASK_XFUNEN) {
		/* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */
		if (dtsec->fm_rev_info.major == 2) {
			u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i;
			/* a. Write 0x00E0_0C00 to DTSEC_ID
			 *	This is a read only register
			 * b. Read and save the value of TPKT
			 */
			tpkt1 = ioread32be(&regs->tpkt);

			/* c. Read the register at dTSEC address offset 0x32C */
			tmp_reg1 = ioread32be(&regs->reserved02c0[27]);

			/* d. Compare bits [9:15] to bits [25:31] of the
			 * register at address offset 0x32C.
			 */
			if ((tmp_reg1 & 0x007F0000) !=
				(tmp_reg1 & 0x0000007F)) {
				/* If they are not equal, save the value of
				 * this register and wait for at least
				 * MAXFRM*16 ns
				 */
				usleep_range((u32)(min(dtsec_get_max_frame_length(dtsec) *
						       16 / 1000, 1)),
					     (u32)(min(dtsec_get_max_frame_length(dtsec) *
						       16 / 1000, 1) + 1));
			}

			/* e. Read and save TPKT again and read the register
			 * at dTSEC address offset 0x32C again
			 */
			tpkt2 = ioread32be(&regs->tpkt);
			tmp_reg2 = ioread32be(&regs->reserved02c0[27]);

			/* f. Compare the value of TPKT saved in step b to
			 * value read in step e. Also compare bits [9:15] of
			 * the register at offset 0x32C saved in step d to the
			 * value of bits [9:15] saved in step e. If the two
			 * registers values are unchanged, then the transmit
			 * portion of the dTSEC controller is locked up and
			 * the user should proceed to the recover sequence.
			 */
			if ((tpkt1 == tpkt2) && ((tmp_reg1 & 0x007F0000) ==
				(tmp_reg2 & 0x007F0000))) {
				/* recover sequence */

				/* a.Write a 1 to RCTRL[GRS] */

				iowrite32be(ioread32be(&regs->rctrl) |
					    RCTRL_GRS, &regs->rctrl);

				/* b.Wait until IEVENT[GRSC]=1, or at least
				 * 100 us has elapsed.
				 */
				for (i = 0; i < 100; i++) {
					if (ioread32be(&regs->ievent) &
					    DTSEC_IMASK_GRSCEN)
						break;
					udelay(1);
				}
				if (ioread32be(&regs->ievent) &
				    DTSEC_IMASK_GRSCEN)
					iowrite32be(DTSEC_IMASK_GRSCEN,
						    &regs->ievent);
				else
					pr_debug("Rx lockup due to Tx lockup\n");

				/* c.Write a 1 to bit n of FM_RSTC
				 * (offset 0x0CC of FPM)
				 */
				fman_reset_mac(dtsec->fm, dtsec->mac_id);

				/* d.Wait 4 Tx clocks (32 ns) */
				udelay(1);

				/* e.Write a 0 to bit n of FM_RSTC. */
				/* cleared by FMAN
				 */
			}
		}

		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_TX_FIFO_UNDRN);
	}
	if (event & DTSEC_IMASK_MAGEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_MAG_PCKT);
	if (event & DTSEC_IMASK_GRSCEN)
		dtsec->exception_cb(dtsec->dev_id,
				    FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET);
	if (event & DTSEC_IMASK_TDPEEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_DATA_ERR);
	if (event & DTSEC_IMASK_RDPEEN)
		dtsec->exception_cb(dtsec->dev_id, FM_MAC_1G_RX_DATA_ERR);

	/* masked interrupts */
	WARN_ON(event & DTSEC_IMASK_ABRTEN);
	WARN_ON(event & DTSEC_IMASK_IFERREN);
}

static void dtsec_1588_isr(void *handle)
{
	struct fman_mac *dtsec = (struct fman_mac *)handle;
	struct dtsec_regs __iomem *regs = dtsec->regs;
	u32 event;

	if (dtsec->ptp_tsu_enabled) {
		event = ioread32be(&regs->tmr_pevent);
		event &= ioread32be(&regs->tmr_pemask);

		if (event) {
			iowrite32be(event, &regs->tmr_pevent);
			WARN_ON(event & TMR_PEVENT_TSRE);
			dtsec->exception_cb(dtsec->dev_id,
					    FM_MAC_EX_1G_1588_TS_RX_ERR);
		}
	}
}

static void free_init_resources(struct fman_mac *dtsec)
{
	fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
			     FMAN_INTR_TYPE_ERR);
	fman_unregister_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
			     FMAN_INTR_TYPE_NORMAL);

	/* release the driver's group hash table */
	free_hash_table(dtsec->multicast_addr_hash);
	dtsec->multicast_addr_hash = NULL;

	/* release the driver's individual hash table */
	free_hash_table(dtsec->unicast_addr_hash);
	dtsec->unicast_addr_hash = NULL;
}

int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val)
{
	if (is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	dtsec->dtsec_drv_param->maximum_frame = new_val;

	return 0;
}

int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val)
{
	if (is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	dtsec->dtsec_drv_param->tx_pad_crc = new_val;

	return 0;
}

static void graceful_start(struct fman_mac *dtsec)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;

	iowrite32be(ioread32be(&regs->tctrl) & ~TCTRL_GTS, &regs->tctrl);
	iowrite32be(ioread32be(&regs->rctrl) & ~RCTRL_GRS, &regs->rctrl);
}

static void graceful_stop(struct fman_mac *dtsec)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	u32 tmp;

	/* Graceful stop - Assert the graceful Rx stop bit */
	tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
	iowrite32be(tmp, &regs->rctrl);

	if (dtsec->fm_rev_info.major == 2) {
		/* Workaround for dTSEC Errata A002 */
		usleep_range(100, 200);
	} else {
		/* Workaround for dTSEC Errata A004839 */
		usleep_range(10, 50);
	}

	/* Graceful stop - Assert the graceful Tx stop bit */
	if (dtsec->fm_rev_info.major == 2) {
		/* dTSEC Errata A004: Do not use TCTRL[GTS]=1 */
		pr_debug("GTS not supported due to DTSEC_A004 Errata.\n");
	} else {
		tmp = ioread32be(&regs->tctrl) | TCTRL_GTS;
		iowrite32be(tmp, &regs->tctrl);

		/* Workaround for dTSEC Errata A0012, A0014 */
		usleep_range(10, 50);
	}
}
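
/* Sketch of the pattern used by the runtime helpers below (illustrative
 * only): register changes that affect an active MAC are bracketed by a
 * graceful stop/start pair, e.g.
 *
 *	graceful_stop(dtsec);
 *	// ... read-modify-write maccfg1/maccfg2/ecntrl/ptv ...
 *	graceful_start(dtsec);
 *
 * as done in dtsec_set_tx_pause_frames(), dtsec_accept_rx_pause_frames(),
 * dtsec_modify_mac_address() and dtsec_adjust_link().
 */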

int dtsec_enable(struct fman_mac *dtsec)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	u32 tmp;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	/* Enable */
	tmp = ioread32be(&regs->maccfg1);
	tmp |= MACCFG1_RX_EN | MACCFG1_TX_EN;
	iowrite32be(tmp, &regs->maccfg1);

	/* Graceful start - clear the graceful Rx/Tx stop bit */
	graceful_start(dtsec);

	return 0;
}

int dtsec_disable(struct fman_mac *dtsec)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	u32 tmp;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	/* Graceful stop - Assert the graceful Rx/Tx stop bit */
	graceful_stop(dtsec);

	tmp = ioread32be(&regs->maccfg1);
	tmp &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	iowrite32be(tmp, &regs->maccfg1);

	return 0;
}

int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
			      u8 __maybe_unused priority,
			      u16 pause_time, u16 __maybe_unused thresh_time)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	u32 ptv = 0;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	graceful_stop(dtsec);

	if (pause_time) {
		/* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
		if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) {
			pr_warn("pause-time: %d illegal. Should be > 320\n",
				pause_time);
			return -EINVAL;
		}

		ptv = ioread32be(&regs->ptv);
		ptv &= PTV_PTE_MASK;
		ptv |= pause_time & PTV_PT_MASK;
		iowrite32be(ptv, &regs->ptv);

		/* trigger the transmission of a flow-control pause frame */
		iowrite32be(ioread32be(&regs->maccfg1) | MACCFG1_TX_FLOW,
			    &regs->maccfg1);
	} else
		iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
			    &regs->maccfg1);

	graceful_start(dtsec);

	return 0;
}
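
/* Example (illustrative): with pause_time = 0xf000 (the driver default),
 * PTV[PT] is programmed with 0xf000 pause quanta; assuming the IEEE 802.3
 * definition of one quantum as 512 bit times, that is roughly 31 ms at
 * 1 Gb/s.
 */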

int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	u32 tmp;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	graceful_stop(dtsec);

	tmp = ioread32be(&regs->maccfg1);
	if (en)
		tmp |= MACCFG1_RX_FLOW;
	else
		tmp &= ~MACCFG1_RX_FLOW;
	iowrite32be(tmp, &regs->maccfg1);

	graceful_start(dtsec);

	return 0;
}

int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr)
{
	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	graceful_stop(dtsec);

	/* Initialize MAC Station Address registers (1 & 2)
	 * Station address has to be swapped (big endian to little endian)
	 */
	dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
	set_mac_address(dtsec->regs, (const u8 *)(*enet_addr));

	graceful_start(dtsec);

	return 0;
}

int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	struct eth_hash_entry *hash_entry;
	u64 addr;
	s32 bucket;
	u32 crc = 0xFFFFFFFF;
	bool mcast, ghtx;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	addr = ENET_ADDR_TO_UINT64(*eth_addr);

	ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
	mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);

	/* Cannot handle unicast mac addr when GHTX is on */
	if (ghtx && !mcast) {
		pr_err("Could not compute hash bucket\n");
		return -EINVAL;
	}
	crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
	crc = bitrev32(crc);

	/* Considering the 9 highest order bits of the crc, H[8:0]:
	 * if ghtx = 0, H[8:6] (highest order 3 bits) identify the hash register
	 * and H[5:1] (next 5 bits) identify the hash bit;
	 * if ghtx = 1, H[8:5] (highest order 4 bits) identify the hash register
	 * and H[4:0] (next 5 bits) identify the hash bit.
	 *
	 * In the bucket index output the low 5 bits identify the hash register
	 * bit, while the higher 4 bits identify the hash register.
	 */

	if (ghtx) {
		bucket = (s32)((crc >> 23) & 0x1ff);
	} else {
		bucket = (s32)((crc >> 24) & 0xff);
		/* if !ghtx and mcast the bit must be set in gaddr instead of
		 * igaddr.
		 */
		if (mcast)
			bucket += 0x100;
	}

	set_bucket(dtsec->regs, bucket, true);

	/* Create element to be added to the driver hash table */
	hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
	if (!hash_entry)
		return -ENOMEM;
	hash_entry->addr = addr;
	INIT_LIST_HEAD(&hash_entry->node);

	if (addr & MAC_GROUP_ADDRESS)
		/* Group Address */
		list_add_tail(&hash_entry->node,
			      &dtsec->multicast_addr_hash->lsts[bucket]);
	else
		list_add_tail(&hash_entry->node,
			      &dtsec->unicast_addr_hash->lsts[bucket]);

	return 0;
}
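
/* Worked example (with a hypothetical, made-up CRC value): if the
 * bit-reversed CRC-32 of a multicast address came out as 0xb4000000 and
 * RCTRL[GHTX] is clear, the bucket would be ((crc >> 24) & 0xff) + 0x100 =
 * 0x1b4, which set_bucket() above maps to gaddr[5], bit mask 0x00000800.
 */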

int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
{
	u32 tmp;
	struct dtsec_regs __iomem *regs = dtsec->regs;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	tmp = ioread32be(&regs->rctrl);
	if (enable)
		tmp |= RCTRL_MPROM;
	else
		tmp &= ~RCTRL_MPROM;

	iowrite32be(tmp, &regs->rctrl);

	return 0;
}

int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	u32 rctrl, tctrl;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	rctrl = ioread32be(&regs->rctrl);
	tctrl = ioread32be(&regs->tctrl);

	if (enable) {
		rctrl |= RCTRL_RTSE;
		tctrl |= TCTRL_TTSE;
	} else {
		rctrl &= ~RCTRL_RTSE;
		tctrl &= ~TCTRL_TTSE;
	}

	iowrite32be(rctrl, &regs->rctrl);
	iowrite32be(tctrl, &regs->tctrl);

	return 0;
}

int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	struct list_head *pos;
	struct eth_hash_entry *hash_entry = NULL;
	u64 addr;
	s32 bucket;
	u32 crc = 0xFFFFFFFF;
	bool mcast, ghtx;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	addr = ENET_ADDR_TO_UINT64(*eth_addr);

	ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
	mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);

	/* Cannot handle unicast mac addr when GHTX is on */
	if (ghtx && !mcast) {
		pr_err("Could not compute hash bucket\n");
		return -EINVAL;
	}
	crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
	crc = bitrev32(crc);

	if (ghtx) {
		bucket = (s32)((crc >> 23) & 0x1ff);
	} else {
		bucket = (s32)((crc >> 24) & 0xff);
		/* if !ghtx and mcast the bit must be set
		 * in gaddr instead of igaddr.
		 */
		if (mcast)
			bucket += 0x100;
	}

	if (addr & MAC_GROUP_ADDRESS) {
		/* Group Address */
		list_for_each(pos,
			      &dtsec->multicast_addr_hash->lsts[bucket]) {
			hash_entry = ETH_HASH_ENTRY_OBJ(pos);
			if (hash_entry && hash_entry->addr == addr) {
				list_del_init(&hash_entry->node);
				kfree(hash_entry);
				break;
			}
		}
		if (list_empty(&dtsec->multicast_addr_hash->lsts[bucket]))
			set_bucket(dtsec->regs, bucket, false);
	} else {
		/* Individual Address */
		list_for_each(pos,
			      &dtsec->unicast_addr_hash->lsts[bucket]) {
			hash_entry = ETH_HASH_ENTRY_OBJ(pos);
			if (hash_entry && hash_entry->addr == addr) {
				list_del_init(&hash_entry->node);
				kfree(hash_entry);
				break;
			}
		}
		if (list_empty(&dtsec->unicast_addr_hash->lsts[bucket]))
			set_bucket(dtsec->regs, bucket, false);
	}

	/* address does not exist */
	WARN_ON(!hash_entry);

	return 0;
}

int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	u32 tmp;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	/* Set unicast promiscuous */
	tmp = ioread32be(&regs->rctrl);
	if (new_val)
		tmp |= RCTRL_UPROM;
	else
		tmp &= ~RCTRL_UPROM;

	iowrite32be(tmp, &regs->rctrl);

	/* Set multicast promiscuous */
	tmp = ioread32be(&regs->rctrl);
	if (new_val)
		tmp |= RCTRL_MPROM;
	else
		tmp &= ~RCTRL_MPROM;

	iowrite32be(tmp, &regs->rctrl);

	return 0;
}

int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	u32 tmp;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	graceful_stop(dtsec);

	tmp = ioread32be(&regs->maccfg2);

	/* Full Duplex */
	tmp |= MACCFG2_FULL_DUPLEX;

	tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE);
	if (speed < SPEED_1000)
		tmp |= MACCFG2_NIBBLE_MODE;
	else if (speed == SPEED_1000)
		tmp |= MACCFG2_BYTE_MODE;
	iowrite32be(tmp, &regs->maccfg2);

	tmp = ioread32be(&regs->ecntrl);
	if (speed == SPEED_100)
		tmp |= DTSEC_ECNTRL_R100M;
	else
		tmp &= ~DTSEC_ECNTRL_R100M;
	iowrite32be(tmp, &regs->ecntrl);

	graceful_start(dtsec);

	return 0;
}

int dtsec_restart_autoneg(struct fman_mac *dtsec)
{
	u16 tmp_reg16;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	tmp_reg16 = phy_read(dtsec->tbiphy, MII_BMCR);

	tmp_reg16 &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
	tmp_reg16 |= (BMCR_ANENABLE | BMCR_ANRESTART |
		      BMCR_FULLDPLX | BMCR_SPEED1000);

	phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);

	return 0;
}

int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	*mac_version = ioread32be(&regs->tsec_id);

	return 0;
}

int dtsec_set_exception(struct fman_mac *dtsec,
			enum fman_mac_exceptions exception, bool enable)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	u32 bit_mask = 0;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	if (exception != FM_MAC_EX_1G_1588_TS_RX_ERR) {
		bit_mask = get_exception_flag(exception);
		if (bit_mask) {
			if (enable)
				dtsec->exceptions |= bit_mask;
			else
				dtsec->exceptions &= ~bit_mask;
		} else {
			pr_err("Undefined exception\n");
			return -EINVAL;
		}
		if (enable)
			iowrite32be(ioread32be(&regs->imask) | bit_mask,
				    &regs->imask);
		else
			iowrite32be(ioread32be(&regs->imask) & ~bit_mask,
				    &regs->imask);
	} else {
		if (!dtsec->ptp_tsu_enabled) {
			pr_err("Exception valid for 1588 only\n");
			return -EINVAL;
		}
		switch (exception) {
		case FM_MAC_EX_1G_1588_TS_RX_ERR:
			if (enable) {
				dtsec->en_tsu_err_exception = true;
				iowrite32be(ioread32be(&regs->tmr_pemask) |
					    TMR_PEMASK_TSREEN,
					    &regs->tmr_pemask);
			} else {
				dtsec->en_tsu_err_exception = false;
				iowrite32be(ioread32be(&regs->tmr_pemask) &
					    ~TMR_PEMASK_TSREEN,
					    &regs->tmr_pemask);
			}
			break;
		default:
			pr_err("Undefined exception\n");
			return -EINVAL;
		}
	}

	return 0;
}

int dtsec_init(struct fman_mac *dtsec)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	struct dtsec_cfg *dtsec_drv_param;
	u16 max_frm_ln;
	int err;

	if (is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	if (DEFAULT_RESET_ON_INIT &&
	    (fman_reset_mac(dtsec->fm, dtsec->mac_id) != 0)) {
		pr_err("Can't reset MAC!\n");
		return -EINVAL;
	}

	err = check_init_parameters(dtsec);
	if (err)
		return err;

	dtsec_drv_param = dtsec->dtsec_drv_param;

	err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
		   dtsec->max_speed, dtsec->addr, dtsec->exceptions,
		   dtsec->tbiphy->mdio.addr);
	if (err) {
		free_init_resources(dtsec);
		pr_err("DTSEC version doesn't support this i/f mode\n");
		return err;
	}

	if (dtsec->phy_if == PHY_INTERFACE_MODE_SGMII) {
		u16 tmp_reg16;

		/* Configure the TBI PHY Control Register */
		tmp_reg16 = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
		phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);

		tmp_reg16 = TBICON_CLK_SELECT;
		phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);

		tmp_reg16 = (BMCR_RESET | BMCR_ANENABLE |
			     BMCR_FULLDPLX | BMCR_SPEED1000);
		phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);

		if (dtsec->basex_if)
			tmp_reg16 = TBIANA_1000X;
		else
			tmp_reg16 = TBIANA_SGMII;
		phy_write(dtsec->tbiphy, MII_ADVERTISE, tmp_reg16);

		tmp_reg16 = (BMCR_ANENABLE | BMCR_ANRESTART |
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
	}

	/* Max Frame Length */
	max_frm_ln = (u16)ioread32be(&regs->maxfrm);
	err = fman_set_mac_max_frame(dtsec->fm, dtsec->mac_id, max_frm_ln);
	if (err) {
		pr_err("Setting max frame length failed\n");
		free_init_resources(dtsec);
		return -EINVAL;
	}

	dtsec->multicast_addr_hash =
	alloc_hash_table(EXTENDED_HASH_TABLE_SIZE);
	if (!dtsec->multicast_addr_hash) {
		free_init_resources(dtsec);
		pr_err("MC hash table allocation failed\n");
		return -ENOMEM;
	}

	dtsec->unicast_addr_hash = alloc_hash_table(DTSEC_HASH_TABLE_SIZE);
	if (!dtsec->unicast_addr_hash) {
		free_init_resources(dtsec);
		pr_err("UC hash table allocation failed\n");
		return -ENOMEM;
	}

	/* register err intr handler for dtsec to FPM (err) */
	fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
			   FMAN_INTR_TYPE_ERR, dtsec_isr, dtsec);
	/* register 1588 intr handler for TMR to FPM (normal) */
	fman_register_intr(dtsec->fm, FMAN_MOD_MAC, dtsec->mac_id,
			   FMAN_INTR_TYPE_NORMAL, dtsec_1588_isr, dtsec);

	kfree(dtsec_drv_param);
	dtsec->dtsec_drv_param = NULL;

	return 0;
}

int dtsec_free(struct fman_mac *dtsec)
{
	free_init_resources(dtsec);

	kfree(dtsec->dtsec_drv_param);
	dtsec->dtsec_drv_param = NULL;
	kfree(dtsec);

	return 0;
}

struct fman_mac *dtsec_config(struct fman_mac_params *params)
{
	struct fman_mac *dtsec;
	struct dtsec_cfg *dtsec_drv_param;
	void __iomem *base_addr;

	base_addr = params->base_addr;

	/* allocate memory for the dTSEC data structure. */
	dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL);
	if (!dtsec)
		return NULL;

	/* allocate memory for the d_tsec driver parameters data structure. */
	dtsec_drv_param = kzalloc(sizeof(*dtsec_drv_param), GFP_KERNEL);
	if (!dtsec_drv_param)
		goto err_dtsec;

	/* Plant parameter structure pointer */
	dtsec->dtsec_drv_param = dtsec_drv_param;

	set_dflts(dtsec_drv_param);

	dtsec->regs = base_addr;
	dtsec->addr = ENET_ADDR_TO_UINT64(params->addr);
	dtsec->max_speed = params->max_speed;
	dtsec->phy_if = params->phy_if;
	dtsec->mac_id = params->mac_id;
	dtsec->exceptions = (DTSEC_IMASK_BREN	|
			     DTSEC_IMASK_RXCEN	|
			     DTSEC_IMASK_BTEN	|
			     DTSEC_IMASK_TXCEN	|
			     DTSEC_IMASK_TXEEN	|
			     DTSEC_IMASK_ABRTEN	|
			     DTSEC_IMASK_LCEN	|
			     DTSEC_IMASK_CRLEN	|
			     DTSEC_IMASK_XFUNEN	|
			     DTSEC_IMASK_IFERREN |
			     DTSEC_IMASK_MAGEN	|
			     DTSEC_IMASK_TDPEEN	|
			     DTSEC_IMASK_RDPEEN);
	dtsec->exception_cb = params->exception_cb;
	dtsec->event_cb = params->event_cb;
	dtsec->dev_id = params->dev_id;
	dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
	dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en;

	dtsec->fm = params->fm;
	dtsec->basex_if = params->basex_if;

	if (!params->internal_phy_node) {
		pr_err("TBI PHY node is not available\n");
		goto err_dtsec_drv_param;
	}

	dtsec->tbiphy = of_phy_find_device(params->internal_phy_node);
	if (!dtsec->tbiphy) {
		pr_err("of_phy_find_device (TBI PHY) failed\n");
		goto err_dtsec_drv_param;
	}

	put_device(&dtsec->tbiphy->mdio.dev);

	/* Save FMan revision */
	fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);

	return dtsec;

err_dtsec_drv_param:
	kfree(dtsec_drv_param);
err_dtsec:
	kfree(dtsec);
	return NULL;
}