1 /*
2  * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
3  *
4  * Copyright (C) 2012 Marvell
5  *
6  * Rami Rosen <rosenr@marvell.com>
7  * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
8  *
9  * This file is licensed under the terms of the GNU General Public
10  * License version 2. This program is licensed "as is" without any
11  * warranty of any kind, whether express or implied.
12  */
13 
14 #include <linux/kernel.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/platform_device.h>
18 #include <linux/skbuff.h>
19 #include <linux/inetdevice.h>
20 #include <linux/mbus.h>
21 #include <linux/module.h>
22 #include <linux/interrupt.h>
23 #include <linux/if_vlan.h>
24 #include <net/ip.h>
25 #include <net/ipv6.h>
26 #include <linux/io.h>
27 #include <net/tso.h>
28 #include <linux/of.h>
29 #include <linux/of_irq.h>
30 #include <linux/of_mdio.h>
31 #include <linux/of_net.h>
32 #include <linux/of_address.h>
33 #include <linux/phy.h>
34 #include <linux/clk.h>
35 
36 /* Registers */
37 #define MVNETA_RXQ_CONFIG_REG(q)                (0x1400 + ((q) << 2))
38 #define      MVNETA_RXQ_HW_BUF_ALLOC            BIT(1)
39 #define      MVNETA_RXQ_PKT_OFFSET_ALL_MASK     (0xf    << 8)
40 #define      MVNETA_RXQ_PKT_OFFSET_MASK(offs)   ((offs) << 8)
41 #define MVNETA_RXQ_THRESHOLD_REG(q)             (0x14c0 + ((q) << 2))
42 #define      MVNETA_RXQ_NON_OCCUPIED(v)         ((v) << 16)
43 #define MVNETA_RXQ_BASE_ADDR_REG(q)             (0x1480 + ((q) << 2))
44 #define MVNETA_RXQ_SIZE_REG(q)                  (0x14a0 + ((q) << 2))
45 #define      MVNETA_RXQ_BUF_SIZE_SHIFT          19
46 #define      MVNETA_RXQ_BUF_SIZE_MASK           (0x1fff << 19)
47 #define MVNETA_RXQ_STATUS_REG(q)                (0x14e0 + ((q) << 2))
48 #define      MVNETA_RXQ_OCCUPIED_ALL_MASK       0x3fff
49 #define MVNETA_RXQ_STATUS_UPDATE_REG(q)         (0x1500 + ((q) << 2))
50 #define      MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT  16
51 #define      MVNETA_RXQ_ADD_NON_OCCUPIED_MAX    255
52 #define MVNETA_PORT_RX_RESET                    0x1cc0
53 #define      MVNETA_PORT_RX_DMA_RESET           BIT(0)
54 #define MVNETA_PHY_ADDR                         0x2000
55 #define      MVNETA_PHY_ADDR_MASK               0x1f
56 #define MVNETA_MBUS_RETRY                       0x2010
57 #define MVNETA_UNIT_INTR_CAUSE                  0x2080
58 #define MVNETA_UNIT_CONTROL                     0x20B0
59 #define      MVNETA_PHY_POLLING_ENABLE          BIT(1)
60 #define MVNETA_WIN_BASE(w)                      (0x2200 + ((w) << 3))
61 #define MVNETA_WIN_SIZE(w)                      (0x2204 + ((w) << 3))
62 #define MVNETA_WIN_REMAP(w)                     (0x2280 + ((w) << 2))
63 #define MVNETA_BASE_ADDR_ENABLE                 0x2290
64 #define MVNETA_PORT_CONFIG                      0x2400
65 #define      MVNETA_UNI_PROMISC_MODE            BIT(0)
66 #define      MVNETA_DEF_RXQ(q)                  ((q) << 1)
67 #define      MVNETA_DEF_RXQ_ARP(q)              ((q) << 4)
68 #define      MVNETA_TX_UNSET_ERR_SUM            BIT(12)
69 #define      MVNETA_DEF_RXQ_TCP(q)              ((q) << 16)
70 #define      MVNETA_DEF_RXQ_UDP(q)              ((q) << 19)
71 #define      MVNETA_DEF_RXQ_BPDU(q)             ((q) << 22)
72 #define      MVNETA_RX_CSUM_WITH_PSEUDO_HDR     BIT(25)
73 #define      MVNETA_PORT_CONFIG_DEFL_VALUE(q)   (MVNETA_DEF_RXQ(q)       | \
74 						 MVNETA_DEF_RXQ_ARP(q)	 | \
75 						 MVNETA_DEF_RXQ_TCP(q)	 | \
76 						 MVNETA_DEF_RXQ_UDP(q)	 | \
77 						 MVNETA_DEF_RXQ_BPDU(q)	 | \
78 						 MVNETA_TX_UNSET_ERR_SUM | \
79 						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
80 #define MVNETA_PORT_CONFIG_EXTEND                0x2404
81 #define MVNETA_MAC_ADDR_LOW                      0x2414
82 #define MVNETA_MAC_ADDR_HIGH                     0x2418
83 #define MVNETA_SDMA_CONFIG                       0x241c
84 #define      MVNETA_SDMA_BRST_SIZE_16            4
85 #define      MVNETA_RX_BRST_SZ_MASK(burst)       ((burst) << 1)
86 #define      MVNETA_RX_NO_DATA_SWAP              BIT(4)
87 #define      MVNETA_TX_NO_DATA_SWAP              BIT(5)
88 #define      MVNETA_DESC_SWAP                    BIT(6)
89 #define      MVNETA_TX_BRST_SZ_MASK(burst)       ((burst) << 22)
90 #define MVNETA_PORT_STATUS                       0x2444
91 #define      MVNETA_TX_IN_PRGRS                  BIT(1)
92 #define      MVNETA_TX_FIFO_EMPTY                BIT(8)
93 #define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
94 #define MVNETA_SERDES_CFG			 0x24A0
95 #define      MVNETA_SGMII_SERDES_PROTO		 0x0cc7
96 #define      MVNETA_QSGMII_SERDES_PROTO		 0x0667
97 #define MVNETA_TYPE_PRIO                         0x24bc
98 #define      MVNETA_FORCE_UNI                    BIT(21)
99 #define MVNETA_TXQ_CMD_1                         0x24e4
100 #define MVNETA_TXQ_CMD                           0x2448
101 #define      MVNETA_TXQ_DISABLE_SHIFT            8
102 #define      MVNETA_TXQ_ENABLE_MASK              0x000000ff
103 #define MVNETA_GMAC_CLOCK_DIVIDER                0x24f4
104 #define      MVNETA_GMAC_1MS_CLOCK_ENABLE        BIT(31)
105 #define MVNETA_ACC_MODE                          0x2500
106 #define MVNETA_CPU_MAP(cpu)                      (0x2540 + ((cpu) << 2))
107 #define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK      0x000000ff
108 #define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK      0x0000ff00
109 #define MVNETA_RXQ_TIME_COAL_REG(q)              (0x2580 + ((q) << 2))
110 
111 /* Exception Interrupt Port/Queue Cause register */
112 
113 #define MVNETA_INTR_NEW_CAUSE                    0x25a0
114 #define MVNETA_INTR_NEW_MASK                     0x25a4
115 
116 /* bits  0..7  = TXQ SENT, one bit per queue.
117  * bits  8..15 = RXQ OCCUP, one bit per queue.
118  * bits 16..23 = RXQ FREE, one bit per queue.
119  * bit  29 = OLD_REG_SUM, see old reg ?
120  * bit  30 = TX_ERR_SUM, one bit for 4 ports
121  * bit  31 = MISC_SUM,   one bit for 4 ports
122  */
123 #define      MVNETA_TX_INTR_MASK(nr_txqs)        (((1 << nr_txqs) - 1) << 0)
124 #define      MVNETA_TX_INTR_MASK_ALL             (0xff << 0)
125 #define      MVNETA_RX_INTR_MASK(nr_rxqs)        (((1 << nr_rxqs) - 1) << 8)
126 #define      MVNETA_RX_INTR_MASK_ALL             (0xff << 8)
127 #define      MVNETA_MISCINTR_INTR_MASK           BIT(31)
128 
129 #define MVNETA_INTR_OLD_CAUSE                    0x25a8
130 #define MVNETA_INTR_OLD_MASK                     0x25ac
131 
132 /* Data Path Port/Queue Cause Register */
133 #define MVNETA_INTR_MISC_CAUSE                   0x25b0
134 #define MVNETA_INTR_MISC_MASK                    0x25b4
135 
136 #define      MVNETA_CAUSE_PHY_STATUS_CHANGE      BIT(0)
137 #define      MVNETA_CAUSE_LINK_CHANGE            BIT(1)
138 #define      MVNETA_CAUSE_PTP                    BIT(4)
139 
140 #define      MVNETA_CAUSE_INTERNAL_ADDR_ERR      BIT(7)
141 #define      MVNETA_CAUSE_RX_OVERRUN             BIT(8)
142 #define      MVNETA_CAUSE_RX_CRC_ERROR           BIT(9)
143 #define      MVNETA_CAUSE_RX_LARGE_PKT           BIT(10)
144 #define      MVNETA_CAUSE_TX_UNDERUN             BIT(11)
145 #define      MVNETA_CAUSE_PRBS_ERR               BIT(12)
146 #define      MVNETA_CAUSE_PSC_SYNC_CHANGE        BIT(13)
147 #define      MVNETA_CAUSE_SERDES_SYNC_ERR        BIT(14)
148 
149 #define      MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT    16
150 #define      MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK   (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
151 #define      MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
152 
153 #define      MVNETA_CAUSE_TXQ_ERROR_SHIFT        24
154 #define      MVNETA_CAUSE_TXQ_ERROR_ALL_MASK     (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
155 #define      MVNETA_CAUSE_TXQ_ERROR_MASK(q)      (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
156 
157 #define MVNETA_INTR_ENABLE                       0x25b8
158 #define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK     0x0000ff00
#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK     0xff000000  /* note: neta says it's 0x000000FF */
160 
161 #define MVNETA_RXQ_CMD                           0x2680
162 #define      MVNETA_RXQ_DISABLE_SHIFT            8
163 #define      MVNETA_RXQ_ENABLE_MASK              0x000000ff
164 #define MVETH_TXQ_TOKEN_COUNT_REG(q)             (0x2700 + ((q) << 4))
165 #define MVETH_TXQ_TOKEN_CFG_REG(q)               (0x2704 + ((q) << 4))
166 #define MVNETA_GMAC_CTRL_0                       0x2c00
167 #define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT       2
168 #define      MVNETA_GMAC_MAX_RX_SIZE_MASK        0x7ffc
169 #define      MVNETA_GMAC0_PORT_ENABLE            BIT(0)
170 #define MVNETA_GMAC_CTRL_2                       0x2c08
171 #define      MVNETA_GMAC2_INBAND_AN_ENABLE       BIT(0)
172 #define      MVNETA_GMAC2_PCS_ENABLE             BIT(3)
173 #define      MVNETA_GMAC2_PORT_RGMII             BIT(4)
174 #define      MVNETA_GMAC2_PORT_RESET             BIT(6)
175 #define MVNETA_GMAC_STATUS                       0x2c10
176 #define      MVNETA_GMAC_LINK_UP                 BIT(0)
177 #define      MVNETA_GMAC_SPEED_1000              BIT(1)
178 #define      MVNETA_GMAC_SPEED_100               BIT(2)
179 #define      MVNETA_GMAC_FULL_DUPLEX             BIT(3)
180 #define      MVNETA_GMAC_RX_FLOW_CTRL_ENABLE     BIT(4)
181 #define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE     BIT(5)
182 #define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE     BIT(6)
183 #define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE     BIT(7)
184 #define MVNETA_GMAC_AUTONEG_CONFIG               0x2c0c
185 #define      MVNETA_GMAC_FORCE_LINK_DOWN         BIT(0)
186 #define      MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
187 #define      MVNETA_GMAC_INBAND_AN_ENABLE        BIT(2)
188 #define      MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
189 #define      MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
190 #define      MVNETA_GMAC_AN_SPEED_EN             BIT(7)
191 #define      MVNETA_GMAC_AN_FLOW_CTRL_EN         BIT(11)
192 #define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
193 #define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
194 #define MVNETA_MIB_COUNTERS_BASE                 0x3080
195 #define      MVNETA_MIB_LATE_COLLISION           0x7c
196 #define MVNETA_DA_FILT_SPEC_MCAST                0x3400
197 #define MVNETA_DA_FILT_OTH_MCAST                 0x3500
198 #define MVNETA_DA_FILT_UCAST_BASE                0x3600
199 #define MVNETA_TXQ_BASE_ADDR_REG(q)              (0x3c00 + ((q) << 2))
200 #define MVNETA_TXQ_SIZE_REG(q)                   (0x3c20 + ((q) << 2))
201 #define      MVNETA_TXQ_SENT_THRESH_ALL_MASK     0x3fff0000
202 #define      MVNETA_TXQ_SENT_THRESH_MASK(coal)   ((coal) << 16)
203 #define MVNETA_TXQ_UPDATE_REG(q)                 (0x3c60 + ((q) << 2))
204 #define      MVNETA_TXQ_DEC_SENT_SHIFT           16
205 #define MVNETA_TXQ_STATUS_REG(q)                 (0x3c40 + ((q) << 2))
206 #define      MVNETA_TXQ_SENT_DESC_SHIFT          16
207 #define      MVNETA_TXQ_SENT_DESC_MASK           0x3fff0000
208 #define MVNETA_PORT_TX_RESET                     0x3cf0
209 #define      MVNETA_PORT_TX_DMA_RESET            BIT(0)
210 #define MVNETA_TX_MTU                            0x3e0c
211 #define MVNETA_TX_TOKEN_SIZE                     0x3e14
212 #define      MVNETA_TX_TOKEN_SIZE_MAX            0xffffffff
213 #define MVNETA_TXQ_TOKEN_SIZE_REG(q)             (0x3e40 + ((q) << 2))
214 #define      MVNETA_TXQ_TOKEN_SIZE_MAX           0x7fffffff
215 
216 #define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	 0xff
217 
218 /* Descriptor ring Macros */
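/* Index of the descriptor following <index>, wrapping to 0 at the ring end */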
219 #define MVNETA_QUEUE_NEXT_DESC(q, index)	\
220 	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
221 
222 /* Various constants */
223 
224 /* Coalescing */
225 #define MVNETA_TXDONE_COAL_PKTS		1
226 #define MVNETA_RX_COAL_PKTS		32
227 #define MVNETA_RX_COAL_USEC		100
228 
/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver), or is filled automatically with zeroes on
 * the RX side. Since those two bytes sit at the front of the Ethernet
 * header, they automatically leave the IP header aligned on a 4-byte
 * boundary: the hardware skips those two bytes on its own.
 */
237 #define MVNETA_MH_SIZE			2
238 
239 #define MVNETA_VLAN_TAG_LEN             4
240 
241 #define MVNETA_CPU_D_CACHE_LINE_SIZE    32
242 #define MVNETA_TX_CSUM_MAX_SIZE		9800
243 #define MVNETA_ACC_MODE_EXT		1
244 
245 /* Timeout constants */
246 #define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
247 #define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
248 #define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000
249 
250 #define MVNETA_TX_MTU_MAX		0x3ffff
251 
252 /* TSO header size */
253 #define TSO_HEADER_SIZE 128
254 
255 /* Max number of Rx descriptors */
256 #define MVNETA_MAX_RXD 128
257 
258 /* Max number of Tx descriptors */
259 #define MVNETA_MAX_TXD 532
260 
261 /* Max number of allowed TCP segments for software TSO */
262 #define MVNETA_MAX_TSO_SEGS 100
263 
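/* Worst-case number of TX descriptors a single skb is assumed to consume:
 * a header descriptor and a data descriptor per TSO segment, plus one
 * descriptor per skb fragment.
 */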
264 #define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
265 
266 /* descriptor aligned size */
267 #define MVNETA_DESC_ALIGNED_SIZE	32
268 
269 #define MVNETA_RX_PKT_SIZE(mtu) \
270 	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
271 	      ETH_HLEN + ETH_FCS_LEN,			     \
272 	      MVNETA_CPU_D_CACHE_LINE_SIZE)
273 
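/* True when a TX buffer address lies inside the queue's pre-allocated TSO
 * header area; such buffers must not be DMA-unmapped on completion.
 */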
274 #define IS_TSO_HEADER(txq, addr) \
275 	((addr >= txq->tso_hdrs_phys) && \
276 	 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
277 
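/* RX buffer size: the packet itself plus the NET_SKB_PAD headroom */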
278 #define MVNETA_RX_BUF_SIZE(pkt_size)   ((pkt_size) + NET_SKB_PAD)
279 
280 struct mvneta_pcpu_stats {
281 	struct	u64_stats_sync syncp;
282 	u64	rx_packets;
283 	u64	rx_bytes;
284 	u64	tx_packets;
285 	u64	tx_bytes;
286 };
287 
288 struct mvneta_port {
289 	int pkt_size;
290 	unsigned int frag_size;
291 	void __iomem *base;
292 	struct mvneta_rx_queue *rxqs;
293 	struct mvneta_tx_queue *txqs;
294 	struct net_device *dev;
295 
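	/* RX/TX interrupt cause bits remembered between NAPI polls */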
296 	u32 cause_rx_tx;
297 	struct napi_struct napi;
298 
299 	/* Core clock */
300 	struct clk *clk;
301 	u8 mcast_count[256];
302 	u16 tx_ring_size;
303 	u16 rx_ring_size;
304 	struct mvneta_pcpu_stats *stats;
305 
306 	struct mii_bus *mii_bus;
307 	struct phy_device *phy_dev;
308 	phy_interface_t phy_interface;
309 	struct device_node *phy_node;
310 	unsigned int link;
311 	unsigned int duplex;
312 	unsigned int speed;
313 	unsigned int tx_csum_limit;
	unsigned int use_inband_status:1;
315 };
316 
/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and receive DMA descriptors; this layout is
 * dictated by the hardware design.
 */
321 
322 #define MVNETA_TX_L3_OFF_SHIFT	0
323 #define MVNETA_TX_IP_HLEN_SHIFT	8
324 #define MVNETA_TX_L4_UDP	BIT(16)
325 #define MVNETA_TX_L3_IP6	BIT(17)
326 #define MVNETA_TXD_IP_CSUM	BIT(18)
327 #define MVNETA_TXD_Z_PAD	BIT(19)
328 #define MVNETA_TXD_L_DESC	BIT(20)
329 #define MVNETA_TXD_F_DESC	BIT(21)
330 #define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
331 				 MVNETA_TXD_L_DESC | \
332 				 MVNETA_TXD_F_DESC)
333 #define MVNETA_TX_L4_CSUM_FULL	BIT(30)
334 #define MVNETA_TX_L4_CSUM_NOT	BIT(31)
335 
336 #define MVNETA_RXD_ERR_CRC		0x0
337 #define MVNETA_RXD_ERR_SUMMARY		BIT(16)
338 #define MVNETA_RXD_ERR_OVERRUN		BIT(17)
339 #define MVNETA_RXD_ERR_LEN		BIT(18)
340 #define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
341 #define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
342 #define MVNETA_RXD_L3_IP4		BIT(25)
343 #define MVNETA_RXD_FIRST_LAST_DESC	(BIT(26) | BIT(27))
344 #define MVNETA_RXD_L4_CSUM_OK		BIT(30)
345 
346 #if defined(__LITTLE_ENDIAN)
347 struct mvneta_tx_desc {
348 	u32  command;		/* Options used by HW for packet transmitting.*/
349 	u16  reserverd1;	/* csum_l4 (for future use)		*/
350 	u16  data_size;		/* Data size of transmitted packet in bytes */
351 	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
352 	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
353 	u32  reserved3[4];	/* Reserved - (for future use)		*/
354 };
355 
356 struct mvneta_rx_desc {
357 	u32  status;		/* Info about received packet		*/
358 	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
359 	u16  data_size;		/* Size of received packet in bytes	*/
360 
361 	u32  buf_phys_addr;	/* Physical address of the buffer	*/
362 	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
363 
364 	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
365 	u16  reserved3;		/* prefetch_cmd, for future use		*/
366 	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
367 
368 	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
369 	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
370 };
371 #else
372 struct mvneta_tx_desc {
373 	u16  data_size;		/* Data size of transmitted packet in bytes */
374 	u16  reserverd1;	/* csum_l4 (for future use)		*/
375 	u32  command;		/* Options used by HW for packet transmitting.*/
376 	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
377 	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
378 	u32  reserved3[4];	/* Reserved - (for future use)		*/
379 };
380 
381 struct mvneta_rx_desc {
382 	u16  data_size;		/* Size of received packet in bytes	*/
383 	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
384 	u32  status;		/* Info about received packet		*/
385 
386 	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
387 	u32  buf_phys_addr;	/* Physical address of the buffer	*/
388 
389 	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
390 	u16  reserved3;		/* prefetch_cmd, for future use		*/
391 	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
392 
393 	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
394 	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
395 };
396 #endif
397 
398 struct mvneta_tx_queue {
399 	/* Number of this TX queue, in the range 0-7 */
400 	u8 id;
401 
402 	/* Number of TX DMA descriptors in the descriptor ring */
403 	int size;
404 
405 	/* Number of currently used TX DMA descriptor in the
406 	 * descriptor ring
407 	 */
408 	int count;
409 	int tx_stop_threshold;
410 	int tx_wake_threshold;
411 
412 	/* Array of transmitted skb */
413 	struct sk_buff **tx_skb;
414 
415 	/* Index of last TX DMA descriptor that was inserted */
416 	int txq_put_index;
417 
418 	/* Index of the TX DMA descriptor to be cleaned up */
419 	int txq_get_index;
420 
421 	u32 done_pkts_coal;
422 
423 	/* Virtual address of the TX DMA descriptors array */
424 	struct mvneta_tx_desc *descs;
425 
426 	/* DMA address of the TX DMA descriptors array */
427 	dma_addr_t descs_phys;
428 
429 	/* Index of the last TX DMA descriptor */
430 	int last_desc;
431 
432 	/* Index of the next TX DMA descriptor to process */
433 	int next_desc_to_proc;
434 
435 	/* DMA buffers for TSO headers */
436 	char *tso_hdrs;
437 
438 	/* DMA address of TSO headers */
439 	dma_addr_t tso_hdrs_phys;
440 };
441 
442 struct mvneta_rx_queue {
443 	/* rx queue number, in the range 0-7 */
444 	u8 id;
445 
446 	/* num of rx descriptors in the rx descriptor ring */
447 	int size;
448 
449 	/* counter of times when mvneta_refill() failed */
450 	int missed;
451 
452 	u32 pkts_coal;
453 	u32 time_coal;
454 
455 	/* Virtual address of the RX DMA descriptors array */
456 	struct mvneta_rx_desc *descs;
457 
458 	/* DMA address of the RX DMA descriptors array */
459 	dma_addr_t descs_phys;
460 
461 	/* Index of the last RX DMA descriptor */
462 	int last_desc;
463 
464 	/* Index of the next RX DMA descriptor to process */
465 	int next_desc_to_proc;
466 };
467 
468 /* The hardware supports eight (8) rx queues, but we are only allowing
469  * the first one to be used. Therefore, let's just allocate one queue.
470  */
471 static int rxq_number = 1;
472 static int txq_number = 8;
473 
474 static int rxq_def;
475 
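/* Frames up to this many bytes are copied into a fresh skb on RX instead of
 * unmapping and handing over the original buffer.
 */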
476 static int rx_copybreak __read_mostly = 256;
477 
478 #define MVNETA_DRIVER_NAME "mvneta"
479 #define MVNETA_DRIVER_VERSION "1.0"
480 
481 /* Utility/helper methods */
482 
483 /* Write helper method */
484 static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
485 {
486 	writel(data, pp->base + offset);
487 }
488 
489 /* Read helper method */
490 static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
491 {
492 	return readl(pp->base + offset);
493 }
494 
495 /* Increment txq get counter */
496 static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
497 {
498 	txq->txq_get_index++;
499 	if (txq->txq_get_index == txq->size)
500 		txq->txq_get_index = 0;
501 }
502 
503 /* Increment txq put counter */
504 static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
505 {
506 	txq->txq_put_index++;
507 	if (txq->txq_put_index == txq->size)
508 		txq->txq_put_index = 0;
509 }
510 
511 
512 /* Clear all MIB counters */
513 static void mvneta_mib_counters_clear(struct mvneta_port *pp)
514 {
515 	int i;
516 	u32 dummy;
517 
	/* MIB counters are clear-on-read, so dummy reads are enough to zero them */
519 	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
520 		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
521 }
522 
523 /* Get System Network Statistics */
524 struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
525 					     struct rtnl_link_stats64 *stats)
526 {
527 	struct mvneta_port *pp = netdev_priv(dev);
528 	unsigned int start;
529 	int cpu;
530 
531 	for_each_possible_cpu(cpu) {
532 		struct mvneta_pcpu_stats *cpu_stats;
533 		u64 rx_packets;
534 		u64 rx_bytes;
535 		u64 tx_packets;
536 		u64 tx_bytes;
537 
538 		cpu_stats = per_cpu_ptr(pp->stats, cpu);
539 		do {
540 			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
541 			rx_packets = cpu_stats->rx_packets;
542 			rx_bytes   = cpu_stats->rx_bytes;
543 			tx_packets = cpu_stats->tx_packets;
544 			tx_bytes   = cpu_stats->tx_bytes;
545 		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
546 
547 		stats->rx_packets += rx_packets;
548 		stats->rx_bytes   += rx_bytes;
549 		stats->tx_packets += tx_packets;
550 		stats->tx_bytes   += tx_bytes;
551 	}
552 
553 	stats->rx_errors	= dev->stats.rx_errors;
554 	stats->rx_dropped	= dev->stats.rx_dropped;
555 
556 	stats->tx_dropped	= dev->stats.tx_dropped;
557 
558 	return stats;
559 }
560 
561 /* Rx descriptors helper methods */
562 
/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so a descriptor that does not
 * have both its first and last bits set indicates an error.
 */
568 static int mvneta_rxq_desc_is_first_last(u32 status)
569 {
570 	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
571 		MVNETA_RXD_FIRST_LAST_DESC;
572 }
573 
574 /* Add number of descriptors ready to receive new packets */
575 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
576 					  struct mvneta_rx_queue *rxq,
577 					  int ndescs)
578 {
579 	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
580 	 * be added at once
581 	 */
582 	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
583 		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
584 			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
585 			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
586 		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
587 	}
588 
589 	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
590 		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
591 }
592 
593 /* Get number of RX descriptors occupied by received packets */
594 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
595 					struct mvneta_rx_queue *rxq)
596 {
597 	u32 val;
598 
599 	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
600 	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
601 }
602 
/* Update the number of RX descriptors; called on return from the RX
 * path or from mvneta_rxq_drop_pkts().
 */
606 static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
607 				       struct mvneta_rx_queue *rxq,
608 				       int rx_done, int rx_filled)
609 {
610 	u32 val;
611 
612 	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
613 		val = rx_done |
614 		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
615 		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
616 		return;
617 	}
618 
619 	/* Only 255 descriptors can be added at once */
620 	while ((rx_done > 0) || (rx_filled > 0)) {
621 		if (rx_done <= 0xff) {
622 			val = rx_done;
623 			rx_done = 0;
624 		} else {
625 			val = 0xff;
626 			rx_done -= 0xff;
627 		}
628 		if (rx_filled <= 0xff) {
629 			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
630 			rx_filled = 0;
631 		} else {
632 			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
633 			rx_filled -= 0xff;
634 		}
635 		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
636 	}
637 }
638 
639 /* Get pointer to next RX descriptor to be processed by SW */
640 static struct mvneta_rx_desc *
641 mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
642 {
643 	int rx_desc = rxq->next_desc_to_proc;
644 
645 	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
646 	prefetch(rxq->descs + rxq->next_desc_to_proc);
647 	return rxq->descs + rx_desc;
648 }
649 
650 /* Change maximum receive size of the port. */
651 static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
652 {
653 	u32 val;
654 
655 	val =  mvreg_read(pp, MVNETA_GMAC_CTRL_0);
656 	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
657 	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
658 		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
659 	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
660 }
661 
662 
663 /* Set rx queue offset */
664 static void mvneta_rxq_offset_set(struct mvneta_port *pp,
665 				  struct mvneta_rx_queue *rxq,
666 				  int offset)
667 {
668 	u32 val;
669 
670 	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
671 	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
672 
	/* The hardware expects the offset in units of 8 bytes */
674 	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
675 	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
676 }
677 
678 
679 /* Tx descriptors helper methods */
680 
681 /* Update HW with number of TX descriptors to be sent */
682 static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
683 				     struct mvneta_tx_queue *txq,
684 				     int pend_desc)
685 {
686 	u32 val;
687 
	/* Only 255 descriptors can be added at once; assume the caller
	 * processes TX descriptors in quanta of less than 256
	 */
691 	val = pend_desc;
692 	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
693 }
694 
695 /* Get pointer to next TX descriptor to be processed (send) by HW */
696 static struct mvneta_tx_desc *
697 mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
698 {
699 	int tx_desc = txq->next_desc_to_proc;
700 
701 	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
702 	return txq->descs + tx_desc;
703 }
704 
705 /* Release the last allocated TX descriptor. Useful to handle DMA
706  * mapping failures in the TX path.
707  */
708 static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
709 {
710 	if (txq->next_desc_to_proc == 0)
711 		txq->next_desc_to_proc = txq->last_desc - 1;
712 	else
713 		txq->next_desc_to_proc--;
714 }
715 
716 /* Set rxq buf size */
717 static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
718 				    struct mvneta_rx_queue *rxq,
719 				    int buf_size)
720 {
721 	u32 val;
722 
723 	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
724 
725 	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
726 	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
727 
728 	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
729 }
730 
731 /* Disable buffer management (BM) */
732 static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
733 				  struct mvneta_rx_queue *rxq)
734 {
735 	u32 val;
736 
737 	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
738 	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
739 	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
740 }
741 
742 /* Start the Ethernet port RX and TX activity */
743 static void mvneta_port_up(struct mvneta_port *pp)
744 {
745 	int queue;
746 	u32 q_map;
747 
748 	/* Enable all initialized TXs. */
749 	mvneta_mib_counters_clear(pp);
750 	q_map = 0;
751 	for (queue = 0; queue < txq_number; queue++) {
752 		struct mvneta_tx_queue *txq = &pp->txqs[queue];
753 		if (txq->descs != NULL)
754 			q_map |= (1 << queue);
755 	}
756 	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
757 
758 	/* Enable all initialized RXQs. */
759 	q_map = 0;
760 	for (queue = 0; queue < rxq_number; queue++) {
761 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
762 		if (rxq->descs != NULL)
763 			q_map |= (1 << queue);
764 	}
765 
766 	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
767 }
768 
769 /* Stop the Ethernet port activity */
770 static void mvneta_port_down(struct mvneta_port *pp)
771 {
772 	u32 val;
773 	int count;
774 
775 	/* Stop Rx port activity. Check port Rx activity. */
776 	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
777 
778 	/* Issue stop command for active channels only */
779 	if (val != 0)
780 		mvreg_write(pp, MVNETA_RXQ_CMD,
781 			    val << MVNETA_RXQ_DISABLE_SHIFT);
782 
783 	/* Wait for all Rx activity to terminate. */
784 	count = 0;
785 	do {
786 		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
787 			netdev_warn(pp->dev,
788 				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x08%x\n",
789 				    val);
790 			break;
791 		}
792 		mdelay(1);
793 
794 		val = mvreg_read(pp, MVNETA_RXQ_CMD);
795 	} while (val & 0xff);
796 
797 	/* Stop Tx port activity. Check port Tx activity. Issue stop
798 	 * command for active channels only
799 	 */
800 	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
801 
802 	if (val != 0)
803 		mvreg_write(pp, MVNETA_TXQ_CMD,
804 			    (val << MVNETA_TXQ_DISABLE_SHIFT));
805 
806 	/* Wait for all Tx activity to terminate. */
807 	count = 0;
808 	do {
809 		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
810 			netdev_warn(pp->dev,
811 				    "TIMEOUT for TX stopped status=0x%08x\n",
812 				    val);
813 			break;
814 		}
815 		mdelay(1);
816 
817 		/* Check TX Command reg that all Txqs are stopped */
818 		val = mvreg_read(pp, MVNETA_TXQ_CMD);
819 
820 	} while (val & 0xff);
821 
822 	/* Double check to verify that TX FIFO is empty */
823 	count = 0;
824 	do {
825 		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
826 			netdev_warn(pp->dev,
827 				    "TX FIFO empty timeout status=0x08%x\n",
828 				    val);
829 			break;
830 		}
831 		mdelay(1);
832 
833 		val = mvreg_read(pp, MVNETA_PORT_STATUS);
834 	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
835 		 (val & MVNETA_TX_IN_PRGRS));
836 
837 	udelay(200);
838 }
839 
840 /* Enable the port by setting the port enable bit of the MAC control register */
841 static void mvneta_port_enable(struct mvneta_port *pp)
842 {
843 	u32 val;
844 
845 	/* Enable port */
846 	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
847 	val |= MVNETA_GMAC0_PORT_ENABLE;
848 	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
849 }
850 
/* Disable the port and wait for about 200 usec before returning */
852 static void mvneta_port_disable(struct mvneta_port *pp)
853 {
854 	u32 val;
855 
856 	/* Reset the Enable bit in the Serial Control Register */
857 	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
858 	val &= ~MVNETA_GMAC0_PORT_ENABLE;
859 	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
860 
861 	udelay(200);
862 }
863 
864 /* Multicast tables methods */
865 
866 /* Set all entries in Unicast MAC Table; queue==-1 means reject all */
867 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
868 {
869 	int offset;
870 	u32 val;
871 
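	/* Each filter register packs four one-byte entries (bit 0 = accept,
	 * bits 3:1 = destination queue), so replicate the entry value into
	 * all four byte lanes.
	 */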
872 	if (queue == -1) {
873 		val = 0;
874 	} else {
875 		val = 0x1 | (queue << 1);
876 		val |= (val << 24) | (val << 16) | (val << 8);
877 	}
878 
879 	for (offset = 0; offset <= 0xc; offset += 4)
880 		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
881 }
882 
883 /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
884 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
885 {
886 	int offset;
887 	u32 val;
888 
889 	if (queue == -1) {
890 		val = 0;
891 	} else {
892 		val = 0x1 | (queue << 1);
893 		val |= (val << 24) | (val << 16) | (val << 8);
894 	}
895 
896 	for (offset = 0; offset <= 0xfc; offset += 4)
897 		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
898 
899 }
900 
901 /* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
902 static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
903 {
904 	int offset;
905 	u32 val;
906 
907 	if (queue == -1) {
908 		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
909 		val = 0;
910 	} else {
911 		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
912 		val = 0x1 | (queue << 1);
913 		val |= (val << 24) | (val << 16) | (val << 8);
914 	}
915 
916 	for (offset = 0; offset <= 0xfc; offset += 4)
917 		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
918 }
919 
/* This method sets defaults for the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults in all registers.
 *	Resets RX and TX descriptor rings.
 *	Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 *	settings to defaults.
 */
929 static void mvneta_defaults_set(struct mvneta_port *pp)
930 {
931 	int cpu;
932 	int queue;
933 	u32 val;
934 
935 	/* Clear all Cause registers */
936 	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
937 	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
938 	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
939 
940 	/* Mask all interrupts */
941 	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
942 	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
943 	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
944 	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
945 
946 	/* Enable MBUS Retry bit16 */
947 	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
948 
949 	/* Set CPU queue access map - all CPUs have access to all RX
950 	 * queues and to all TX queues
951 	 */
952 	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
953 		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
954 			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
955 			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
956 
957 	/* Reset RX and TX DMAs */
958 	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
959 	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
960 
961 	/* Disable Legacy WRR, Disable EJP, Release from reset */
962 	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
963 	for (queue = 0; queue < txq_number; queue++) {
964 		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
965 		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
966 	}
967 
968 	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
969 	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
970 
971 	/* Set Port Acceleration Mode */
972 	val = MVNETA_ACC_MODE_EXT;
973 	mvreg_write(pp, MVNETA_ACC_MODE, val);
974 
975 	/* Update val of portCfg register accordingly with all RxQueue types */
976 	val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
977 	mvreg_write(pp, MVNETA_PORT_CONFIG, val);
978 
979 	val = 0;
980 	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
981 	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
982 
983 	/* Build PORT_SDMA_CONFIG_REG */
984 	val = 0;
985 
986 	/* Default burst size */
987 	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
988 	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
989 	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
990 
991 #if defined(__BIG_ENDIAN)
992 	val |= MVNETA_DESC_SWAP;
993 #endif
994 
995 	/* Assign port SDMA configuration */
996 	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
997 
998 	/* Disable PHY polling in hardware, since we're using the
999 	 * kernel phylib to do this.
1000 	 */
1001 	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
1002 	val &= ~MVNETA_PHY_POLLING_ENABLE;
1003 	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
1004 
1005 	if (pp->use_inband_status) {
1006 		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
1007 		val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
1008 			 MVNETA_GMAC_FORCE_LINK_DOWN |
1009 			 MVNETA_GMAC_AN_FLOW_CTRL_EN);
1010 		val |= MVNETA_GMAC_INBAND_AN_ENABLE |
1011 		       MVNETA_GMAC_AN_SPEED_EN |
1012 		       MVNETA_GMAC_AN_DUPLEX_EN;
1013 		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
1014 		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
1015 		val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
1016 		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
1017 	} else {
1018 		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
1019 		val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
1020 		       MVNETA_GMAC_AN_SPEED_EN |
1021 		       MVNETA_GMAC_AN_DUPLEX_EN);
1022 		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
1023 	}
1024 
1025 	mvneta_set_ucast_table(pp, -1);
1026 	mvneta_set_special_mcast_table(pp, -1);
1027 	mvneta_set_other_mcast_table(pp, -1);
1028 
1029 	/* Set port interrupt enable register - default enable all */
1030 	mvreg_write(pp, MVNETA_INTR_ENABLE,
1031 		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
1032 		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
1033 }
1034 
1035 /* Set max sizes for tx queues */
1036 static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
1037 
1038 {
1039 	u32 val, size, mtu;
1040 	int queue;
1041 
1042 	mtu = max_tx_size * 8;
1043 	if (mtu > MVNETA_TX_MTU_MAX)
1044 		mtu = MVNETA_TX_MTU_MAX;
1045 
1046 	/* Set MTU */
1047 	val = mvreg_read(pp, MVNETA_TX_MTU);
1048 	val &= ~MVNETA_TX_MTU_MAX;
1049 	val |= mtu;
1050 	mvreg_write(pp, MVNETA_TX_MTU, val);
1051 
	/* TX token size and all TXQs token size must be larger than the MTU */
1053 	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
1054 
1055 	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
1056 	if (size < mtu) {
1057 		size = mtu;
1058 		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
1059 		val |= size;
1060 		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
1061 	}
1062 	for (queue = 0; queue < txq_number; queue++) {
1063 		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
1064 
1065 		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
1066 		if (size < mtu) {
1067 			size = mtu;
1068 			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
1069 			val |= size;
1070 			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
1071 		}
1072 	}
1073 }
1074 
1075 /* Set unicast address */
1076 static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
1077 				  int queue)
1078 {
1079 	unsigned int unicast_reg;
1080 	unsigned int tbl_offset;
1081 	unsigned int reg_offset;
1082 
1083 	/* Locate the Unicast table entry */
1084 	last_nibble = (0xf & last_nibble);
1085 
1086 	/* offset from unicast tbl base */
1087 	tbl_offset = (last_nibble / 4) * 4;
1088 
1089 	/* offset within the above reg  */
1090 	reg_offset = last_nibble % 4;
1091 
1092 	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
1093 
1094 	if (queue == -1) {
1095 		/* Clear accepts frame bit at specified unicast DA tbl entry */
1096 		unicast_reg &= ~(0xff << (8 * reg_offset));
1097 	} else {
1098 		unicast_reg &= ~(0xff << (8 * reg_offset));
1099 		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1100 	}
1101 
1102 	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
1103 }
1104 
1105 /* Set mac address */
1106 static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
1107 				int queue)
1108 {
1109 	unsigned int mac_h;
1110 	unsigned int mac_l;
1111 
1112 	if (queue != -1) {
1113 		mac_l = (addr[4] << 8) | (addr[5]);
1114 		mac_h = (addr[0] << 24) | (addr[1] << 16) |
1115 			(addr[2] << 8) | (addr[3] << 0);
1116 
1117 		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
1118 		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
1119 	}
1120 
1121 	/* Accept frames of this address */
1122 	mvneta_set_ucast_addr(pp, addr[5], queue);
1123 }
1124 
/* Set the number of packets that will be received before an RX interrupt
 * is generated by the HW.
 */
1128 static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
1129 				    struct mvneta_rx_queue *rxq, u32 value)
1130 {
1131 	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
1132 		    value | MVNETA_RXQ_NON_OCCUPIED(0));
1133 	rxq->pkts_coal = value;
1134 }
1135 
/* Set the time delay in usec before an RX interrupt is generated by the
 * HW.
 */
1139 static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
1140 				    struct mvneta_rx_queue *rxq, u32 value)
1141 {
1142 	u32 val;
1143 	unsigned long clk_rate;
1144 
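	/* Convert the delay from usec to core clock cycles */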
1145 	clk_rate = clk_get_rate(pp->clk);
1146 	val = (clk_rate / 1000000) * value;
1147 
1148 	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
1149 	rxq->time_coal = value;
1150 }
1151 
1152 /* Set threshold for TX_DONE pkts coalescing */
1153 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
1154 					 struct mvneta_tx_queue *txq, u32 value)
1155 {
1156 	u32 val;
1157 
1158 	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
1159 
1160 	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
1161 	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
1162 
1163 	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
1164 
1165 	txq->done_pkts_coal = value;
1166 }
1167 
1168 /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
1169 static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
1170 				u32 phys_addr, u32 cookie)
1171 {
1172 	rx_desc->buf_cookie = cookie;
1173 	rx_desc->buf_phys_addr = phys_addr;
1174 }
1175 
1176 /* Decrement sent descriptors counter */
1177 static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
1178 				     struct mvneta_tx_queue *txq,
1179 				     int sent_desc)
1180 {
1181 	u32 val;
1182 
1183 	/* Only 255 TX descriptors can be updated at once */
1184 	while (sent_desc > 0xff) {
1185 		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
1186 		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1187 		sent_desc = sent_desc - 0xff;
1188 	}
1189 
1190 	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
1191 	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1192 }
1193 
1194 /* Get number of TX descriptors already sent by HW */
1195 static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
1196 					struct mvneta_tx_queue *txq)
1197 {
1198 	u32 val;
1199 	int sent_desc;
1200 
1201 	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
1202 	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
1203 		MVNETA_TXQ_SENT_DESC_SHIFT;
1204 
1205 	return sent_desc;
1206 }
1207 
1208 /* Get number of sent descriptors and decrement counter.
1209  *  The number of sent descriptors is returned.
1210  */
1211 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
1212 				     struct mvneta_tx_queue *txq)
1213 {
1214 	int sent_desc;
1215 
1216 	/* Get number of sent descriptors */
1217 	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1218 
1219 	/* Decrement sent descriptors counter */
1220 	if (sent_desc)
1221 		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
1222 
1223 	return sent_desc;
1224 }
1225 
1226 /* Set TXQ descriptors fields relevant for CSUM calculation */
1227 static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
1228 				int ip_hdr_len, int l4_proto)
1229 {
1230 	u32 command;
1231 
1232 	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
1233 	 * G_L4_chk, L4_type; required only for checksum
1234 	 * calculation
1235 	 */
1236 	command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
1237 	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1238 
1239 	if (l3_proto == htons(ETH_P_IP))
1240 		command |= MVNETA_TXD_IP_CSUM;
1241 	else
1242 		command |= MVNETA_TX_L3_IP6;
1243 
1244 	if (l4_proto == IPPROTO_TCP)
1245 		command |=  MVNETA_TX_L4_CSUM_FULL;
1246 	else if (l4_proto == IPPROTO_UDP)
1247 		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
1248 	else
1249 		command |= MVNETA_TX_L4_CSUM_NOT;
1250 
1251 	return command;
1252 }
1253 
1254 
1255 /* Display more error info */
1256 static void mvneta_rx_error(struct mvneta_port *pp,
1257 			    struct mvneta_rx_desc *rx_desc)
1258 {
1259 	u32 status = rx_desc->status;
1260 
1261 	if (!mvneta_rxq_desc_is_first_last(status)) {
1262 		netdev_err(pp->dev,
1263 			   "bad rx status %08x (buffer oversize), size=%d\n",
1264 			   status, rx_desc->data_size);
1265 		return;
1266 	}
1267 
1268 	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
1269 	case MVNETA_RXD_ERR_CRC:
1270 		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1271 			   status, rx_desc->data_size);
1272 		break;
1273 	case MVNETA_RXD_ERR_OVERRUN:
1274 		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1275 			   status, rx_desc->data_size);
1276 		break;
1277 	case MVNETA_RXD_ERR_LEN:
1278 		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1279 			   status, rx_desc->data_size);
1280 		break;
1281 	case MVNETA_RXD_ERR_RESOURCE:
1282 		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1283 			   status, rx_desc->data_size);
1284 		break;
1285 	}
1286 }
1287 
1288 /* Handle RX checksum offload based on the descriptor's status */
1289 static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
1290 			   struct sk_buff *skb)
1291 {
1292 	if ((status & MVNETA_RXD_L3_IP4) &&
1293 	    (status & MVNETA_RXD_L4_CSUM_OK)) {
1294 		skb->csum = 0;
1295 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1296 		return;
1297 	}
1298 
1299 	skb->ip_summed = CHECKSUM_NONE;
1300 }
1301 
/* Return the TX queue pointer (find last set bit) according to <cause>
 * returned from the tx_done reg. <cause> must not be null. The returned
 * queue is always valid: it corresponds to the highest set bit in <cause>.
 */
1306 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
1307 						     u32 cause)
1308 {
1309 	int queue = fls(cause) - 1;
1310 
1311 	return &pp->txqs[queue];
1312 }
1313 
1314 /* Free tx queue skbuffs */
1315 static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1316 				 struct mvneta_tx_queue *txq, int num)
1317 {
1318 	int i;
1319 
1320 	for (i = 0; i < num; i++) {
1321 		struct mvneta_tx_desc *tx_desc = txq->descs +
1322 			txq->txq_get_index;
1323 		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
1324 
1325 		mvneta_txq_inc_get(txq);
1326 
1327 		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
1328 			dma_unmap_single(pp->dev->dev.parent,
1329 					 tx_desc->buf_phys_addr,
1330 					 tx_desc->data_size, DMA_TO_DEVICE);
1331 		if (!skb)
1332 			continue;
1333 		dev_kfree_skb_any(skb);
1334 	}
1335 }
1336 
1337 /* Handle end of transmission */
1338 static void mvneta_txq_done(struct mvneta_port *pp,
1339 			   struct mvneta_tx_queue *txq)
1340 {
1341 	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1342 	int tx_done;
1343 
1344 	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
1345 	if (!tx_done)
1346 		return;
1347 
1348 	mvneta_txq_bufs_free(pp, txq, tx_done);
1349 
1350 	txq->count -= tx_done;
1351 
1352 	if (netif_tx_queue_stopped(nq)) {
1353 		if (txq->count <= txq->tx_wake_threshold)
1354 			netif_tx_wake_queue(nq);
1355 	}
1356 }
1357 
1358 static void *mvneta_frag_alloc(const struct mvneta_port *pp)
1359 {
1360 	if (likely(pp->frag_size <= PAGE_SIZE))
1361 		return netdev_alloc_frag(pp->frag_size);
1362 	else
1363 		return kmalloc(pp->frag_size, GFP_ATOMIC);
1364 }
1365 
1366 static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
1367 {
1368 	if (likely(pp->frag_size <= PAGE_SIZE))
1369 		skb_free_frag(data);
1370 	else
1371 		kfree(data);
1372 }
1373 
1374 /* Refill processing */
1375 static int mvneta_rx_refill(struct mvneta_port *pp,
1376 			    struct mvneta_rx_desc *rx_desc)
1377 
1378 {
1379 	dma_addr_t phys_addr;
1380 	void *data;
1381 
1382 	data = mvneta_frag_alloc(pp);
1383 	if (!data)
1384 		return -ENOMEM;
1385 
1386 	phys_addr = dma_map_single(pp->dev->dev.parent, data,
1387 				   MVNETA_RX_BUF_SIZE(pp->pkt_size),
1388 				   DMA_FROM_DEVICE);
1389 	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
1390 		mvneta_frag_free(pp, data);
1391 		return -ENOMEM;
1392 	}
1393 
1394 	mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
1395 	return 0;
1396 }
1397 
1398 /* Handle tx checksum */
1399 static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
1400 {
1401 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1402 		int ip_hdr_len = 0;
1403 		__be16 l3_proto = vlan_get_protocol(skb);
1404 		u8 l4_proto;
1405 
1406 		if (l3_proto == htons(ETH_P_IP)) {
1407 			struct iphdr *ip4h = ip_hdr(skb);
1408 
1409 			/* Calculate IPv4 checksum and L4 checksum */
1410 			ip_hdr_len = ip4h->ihl;
1411 			l4_proto = ip4h->protocol;
1412 		} else if (l3_proto == htons(ETH_P_IPV6)) {
1413 			struct ipv6hdr *ip6h = ipv6_hdr(skb);
1414 
1415 			/* Read l4_protocol from one of IPv6 extra headers */
1416 			if (skb_network_header_len(skb) > 0)
1417 				ip_hdr_len = (skb_network_header_len(skb) >> 2);
1418 			l4_proto = ip6h->nexthdr;
1419 		} else
1420 			return MVNETA_TX_L4_CSUM_NOT;
1421 
1422 		return mvneta_txq_desc_csum(skb_network_offset(skb),
1423 					    l3_proto, ip_hdr_len, l4_proto);
1424 	}
1425 
1426 	return MVNETA_TX_L4_CSUM_NOT;
1427 }
1428 
/* Return the RX queue pointer (find last set bit) according to the
 * causeRxTx value; the RXQ occupied bits live in bits 8..15 of the cause.
 */
1432 static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
1433 						u32 cause)
1434 {
1435 	int queue = fls(cause >> 8) - 1;
1436 
1437 	return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
1438 }
1439 
1440 /* Drop packets received by the RXQ and free buffers */
1441 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1442 				 struct mvneta_rx_queue *rxq)
1443 {
1444 	int rx_done, i;
1445 
1446 	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1447 	for (i = 0; i < rxq->size; i++) {
1448 		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
1449 		void *data = (void *)rx_desc->buf_cookie;
1450 
1451 		mvneta_frag_free(pp, data);
1452 		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1453 				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
1454 	}
1455 
1456 	if (rx_done)
1457 		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1458 }
1459 
1460 /* Main rx processing */
1461 static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1462 		     struct mvneta_rx_queue *rxq)
1463 {
1464 	struct net_device *dev = pp->dev;
1465 	int rx_done;
1466 	u32 rcvd_pkts = 0;
1467 	u32 rcvd_bytes = 0;
1468 
1469 	/* Get number of received packets */
1470 	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1471 
1472 	if (rx_todo > rx_done)
1473 		rx_todo = rx_done;
1474 
1475 	rx_done = 0;
1476 
1477 	/* Fairness NAPI loop */
1478 	while (rx_done < rx_todo) {
1479 		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
1480 		struct sk_buff *skb;
1481 		unsigned char *data;
1482 		u32 rx_status;
1483 		int rx_bytes, err;
1484 
1485 		rx_done++;
1486 		rx_status = rx_desc->status;
1487 		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
1488 		data = (unsigned char *)rx_desc->buf_cookie;
1489 
1490 		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
1491 		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
1492 		err_drop_frame:
1493 			dev->stats.rx_errors++;
1494 			mvneta_rx_error(pp, rx_desc);
1495 			/* leave the descriptor untouched */
1496 			continue;
1497 		}
1498 
1499 		if (rx_bytes <= rx_copybreak) {
			/* better copy a small frame and not unmap the
			 * DMA region
			 */
1501 			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
1502 			if (unlikely(!skb))
1503 				goto err_drop_frame;
1504 
1505 			dma_sync_single_range_for_cpu(dev->dev.parent,
1506 			                              rx_desc->buf_phys_addr,
1507 			                              MVNETA_MH_SIZE + NET_SKB_PAD,
1508 			                              rx_bytes,
1509 			                              DMA_FROM_DEVICE);
1510 			memcpy(skb_put(skb, rx_bytes),
1511 			       data + MVNETA_MH_SIZE + NET_SKB_PAD,
1512 			       rx_bytes);
1513 
1514 			skb->protocol = eth_type_trans(skb, dev);
1515 			mvneta_rx_csum(pp, rx_status, skb);
1516 			napi_gro_receive(&pp->napi, skb);
1517 
1518 			rcvd_pkts++;
1519 			rcvd_bytes += rx_bytes;
1520 
1521 			/* leave the descriptor and buffer untouched */
1522 			continue;
1523 		}
1524 
1525 		/* Refill processing */
1526 		err = mvneta_rx_refill(pp, rx_desc);
1527 		if (err) {
1528 			netdev_err(dev, "Linux processing - Can't refill\n");
1529 			rxq->missed++;
1530 			goto err_drop_frame;
1531 		}
1532 
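		/* A frag_size of 0 tells build_skb() the buffer came from kmalloc() */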
1533 		skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
1534 		if (!skb)
1535 			goto err_drop_frame;
1536 
1537 		dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr,
1538 				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
1539 
1540 		rcvd_pkts++;
1541 		rcvd_bytes += rx_bytes;
1542 
1543 		/* Linux processing */
1544 		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
1545 		skb_put(skb, rx_bytes);
1546 
1547 		skb->protocol = eth_type_trans(skb, dev);
1548 
1549 		mvneta_rx_csum(pp, rx_status, skb);
1550 
1551 		napi_gro_receive(&pp->napi, skb);
1552 	}
1553 
1554 	if (rcvd_pkts) {
1555 		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1556 
1557 		u64_stats_update_begin(&stats->syncp);
1558 		stats->rx_packets += rcvd_pkts;
1559 		stats->rx_bytes   += rcvd_bytes;
1560 		u64_stats_update_end(&stats->syncp);
1561 	}
1562 
1563 	/* Update rxq management counters */
1564 	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1565 
1566 	return rx_done;
1567 }
1568 
1569 static inline void
1570 mvneta_tso_put_hdr(struct sk_buff *skb,
1571 		   struct mvneta_port *pp, struct mvneta_tx_queue *txq)
1572 {
1573 	struct mvneta_tx_desc *tx_desc;
1574 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1575 
1576 	txq->tx_skb[txq->txq_put_index] = NULL;
1577 	tx_desc = mvneta_txq_next_desc_get(txq);
1578 	tx_desc->data_size = hdr_len;
1579 	tx_desc->command = mvneta_skb_tx_csum(pp, skb);
1580 	tx_desc->command |= MVNETA_TXD_F_DESC;
1581 	tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
1582 				 txq->txq_put_index * TSO_HEADER_SIZE;
1583 	mvneta_txq_inc_put(txq);
1584 }
1585 
1586 static inline int
1587 mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
1588 		    struct sk_buff *skb, char *data, int size,
1589 		    bool last_tcp, bool is_last)
1590 {
1591 	struct mvneta_tx_desc *tx_desc;
1592 
1593 	tx_desc = mvneta_txq_next_desc_get(txq);
1594 	tx_desc->data_size = size;
1595 	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
1596 						size, DMA_TO_DEVICE);
1597 	if (unlikely(dma_mapping_error(dev->dev.parent,
1598 		     tx_desc->buf_phys_addr))) {
1599 		mvneta_txq_desc_put(txq);
1600 		return -ENOMEM;
1601 	}
1602 
1603 	tx_desc->command = 0;
1604 	txq->tx_skb[txq->txq_put_index] = NULL;
1605 
1606 	if (last_tcp) {
1607 		/* last descriptor in the TCP packet */
1608 		tx_desc->command = MVNETA_TXD_L_DESC;
1609 
1610 		/* last descriptor in SKB */
1611 		if (is_last)
1612 			txq->tx_skb[txq->txq_put_index] = skb;
1613 	}
1614 	mvneta_txq_inc_put(txq);
1615 	return 0;
1616 }
1617 
1618 static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
1619 			 struct mvneta_tx_queue *txq)
1620 {
1621 	int total_len, data_left;
1622 	int desc_count = 0;
1623 	struct mvneta_port *pp = netdev_priv(dev);
1624 	struct tso_t tso;
1625 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1626 	int i;
1627 
1628 	/* Count needed descriptors */
1629 	if ((txq->count + tso_count_descs(skb)) >= txq->size)
1630 		return 0;
1631 
1632 	if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
1633 		pr_info("*** Is this even  possible???!?!?\n");
1634 		return 0;
1635 	}
1636 
1637 	/* Initialize the TSO handler, and prepare the first payload */
1638 	tso_start(skb, &tso);
1639 
1640 	total_len = skb->len - hdr_len;
1641 	while (total_len > 0) {
1642 		char *hdr;
1643 
1644 		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
1645 		total_len -= data_left;
1646 		desc_count++;
1647 
1648 		/* prepare packet headers: MAC + IP + TCP */
1649 		hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
1650 		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
1651 
1652 		mvneta_tso_put_hdr(skb, pp, txq);
1653 
1654 		while (data_left > 0) {
1655 			int size;
1656 			desc_count++;
1657 
1658 			size = min_t(int, tso.size, data_left);
1659 
1660 			if (mvneta_tso_put_data(dev, txq, skb,
1661 						 tso.data, size,
1662 						 size == data_left,
1663 						 total_len == 0))
1664 				goto err_release;
1665 			data_left -= size;
1666 
1667 			tso_build_data(skb, &tso, size);
1668 		}
1669 	}
1670 
1671 	return desc_count;
1672 
1673 err_release:
1674 	/* Release all used data descriptors; header descriptors must not
1675 	 * be DMA-unmapped.
1676 	 */
1677 	for (i = desc_count - 1; i >= 0; i--) {
1678 		struct mvneta_tx_desc *tx_desc = txq->descs + i;
1679 		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
1680 			dma_unmap_single(pp->dev->dev.parent,
1681 					 tx_desc->buf_phys_addr,
1682 					 tx_desc->data_size,
1683 					 DMA_TO_DEVICE);
1684 		mvneta_txq_desc_put(txq);
1685 	}
1686 	return 0;
1687 }
1688 
1689 /* Handle tx fragmentation processing */
1690 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
1691 				  struct mvneta_tx_queue *txq)
1692 {
1693 	struct mvneta_tx_desc *tx_desc;
1694 	int i, nr_frags = skb_shinfo(skb)->nr_frags;
1695 
1696 	for (i = 0; i < nr_frags; i++) {
1697 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1698 		void *addr = page_address(frag->page.p) + frag->page_offset;
1699 
1700 		tx_desc = mvneta_txq_next_desc_get(txq);
1701 		tx_desc->data_size = frag->size;
1702 
1703 		tx_desc->buf_phys_addr =
1704 			dma_map_single(pp->dev->dev.parent, addr,
1705 				       tx_desc->data_size, DMA_TO_DEVICE);
1706 
1707 		if (dma_mapping_error(pp->dev->dev.parent,
1708 				      tx_desc->buf_phys_addr)) {
1709 			mvneta_txq_desc_put(txq);
1710 			goto error;
1711 		}
1712 
1713 		if (i == nr_frags - 1) {
1714 			/* Last descriptor */
1715 			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
1716 			txq->tx_skb[txq->txq_put_index] = skb;
1717 		} else {
1718 			/* Descriptor in the middle: Not First, Not Last */
1719 			tx_desc->command = 0;
1720 			txq->tx_skb[txq->txq_put_index] = NULL;
1721 		}
1722 		mvneta_txq_inc_put(txq);
1723 	}
1724 
1725 	return 0;
1726 
1727 error:
1728 	/* Release all descriptors that were used to map fragments of
1729 	 * this packet, as well as the corresponding DMA mappings
1730 	 */
1731 	for (i = i - 1; i >= 0; i--) {
1732 		tx_desc = txq->descs + i;
1733 		dma_unmap_single(pp->dev->dev.parent,
1734 				 tx_desc->buf_phys_addr,
1735 				 tx_desc->data_size,
1736 				 DMA_TO_DEVICE);
1737 		mvneta_txq_desc_put(txq);
1738 	}
1739 
1740 	return -ENOMEM;
1741 }
1742 
1743 /* Main tx processing */
1744 static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
1745 {
1746 	struct mvneta_port *pp = netdev_priv(dev);
1747 	u16 txq_id = skb_get_queue_mapping(skb);
1748 	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
1749 	struct mvneta_tx_desc *tx_desc;
1750 	int len = skb->len;
1751 	int frags = 0;
1752 	u32 tx_cmd;
1753 
1754 	if (!netif_running(dev))
1755 		goto out;
1756 
1757 	if (skb_is_gso(skb)) {
1758 		frags = mvneta_tx_tso(skb, dev, txq);
1759 		goto out;
1760 	}
1761 
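	/* One descriptor for the linear part of the skb plus one per fragment */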
1762 	frags = skb_shinfo(skb)->nr_frags + 1;
1763 
1764 	/* Get a descriptor for the first part of the packet */
1765 	tx_desc = mvneta_txq_next_desc_get(txq);
1766 
1767 	tx_cmd = mvneta_skb_tx_csum(pp, skb);
1768 
1769 	tx_desc->data_size = skb_headlen(skb);
1770 
1771 	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
1772 						tx_desc->data_size,
1773 						DMA_TO_DEVICE);
1774 	if (unlikely(dma_mapping_error(dev->dev.parent,
1775 				       tx_desc->buf_phys_addr))) {
1776 		mvneta_txq_desc_put(txq);
1777 		frags = 0;
1778 		goto out;
1779 	}
1780 
1781 	if (frags == 1) {
1782 		/* First and Last descriptor */
1783 		tx_cmd |= MVNETA_TXD_FLZ_DESC;
1784 		tx_desc->command = tx_cmd;
1785 		txq->tx_skb[txq->txq_put_index] = skb;
1786 		mvneta_txq_inc_put(txq);
1787 	} else {
1788 		/* First but not Last */
1789 		tx_cmd |= MVNETA_TXD_F_DESC;
1790 		txq->tx_skb[txq->txq_put_index] = NULL;
1791 		mvneta_txq_inc_put(txq);
1792 		tx_desc->command = tx_cmd;
1793 		/* Continue with other skb fragments */
1794 		if (mvneta_tx_frag_process(pp, skb, txq)) {
1795 			dma_unmap_single(dev->dev.parent,
1796 					 tx_desc->buf_phys_addr,
1797 					 tx_desc->data_size,
1798 					 DMA_TO_DEVICE);
1799 			mvneta_txq_desc_put(txq);
1800 			frags = 0;
1801 			goto out;
1802 		}
1803 	}
1804 
1805 out:
1806 	if (frags > 0) {
1807 		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1808 		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
1809 
1810 		txq->count += frags;
1811 		mvneta_txq_pend_desc_add(pp, txq, frags);
1812 
1813 		if (txq->count >= txq->tx_stop_threshold)
1814 			netif_tx_stop_queue(nq);
1815 
1816 		u64_stats_update_begin(&stats->syncp);
1817 		stats->tx_packets++;
1818 		stats->tx_bytes  += len;
1819 		u64_stats_update_end(&stats->syncp);
1820 	} else {
1821 		dev->stats.tx_dropped++;
1822 		dev_kfree_skb_any(skb);
1823 	}
1824 
1825 	return NETDEV_TX_OK;
1826 }
1827 
1828 
1829 /* Free tx resources, when resetting a port */
1830 static void mvneta_txq_done_force(struct mvneta_port *pp,
1831 				  struct mvneta_tx_queue *txq)
1832 
1833 {
1834 	int tx_done = txq->count;
1835 
1836 	mvneta_txq_bufs_free(pp, txq, tx_done);
1837 
1838 	/* reset txq */
1839 	txq->count = 0;
1840 	txq->txq_put_index = 0;
1841 	txq->txq_get_index = 0;
1842 }
1843 
1844 /* Handle tx done - called in softirq context. The <cause_tx_done> argument
1845  * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
1846  */
1847 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
1848 {
1849 	struct mvneta_tx_queue *txq;
1850 	struct netdev_queue *nq;
1851 
1852 	while (cause_tx_done) {
1853 		txq = mvneta_tx_done_policy(pp, cause_tx_done);
1854 
1855 		nq = netdev_get_tx_queue(pp->dev, txq->id);
1856 		__netif_tx_lock(nq, smp_processor_id());
1857 
1858 		if (txq->count)
1859 			mvneta_txq_done(pp, txq);
1860 
1861 		__netif_tx_unlock(nq);
1862 		cause_tx_done &= ~((1 << txq->id));
1863 	}
1864 }
1865 
/* Compute the CRC-8 of the specified address, using an algorithm specific
 * to the hardware (per the hw spec), which differs from the generic CRC-8
 * algorithm.
 */
1869 static int mvneta_addr_crc(unsigned char *addr)
1870 {
1871 	int crc = 0;
1872 	int i;
1873 
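	/* Fold in each address byte MSB-first and reduce the result modulo
	 * the polynomial 0x107 (x^8 + x^2 + x + 1).
	 */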
1874 	for (i = 0; i < ETH_ALEN; i++) {
1875 		int j;
1876 
1877 		crc = (crc ^ addr[i]) << 8;
1878 		for (j = 7; j >= 0; j--) {
1879 			if (crc & (0x100 << j))
1880 				crc ^= 0x107 << j;
1881 		}
1882 	}
1883 
1884 	return crc;
1885 }
1886 
/* This method controls the net device special MAC multicast support.
 * The Special Multicast Table for MAC addresses supports MAC of the form
 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 * Table entries in the DA-Filter table. This method sets the appropriate
 * Special Multicast Table entry.
 */
1894 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
1895 					  unsigned char last_byte,
1896 					  int queue)
1897 {
1898 	unsigned int smc_table_reg;
1899 	unsigned int tbl_offset;
1900 	unsigned int reg_offset;
1901 
1902 	/* Register offset from SMC table base    */
1903 	tbl_offset = (last_byte / 4);
1904 	/* Entry offset within the above reg */
1905 	reg_offset = last_byte % 4;
1906 
1907 	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
1908 					+ tbl_offset * 4));
1909 
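	/* In each table entry byte, bit 0 accepts the frame and the
	 * destination rx queue is encoded in the bits above it.
	 */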
	if (queue == -1) {
		smc_table_reg &= ~(0xff << (8 * reg_offset));
	} else {
		smc_table_reg &= ~(0xff << (8 * reg_offset));
		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}
1916 
1917 	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
1918 		    smc_table_reg);
1919 }
1920 
/* This method controls the network device Other MAC multicast support.
 * The Other Multicast Table is used for multicast of another type.
 * A CRC-8 is used as an index to the Other Multicast Table entries
 * in the DA-Filter table.
 * The method gets the CRC-8 value from the calling routine and sets
 * the appropriate Other Multicast Table entry according to the
 * specified CRC-8.
 */
1929 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
1930 					unsigned char crc8,
1931 					int queue)
1932 {
1933 	unsigned int omc_table_reg;
1934 	unsigned int tbl_offset;
1935 	unsigned int reg_offset;
1936 
1937 	tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
1938 	reg_offset = crc8 % 4;	     /* Entry offset within the above reg   */
1939 
1940 	omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
1941 
1942 	if (queue == -1) {
1943 		/* Clear accepts frame bit at specified Other DA table entry */
1944 		omc_table_reg &= ~(0xff << (8 * reg_offset));
1945 	} else {
1946 		omc_table_reg &= ~(0xff << (8 * reg_offset));
1947 		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1948 	}
1949 
1950 	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
1951 }
1952 
1953 /* The network device supports multicast using two tables:
1954  *    1) Special Multicast Table for MAC addresses of the form
1955  *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
1956  *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
1957  *       Table entries in the DA-Filter table.
1958  *    2) Other Multicast Table for multicast of another type. A CRC-8 value
1959  *       is used as an index to the Other Multicast Table entries in the
1960  *       DA-Filter table.
1961  */
1962 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
1963 				 int queue)
1964 {
1965 	unsigned char crc_result = 0;
1966 
1967 	if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
1968 		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
1969 		return 0;
1970 	}
1971 
1972 	crc_result = mvneta_addr_crc(p_addr);
1973 	if (queue == -1) {
1974 		if (pp->mcast_count[crc_result] == 0) {
1975 			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
1976 				    crc_result);
1977 			return -EINVAL;
1978 		}
1979 
1980 		pp->mcast_count[crc_result]--;
1981 		if (pp->mcast_count[crc_result] != 0) {
1982 			netdev_info(pp->dev,
1983 				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
1984 				    pp->mcast_count[crc_result], crc_result);
1985 			return -EINVAL;
1986 		}
	} else {
		pp->mcast_count[crc_result]++;
	}
1989 
1990 	mvneta_set_other_mcast_addr(pp, crc_result, queue);
1991 
1992 	return 0;
1993 }
1994 
/* Configure the filtering mode of the Ethernet port */
1996 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
1997 					  int is_promisc)
1998 {
1999 	u32 port_cfg_reg, val;
2000 
2001 	port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
2002 
2003 	val = mvreg_read(pp, MVNETA_TYPE_PRIO);
2004 
2005 	/* Set / Clear UPM bit in port configuration register */
2006 	if (is_promisc) {
2007 		/* Accept all Unicast addresses */
2008 		port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
2009 		val |= MVNETA_FORCE_UNI;
2010 		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
2011 		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
2012 	} else {
2013 		/* Reject all Unicast addresses */
2014 		port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
2015 		val &= ~MVNETA_FORCE_UNI;
2016 	}
2017 
2018 	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
2019 	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
2020 }
2021 
2022 /* register unicast and multicast addresses */
2023 static void mvneta_set_rx_mode(struct net_device *dev)
2024 {
2025 	struct mvneta_port *pp = netdev_priv(dev);
2026 	struct netdev_hw_addr *ha;
2027 
2028 	if (dev->flags & IFF_PROMISC) {
2029 		/* Accept all: Multicast + Unicast */
2030 		mvneta_rx_unicast_promisc_set(pp, 1);
2031 		mvneta_set_ucast_table(pp, rxq_def);
2032 		mvneta_set_special_mcast_table(pp, rxq_def);
2033 		mvneta_set_other_mcast_table(pp, rxq_def);
2034 	} else {
2035 		/* Accept single Unicast */
2036 		mvneta_rx_unicast_promisc_set(pp, 0);
2037 		mvneta_set_ucast_table(pp, -1);
2038 		mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
2039 
2040 		if (dev->flags & IFF_ALLMULTI) {
2041 			/* Accept all multicast */
2042 			mvneta_set_special_mcast_table(pp, rxq_def);
2043 			mvneta_set_other_mcast_table(pp, rxq_def);
2044 		} else {
2045 			/* Accept only initialized multicast */
2046 			mvneta_set_special_mcast_table(pp, -1);
2047 			mvneta_set_other_mcast_table(pp, -1);
2048 
2049 			if (!netdev_mc_empty(dev)) {
2050 				netdev_for_each_mc_addr(ha, dev) {
2051 					mvneta_mcast_addr_set(pp, ha->addr,
2052 							      rxq_def);
2053 				}
2054 			}
2055 		}
2056 	}
2057 }
2058 
2059 /* Interrupt handling - the callback for request_irq() */
2060 static irqreturn_t mvneta_isr(int irq, void *dev_id)
2061 {
2062 	struct mvneta_port *pp = (struct mvneta_port *)dev_id;
2063 
2064 	/* Mask all interrupts */
2065 	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2066 
2067 	napi_schedule(&pp->napi);
2068 
2069 	return IRQ_HANDLED;
2070 }
2071 
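/* Read the current link, speed and duplex from the GMAC status register
 * and propagate them to the fixed-link PHY.
 */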
2072 static int mvneta_fixed_link_update(struct mvneta_port *pp,
2073 				    struct phy_device *phy)
2074 {
2075 	struct fixed_phy_status status;
2076 	struct fixed_phy_status changed = {};
2077 	u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
2078 
2079 	status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
2080 	if (gmac_stat & MVNETA_GMAC_SPEED_1000)
2081 		status.speed = SPEED_1000;
2082 	else if (gmac_stat & MVNETA_GMAC_SPEED_100)
2083 		status.speed = SPEED_100;
2084 	else
2085 		status.speed = SPEED_10;
2086 	status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
2087 	changed.link = 1;
2088 	changed.speed = 1;
2089 	changed.duplex = 1;
2090 	fixed_phy_update_state(phy, &status, &changed);
2091 	return 0;
2092 }
2093 
/* NAPI handler
 * Bits 0 - 7 of the causeRxTx register indicate that packets were
 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
 * Bits 8 - 15 of the causeRxTx register indicate that packets were
 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
 * Each CPU has its own causeRxTx register.
 */
2101 static int mvneta_poll(struct napi_struct *napi, int budget)
2102 {
2103 	int rx_done = 0;
2104 	u32 cause_rx_tx;
2105 	unsigned long flags;
2106 	struct mvneta_port *pp = netdev_priv(napi->dev);
2107 
2108 	if (!netif_running(pp->dev)) {
2109 		napi_complete(napi);
2110 		return rx_done;
2111 	}
2112 
2113 	/* Read cause register */
2114 	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
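	/* Service miscellaneous causes (PHY status, link and PSC sync
	 * changes) before the per-queue rx/tx work.
	 */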
2115 	if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
2116 		u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
2117 
2118 		mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2119 		if (pp->use_inband_status && (cause_misc &
2120 				(MVNETA_CAUSE_PHY_STATUS_CHANGE |
2121 				 MVNETA_CAUSE_LINK_CHANGE |
2122 				 MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
2123 			mvneta_fixed_link_update(pp, pp->phy_dev);
2124 		}
2125 	}
2126 
2127 	/* Release Tx descriptors */
2128 	if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
2129 		mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
2130 		cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
2131 	}
2132 
2133 	/* For the case where the last mvneta_poll did not process all
2134 	 * RX packets
2135 	 */
2136 	cause_rx_tx |= pp->cause_rx_tx;
2137 	if (rxq_number > 1) {
2138 		while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) {
2139 			int count;
2140 			struct mvneta_rx_queue *rxq;
2141 			/* get rx queue number from cause_rx_tx */
2142 			rxq = mvneta_rx_policy(pp, cause_rx_tx);
2143 			if (!rxq)
2144 				break;
2145 
2146 			/* process the packet in that rx queue */
2147 			count = mvneta_rx(pp, budget, rxq);
2148 			rx_done += count;
2149 			budget -= count;
2150 			if (budget > 0) {
				/* clear this rx queue's bit in the cause
				 * register, so that the next iteration
				 * finds the next rx queue with received
				 * packets
				 */
2157 				cause_rx_tx &= ~((1 << rxq->id) << 8);
2158 			}
2159 		}
2160 	} else {
2161 		rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
2162 		budget -= rx_done;
2163 	}
2164 
2165 	if (budget > 0) {
2166 		cause_rx_tx = 0;
2167 		napi_complete(napi);
2168 		local_irq_save(flags);
2169 		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2170 			    MVNETA_RX_INTR_MASK(rxq_number) |
2171 			    MVNETA_TX_INTR_MASK(txq_number) |
2172 			    MVNETA_MISCINTR_INTR_MASK);
2173 		local_irq_restore(flags);
2174 	}
2175 
2176 	pp->cause_rx_tx = cause_rx_tx;
2177 	return rx_done;
2178 }
2179 
2180 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
2181 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2182 			   int num)
2183 {
2184 	int i;
2185 
2186 	for (i = 0; i < num; i++) {
2187 		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
2188 		if (mvneta_rx_refill(pp, rxq->descs + i) != 0) {
			netdev_err(pp->dev, "%s: rxq %d, %d of %d buffers filled\n",
				__func__, rxq->id, i, num);
2191 			break;
2192 		}
2193 	}
2194 
2195 	/* Add this number of RX descriptors as non occupied (ready to
2196 	 * get packets)
2197 	 */
2198 	mvneta_rxq_non_occup_desc_add(pp, rxq, i);
2199 
2200 	return i;
2201 }
2202 
2203 /* Free all packets pending transmit from all TXQs and reset TX port */
2204 static void mvneta_tx_reset(struct mvneta_port *pp)
2205 {
2206 	int queue;
2207 
2208 	/* free the skb's in the tx ring */
2209 	for (queue = 0; queue < txq_number; queue++)
2210 		mvneta_txq_done_force(pp, &pp->txqs[queue]);
2211 
2212 	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
2213 	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
2214 }
2215 
2216 static void mvneta_rx_reset(struct mvneta_port *pp)
2217 {
2218 	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
2219 	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
2220 }
2221 
2222 /* Rx/Tx queue initialization/cleanup methods */
2223 
2224 /* Create a specified RX queue */
2225 static int mvneta_rxq_init(struct mvneta_port *pp,
2226 			   struct mvneta_rx_queue *rxq)
2227 
2228 {
2229 	rxq->size = pp->rx_ring_size;
2230 
2231 	/* Allocate memory for RX descriptors */
2232 	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2233 					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2234 					&rxq->descs_phys, GFP_KERNEL);
2235 	if (rxq->descs == NULL)
2236 		return -ENOMEM;
2237 
2238 	BUG_ON(rxq->descs !=
2239 	       PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2240 
2241 	rxq->last_desc = rxq->size - 1;
2242 
2243 	/* Set Rx descriptors queue starting address */
2244 	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
2245 	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
2246 
2247 	/* Set Offset */
2248 	mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);
2249 
2250 	/* Set coalescing pkts and time */
2251 	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2252 	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2253 
2254 	/* Fill RXQ with buffers from RX pool */
2255 	mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
2256 	mvneta_rxq_bm_disable(pp, rxq);
2257 	mvneta_rxq_fill(pp, rxq, rxq->size);
2258 
2259 	return 0;
2260 }
2261 
2262 /* Cleanup Rx queue */
2263 static void mvneta_rxq_deinit(struct mvneta_port *pp,
2264 			      struct mvneta_rx_queue *rxq)
2265 {
2266 	mvneta_rxq_drop_pkts(pp, rxq);
2267 
2268 	if (rxq->descs)
2269 		dma_free_coherent(pp->dev->dev.parent,
2270 				  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2271 				  rxq->descs,
2272 				  rxq->descs_phys);
2273 
2274 	rxq->descs             = NULL;
2275 	rxq->last_desc         = 0;
2276 	rxq->next_desc_to_proc = 0;
2277 	rxq->descs_phys        = 0;
2278 }
2279 
2280 /* Create and initialize a tx queue */
2281 static int mvneta_txq_init(struct mvneta_port *pp,
2282 			   struct mvneta_tx_queue *txq)
2283 {
2284 	txq->size = pp->tx_ring_size;
2285 
	/* A queue must always have room for at least one skb.
	 * Therefore, stop the queue when the number of free entries
	 * drops to the maximum number of descriptors per skb.
	 */
2290 	txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
2291 	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
2292 
2293 
2294 	/* Allocate memory for TX descriptors */
2295 	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2296 					txq->size * MVNETA_DESC_ALIGNED_SIZE,
2297 					&txq->descs_phys, GFP_KERNEL);
2298 	if (txq->descs == NULL)
2299 		return -ENOMEM;
2300 
2301 	/* Make sure descriptor address is cache line size aligned  */
2302 	BUG_ON(txq->descs !=
2303 	       PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2304 
2305 	txq->last_desc = txq->size - 1;
2306 
2307 	/* Set maximum bandwidth for enabled TXQs */
2308 	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
2309 	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
2310 
2311 	/* Set Tx descriptors queue starting address */
2312 	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
2313 	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
2314 
2315 	txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
2316 	if (txq->tx_skb == NULL) {
2317 		dma_free_coherent(pp->dev->dev.parent,
2318 				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
2319 				  txq->descs, txq->descs_phys);
2320 		return -ENOMEM;
2321 	}
2322 
2323 	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
2324 	txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
2325 					   txq->size * TSO_HEADER_SIZE,
2326 					   &txq->tso_hdrs_phys, GFP_KERNEL);
2327 	if (txq->tso_hdrs == NULL) {
2328 		kfree(txq->tx_skb);
2329 		dma_free_coherent(pp->dev->dev.parent,
2330 				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
2331 				  txq->descs, txq->descs_phys);
2332 		return -ENOMEM;
2333 	}
2334 	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2335 
2336 	return 0;
2337 }
2338 
/* Free all resources allocated for a tx queue */
2340 static void mvneta_txq_deinit(struct mvneta_port *pp,
2341 			      struct mvneta_tx_queue *txq)
2342 {
2343 	kfree(txq->tx_skb);
2344 
2345 	if (txq->tso_hdrs)
2346 		dma_free_coherent(pp->dev->dev.parent,
2347 				  txq->size * TSO_HEADER_SIZE,
2348 				  txq->tso_hdrs, txq->tso_hdrs_phys);
2349 	if (txq->descs)
2350 		dma_free_coherent(pp->dev->dev.parent,
2351 				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
2352 				  txq->descs, txq->descs_phys);
2353 
2354 	txq->descs             = NULL;
2355 	txq->last_desc         = 0;
2356 	txq->next_desc_to_proc = 0;
2357 	txq->descs_phys        = 0;
2358 
2359 	/* Set minimum bandwidth for disabled TXQs */
2360 	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
2361 	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
2362 
2363 	/* Set Tx descriptors queue starting address and size */
2364 	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
2365 	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
2366 }
2367 
2368 /* Cleanup all Tx queues */
2369 static void mvneta_cleanup_txqs(struct mvneta_port *pp)
2370 {
2371 	int queue;
2372 
2373 	for (queue = 0; queue < txq_number; queue++)
2374 		mvneta_txq_deinit(pp, &pp->txqs[queue]);
2375 }
2376 
2377 /* Cleanup all Rx queues */
2378 static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
2379 {
2380 	int queue;
2381 
2382 	for (queue = 0; queue < rxq_number; queue++)
2383 		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
2384 }
2385 
2386 
2387 /* Init all Rx queues */
2388 static int mvneta_setup_rxqs(struct mvneta_port *pp)
2389 {
2390 	int queue;
2391 
2392 	for (queue = 0; queue < rxq_number; queue++) {
2393 		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
2394 		if (err) {
2395 			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
2396 				   __func__, queue);
2397 			mvneta_cleanup_rxqs(pp);
2398 			return err;
2399 		}
2400 	}
2401 
2402 	return 0;
2403 }
2404 
2405 /* Init all tx queues */
2406 static int mvneta_setup_txqs(struct mvneta_port *pp)
2407 {
2408 	int queue;
2409 
2410 	for (queue = 0; queue < txq_number; queue++) {
2411 		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
2412 		if (err) {
2413 			netdev_err(pp->dev, "%s: can't create txq=%d\n",
2414 				   __func__, queue);
2415 			mvneta_cleanup_txqs(pp);
2416 			return err;
2417 		}
2418 	}
2419 
2420 	return 0;
2421 }
2422 
2423 static void mvneta_start_dev(struct mvneta_port *pp)
2424 {
2425 	mvneta_max_rx_size_set(pp, pp->pkt_size);
2426 	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
2427 
2428 	/* start the Rx/Tx activity */
2429 	mvneta_port_enable(pp);
2430 
2431 	/* Enable polling on the port */
2432 	napi_enable(&pp->napi);
2433 
2434 	/* Unmask interrupts */
2435 	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2436 		    MVNETA_RX_INTR_MASK(rxq_number) |
2437 		    MVNETA_TX_INTR_MASK(txq_number) |
2438 		    MVNETA_MISCINTR_INTR_MASK);
2439 	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2440 		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
2441 		    MVNETA_CAUSE_LINK_CHANGE |
2442 		    MVNETA_CAUSE_PSC_SYNC_CHANGE);
2443 
2444 	phy_start(pp->phy_dev);
2445 	netif_tx_start_all_queues(pp->dev);
2446 }
2447 
2448 static void mvneta_stop_dev(struct mvneta_port *pp)
2449 {
2450 	phy_stop(pp->phy_dev);
2451 
2452 	napi_disable(&pp->napi);
2453 
2454 	netif_carrier_off(pp->dev);
2455 
2456 	mvneta_port_down(pp);
2457 	netif_tx_stop_all_queues(pp->dev);
2458 
2459 	/* Stop the port activity */
2460 	mvneta_port_disable(pp);
2461 
2462 	/* Clear all ethernet port interrupts */
2463 	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2464 	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
2465 
2466 	/* Mask all ethernet port interrupts */
2467 	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2468 	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2469 	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2470 
2471 	mvneta_tx_reset(pp);
2472 	mvneta_rx_reset(pp);
2473 }
2474 
/* Return a valid (possibly adjusted) MTU, or a negative error code */
2476 static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
2477 {
2478 	if (mtu < 68) {
2479 		netdev_err(dev, "cannot change mtu to less than 68\n");
2480 		return -EINVAL;
2481 	}
2482 
2483 	/* 9676 == 9700 - 20 and rounding to 8 */
2484 	if (mtu > 9676) {
		netdev_info(dev, "Illegal MTU value %d, rounding to 9676\n", mtu);
2486 		mtu = 9676;
2487 	}
2488 
2489 	if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
2490 		netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
2491 			mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
2492 		mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
2493 	}
2494 
2495 	return mtu;
2496 }
2497 
2498 /* Change the device mtu */
2499 static int mvneta_change_mtu(struct net_device *dev, int mtu)
2500 {
2501 	struct mvneta_port *pp = netdev_priv(dev);
2502 	int ret;
2503 
2504 	mtu = mvneta_check_mtu_valid(dev, mtu);
2505 	if (mtu < 0)
2506 		return -EINVAL;
2507 
2508 	dev->mtu = mtu;
2509 
2510 	if (!netif_running(dev)) {
2511 		netdev_update_features(dev);
2512 		return 0;
2513 	}
2514 
2515 	/* The interface is running, so we have to force a
2516 	 * reallocation of the queues
2517 	 */
2518 	mvneta_stop_dev(pp);
2519 
2520 	mvneta_cleanup_txqs(pp);
2521 	mvneta_cleanup_rxqs(pp);
2522 
2523 	pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
2524 	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
2525 	                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2526 
2527 	ret = mvneta_setup_rxqs(pp);
2528 	if (ret) {
2529 		netdev_err(dev, "unable to setup rxqs after MTU change\n");
2530 		return ret;
2531 	}
2532 
2533 	ret = mvneta_setup_txqs(pp);
2534 	if (ret) {
2535 		netdev_err(dev, "unable to setup txqs after MTU change\n");
2536 		return ret;
2537 	}
2538 
2539 	mvneta_start_dev(pp);
2540 	mvneta_port_up(pp);
2541 
2542 	netdev_update_features(dev);
2543 
2544 	return 0;
2545 }
2546 
2547 static netdev_features_t mvneta_fix_features(struct net_device *dev,
2548 					     netdev_features_t features)
2549 {
2550 	struct mvneta_port *pp = netdev_priv(dev);
2551 
2552 	if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
2553 		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
2554 		netdev_info(dev,
2555 			    "Disable IP checksum for MTU greater than %dB\n",
2556 			    pp->tx_csum_limit);
2557 	}
2558 
2559 	return features;
2560 }
2561 
2562 /* Get mac address */
2563 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
2564 {
2565 	u32 mac_addr_l, mac_addr_h;
2566 
2567 	mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
2568 	mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
2569 	addr[0] = (mac_addr_h >> 24) & 0xFF;
2570 	addr[1] = (mac_addr_h >> 16) & 0xFF;
2571 	addr[2] = (mac_addr_h >> 8) & 0xFF;
2572 	addr[3] = mac_addr_h & 0xFF;
2573 	addr[4] = (mac_addr_l >> 8) & 0xFF;
2574 	addr[5] = mac_addr_l & 0xFF;
2575 }
2576 
2577 /* Handle setting mac address */
2578 static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
2579 {
2580 	struct mvneta_port *pp = netdev_priv(dev);
2581 	struct sockaddr *sockaddr = addr;
2582 	int ret;
2583 
2584 	ret = eth_prepare_mac_addr_change(dev, addr);
2585 	if (ret < 0)
2586 		return ret;
2587 	/* Remove previous address table entry */
2588 	mvneta_mac_addr_set(pp, dev->dev_addr, -1);
2589 
2590 	/* Set new addr in hw */
2591 	mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def);
2592 
2593 	eth_commit_mac_addr_change(dev, addr);
2594 	return 0;
2595 }
2596 
2597 static void mvneta_adjust_link(struct net_device *ndev)
2598 {
2599 	struct mvneta_port *pp = netdev_priv(ndev);
2600 	struct phy_device *phydev = pp->phy_dev;
2601 	int status_change = 0;
2602 
2603 	if (phydev->link) {
2604 		if ((pp->speed != phydev->speed) ||
2605 		    (pp->duplex != phydev->duplex)) {
2606 			u32 val;
2607 
2608 			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2609 			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
2610 				 MVNETA_GMAC_CONFIG_GMII_SPEED |
2611 				 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
2612 
2613 			if (phydev->duplex)
2614 				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
2615 
2616 			if (phydev->speed == SPEED_1000)
2617 				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
2618 			else if (phydev->speed == SPEED_100)
2619 				val |= MVNETA_GMAC_CONFIG_MII_SPEED;
2620 
2621 			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
2622 
2623 			pp->duplex = phydev->duplex;
2624 			pp->speed  = phydev->speed;
2625 		}
2626 	}
2627 
2628 	if (phydev->link != pp->link) {
2629 		if (!phydev->link) {
2630 			pp->duplex = -1;
2631 			pp->speed = 0;
2632 		}
2633 
2634 		pp->link = phydev->link;
2635 		status_change = 1;
2636 	}
2637 
2638 	if (status_change) {
2639 		if (phydev->link) {
2640 			if (!pp->use_inband_status) {
2641 				u32 val = mvreg_read(pp,
2642 						  MVNETA_GMAC_AUTONEG_CONFIG);
2643 				val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
2644 				val |= MVNETA_GMAC_FORCE_LINK_PASS;
2645 				mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
2646 					    val);
2647 			}
2648 			mvneta_port_up(pp);
2649 		} else {
2650 			if (!pp->use_inband_status) {
2651 				u32 val = mvreg_read(pp,
2652 						  MVNETA_GMAC_AUTONEG_CONFIG);
2653 				val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
2654 				val |= MVNETA_GMAC_FORCE_LINK_DOWN;
2655 				mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
2656 					    val);
2657 			}
2658 			mvneta_port_down(pp);
2659 		}
2660 		phy_print_status(phydev);
2661 	}
2662 }
2663 
2664 static int mvneta_mdio_probe(struct mvneta_port *pp)
2665 {
2666 	struct phy_device *phy_dev;
2667 
2668 	phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
2669 				 pp->phy_interface);
2670 	if (!phy_dev) {
2671 		netdev_err(pp->dev, "could not find the PHY\n");
2672 		return -ENODEV;
2673 	}
2674 
2675 	phy_dev->supported &= PHY_GBIT_FEATURES;
2676 	phy_dev->advertising = phy_dev->supported;
2677 
2678 	pp->phy_dev = phy_dev;
2679 	pp->link    = 0;
2680 	pp->duplex  = 0;
2681 	pp->speed   = 0;
2682 
2683 	return 0;
2684 }
2685 
2686 static void mvneta_mdio_remove(struct mvneta_port *pp)
2687 {
2688 	phy_disconnect(pp->phy_dev);
2689 	pp->phy_dev = NULL;
2690 }
2691 
2692 static int mvneta_open(struct net_device *dev)
2693 {
2694 	struct mvneta_port *pp = netdev_priv(dev);
2695 	int ret;
2696 
2697 	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
2698 	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
2699 	                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2700 
2701 	ret = mvneta_setup_rxqs(pp);
2702 	if (ret)
2703 		return ret;
2704 
2705 	ret = mvneta_setup_txqs(pp);
2706 	if (ret)
2707 		goto err_cleanup_rxqs;
2708 
2709 	/* Connect to port interrupt line */
2710 	ret = request_irq(pp->dev->irq, mvneta_isr, 0,
2711 			  MVNETA_DRIVER_NAME, pp);
2712 	if (ret) {
2713 		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
2714 		goto err_cleanup_txqs;
2715 	}
2716 
	/* By default the link is down */
2718 	netif_carrier_off(pp->dev);
2719 
2720 	ret = mvneta_mdio_probe(pp);
2721 	if (ret < 0) {
2722 		netdev_err(dev, "cannot probe MDIO bus\n");
2723 		goto err_free_irq;
2724 	}
2725 
2726 	mvneta_start_dev(pp);
2727 
2728 	return 0;
2729 
2730 err_free_irq:
2731 	free_irq(pp->dev->irq, pp);
2732 err_cleanup_txqs:
2733 	mvneta_cleanup_txqs(pp);
2734 err_cleanup_rxqs:
2735 	mvneta_cleanup_rxqs(pp);
2736 	return ret;
2737 }
2738 
2739 /* Stop the port, free port interrupt line */
2740 static int mvneta_stop(struct net_device *dev)
2741 {
2742 	struct mvneta_port *pp = netdev_priv(dev);
2743 
2744 	mvneta_stop_dev(pp);
2745 	mvneta_mdio_remove(pp);
2746 	free_irq(dev->irq, pp);
2747 	mvneta_cleanup_rxqs(pp);
2748 	mvneta_cleanup_txqs(pp);
2749 
2750 	return 0;
2751 }
2752 
2753 static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2754 {
2755 	struct mvneta_port *pp = netdev_priv(dev);
2756 
2757 	if (!pp->phy_dev)
2758 		return -ENOTSUPP;
2759 
2760 	return phy_mii_ioctl(pp->phy_dev, ifr, cmd);
2761 }
2762 
2763 /* Ethtool methods */
2764 
/* Get settings (phy address, speed) for ethtool */
2766 int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2767 {
2768 	struct mvneta_port *pp = netdev_priv(dev);
2769 
2770 	if (!pp->phy_dev)
2771 		return -ENODEV;
2772 
2773 	return phy_ethtool_gset(pp->phy_dev, cmd);
2774 }
2775 
/* Set settings (phy address, speed) for ethtool */
2777 int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2778 {
2779 	struct mvneta_port *pp = netdev_priv(dev);
2780 
2781 	if (!pp->phy_dev)
2782 		return -ENODEV;
2783 
2784 	return phy_ethtool_sset(pp->phy_dev, cmd);
2785 }
2786 
/* Set interrupt coalescing for ethtool */
2788 static int mvneta_ethtool_set_coalesce(struct net_device *dev,
2789 				       struct ethtool_coalesce *c)
2790 {
2791 	struct mvneta_port *pp = netdev_priv(dev);
2792 	int queue;
2793 
2794 	for (queue = 0; queue < rxq_number; queue++) {
2795 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
2796 		rxq->time_coal = c->rx_coalesce_usecs;
2797 		rxq->pkts_coal = c->rx_max_coalesced_frames;
2798 		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2799 		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2800 	}
2801 
2802 	for (queue = 0; queue < txq_number; queue++) {
2803 		struct mvneta_tx_queue *txq = &pp->txqs[queue];
2804 		txq->done_pkts_coal = c->tx_max_coalesced_frames;
2805 		mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2806 	}
2807 
2808 	return 0;
2809 }
2810 
/* Get interrupt coalescing for ethtool */
2812 static int mvneta_ethtool_get_coalesce(struct net_device *dev,
2813 				       struct ethtool_coalesce *c)
2814 {
2815 	struct mvneta_port *pp = netdev_priv(dev);
2816 
2817 	c->rx_coalesce_usecs        = pp->rxqs[0].time_coal;
2818 	c->rx_max_coalesced_frames  = pp->rxqs[0].pkts_coal;
2819 
2820 	c->tx_max_coalesced_frames =  pp->txqs[0].done_pkts_coal;
2821 	return 0;
2822 }
2823 
2824 
2825 static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
2826 				    struct ethtool_drvinfo *drvinfo)
2827 {
2828 	strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
2829 		sizeof(drvinfo->driver));
2830 	strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
2831 		sizeof(drvinfo->version));
2832 	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
2833 		sizeof(drvinfo->bus_info));
2834 }
2835 
2836 
2837 static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
2838 					 struct ethtool_ringparam *ring)
2839 {
2840 	struct mvneta_port *pp = netdev_priv(netdev);
2841 
2842 	ring->rx_max_pending = MVNETA_MAX_RXD;
2843 	ring->tx_max_pending = MVNETA_MAX_TXD;
2844 	ring->rx_pending = pp->rx_ring_size;
2845 	ring->tx_pending = pp->tx_ring_size;
2846 }
2847 
2848 static int mvneta_ethtool_set_ringparam(struct net_device *dev,
2849 					struct ethtool_ringparam *ring)
2850 {
2851 	struct mvneta_port *pp = netdev_priv(dev);
2852 
2853 	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
2854 		return -EINVAL;
2855 	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
2856 		ring->rx_pending : MVNETA_MAX_RXD;
2857 
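	/* Keep the ring at least twice the worst-case descriptors per skb
	 * so the tx_stop_threshold computed at queue init stays positive.
	 */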
2858 	pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
2859 				   MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
2860 	if (pp->tx_ring_size != ring->tx_pending)
2861 		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
2862 			    pp->tx_ring_size, ring->tx_pending);
2863 
2864 	if (netif_running(dev)) {
2865 		mvneta_stop(dev);
2866 		if (mvneta_open(dev)) {
2867 			netdev_err(dev,
2868 				   "error on opening device after ring param change\n");
2869 			return -ENOMEM;
2870 		}
2871 	}
2872 
2873 	return 0;
2874 }
2875 
2876 static const struct net_device_ops mvneta_netdev_ops = {
2877 	.ndo_open            = mvneta_open,
2878 	.ndo_stop            = mvneta_stop,
2879 	.ndo_start_xmit      = mvneta_tx,
2880 	.ndo_set_rx_mode     = mvneta_set_rx_mode,
2881 	.ndo_set_mac_address = mvneta_set_mac_addr,
2882 	.ndo_change_mtu      = mvneta_change_mtu,
2883 	.ndo_fix_features    = mvneta_fix_features,
2884 	.ndo_get_stats64     = mvneta_get_stats64,
2885 	.ndo_do_ioctl        = mvneta_ioctl,
2886 };
2887 
2888 const struct ethtool_ops mvneta_eth_tool_ops = {
2889 	.get_link       = ethtool_op_get_link,
2890 	.get_settings   = mvneta_ethtool_get_settings,
2891 	.set_settings   = mvneta_ethtool_set_settings,
2892 	.set_coalesce   = mvneta_ethtool_set_coalesce,
2893 	.get_coalesce   = mvneta_ethtool_get_coalesce,
2894 	.get_drvinfo    = mvneta_ethtool_get_drvinfo,
2895 	.get_ringparam  = mvneta_ethtool_get_ringparam,
2896 	.set_ringparam	= mvneta_ethtool_set_ringparam,
2897 };
2898 
2899 /* Initialize hw */
2900 static int mvneta_init(struct device *dev, struct mvneta_port *pp)
2901 {
2902 	int queue;
2903 
2904 	/* Disable port */
2905 	mvneta_port_disable(pp);
2906 
2907 	/* Set port default values */
2908 	mvneta_defaults_set(pp);
2909 
2910 	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
2911 				GFP_KERNEL);
2912 	if (!pp->txqs)
2913 		return -ENOMEM;
2914 
2915 	/* Initialize TX descriptor rings */
2916 	for (queue = 0; queue < txq_number; queue++) {
2917 		struct mvneta_tx_queue *txq = &pp->txqs[queue];
2918 		txq->id = queue;
2919 		txq->size = pp->tx_ring_size;
2920 		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
2921 	}
2922 
2923 	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
2924 				GFP_KERNEL);
2925 	if (!pp->rxqs)
2926 		return -ENOMEM;
2927 
2928 	/* Create Rx descriptor rings */
2929 	for (queue = 0; queue < rxq_number; queue++) {
2930 		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
2931 		rxq->id = queue;
2932 		rxq->size = pp->rx_ring_size;
2933 		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
2934 		rxq->time_coal = MVNETA_RX_COAL_USEC;
2935 	}
2936 
2937 	return 0;
2938 }
2939 
/* platform glue: initialize decoding windows */
2941 static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
2942 				     const struct mbus_dram_target_info *dram)
2943 {
2944 	u32 win_enable;
2945 	u32 win_protect;
2946 	int i;
2947 
2948 	for (i = 0; i < 6; i++) {
2949 		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
2950 		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
2951 
2952 		if (i < 4)
2953 			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
2954 	}
2955 
2956 	win_enable = 0x3f;
2957 	win_protect = 0;
2958 
2959 	for (i = 0; i < dram->num_cs; i++) {
2960 		const struct mbus_dram_window *cs = dram->cs + i;
2961 		mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
2962 			    (cs->mbus_attr << 8) | dram->mbus_dram_target_id);
2963 
2964 		mvreg_write(pp, MVNETA_WIN_SIZE(i),
2965 			    (cs->size - 1) & 0xffff0000);
2966 
2967 		win_enable &= ~(1 << i);
2968 		win_protect |= 3 << (2 * i);
2969 	}
2970 
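	/* Only the windows configured for the DRAM chip selects above have
	 * their bit cleared in the value written here.
	 */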
2971 	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
2972 }
2973 
2974 /* Power up the port */
2975 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
2976 {
2977 	u32 ctrl;
2978 
2979 	/* MAC Cause register should be cleared */
2980 	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
2981 
2982 	ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
2983 
2984 	/* Even though it might look weird, when we're configured in
2985 	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
2986 	 */
	switch (phy_mode) {
2988 	case PHY_INTERFACE_MODE_QSGMII:
2989 		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
2990 		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
2991 		break;
2992 	case PHY_INTERFACE_MODE_SGMII:
2993 		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
2994 		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
2995 		break;
2996 	case PHY_INTERFACE_MODE_RGMII:
2997 	case PHY_INTERFACE_MODE_RGMII_ID:
2998 		ctrl |= MVNETA_GMAC2_PORT_RGMII;
2999 		break;
3000 	default:
3001 		return -EINVAL;
3002 	}
3003 
3004 	if (pp->use_inband_status)
3005 		ctrl |= MVNETA_GMAC2_INBAND_AN_ENABLE;
3006 
3007 	/* Cancel Port Reset */
3008 	ctrl &= ~MVNETA_GMAC2_PORT_RESET;
3009 	mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
3010 
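	/* Wait for the port reset bit to be cleared by the hardware */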
3011 	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
3012 		MVNETA_GMAC2_PORT_RESET) != 0)
3013 		continue;
3014 
3015 	return 0;
3016 }
3017 
3018 /* Device initialization routine */
3019 static int mvneta_probe(struct platform_device *pdev)
3020 {
3021 	const struct mbus_dram_target_info *dram_target_info;
3022 	struct resource *res;
3023 	struct device_node *dn = pdev->dev.of_node;
3024 	struct device_node *phy_node;
3025 	struct mvneta_port *pp;
3026 	struct net_device *dev;
3027 	const char *dt_mac_addr;
3028 	char hw_mac_addr[ETH_ALEN];
3029 	const char *mac_from;
3030 	int phy_mode;
3031 	int fixed_phy = 0;
3032 	int err;
3033 
3034 	/* Our multiqueue support is not complete, so for now, only
3035 	 * allow the usage of the first RX queue
3036 	 */
3037 	if (rxq_def != 0) {
3038 		dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
3039 		return -EINVAL;
3040 	}
3041 
3042 	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
3043 	if (!dev)
3044 		return -ENOMEM;
3045 
3046 	dev->irq = irq_of_parse_and_map(dn, 0);
3047 	if (dev->irq == 0) {
3048 		err = -EINVAL;
3049 		goto err_free_netdev;
3050 	}
3051 
3052 	phy_node = of_parse_phandle(dn, "phy", 0);
3053 	if (!phy_node) {
3054 		if (!of_phy_is_fixed_link(dn)) {
3055 			dev_err(&pdev->dev, "no PHY specified\n");
3056 			err = -ENODEV;
3057 			goto err_free_irq;
3058 		}
3059 
3060 		err = of_phy_register_fixed_link(dn);
3061 		if (err < 0) {
3062 			dev_err(&pdev->dev, "cannot register fixed PHY\n");
3063 			goto err_free_irq;
3064 		}
3065 		fixed_phy = 1;
3066 
3067 		/* In the case of a fixed PHY, the DT node associated
3068 		 * to the PHY is the Ethernet MAC DT node.
3069 		 */
3070 		phy_node = of_node_get(dn);
3071 	}
3072 
3073 	phy_mode = of_get_phy_mode(dn);
3074 	if (phy_mode < 0) {
3075 		dev_err(&pdev->dev, "incorrect phy-mode\n");
3076 		err = -EINVAL;
3077 		goto err_put_phy_node;
3078 	}
3079 
3080 	dev->tx_queue_len = MVNETA_MAX_TXD;
3081 	dev->watchdog_timeo = 5 * HZ;
3082 	dev->netdev_ops = &mvneta_netdev_ops;
3083 
3084 	dev->ethtool_ops = &mvneta_eth_tool_ops;
3085 
3086 	pp = netdev_priv(dev);
3087 	pp->phy_node = phy_node;
3088 	pp->phy_interface = phy_mode;
3089 	pp->use_inband_status = (phy_mode == PHY_INTERFACE_MODE_SGMII) &&
3090 				fixed_phy;
3091 
3092 	pp->clk = devm_clk_get(&pdev->dev, NULL);
3093 	if (IS_ERR(pp->clk)) {
3094 		err = PTR_ERR(pp->clk);
3095 		goto err_put_phy_node;
3096 	}
3097 
3098 	clk_prepare_enable(pp->clk);
3099 
3100 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3101 	pp->base = devm_ioremap_resource(&pdev->dev, res);
3102 	if (IS_ERR(pp->base)) {
3103 		err = PTR_ERR(pp->base);
3104 		goto err_clk;
3105 	}
3106 
3107 	/* Alloc per-cpu stats */
3108 	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
3109 	if (!pp->stats) {
3110 		err = -ENOMEM;
3111 		goto err_clk;
3112 	}
3113 
3114 	dt_mac_addr = of_get_mac_address(dn);
3115 	if (dt_mac_addr) {
3116 		mac_from = "device tree";
3117 		memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
3118 	} else {
3119 		mvneta_get_mac_addr(pp, hw_mac_addr);
3120 		if (is_valid_ether_addr(hw_mac_addr)) {
3121 			mac_from = "hardware";
3122 			memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
3123 		} else {
3124 			mac_from = "random";
3125 			eth_hw_addr_random(dev);
3126 		}
3127 	}
3128 
3129 	if (of_device_is_compatible(dn, "marvell,armada-370-neta"))
3130 		pp->tx_csum_limit = 1600;
3131 
3132 	pp->tx_ring_size = MVNETA_MAX_TXD;
3133 	pp->rx_ring_size = MVNETA_MAX_RXD;
3134 
3135 	pp->dev = dev;
3136 	SET_NETDEV_DEV(dev, &pdev->dev);
3137 
3138 	err = mvneta_init(&pdev->dev, pp);
3139 	if (err < 0)
3140 		goto err_free_stats;
3141 
3142 	err = mvneta_port_power_up(pp, phy_mode);
3143 	if (err < 0) {
3144 		dev_err(&pdev->dev, "can't power up port\n");
3145 		goto err_free_stats;
3146 	}
3147 
3148 	dram_target_info = mv_mbus_dram_info();
3149 	if (dram_target_info)
3150 		mvneta_conf_mbus_windows(pp, dram_target_info);
3151 
3152 	netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
3153 
3154 	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3155 	dev->hw_features |= dev->features;
3156 	dev->vlan_features |= dev->features;
3157 	dev->priv_flags |= IFF_UNICAST_FLT;
3158 	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
3159 
3160 	err = register_netdev(dev);
3161 	if (err < 0) {
3162 		dev_err(&pdev->dev, "failed to register\n");
3163 		goto err_free_stats;
3164 	}
3165 
3166 	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
3167 		    dev->dev_addr);
3168 
3169 	platform_set_drvdata(pdev, pp->dev);
3170 
3171 	if (pp->use_inband_status) {
3172 		struct phy_device *phy = of_phy_find_device(dn);
3173 
3174 		mvneta_fixed_link_update(pp, phy);
3175 	}
3176 
3177 	return 0;
3178 
3179 err_free_stats:
3180 	free_percpu(pp->stats);
3181 err_clk:
3182 	clk_disable_unprepare(pp->clk);
3183 err_put_phy_node:
3184 	of_node_put(phy_node);
3185 err_free_irq:
3186 	irq_dispose_mapping(dev->irq);
3187 err_free_netdev:
3188 	free_netdev(dev);
3189 	return err;
3190 }
3191 
3192 /* Device removal routine */
3193 static int mvneta_remove(struct platform_device *pdev)
3194 {
3195 	struct net_device  *dev = platform_get_drvdata(pdev);
3196 	struct mvneta_port *pp = netdev_priv(dev);
3197 
3198 	unregister_netdev(dev);
3199 	clk_disable_unprepare(pp->clk);
3200 	free_percpu(pp->stats);
3201 	irq_dispose_mapping(dev->irq);
3202 	of_node_put(pp->phy_node);
3203 	free_netdev(dev);
3204 
3205 	return 0;
3206 }
3207 
3208 static const struct of_device_id mvneta_match[] = {
3209 	{ .compatible = "marvell,armada-370-neta" },
3210 	{ .compatible = "marvell,armada-xp-neta" },
3211 	{ }
3212 };
3213 MODULE_DEVICE_TABLE(of, mvneta_match);
3214 
3215 static struct platform_driver mvneta_driver = {
3216 	.probe = mvneta_probe,
3217 	.remove = mvneta_remove,
3218 	.driver = {
3219 		.name = MVNETA_DRIVER_NAME,
3220 		.of_match_table = mvneta_match,
3221 	},
3222 };
3223 
3224 module_platform_driver(mvneta_driver);
3225 
3226 MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
3227 MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
3228 MODULE_LICENSE("GPL");
3229 
3230 module_param(rxq_number, int, S_IRUGO);
3231 module_param(txq_number, int, S_IRUGO);
3232 
3233 module_param(rxq_def, int, S_IRUGO);
3234 module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);
3235