// SPDX-License-Identifier: GPL-2.0-or-later
/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#ifdef CONFIG_PPC
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#endif
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT	(5*HZ)

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

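/* Initialize one Rx buffer descriptor: point it at its data buffer, mark
 * it empty and interrupt-enabled, and set the wrap flag on the last
 * descriptor of the ring. The barrier ensures the buffer pointer is
 * visible before ownership is handed back to hardware via lstatus.
 */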
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = cpu_to_be32(buf);

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	gfar_wmb();

	bdp->lstatus = cpu_to_be32(lstatus);
}

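/* Program the DMA base address of every Tx and Rx buffer descriptor ring
 * into the controller's TBASEn/RBASEn registers; the per-queue registers
 * are laid out two 32-bit words apart, hence the += 2 stride.
 */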
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

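/* Program each Rx queue's RQPRMn register with its ring size and the
 * default lossless flow control (LFC) threshold.
 */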
static void gfar_init_rqprm(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->rqprm0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
		baddr++;
	}
}

static void gfar_rx_offload_en(struct gfar_private *priv)
{
	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
		priv->uses_rxfcb = 1;

	if (priv->hwts_rx_en || priv->rx_filer_enable)
		priv->uses_rxfcb = 1;
}

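/* Program RCTRL from the current settings: Rx filer, promiscuous mode,
 * checksum offload, extended hash, padding, hardware timestamping and
 * VLAN extraction, then set up the lossless flow control thresholds.
 */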
static void gfar_mac_rx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
	}

	/* Restore PROMISC mode */
	if (priv->ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (priv->ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Clear the LFC bit */
	gfar_write(&regs->rctrl, rctrl);
	/* Init flow control threshold values */
	gfar_init_rqprm(priv);
	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
	rctrl |= RCTRL_LFC;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
}

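/* Program TCTRL: Tx checksum generation, priority-based vs. weighted
 * round-robin queue scheduling, and VLAN tag insertion.
 */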
static void gfar_mac_tx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tctrl = 0;

	if (priv->ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tctrl |= TCTRL_VLINS;

	gfar_write(&regs->tctrl, tctrl);
}

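/* Apply the interrupt coalescing (frame count/timer) settings for the Tx
 * and Rx queues selected by the given bitmasks. Multi-group devices have
 * per-queue TXICn/RXICn registers; single-group devices only provide the
 * one txic/rxic pair.
 */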
static void gfar_configure_coalescing(struct gfar_private *priv,
				      unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only a single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

static void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}

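/* Aggregate the per-queue software counters into the rtnl stats. If the
 * controller has an RMON block, also fold the 16-bit RDRP drop counter,
 * plus the software-tracked carry count, into rx_missed_errors; the read
 * loop re-checks CAR1 so a carry occurring mid-read is not lost.
 */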
static void gfar_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		stats->rx_packets += priv->rx_queue[i]->stats.rx_packets;
		stats->rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
		stats->rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		stats->tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		stats->tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon;
		unsigned long flags;
		u32 rdrp, car, car_before;
		u64 rdrp_offset;

		spin_lock_irqsave(&priv->rmon_overflow.lock, flags);
		car = gfar_read(&rmon->car1) & CAR1_C1RDR;
		do {
			car_before = car;
			rdrp = gfar_read(&rmon->rdrp);
			car = gfar_read(&rmon->car1) & CAR1_C1RDR;
		} while (car != car_before);
		if (car) {
			priv->rmon_overflow.rdrp++;
			gfar_write(&rmon->car1, car);
		}
		rdrp_offset = priv->rmon_overflow.rdrp;
		spin_unlock_irqrestore(&priv->rmon_overflow.lock, flags);

		stats->rx_missed_errors = rdrp + (rdrp_offset << 16);
	}
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 *    do a CRC on it (little endian), and reverse the bits of the
 *    result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 *    table. The table is controlled through 8 32-bit registers:
 *    gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 *    entry 255. This means that the 3 most significant bits of the
 *    hash select which gaddr register to use, and the other 5 bits
 *    indicate which bit (assuming an IBM numbering scheme, which
 *    for PowerPC (tm) is usually the case) in that register holds
 *    the entry.
 */
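/* For example, with the non-extended 8-bit hash width, a CRC result
 * whose top byte is 0xA6 gives whichreg = result >> 29 = 5 and
 * whichbit = (result >> 24) & 0x1f = 6, so bit 6 (counting from the MSB)
 * of gaddr5 is set below. The 9-bit extended hash works the same way but
 * spreads the entries across igaddr0-7 and gaddr0-7.
 */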
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}

/* There are multiple MAC address register pairs on some controllers.
 * This function sets the num'th pair to a given address.
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* For a station address of 0x12345678ABCD in transmission
	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
	 * MACnADDR2 is set to 0x34120000.
	 */
	tempval = (addr[5] << 24) | (addr[4] << 16) |
		  (addr[3] << 8) | addr[2];

	gfar_write(macptr, tempval);

	tempval = (addr[1] << 24) | (addr[0] << 16);

	gfar_write(macptr+1, tempval);
}

static int gfar_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

static void gfar_ints_disable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
}

static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask,
			   IMASK_DEFAULT | priv->rmon_overflow.imask);
	}
}

static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i])
			return -ENOMEM;

		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}
	return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i])
			return -ENOMEM;

		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->ndev = priv->ndev;
	}
	return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_disable(&priv->gfargrp[i].napi_rx);
		napi_disable(&priv->gfargrp[i].napi_tx);
	}
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_enable(&priv->gfargrp[i].napi_rx);
		napi_enable(&priv->gfargrp[i].napi_tx);
	}
}

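/* Set up one interrupt group: map its register block, parse its IRQs
 * (a single interrupt for FEC, separate TX/RX/ER interrupts otherwise),
 * and assign Rx/Tx queues to the group along with the corresponding
 * RSTAT/TSTAT/RQUEUE/TQUEUE bits.
 */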
static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (!gfar_irq(grp, TX)->irq ||
		    !gfar_irq(grp, RX)->irq ||
		    !gfar_irq(grp, ER)->irq)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
		grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	/* bit_map's MSB is q0 (from q0 to q7), but for_each_set_bit parses
	 * from right to left, so we need to reverse the 8 bits to get the
	 * queue index
	 */
	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
	grp->tx_bit_map = bitrev8(grp->tx_bit_map);

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
		if (!grp->rx_queue)
			grp->rx_queue = priv->rx_queue[i];
		grp->num_rx_queues++;
		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		priv->rx_queue[i]->grp = grp;
	}

	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
		if (!grp->tx_queue)
			grp->tx_queue = priv->tx_queue[i];
		grp->num_tx_queues++;
		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
		priv->tqueue |= (TQUEUE_EN0 >> i);
		priv->tx_queue[i]->grp = grp;
	}

	priv->num_grps++;

	return 0;
}

static int gfar_of_group_count(struct device_node *np)
{
	struct device_node *child;
	int num = 0;

	for_each_available_child_of_node(np, child)
		if (of_node_name_eq(child, "queue-group"))
			num++;

	return num;
}

/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
			return PHY_INTERFACE_MODE_RMII;
		} else {
			phy_interface_t interface = priv->interface;

			/* This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}

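/* Parse the controller's device tree node and allocate the net_device.
 * A rough, purely illustrative eTSEC node (names and addresses are
 * examples, not taken from any particular board) might look like:
 *
 *	ethernet@24000 {
 *		compatible = "fsl,etsec2";
 *		model = "eTSEC";
 *		phy-handle = <&phy0>;
 *		fsl,magic-packet;
 *		queue-group@0 { ... };
 *		queue-group@1 { ... };
 *	};
 *
 * Other properties parsed below include bd-stash, rx-stash-len,
 * rx-stash-idx, tbi-handle and fsl,wake-on-filer.
 */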
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	int err = 0, i;
	phy_interface_t interface;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	u32 stash_len = 0;
	u32 stash_idx = 0;
	unsigned int num_tx_qs, num_rx_qs;
	unsigned short mode;

	if (!np)
		return -ENODEV;

	if (of_device_is_compatible(np, "fsl,etsec2"))
		mode = MQ_MG_MODE;
	else
		mode = SQ_SG_MODE;

	if (mode == SQ_SG_MODE) {
		num_tx_qs = 1;
		num_rx_qs = 1;
	} else { /* MQ_MG_MODE */
		/* get the actual number of supported groups */
		unsigned int num_grps = gfar_of_group_count(np);

		if (num_grps == 0 || num_grps > MAXGROUPS) {
			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
				num_grps);
			pr_err("Cannot do alloc_etherdev, aborting\n");
			return -EINVAL;
		}

		num_tx_qs = num_grps; /* one txq per int group */
		num_rx_qs = num_grps; /* one rxq per int group */
	}

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->mode = mode;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;

	err = gfar_alloc_tx_queues(priv);
	if (err)
		goto tx_alloc_failed;

	err = gfar_alloc_rx_queues(priv);
	if (err)
		goto rx_alloc_failed;

	err = of_property_read_string(np, "model", &model);
	if (err) {
		pr_err("Device model property missing, aborting\n");
		goto rx_alloc_failed;
	}

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (priv->mode == MQ_MG_MODE) {
		for_each_available_child_of_node(np, child) {
			if (!of_node_name_eq(child, "queue-group"))
				continue;

			err = gfar_parse_group(child, priv, model);
			if (err) {
				of_node_put(child);
				goto err_grp_init;
			}
		}
	} else { /* SQ_SG_MODE */
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	if (of_property_read_bool(np, "bd-stash")) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	err = of_property_read_u32(np, "rx-stash-len", &stash_len);

	if (err == 0)
		priv->rx_stash_size = stash_len;

	err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);

	if (err == 0)
		priv->rx_stash_index = stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	err = of_get_ethdev_address(np, dev);
	if (err) {
		eth_hw_addr_random(dev);
		dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
	}

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				      FSL_GIANFAR_DEV_HAS_CSUM |
				      FSL_GIANFAR_DEV_HAS_VLAN |
				      FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				      FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				      FSL_GIANFAR_DEV_HAS_TIMER |
				      FSL_GIANFAR_DEV_HAS_RX_FILER;

	/* Use PHY connection type from the DT node if one is specified there.
	 * rgmii-id really needs to be specified. Other types can be
	 * detected by hardware
	 */
	err = of_get_phy_mode(np, &interface);
	if (!err)
		priv->interface = interface;
	else
		priv->interface = gfar_get_interface(dev);

	if (of_property_read_bool(np, "fsl,magic-packet"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	if (of_property_read_bool(np, "fsl,wake-on-filer"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
		err = of_phy_register_fixed_link(np);
		if (err)
			goto err_grp_init;

		priv->phy_node = of_node_get(np);
	}

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_grp_init:
	unmap_group_regs(priv);
rx_alloc_failed:
	gfar_free_rx_queues(priv);
tx_alloc_failed:
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);
	return err;
}

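/* Write one cluster of four filer entries for the given packet class,
 * filling the table downwards from rqfar, and return the next free
 * filer index.
 */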
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

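/* Initialize the Rx filer table: a catch-all default rule at the top,
 * clusters for the IPv4/IPv6 TCP/UDP classes below it, and no-match
 * entries in the remaining (unused) slots.
 */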
static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

#ifdef CONFIG_PPC
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 Rev < 2.0 */
	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
	unsigned int svr = mfspr(SPRN_SVR);

	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
		priv->errata |= GFAR_ERRATA_12;
	/* P2020/P1010 Rev 1; MPC8548 Rev 2 */
	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}
#endif

static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;

	/* no plans to fix */
	priv->errata |= GFAR_ERRATA_A002;

#ifdef CONFIG_PPC
	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
		__gfar_detect_errata_85xx(priv);
	else /* non-mpc85xx parts, i.e. e300 core based */
		__gfar_detect_errata_83xx(priv);
#endif

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

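/* Select the multicast hash registers: controllers with extended hash
 * support use a 9-bit hash spread over igaddr0-7 and gaddr0-7 (512
 * entries), the others an 8-bit hash over gaddr0-7 (256 entries).
 */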
static void gfar_init_addr_hash_table(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}
}

static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

	/* Normally TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
		return 0;

	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	res &= 0x7f807f80;
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}

/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	unsigned int timeout;
	int stopped;

	gfar_ints_disable(priv);

	if (gfar_is_dma_stopped(priv))
		return;

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

retry:
	timeout = 1000;
	while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
		cpu_relax();
		timeout--;
	}

	if (!timeout)
		stopped = gfar_is_dma_stopped(priv);

	if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
	    !__gfar_is_rx_idle(priv))
		goto retry;
}

/* Halt the receive and transmit queues */
static void gfar_halt(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Disable the Rx/Tx hw queues */
	gfar_write(&regs->rqueue, 0);
	gfar_write(&regs->tqueue, 0);

	mdelay(10);

	gfar_halt_nodisable(priv);

	/* Disable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

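/* Unmap the DMA buffers (head buffer plus all fragments) of any Tx
 * descriptors that still have an skb attached, free those skbs, and
 * release the tx_skbuff tracking array.
 */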
static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
				 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
		     j++) {
			txbdp++;
			dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
				       be16_to_cpu(txbdp->length),
				       DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
	tx_queue->tx_skbuff = NULL;
}

1092ec21e2ecSJeff Kirsher static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1093ec21e2ecSJeff Kirsher {
1094ec21e2ecSJeff Kirsher int i;
1095ec21e2ecSJeff Kirsher
109675354148SClaudiu Manoil struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
109775354148SClaudiu Manoil
109875354148SClaudiu Manoil dev_kfree_skb(rx_queue->skb);
1099ec21e2ecSJeff Kirsher
1100ec21e2ecSJeff Kirsher for (i = 0; i < rx_queue->rx_ring_size; i++) {
110175354148SClaudiu Manoil struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
110275354148SClaudiu Manoil
1103ec21e2ecSJeff Kirsher rxbdp->lstatus = 0;
1104ec21e2ecSJeff Kirsher rxbdp->bufPtr = 0;
1105ec21e2ecSJeff Kirsher rxbdp++;
110675354148SClaudiu Manoil
110775354148SClaudiu Manoil if (!rxb->page)
110875354148SClaudiu Manoil continue;
110975354148SClaudiu Manoil
11104af0e5bbSArseny Solokha dma_unmap_page(rx_queue->dev, rxb->dma,
111175354148SClaudiu Manoil PAGE_SIZE, DMA_FROM_DEVICE);
111275354148SClaudiu Manoil __free_page(rxb->page);
111375354148SClaudiu Manoil
111475354148SClaudiu Manoil rxb->page = NULL;
1115ec21e2ecSJeff Kirsher }
111675354148SClaudiu Manoil
111775354148SClaudiu Manoil kfree(rx_queue->rx_buff);
111875354148SClaudiu Manoil rx_queue->rx_buff = NULL;
1119ec21e2ecSJeff Kirsher }
1120ec21e2ecSJeff Kirsher
1121ec21e2ecSJeff Kirsher /* If there are any tx skbs or rx skbs still around, free them.
11220977f817SJan Ceuleers * Then free tx_skbuff and rx_skbuff
11230977f817SJan Ceuleers */
1124ec21e2ecSJeff Kirsher static void free_skb_resources(struct gfar_private *priv)
1125ec21e2ecSJeff Kirsher {
1126ec21e2ecSJeff Kirsher struct gfar_priv_tx_q *tx_queue = NULL;
1127ec21e2ecSJeff Kirsher struct gfar_priv_rx_q *rx_queue = NULL;
1128ec21e2ecSJeff Kirsher int i;
1129ec21e2ecSJeff Kirsher
1130ec21e2ecSJeff Kirsher /* Go through all the buffer descriptors and free their data buffers */
1131ec21e2ecSJeff Kirsher for (i = 0; i < priv->num_tx_queues; i++) {
1132d8a0f1b0SPaul Gortmaker struct netdev_queue *txq;
1133bc4598bcSJan Ceuleers
1134ec21e2ecSJeff Kirsher tx_queue = priv->tx_queue[i];
1135d8a0f1b0SPaul Gortmaker txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
1136ec21e2ecSJeff Kirsher if (tx_queue->tx_skbuff)
1137ec21e2ecSJeff Kirsher free_skb_tx_queue(tx_queue);
1138d8a0f1b0SPaul Gortmaker netdev_tx_reset_queue(txq);
1139ec21e2ecSJeff Kirsher }
1140ec21e2ecSJeff Kirsher
1141ec21e2ecSJeff Kirsher for (i = 0; i < priv->num_rx_queues; i++) {
1142ec21e2ecSJeff Kirsher rx_queue = priv->rx_queue[i];
114375354148SClaudiu Manoil if (rx_queue->rx_buff)
1144ec21e2ecSJeff Kirsher free_skb_rx_queue(rx_queue);
1145ec21e2ecSJeff Kirsher }
1146ec21e2ecSJeff Kirsher
1147369ec162SClaudiu Manoil dma_free_coherent(priv->dev,
1148ec21e2ecSJeff Kirsher sizeof(struct txbd8) * priv->total_tx_ring_size +
1149ec21e2ecSJeff Kirsher sizeof(struct rxbd8) * priv->total_rx_ring_size,
1150ec21e2ecSJeff Kirsher priv->tx_queue[0]->tx_bd_base,
1151ec21e2ecSJeff Kirsher priv->tx_queue[0]->tx_bd_dma_base);
1152ec21e2ecSJeff Kirsher }
1153ec21e2ecSJeff Kirsher
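/* Bring the interface down: stop the Tx queues and NAPI, mask interrupts,
 * gracefully halt the Rx/Tx DMA, stop the PHY and release the ring buffers.
 */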
11547d993c5fSArseny Solokha void stop_gfar(struct net_device *dev)
11557d993c5fSArseny Solokha {
11567d993c5fSArseny Solokha struct gfar_private *priv = netdev_priv(dev);
11577d993c5fSArseny Solokha
11587d993c5fSArseny Solokha netif_tx_stop_all_queues(dev);
11597d993c5fSArseny Solokha
11607d993c5fSArseny Solokha smp_mb__before_atomic();
11617d993c5fSArseny Solokha set_bit(GFAR_DOWN, &priv->state);
11627d993c5fSArseny Solokha smp_mb__after_atomic();
11637d993c5fSArseny Solokha
11647d993c5fSArseny Solokha disable_napi(priv);
11657d993c5fSArseny Solokha
11667d993c5fSArseny Solokha /* disable ints and gracefully shut down Rx/Tx DMA */
11677d993c5fSArseny Solokha gfar_halt(priv);
11687d993c5fSArseny Solokha
11697d993c5fSArseny Solokha phy_stop(dev->phydev);
11707d993c5fSArseny Solokha
11717d993c5fSArseny Solokha free_skb_resources(priv);
11727d993c5fSArseny Solokha }
11737d993c5fSArseny Solokha
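/* Counterpart of gfar_halt(): re-enable the Rx/Tx hw queues and DMA,
 * restart descriptor polling and unmask interrupts.
 */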
11747ad38784SArseny Solokha static void gfar_start(struct gfar_private *priv)
1175ec21e2ecSJeff Kirsher {
1176ec21e2ecSJeff Kirsher struct gfar __iomem *regs = priv->gfargrp[0].regs;
1177ec21e2ecSJeff Kirsher u32 tempval;
1178ec21e2ecSJeff Kirsher int i = 0;
1179ec21e2ecSJeff Kirsher
1180c10650b6SClaudiu Manoil /* Enable Rx/Tx hw queues */
1181c10650b6SClaudiu Manoil gfar_write(&regs->rqueue, priv->rqueue);
1182c10650b6SClaudiu Manoil gfar_write(&regs->tqueue, priv->tqueue);
1183ec21e2ecSJeff Kirsher
1184ec21e2ecSJeff Kirsher /* Initialize DMACTRL to have WWR and WOP */
1185ec21e2ecSJeff Kirsher tempval = gfar_read(&regs->dmactrl);
1186ec21e2ecSJeff Kirsher tempval |= DMACTRL_INIT_SETTINGS;
1187ec21e2ecSJeff Kirsher gfar_write(&regs->dmactrl, tempval);
1188ec21e2ecSJeff Kirsher
1189ec21e2ecSJeff Kirsher /* Make sure we aren't stopped */
1190ec21e2ecSJeff Kirsher tempval = gfar_read(&regs->dmactrl);
1191ec21e2ecSJeff Kirsher tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
1192ec21e2ecSJeff Kirsher gfar_write(&regs->dmactrl, tempval);
1193ec21e2ecSJeff Kirsher
1194ec21e2ecSJeff Kirsher for (i = 0; i < priv->num_grps; i++) {
1195ec21e2ecSJeff Kirsher regs = priv->gfargrp[i].regs;
1196ec21e2ecSJeff Kirsher /* Clear THLT/RHLT, so that the DMA starts polling now */
1197ec21e2ecSJeff Kirsher gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1198ec21e2ecSJeff Kirsher gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1199ec21e2ecSJeff Kirsher }
1200ec21e2ecSJeff Kirsher
1201c10650b6SClaudiu Manoil /* Enable Rx/Tx DMA */
1202c10650b6SClaudiu Manoil tempval = gfar_read(&regs->maccfg1);
1203c10650b6SClaudiu Manoil tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1204c10650b6SClaudiu Manoil gfar_write(&regs->maccfg1, tempval);
1205c10650b6SClaudiu Manoil
1206efeddce7SClaudiu Manoil gfar_ints_enable(priv);
1207efeddce7SClaudiu Manoil
1208860e9538SFlorian Westphal netif_trans_update(priv->ndev); /* prevent tx timeout */
1209ec21e2ecSJeff Kirsher }
1210ec21e2ecSJeff Kirsher
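/* Allocate a fresh page and map it for Rx DMA; returns false if either the
 * allocation or the mapping fails.
 */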
12117d993c5fSArseny Solokha static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
121280ec396cSClaudiu Manoil {
12137d993c5fSArseny Solokha struct page *page;
12147d993c5fSArseny Solokha dma_addr_t addr;
12157d993c5fSArseny Solokha
12167d993c5fSArseny Solokha page = dev_alloc_page();
12177d993c5fSArseny Solokha if (unlikely(!page))
12187d993c5fSArseny Solokha return false;
12197d993c5fSArseny Solokha
12207d993c5fSArseny Solokha addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
12217d993c5fSArseny Solokha if (unlikely(dma_mapping_error(rxq->dev, addr))) {
12227d993c5fSArseny Solokha __free_page(page);
12237d993c5fSArseny Solokha
12247d993c5fSArseny Solokha return false;
122580ec396cSClaudiu Manoil }
122680ec396cSClaudiu Manoil
12277d993c5fSArseny Solokha rxb->dma = addr;
12287d993c5fSArseny Solokha rxb->page = page;
12297d993c5fSArseny Solokha rxb->page_offset = 0;
12307d993c5fSArseny Solokha
12317d993c5fSArseny Solokha return true;
12327d993c5fSArseny Solokha }
12337d993c5fSArseny Solokha
12347d993c5fSArseny Solokha static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
1235ec21e2ecSJeff Kirsher {
12367d993c5fSArseny Solokha struct gfar_private *priv = netdev_priv(rx_queue->ndev);
12377d993c5fSArseny Solokha struct gfar_extra_stats *estats = &priv->extra_stats;
1238ec21e2ecSJeff Kirsher
12397d993c5fSArseny Solokha netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
12407d993c5fSArseny Solokha atomic64_inc(&estats->rx_alloc_err);
1241ec21e2ecSJeff Kirsher }
1242ec21e2ecSJeff Kirsher
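/* Refill up to @alloc_cnt Rx buffer descriptors starting at next_to_use,
 * reusing pages already attached to the ring where possible.
 */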
12437d993c5fSArseny Solokha static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
12447d993c5fSArseny Solokha int alloc_cnt)
124580ec396cSClaudiu Manoil {
12467d993c5fSArseny Solokha struct rxbd8 *bdp;
12477d993c5fSArseny Solokha struct gfar_rx_buff *rxb;
124880ec396cSClaudiu Manoil int i;
124980ec396cSClaudiu Manoil
12507d993c5fSArseny Solokha i = rx_queue->next_to_use;
12517d993c5fSArseny Solokha bdp = &rx_queue->rx_bd_base[i];
12527d993c5fSArseny Solokha rxb = &rx_queue->rx_buff[i];
12537d993c5fSArseny Solokha
12547d993c5fSArseny Solokha while (alloc_cnt--) {
12557d993c5fSArseny Solokha /* try reuse page */
12567d993c5fSArseny Solokha if (unlikely(!rxb->page)) {
12577d993c5fSArseny Solokha if (unlikely(!gfar_new_page(rx_queue, rxb))) {
12587d993c5fSArseny Solokha gfar_rx_alloc_err(rx_queue);
12597d993c5fSArseny Solokha break;
126080ec396cSClaudiu Manoil }
126180ec396cSClaudiu Manoil }
126280ec396cSClaudiu Manoil
12637d993c5fSArseny Solokha /* Setup the new RxBD */
12647d993c5fSArseny Solokha gfar_init_rxbdp(rx_queue, bdp,
12657d993c5fSArseny Solokha rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
12667d993c5fSArseny Solokha
12677d993c5fSArseny Solokha /* Update to the next pointer */
12687d993c5fSArseny Solokha bdp++;
12697d993c5fSArseny Solokha rxb++;
12707d993c5fSArseny Solokha
12717d993c5fSArseny Solokha if (unlikely(++i == rx_queue->rx_ring_size)) {
12727d993c5fSArseny Solokha i = 0;
12737d993c5fSArseny Solokha bdp = rx_queue->rx_bd_base;
12747d993c5fSArseny Solokha rxb = rx_queue->rx_buff;
12757d993c5fSArseny Solokha }
12767d993c5fSArseny Solokha }
12777d993c5fSArseny Solokha
12787d993c5fSArseny Solokha rx_queue->next_to_use = i;
12797d993c5fSArseny Solokha rx_queue->next_to_alloc = i;
12807d993c5fSArseny Solokha }
12817d993c5fSArseny Solokha
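/* Initialize the Tx descriptor rings to an empty state and prime the Rx
 * rings with freshly allocated buffers.
 */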
12827d993c5fSArseny Solokha static void gfar_init_bds(struct net_device *ndev)
128380ec396cSClaudiu Manoil {
12847d993c5fSArseny Solokha struct gfar_private *priv = netdev_priv(ndev);
12857d993c5fSArseny Solokha struct gfar __iomem *regs = priv->gfargrp[0].regs;
12867d993c5fSArseny Solokha struct gfar_priv_tx_q *tx_queue = NULL;
12877d993c5fSArseny Solokha struct gfar_priv_rx_q *rx_queue = NULL;
12887d993c5fSArseny Solokha struct txbd8 *txbdp;
12897d993c5fSArseny Solokha u32 __iomem *rfbptr;
12907d993c5fSArseny Solokha int i, j;
129180ec396cSClaudiu Manoil
12927d993c5fSArseny Solokha for (i = 0; i < priv->num_tx_queues; i++) {
12937d993c5fSArseny Solokha tx_queue = priv->tx_queue[i];
12947d993c5fSArseny Solokha /* Initialize some variables in our dev structure */
12957d993c5fSArseny Solokha tx_queue->num_txbdfree = tx_queue->tx_ring_size;
12967d993c5fSArseny Solokha tx_queue->dirty_tx = tx_queue->tx_bd_base;
12977d993c5fSArseny Solokha tx_queue->cur_tx = tx_queue->tx_bd_base;
12987d993c5fSArseny Solokha tx_queue->skb_curtx = 0;
12997d993c5fSArseny Solokha tx_queue->skb_dirtytx = 0;
13007d993c5fSArseny Solokha
13017d993c5fSArseny Solokha /* Initialize Transmit Descriptor Ring */
13027d993c5fSArseny Solokha txbdp = tx_queue->tx_bd_base;
13037d993c5fSArseny Solokha for (j = 0; j < tx_queue->tx_ring_size; j++) {
13047d993c5fSArseny Solokha txbdp->lstatus = 0;
13057d993c5fSArseny Solokha txbdp->bufPtr = 0;
13067d993c5fSArseny Solokha txbdp++;
13077d993c5fSArseny Solokha }
13087d993c5fSArseny Solokha
13097d993c5fSArseny Solokha /* Set the last descriptor in the ring to indicate wrap */
13107d993c5fSArseny Solokha txbdp--;
13117d993c5fSArseny Solokha txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
13127d993c5fSArseny Solokha TXBD_WRAP);
13137d993c5fSArseny Solokha }
13147d993c5fSArseny Solokha
13157d993c5fSArseny Solokha rfbptr = &regs->rfbptr0;
13167d993c5fSArseny Solokha for (i = 0; i < priv->num_rx_queues; i++) {
13177d993c5fSArseny Solokha rx_queue = priv->rx_queue[i];
13187d993c5fSArseny Solokha
13197d993c5fSArseny Solokha rx_queue->next_to_clean = 0;
13207d993c5fSArseny Solokha rx_queue->next_to_use = 0;
13217d993c5fSArseny Solokha rx_queue->next_to_alloc = 0;
13227d993c5fSArseny Solokha
13237d993c5fSArseny Solokha /* make sure next_to_clean != next_to_use after this
13247d993c5fSArseny Solokha * by leaving at least 1 unused descriptor
13257d993c5fSArseny Solokha */
13267d993c5fSArseny Solokha gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
13277d993c5fSArseny Solokha
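		/* the per-ring last-free-buffer pointer registers are laid
		 * out two u32s apart, hence the += 2 below
		 */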
13287d993c5fSArseny Solokha rx_queue->rfbptr = rfbptr;
13297d993c5fSArseny Solokha rfbptr += 2;
133080ec396cSClaudiu Manoil }
133180ec396cSClaudiu Manoil }
133280ec396cSClaudiu Manoil
13337d993c5fSArseny Solokha static int gfar_alloc_skb_resources(struct net_device *ndev)
13347d993c5fSArseny Solokha {
13357d993c5fSArseny Solokha void *vaddr;
13367d993c5fSArseny Solokha dma_addr_t addr;
13377d993c5fSArseny Solokha int i, j;
13387d993c5fSArseny Solokha struct gfar_private *priv = netdev_priv(ndev);
13397d993c5fSArseny Solokha struct device *dev = priv->dev;
13407d993c5fSArseny Solokha struct gfar_priv_tx_q *tx_queue = NULL;
13417d993c5fSArseny Solokha struct gfar_priv_rx_q *rx_queue = NULL;
13427d993c5fSArseny Solokha
13437d993c5fSArseny Solokha priv->total_tx_ring_size = 0;
13447d993c5fSArseny Solokha for (i = 0; i < priv->num_tx_queues; i++)
13457d993c5fSArseny Solokha priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
13467d993c5fSArseny Solokha
13477d993c5fSArseny Solokha priv->total_rx_ring_size = 0;
13487d993c5fSArseny Solokha for (i = 0; i < priv->num_rx_queues; i++)
13497d993c5fSArseny Solokha priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
13507d993c5fSArseny Solokha
13517d993c5fSArseny Solokha /* Allocate memory for the buffer descriptors */
13527d993c5fSArseny Solokha vaddr = dma_alloc_coherent(dev,
13537d993c5fSArseny Solokha (priv->total_tx_ring_size *
13547d993c5fSArseny Solokha sizeof(struct txbd8)) +
13557d993c5fSArseny Solokha (priv->total_rx_ring_size *
13567d993c5fSArseny Solokha sizeof(struct rxbd8)),
13577d993c5fSArseny Solokha &addr, GFP_KERNEL);
13587d993c5fSArseny Solokha if (!vaddr)
13597d993c5fSArseny Solokha return -ENOMEM;
13607d993c5fSArseny Solokha
13617d993c5fSArseny Solokha for (i = 0; i < priv->num_tx_queues; i++) {
13627d993c5fSArseny Solokha tx_queue = priv->tx_queue[i];
13637d993c5fSArseny Solokha tx_queue->tx_bd_base = vaddr;
13647d993c5fSArseny Solokha tx_queue->tx_bd_dma_base = addr;
13657d993c5fSArseny Solokha tx_queue->dev = ndev;
13667d993c5fSArseny Solokha /* enet DMA only understands physical addresses */
13677d993c5fSArseny Solokha addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
13687d993c5fSArseny Solokha vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
13697d993c5fSArseny Solokha }
13707d993c5fSArseny Solokha
13717d993c5fSArseny Solokha /* Start the rx descriptor ring where the tx ring leaves off */
13727d993c5fSArseny Solokha for (i = 0; i < priv->num_rx_queues; i++) {
13737d993c5fSArseny Solokha rx_queue = priv->rx_queue[i];
13747d993c5fSArseny Solokha rx_queue->rx_bd_base = vaddr;
13757d993c5fSArseny Solokha rx_queue->rx_bd_dma_base = addr;
13767d993c5fSArseny Solokha rx_queue->ndev = ndev;
13777d993c5fSArseny Solokha rx_queue->dev = dev;
13787d993c5fSArseny Solokha addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
13797d993c5fSArseny Solokha vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
13807d993c5fSArseny Solokha }
13817d993c5fSArseny Solokha
13827d993c5fSArseny Solokha /* Setup the skbuff rings */
13837d993c5fSArseny Solokha for (i = 0; i < priv->num_tx_queues; i++) {
13847d993c5fSArseny Solokha tx_queue = priv->tx_queue[i];
13857d993c5fSArseny Solokha tx_queue->tx_skbuff =
13867d993c5fSArseny Solokha kmalloc_array(tx_queue->tx_ring_size,
13877d993c5fSArseny Solokha sizeof(*tx_queue->tx_skbuff),
13887d993c5fSArseny Solokha GFP_KERNEL);
13897d993c5fSArseny Solokha if (!tx_queue->tx_skbuff)
13907d993c5fSArseny Solokha goto cleanup;
13917d993c5fSArseny Solokha
13927d993c5fSArseny Solokha for (j = 0; j < tx_queue->tx_ring_size; j++)
13937d993c5fSArseny Solokha tx_queue->tx_skbuff[j] = NULL;
13947d993c5fSArseny Solokha }
13957d993c5fSArseny Solokha
13967d993c5fSArseny Solokha for (i = 0; i < priv->num_rx_queues; i++) {
13977d993c5fSArseny Solokha rx_queue = priv->rx_queue[i];
13987d993c5fSArseny Solokha rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
13997d993c5fSArseny Solokha sizeof(*rx_queue->rx_buff),
14007d993c5fSArseny Solokha GFP_KERNEL);
14017d993c5fSArseny Solokha if (!rx_queue->rx_buff)
14027d993c5fSArseny Solokha goto cleanup;
14037d993c5fSArseny Solokha }
14047d993c5fSArseny Solokha
14057d993c5fSArseny Solokha gfar_init_bds(ndev);
14067d993c5fSArseny Solokha
140780ec396cSClaudiu Manoil return 0;
14087d993c5fSArseny Solokha
14097d993c5fSArseny Solokha cleanup:
14107d993c5fSArseny Solokha free_skb_resources(priv);
14117d993c5fSArseny Solokha return -ENOMEM;
141280ec396cSClaudiu Manoil }
141380ec396cSClaudiu Manoil
1414ec21e2ecSJeff Kirsher /* Bring the controller up and running */
1415ec21e2ecSJeff Kirsher int startup_gfar(struct net_device *ndev)
1416ec21e2ecSJeff Kirsher {
1417ec21e2ecSJeff Kirsher struct gfar_private *priv = netdev_priv(ndev);
141880ec396cSClaudiu Manoil int err;
1419ec21e2ecSJeff Kirsher
1420a328ac92SClaudiu Manoil gfar_mac_reset(priv);
1421ec21e2ecSJeff Kirsher
1422ec21e2ecSJeff Kirsher err = gfar_alloc_skb_resources(ndev);
1423ec21e2ecSJeff Kirsher if (err)
1424ec21e2ecSJeff Kirsher return err;
1425ec21e2ecSJeff Kirsher
1426a328ac92SClaudiu Manoil gfar_init_tx_rx_base(priv);
1427ec21e2ecSJeff Kirsher
14284e857c58SPeter Zijlstra smp_mb__before_atomic();
14290851133bSClaudiu Manoil clear_bit(GFAR_DOWN, &priv->state);
14304e857c58SPeter Zijlstra smp_mb__after_atomic();
14310851133bSClaudiu Manoil
14320851133bSClaudiu Manoil /* Start Rx/Tx DMA and enable the interrupts */
1433c10650b6SClaudiu Manoil gfar_start(priv);
1434ec21e2ecSJeff Kirsher
14352a4eebf0SClaudiu Manoil /* force link state update after mac reset */
14362a4eebf0SClaudiu Manoil priv->oldlink = 0;
14372a4eebf0SClaudiu Manoil priv->oldspeed = 0;
14382a4eebf0SClaudiu Manoil priv->oldduplex = -1;
14392a4eebf0SClaudiu Manoil
14404c4a6b0eSPhilippe Reynes phy_start(ndev->phydev);
1441ec21e2ecSJeff Kirsher
14420851133bSClaudiu Manoil enable_napi(priv);
14430851133bSClaudiu Manoil
14440851133bSClaudiu Manoil netif_tx_wake_all_queues(ndev);
14450851133bSClaudiu Manoil
1446ec21e2ecSJeff Kirsher return 0;
1447ec21e2ecSJeff Kirsher }
1448ec21e2ecSJeff Kirsher
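/* Work out the MACCFG1 Tx/Rx flow control bits, either from the manually
 * configured pause settings or from the resolved pause autonegotiation
 * result for the current link partner.
 */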
14497d993c5fSArseny Solokha static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
14507d993c5fSArseny Solokha {
14517d993c5fSArseny Solokha struct net_device *ndev = priv->ndev;
14527d993c5fSArseny Solokha struct phy_device *phydev = ndev->phydev;
14537d993c5fSArseny Solokha u32 val = 0;
14547d993c5fSArseny Solokha
14557d993c5fSArseny Solokha if (!phydev->duplex)
14567d993c5fSArseny Solokha return val;
14577d993c5fSArseny Solokha
14587d993c5fSArseny Solokha if (!priv->pause_aneg_en) {
14597d993c5fSArseny Solokha if (priv->tx_pause_en)
14607d993c5fSArseny Solokha val |= MACCFG1_TX_FLOW;
14617d993c5fSArseny Solokha if (priv->rx_pause_en)
14627d993c5fSArseny Solokha val |= MACCFG1_RX_FLOW;
14637d993c5fSArseny Solokha } else {
14647d993c5fSArseny Solokha u16 lcl_adv, rmt_adv;
14657d993c5fSArseny Solokha u8 flowctrl;
14667d993c5fSArseny Solokha /* get link partner capabilities */
14677d993c5fSArseny Solokha rmt_adv = 0;
14687d993c5fSArseny Solokha if (phydev->pause)
14697d993c5fSArseny Solokha rmt_adv = LPA_PAUSE_CAP;
14707d993c5fSArseny Solokha if (phydev->asym_pause)
14717d993c5fSArseny Solokha rmt_adv |= LPA_PAUSE_ASYM;
14727d993c5fSArseny Solokha
14737d993c5fSArseny Solokha lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
14747d993c5fSArseny Solokha flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
14757d993c5fSArseny Solokha if (flowctrl & FLOW_CTRL_TX)
14767d993c5fSArseny Solokha val |= MACCFG1_TX_FLOW;
14777d993c5fSArseny Solokha if (flowctrl & FLOW_CTRL_RX)
14787d993c5fSArseny Solokha val |= MACCFG1_RX_FLOW;
14797d993c5fSArseny Solokha }
14807d993c5fSArseny Solokha
14817d993c5fSArseny Solokha return val;
14827d993c5fSArseny Solokha }
14837d993c5fSArseny Solokha
14847d993c5fSArseny Solokha static noinline void gfar_update_link_state(struct gfar_private *priv)
14857d993c5fSArseny Solokha {
14867d993c5fSArseny Solokha struct gfar __iomem *regs = priv->gfargrp[0].regs;
14877d993c5fSArseny Solokha struct net_device *ndev = priv->ndev;
14887d993c5fSArseny Solokha struct phy_device *phydev = ndev->phydev;
14897d993c5fSArseny Solokha struct gfar_priv_rx_q *rx_queue = NULL;
14907d993c5fSArseny Solokha int i;
14917d993c5fSArseny Solokha
14927d993c5fSArseny Solokha if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
14937d993c5fSArseny Solokha return;
14947d993c5fSArseny Solokha
14957d993c5fSArseny Solokha if (phydev->link) {
14967d993c5fSArseny Solokha u32 tempval1 = gfar_read(&regs->maccfg1);
14977d993c5fSArseny Solokha u32 tempval = gfar_read(&regs->maccfg2);
14987d993c5fSArseny Solokha u32 ecntrl = gfar_read(&regs->ecntrl);
14997d993c5fSArseny Solokha u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
15007d993c5fSArseny Solokha
15017d993c5fSArseny Solokha if (phydev->duplex != priv->oldduplex) {
15027d993c5fSArseny Solokha if (!(phydev->duplex))
15037d993c5fSArseny Solokha tempval &= ~(MACCFG2_FULL_DUPLEX);
15047d993c5fSArseny Solokha else
15057d993c5fSArseny Solokha tempval |= MACCFG2_FULL_DUPLEX;
15067d993c5fSArseny Solokha
15077d993c5fSArseny Solokha priv->oldduplex = phydev->duplex;
15087d993c5fSArseny Solokha }
15097d993c5fSArseny Solokha
15107d993c5fSArseny Solokha if (phydev->speed != priv->oldspeed) {
15117d993c5fSArseny Solokha switch (phydev->speed) {
15127d993c5fSArseny Solokha case 1000:
15137d993c5fSArseny Solokha tempval =
15147d993c5fSArseny Solokha ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
15157d993c5fSArseny Solokha
15167d993c5fSArseny Solokha ecntrl &= ~(ECNTRL_R100);
15177d993c5fSArseny Solokha break;
15187d993c5fSArseny Solokha case 100:
15197d993c5fSArseny Solokha case 10:
15207d993c5fSArseny Solokha tempval =
15217d993c5fSArseny Solokha ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
15227d993c5fSArseny Solokha
15237d993c5fSArseny Solokha /* Reduced mode distinguishes
15247d993c5fSArseny Solokha * between 10 and 100
15250977f817SJan Ceuleers */
15267d993c5fSArseny Solokha if (phydev->speed == SPEED_100)
15277d993c5fSArseny Solokha ecntrl |= ECNTRL_R100;
15287d993c5fSArseny Solokha else
15297d993c5fSArseny Solokha ecntrl &= ~(ECNTRL_R100);
15307d993c5fSArseny Solokha break;
15317d993c5fSArseny Solokha default:
15327d993c5fSArseny Solokha netif_warn(priv, link, priv->ndev,
15337d993c5fSArseny Solokha "Ack! Speed (%d) is not 10/100/1000!\n",
15347d993c5fSArseny Solokha phydev->speed);
15357d993c5fSArseny Solokha break;
15367d993c5fSArseny Solokha }
15377d993c5fSArseny Solokha
15387d993c5fSArseny Solokha priv->oldspeed = phydev->speed;
15397d993c5fSArseny Solokha }
15407d993c5fSArseny Solokha
15417d993c5fSArseny Solokha tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
15427d993c5fSArseny Solokha tempval1 |= gfar_get_flowctrl_cfg(priv);
15437d993c5fSArseny Solokha
15447d993c5fSArseny Solokha /* Turn last free buffer recording on */
15457d993c5fSArseny Solokha if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
15467d993c5fSArseny Solokha for (i = 0; i < priv->num_rx_queues; i++) {
15477d993c5fSArseny Solokha u32 bdp_dma;
15487d993c5fSArseny Solokha
15497d993c5fSArseny Solokha rx_queue = priv->rx_queue[i];
15507d993c5fSArseny Solokha bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
15517d993c5fSArseny Solokha gfar_write(rx_queue->rfbptr, bdp_dma);
15527d993c5fSArseny Solokha }
15537d993c5fSArseny Solokha
15547d993c5fSArseny Solokha priv->tx_actual_en = 1;
15557d993c5fSArseny Solokha }
15567d993c5fSArseny Solokha
15577d993c5fSArseny Solokha if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
15587d993c5fSArseny Solokha priv->tx_actual_en = 0;
15597d993c5fSArseny Solokha
15607d993c5fSArseny Solokha gfar_write(&regs->maccfg1, tempval1);
15617d993c5fSArseny Solokha gfar_write(&regs->maccfg2, tempval);
15627d993c5fSArseny Solokha gfar_write(&regs->ecntrl, ecntrl);
15637d993c5fSArseny Solokha
15647d993c5fSArseny Solokha if (!priv->oldlink)
15657d993c5fSArseny Solokha priv->oldlink = 1;
15667d993c5fSArseny Solokha
15677d993c5fSArseny Solokha } else if (priv->oldlink) {
15687d993c5fSArseny Solokha priv->oldlink = 0;
15697d993c5fSArseny Solokha priv->oldspeed = 0;
15707d993c5fSArseny Solokha priv->oldduplex = -1;
15717d993c5fSArseny Solokha }
15727d993c5fSArseny Solokha
15737d993c5fSArseny Solokha if (netif_msg_link(priv))
15747d993c5fSArseny Solokha phy_print_status(phydev);
15757d993c5fSArseny Solokha }
15767d993c5fSArseny Solokha
15777d993c5fSArseny Solokha /* Called every time the controller might need to be made
15787d993c5fSArseny Solokha * aware of new link state. The PHY code conveys this
15797d993c5fSArseny Solokha * information through variables in the phydev structure, and this
15807d993c5fSArseny Solokha * function converts those variables into the appropriate
15817d993c5fSArseny Solokha * register values, and can bring down the device if needed.
15827d993c5fSArseny Solokha */
15837d993c5fSArseny Solokha static void adjust_link(struct net_device *dev)
1584ec21e2ecSJeff Kirsher {
1585ec21e2ecSJeff Kirsher struct gfar_private *priv = netdev_priv(dev);
15867d993c5fSArseny Solokha struct phy_device *phydev = dev->phydev;
1587ec21e2ecSJeff Kirsher
15887d993c5fSArseny Solokha if (unlikely(phydev->link != priv->oldlink ||
15897d993c5fSArseny Solokha (phydev->link && (phydev->duplex != priv->oldduplex ||
15907d993c5fSArseny Solokha phydev->speed != priv->oldspeed))))
15917d993c5fSArseny Solokha gfar_update_link_state(priv);
15927d993c5fSArseny Solokha }
1593ec21e2ecSJeff Kirsher
15947d993c5fSArseny Solokha /* Initialize TBI PHY interface for communicating with the
15957d993c5fSArseny Solokha * SERDES lynx PHY on the chip. We communicate with this PHY
15967d993c5fSArseny Solokha * through the MDIO bus on each controller, treating it as a
15977d993c5fSArseny Solokha * "normal" PHY at the address found in the TBIPA register. We assume
15987d993c5fSArseny Solokha * that the TBIPA register is valid. Either the MDIO bus code will set
15997d993c5fSArseny Solokha * it to a value that doesn't conflict with other PHYs on the bus, or the
16007d993c5fSArseny Solokha * value doesn't matter, as there are no other PHYs on the bus.
16017d993c5fSArseny Solokha */
16027d993c5fSArseny Solokha static void gfar_configure_serdes(struct net_device *dev)
16037d993c5fSArseny Solokha {
16047d993c5fSArseny Solokha struct gfar_private *priv = netdev_priv(dev);
16057d993c5fSArseny Solokha struct phy_device *tbiphy;
160680ec396cSClaudiu Manoil
16077d993c5fSArseny Solokha if (!priv->tbi_node) {
16087d993c5fSArseny Solokha dev_warn(&dev->dev, "error: SGMII mode requires that the "
16097d993c5fSArseny Solokha "device tree specify a tbi-handle\n");
16107d993c5fSArseny Solokha return;
16117d993c5fSArseny Solokha }
1612ec21e2ecSJeff Kirsher
16137d993c5fSArseny Solokha tbiphy = of_phy_find_device(priv->tbi_node);
16147d993c5fSArseny Solokha if (!tbiphy) {
16157d993c5fSArseny Solokha dev_err(&dev->dev, "error: Could not get TBI device\n");
16167d993c5fSArseny Solokha return;
16177d993c5fSArseny Solokha }
16187d993c5fSArseny Solokha
16197d993c5fSArseny Solokha /* If the link is already up, we must already be ok, and don't need to
16207d993c5fSArseny Solokha * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
16217d993c5fSArseny Solokha * everything for us? Resetting it takes the link down and requires
16227d993c5fSArseny Solokha * several seconds for it to come back.
16237d993c5fSArseny Solokha */
16247d993c5fSArseny Solokha if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
16257d993c5fSArseny Solokha put_device(&tbiphy->mdio.dev);
16267d993c5fSArseny Solokha return;
16277d993c5fSArseny Solokha }
16287d993c5fSArseny Solokha
16297d993c5fSArseny Solokha /* Single clk mode, mii mode off (for serdes communication) */
16307d993c5fSArseny Solokha phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
16317d993c5fSArseny Solokha
16327d993c5fSArseny Solokha phy_write(tbiphy, MII_ADVERTISE,
16337d993c5fSArseny Solokha ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
16347d993c5fSArseny Solokha ADVERTISE_1000XPSE_ASYM);
16357d993c5fSArseny Solokha
16367d993c5fSArseny Solokha phy_write(tbiphy, MII_BMCR,
16377d993c5fSArseny Solokha BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
16387d993c5fSArseny Solokha BMCR_SPEED1000);
16397d993c5fSArseny Solokha
16407d993c5fSArseny Solokha put_device(&tbiphy->mdio.dev);
16417d993c5fSArseny Solokha }
16427d993c5fSArseny Solokha
16437d993c5fSArseny Solokha /* Initializes driver's PHY state, and attaches to the PHY.
16447d993c5fSArseny Solokha * Returns 0 on success.
16457d993c5fSArseny Solokha */
16467d993c5fSArseny Solokha static int init_phy(struct net_device *dev)
16477d993c5fSArseny Solokha {
16487d993c5fSArseny Solokha __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
16497d993c5fSArseny Solokha struct gfar_private *priv = netdev_priv(dev);
16508e578e73SArseny Solokha phy_interface_t interface = priv->interface;
16517d993c5fSArseny Solokha struct phy_device *phydev;
16527d993c5fSArseny Solokha struct ethtool_eee edata;
16537d993c5fSArseny Solokha
16547d993c5fSArseny Solokha linkmode_set_bit_array(phy_10_100_features_array,
16557d993c5fSArseny Solokha ARRAY_SIZE(phy_10_100_features_array),
16567d993c5fSArseny Solokha mask);
16577d993c5fSArseny Solokha linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
16587d993c5fSArseny Solokha linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
16597d993c5fSArseny Solokha if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
16607d993c5fSArseny Solokha linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
16617d993c5fSArseny Solokha
16627d993c5fSArseny Solokha priv->oldlink = 0;
16637d993c5fSArseny Solokha priv->oldspeed = 0;
16647d993c5fSArseny Solokha priv->oldduplex = -1;
16657d993c5fSArseny Solokha
16667d993c5fSArseny Solokha phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
16677d993c5fSArseny Solokha interface);
16687d993c5fSArseny Solokha if (!phydev) {
16697d993c5fSArseny Solokha dev_err(&dev->dev, "could not attach to PHY\n");
16707d993c5fSArseny Solokha return -ENODEV;
16717d993c5fSArseny Solokha }
16727d993c5fSArseny Solokha
16737d993c5fSArseny Solokha if (interface == PHY_INTERFACE_MODE_SGMII)
16747d993c5fSArseny Solokha gfar_configure_serdes(dev);
16757d993c5fSArseny Solokha
16767d993c5fSArseny Solokha /* Remove any features not supported by the controller */
16777d993c5fSArseny Solokha linkmode_and(phydev->supported, phydev->supported, mask);
16787d993c5fSArseny Solokha linkmode_copy(phydev->advertising, phydev->supported);
16797d993c5fSArseny Solokha
16807d993c5fSArseny Solokha /* Add support for flow control */
16817d993c5fSArseny Solokha phy_support_asym_pause(phydev);
16827d993c5fSArseny Solokha
16837d993c5fSArseny Solokha /* disable EEE autoneg, EEE not supported by eTSEC */
16847d993c5fSArseny Solokha memset(&edata, 0, sizeof(struct ethtool_eee));
16857d993c5fSArseny Solokha phy_ethtool_set_eee(phydev, &edata);
16867d993c5fSArseny Solokha
16877d993c5fSArseny Solokha return 0;
1688ec21e2ecSJeff Kirsher }
1689ec21e2ecSJeff Kirsher
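/* Prepend a Tx frame control block (FCB) to the frame and zero it */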
1690ec21e2ecSJeff Kirsher static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
1691ec21e2ecSJeff Kirsher {
1692d58ff351SJohannes Berg struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);
1693ec21e2ecSJeff Kirsher
1694ec21e2ecSJeff Kirsher memset(fcb, 0, GMAC_FCB_LEN);
1695ec21e2ecSJeff Kirsher
1696ec21e2ecSJeff Kirsher return fcb;
1697ec21e2ecSJeff Kirsher }
1698ec21e2ecSJeff Kirsher
16999c4886e5SManfred Rudigier static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
17009c4886e5SManfred Rudigier int fcb_length)
1701ec21e2ecSJeff Kirsher {
1702ec21e2ecSJeff Kirsher /* If we're here, it's an IP packet with a TCP or UDP
1703ec21e2ecSJeff Kirsher * payload. We set it to checksum, using a pseudo-header
1704ec21e2ecSJeff Kirsher * we provide
1705ec21e2ecSJeff Kirsher */
17063a2e16c8SJan Ceuleers u8 flags = TXFCB_DEFAULT;
1707ec21e2ecSJeff Kirsher
17080977f817SJan Ceuleers /* Tell the controller what the protocol is
17090977f817SJan Ceuleers * And provide the already calculated phcs
17100977f817SJan Ceuleers */
1711ec21e2ecSJeff Kirsher if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
1712ec21e2ecSJeff Kirsher flags |= TXFCB_UDP;
171326eb9374SClaudiu Manoil fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
1714ec21e2ecSJeff Kirsher } else
171526eb9374SClaudiu Manoil fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);
1716ec21e2ecSJeff Kirsher
1717ec21e2ecSJeff Kirsher /* l3os is the distance between the start of the
1718ec21e2ecSJeff Kirsher * frame (skb->data) and the start of the IP hdr.
1719ec21e2ecSJeff Kirsher * l4os is the distance between the start of the
17200977f817SJan Ceuleers * l3 hdr and the l4 hdr
17210977f817SJan Ceuleers */
172226eb9374SClaudiu Manoil fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
1723ec21e2ecSJeff Kirsher fcb->l4os = skb_network_header_len(skb);
1724ec21e2ecSJeff Kirsher
1725ec21e2ecSJeff Kirsher fcb->flags = flags;
1726ec21e2ecSJeff Kirsher }
1727ec21e2ecSJeff Kirsher
1728278af574SArnd Bergmann static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
1729ec21e2ecSJeff Kirsher {
1730ec21e2ecSJeff Kirsher fcb->flags |= TXFCB_VLN;
173126eb9374SClaudiu Manoil fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
1732ec21e2ecSJeff Kirsher }
1733ec21e2ecSJeff Kirsher
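/* Advance a TxBD pointer by @stride descriptors, wrapping at the end of
 * the ring.
 */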
1734ec21e2ecSJeff Kirsher static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
1735ec21e2ecSJeff Kirsher struct txbd8 *base, int ring_size)
1736ec21e2ecSJeff Kirsher {
1737ec21e2ecSJeff Kirsher struct txbd8 *new_bd = bdp + stride;
1738ec21e2ecSJeff Kirsher
1739ec21e2ecSJeff Kirsher return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
1740ec21e2ecSJeff Kirsher }
1741ec21e2ecSJeff Kirsher
1742ec21e2ecSJeff Kirsher static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
1743ec21e2ecSJeff Kirsher int ring_size)
1744ec21e2ecSJeff Kirsher {
1745ec21e2ecSJeff Kirsher return skip_txbd(bdp, 1, base, ring_size);
1746ec21e2ecSJeff Kirsher }
1747ec21e2ecSJeff Kirsher
174802d88fb4SClaudiu Manoil /* eTSEC12: csum generation not supported for some fcb offsets */
174902d88fb4SClaudiu Manoil static inline bool gfar_csum_errata_12(struct gfar_private *priv,
175002d88fb4SClaudiu Manoil unsigned long fcb_addr)
175102d88fb4SClaudiu Manoil {
175202d88fb4SClaudiu Manoil return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
175302d88fb4SClaudiu Manoil (fcb_addr % 0x20) > 0x18);
175402d88fb4SClaudiu Manoil }
175502d88fb4SClaudiu Manoil
175602d88fb4SClaudiu Manoil /* eTSEC76: csum generation for frames larger than 2500 may
175702d88fb4SClaudiu Manoil * cause excess delays before start of transmission
175802d88fb4SClaudiu Manoil */
175902d88fb4SClaudiu Manoil static inline bool gfar_csum_errata_76(struct gfar_private *priv,
176002d88fb4SClaudiu Manoil unsigned int len)
176102d88fb4SClaudiu Manoil {
176202d88fb4SClaudiu Manoil return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
176302d88fb4SClaudiu Manoil (len > 2500));
176402d88fb4SClaudiu Manoil }
176502d88fb4SClaudiu Manoil
17660977f817SJan Ceuleers /* This is called by the kernel when a frame is ready for transmission.
17670977f817SJan Ceuleers * It is pointed to by the dev->hard_start_xmit function pointer
17680977f817SJan Ceuleers */
176906983aa5SYueHaibing static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1770ec21e2ecSJeff Kirsher {
1771ec21e2ecSJeff Kirsher struct gfar_private *priv = netdev_priv(dev);
1772ec21e2ecSJeff Kirsher struct gfar_priv_tx_q *tx_queue = NULL;
1773ec21e2ecSJeff Kirsher struct netdev_queue *txq;
1774ec21e2ecSJeff Kirsher struct gfar __iomem *regs = NULL;
1775ec21e2ecSJeff Kirsher struct txfcb *fcb = NULL;
1776ec21e2ecSJeff Kirsher struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
1777ec21e2ecSJeff Kirsher u32 lstatus;
177842f397adSClaudiu Manoil skb_frag_t *frag;
17790d0cffdcSClaudiu Manoil int i, rq = 0;
17800d0cffdcSClaudiu Manoil int do_tstamp, do_csum, do_vlan;
1781ec21e2ecSJeff Kirsher u32 bufaddr;
178250ad076bSClaudiu Manoil unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
1783ec21e2ecSJeff Kirsher
1784ec21e2ecSJeff Kirsher rq = skb->queue_mapping;
1785ec21e2ecSJeff Kirsher tx_queue = priv->tx_queue[rq];
1786ec21e2ecSJeff Kirsher txq = netdev_get_tx_queue(dev, rq);
1787ec21e2ecSJeff Kirsher base = tx_queue->tx_bd_base;
1788ec21e2ecSJeff Kirsher regs = tx_queue->grp->regs;
1789ec21e2ecSJeff Kirsher
17900d0cffdcSClaudiu Manoil do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
1791df8a39deSJiri Pirko do_vlan = skb_vlan_tag_present(skb);
17920d0cffdcSClaudiu Manoil do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
17930d0cffdcSClaudiu Manoil priv->hwts_tx_en;
17940d0cffdcSClaudiu Manoil
17950d0cffdcSClaudiu Manoil if (do_csum || do_vlan)
17960d0cffdcSClaudiu Manoil fcb_len = GMAC_FCB_LEN;
17970d0cffdcSClaudiu Manoil
1798ec21e2ecSJeff Kirsher /* check if time stamp should be generated */
17990d0cffdcSClaudiu Manoil if (unlikely(do_tstamp))
18000d0cffdcSClaudiu Manoil fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
1801ec21e2ecSJeff Kirsher
1802ec21e2ecSJeff Kirsher /* make space for additional header when fcb is needed */
1803d145c903SClaudiu Manoil if (fcb_len) {
1804d145c903SClaudiu Manoil if (unlikely(skb_cow_head(skb, fcb_len))) {
1805ec21e2ecSJeff Kirsher dev->stats.tx_errors++;
1806c9974ad4SEric W. Biederman dev_kfree_skb_any(skb);
1807ec21e2ecSJeff Kirsher return NETDEV_TX_OK;
1808ec21e2ecSJeff Kirsher }
1809ec21e2ecSJeff Kirsher }
1810ec21e2ecSJeff Kirsher
1811ec21e2ecSJeff Kirsher /* total number of fragments in the SKB */
1812ec21e2ecSJeff Kirsher nr_frags = skb_shinfo(skb)->nr_frags;
1813ec21e2ecSJeff Kirsher
1814ec21e2ecSJeff Kirsher /* calculate the required number of TxBDs for this skb */
1815ec21e2ecSJeff Kirsher if (unlikely(do_tstamp))
1816ec21e2ecSJeff Kirsher nr_txbds = nr_frags + 2;
1817ec21e2ecSJeff Kirsher else
1818ec21e2ecSJeff Kirsher nr_txbds = nr_frags + 1;
1819ec21e2ecSJeff Kirsher
1820ec21e2ecSJeff Kirsher /* check if there is space to queue this packet */
1821ec21e2ecSJeff Kirsher if (nr_txbds > tx_queue->num_txbdfree) {
1822ec21e2ecSJeff Kirsher /* no space, stop the queue */
1823ec21e2ecSJeff Kirsher netif_tx_stop_queue(txq);
1824ec21e2ecSJeff Kirsher dev->stats.tx_fifo_errors++;
1825ec21e2ecSJeff Kirsher return NETDEV_TX_BUSY;
1826ec21e2ecSJeff Kirsher }
1827ec21e2ecSJeff Kirsher
1828ec21e2ecSJeff Kirsher /* Update transmit stats */
182950ad076bSClaudiu Manoil bytes_sent = skb->len;
183050ad076bSClaudiu Manoil tx_queue->stats.tx_bytes += bytes_sent;
183150ad076bSClaudiu Manoil /* keep Tx bytes on wire for BQL accounting */
183250ad076bSClaudiu Manoil GFAR_CB(skb)->bytes_sent = bytes_sent;
1833ec21e2ecSJeff Kirsher tx_queue->stats.tx_packets++;
1834ec21e2ecSJeff Kirsher
1835ec21e2ecSJeff Kirsher txbdp = txbdp_start = tx_queue->cur_tx;
1836a7312d58SClaudiu Manoil lstatus = be32_to_cpu(txbdp->lstatus);
1837ec21e2ecSJeff Kirsher
18389c4886e5SManfred Rudigier /* Add TxPAL between FCB and frame if required */
18399c4886e5SManfred Rudigier if (unlikely(do_tstamp)) {
18409c4886e5SManfred Rudigier skb_push(skb, GMAC_TXPAL_LEN);
18419c4886e5SManfred Rudigier memset(skb->data, 0, GMAC_TXPAL_LEN);
18429c4886e5SManfred Rudigier }
18439c4886e5SManfred Rudigier
18440d0cffdcSClaudiu Manoil /* Add TxFCB if required */
18450d0cffdcSClaudiu Manoil if (fcb_len) {
1846ec21e2ecSJeff Kirsher fcb = gfar_add_fcb(skb);
1847ec21e2ecSJeff Kirsher lstatus |= BD_LFLAG(TXBD_TOE);
18480d0cffdcSClaudiu Manoil }
18490d0cffdcSClaudiu Manoil
18500d0cffdcSClaudiu Manoil /* Set up checksumming */
18510d0cffdcSClaudiu Manoil if (do_csum) {
18520d0cffdcSClaudiu Manoil gfar_tx_checksum(skb, fcb, fcb_len);
185302d88fb4SClaudiu Manoil
185402d88fb4SClaudiu Manoil if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
185502d88fb4SClaudiu Manoil unlikely(gfar_csum_errata_76(priv, skb->len))) {
185602d88fb4SClaudiu Manoil __skb_pull(skb, GMAC_FCB_LEN);
185702d88fb4SClaudiu Manoil skb_checksum_help(skb);
18580d0cffdcSClaudiu Manoil if (do_vlan || do_tstamp) {
18590d0cffdcSClaudiu Manoil /* put back a new fcb for vlan/tstamp TOE */
18600d0cffdcSClaudiu Manoil fcb = gfar_add_fcb(skb);
18610d0cffdcSClaudiu Manoil } else {
18620d0cffdcSClaudiu Manoil /* Tx TOE not used */
186302d88fb4SClaudiu Manoil lstatus &= ~(BD_LFLAG(TXBD_TOE));
186402d88fb4SClaudiu Manoil fcb = NULL;
1865ec21e2ecSJeff Kirsher }
1866ec21e2ecSJeff Kirsher }
1867ec21e2ecSJeff Kirsher }
1868ec21e2ecSJeff Kirsher
18690d0cffdcSClaudiu Manoil if (do_vlan)
1870ec21e2ecSJeff Kirsher gfar_tx_vlan(skb, fcb);
1871ec21e2ecSJeff Kirsher
18720a4b5a24SKevin Hao bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
18730a4b5a24SKevin Hao DMA_TO_DEVICE);
18740a4b5a24SKevin Hao if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
18750a4b5a24SKevin Hao goto dma_map_err;
18760a4b5a24SKevin Hao
1877a7312d58SClaudiu Manoil txbdp_start->bufPtr = cpu_to_be32(bufaddr);
1878ec21e2ecSJeff Kirsher
1879e19d0839SClaudiu Manoil /* Time stamp insertion requires one additional TxBD */
1880e19d0839SClaudiu Manoil if (unlikely(do_tstamp))
1881e19d0839SClaudiu Manoil txbdp_tstamp = txbdp = next_txbd(txbdp, base,
1882e19d0839SClaudiu Manoil tx_queue->tx_ring_size);
1883e19d0839SClaudiu Manoil
188448963b44SClaudiu Manoil if (likely(!nr_frags)) {
18859c8b0778SYangbo Lu if (likely(!do_tstamp))
1886e19d0839SClaudiu Manoil lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1887e19d0839SClaudiu Manoil } else {
1888e19d0839SClaudiu Manoil u32 lstatus_start = lstatus;
1889e19d0839SClaudiu Manoil
1890e19d0839SClaudiu Manoil /* Place the fragment addresses and lengths into the TxBDs */
189142f397adSClaudiu Manoil frag = &skb_shinfo(skb)->frags[0];
189242f397adSClaudiu Manoil for (i = 0; i < nr_frags; i++, frag++) {
189342f397adSClaudiu Manoil unsigned int size;
189442f397adSClaudiu Manoil
1895e19d0839SClaudiu Manoil /* Point at the next BD, wrapping as needed */
1896e19d0839SClaudiu Manoil txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1897e19d0839SClaudiu Manoil
189842f397adSClaudiu Manoil size = skb_frag_size(frag);
1899e19d0839SClaudiu Manoil
190042f397adSClaudiu Manoil lstatus = be32_to_cpu(txbdp->lstatus) | size |
1901e19d0839SClaudiu Manoil BD_LFLAG(TXBD_READY);
1902e19d0839SClaudiu Manoil
1903e19d0839SClaudiu Manoil /* Handle the last BD specially */
1904e19d0839SClaudiu Manoil if (i == nr_frags - 1)
1905e19d0839SClaudiu Manoil lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1906e19d0839SClaudiu Manoil
190742f397adSClaudiu Manoil bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
190842f397adSClaudiu Manoil size, DMA_TO_DEVICE);
1909e19d0839SClaudiu Manoil if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
1910e19d0839SClaudiu Manoil goto dma_map_err;
1911e19d0839SClaudiu Manoil
1912e19d0839SClaudiu Manoil /* set the TxBD length and buffer pointer */
1913e19d0839SClaudiu Manoil txbdp->bufPtr = cpu_to_be32(bufaddr);
1914e19d0839SClaudiu Manoil txbdp->lstatus = cpu_to_be32(lstatus);
1915e19d0839SClaudiu Manoil }
1916e19d0839SClaudiu Manoil
1917e19d0839SClaudiu Manoil lstatus = lstatus_start;
1918e19d0839SClaudiu Manoil }
1919e19d0839SClaudiu Manoil
19200977f817SJan Ceuleers /* If time stamping is requested one additional TxBD must be set up. The
1921ec21e2ecSJeff Kirsher * first TxBD points to the FCB and must have a data length of
1922ec21e2ecSJeff Kirsher * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
1923ec21e2ecSJeff Kirsher * the full frame length.
1924ec21e2ecSJeff Kirsher */
1925ec21e2ecSJeff Kirsher if (unlikely(do_tstamp)) {
1926a7312d58SClaudiu Manoil u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
1927a7312d58SClaudiu Manoil
1928a7312d58SClaudiu Manoil bufaddr = be32_to_cpu(txbdp_start->bufPtr);
1929a7312d58SClaudiu Manoil bufaddr += fcb_len;
193048963b44SClaudiu Manoil
1931a7312d58SClaudiu Manoil lstatus_ts |= BD_LFLAG(TXBD_READY) |
19320d0cffdcSClaudiu Manoil (skb_headlen(skb) - fcb_len);
193348963b44SClaudiu Manoil if (!nr_frags)
193448963b44SClaudiu Manoil lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1935a7312d58SClaudiu Manoil
1936a7312d58SClaudiu Manoil txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
1937a7312d58SClaudiu Manoil txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
1938ec21e2ecSJeff Kirsher lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
1939e19d0839SClaudiu Manoil
1940e19d0839SClaudiu Manoil /* Setup tx hardware time stamping */
1941e19d0839SClaudiu Manoil skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1942e19d0839SClaudiu Manoil fcb->ptp = 1;
1943ec21e2ecSJeff Kirsher } else {
1944ec21e2ecSJeff Kirsher lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
1945ec21e2ecSJeff Kirsher }
1946ec21e2ecSJeff Kirsher
1947c7e5c423SVladimir Oltean skb_tx_timestamp(skb);
194850ad076bSClaudiu Manoil netdev_tx_sent_queue(txq, bytes_sent);
1949d8a0f1b0SPaul Gortmaker
1950d55398baSClaudiu Manoil gfar_wmb();
1951ec21e2ecSJeff Kirsher
1952a7312d58SClaudiu Manoil txbdp_start->lstatus = cpu_to_be32(lstatus);
1953ec21e2ecSJeff Kirsher
1954d55398baSClaudiu Manoil gfar_wmb(); /* force lstatus write before tx_skbuff */
1955ec21e2ecSJeff Kirsher
1956ec21e2ecSJeff Kirsher tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
1957ec21e2ecSJeff Kirsher
1958ec21e2ecSJeff Kirsher /* Update the current skb pointer to the next entry we will use
19590977f817SJan Ceuleers * (wrapping if necessary)
19600977f817SJan Ceuleers */
1961ec21e2ecSJeff Kirsher tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
1962ec21e2ecSJeff Kirsher TX_RING_MOD_MASK(tx_queue->tx_ring_size);
1963ec21e2ecSJeff Kirsher
1964ec21e2ecSJeff Kirsher tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1965ec21e2ecSJeff Kirsher
1966bc602280SClaudiu Manoil /* We can work in parallel with gfar_clean_tx_ring(), except
1967bc602280SClaudiu Manoil * when modifying num_txbdfree. Note that we didn't grab the lock
1968bc602280SClaudiu Manoil * when we were reading the num_txbdfree and checking for available
1969bc602280SClaudiu Manoil * space, that's because outside of this function it can only grow.
1970bc602280SClaudiu Manoil */
1971bc602280SClaudiu Manoil spin_lock_bh(&tx_queue->txlock);
1972ec21e2ecSJeff Kirsher /* reduce TxBD free count */
1973ec21e2ecSJeff Kirsher tx_queue->num_txbdfree -= (nr_txbds);
1974bc602280SClaudiu Manoil spin_unlock_bh(&tx_queue->txlock);
1975ec21e2ecSJeff Kirsher
1976ec21e2ecSJeff Kirsher /* If the next BD still needs to be cleaned up, then the bds
19770977f817SJan Ceuleers * are full. We need to tell the kernel to stop sending us stuff.
19780977f817SJan Ceuleers */
1979ec21e2ecSJeff Kirsher if (!tx_queue->num_txbdfree) {
1980ec21e2ecSJeff Kirsher netif_tx_stop_queue(txq);
1981ec21e2ecSJeff Kirsher
1982ec21e2ecSJeff Kirsher dev->stats.tx_fifo_errors++;
1983ec21e2ecSJeff Kirsher }
1984ec21e2ecSJeff Kirsher
1985ec21e2ecSJeff Kirsher /* Tell the DMA to go go go */
1986ec21e2ecSJeff Kirsher gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
1987ec21e2ecSJeff Kirsher
1988ec21e2ecSJeff Kirsher return NETDEV_TX_OK;
19890a4b5a24SKevin Hao
19900a4b5a24SKevin Hao dma_map_err:
19910a4b5a24SKevin Hao txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
19920a4b5a24SKevin Hao if (do_tstamp)
19930a4b5a24SKevin Hao txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
19940a4b5a24SKevin Hao for (i = 0; i < nr_frags; i++) {
1995a7312d58SClaudiu Manoil lstatus = be32_to_cpu(txbdp->lstatus);
19960a4b5a24SKevin Hao if (!(lstatus & BD_LFLAG(TXBD_READY)))
19970a4b5a24SKevin Hao break;
19980a4b5a24SKevin Hao
1999a7312d58SClaudiu Manoil lstatus &= ~BD_LFLAG(TXBD_READY);
2000a7312d58SClaudiu Manoil txbdp->lstatus = cpu_to_be32(lstatus);
2001a7312d58SClaudiu Manoil bufaddr = be32_to_cpu(txbdp->bufPtr);
2002a7312d58SClaudiu Manoil dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
20030a4b5a24SKevin Hao DMA_TO_DEVICE);
20040a4b5a24SKevin Hao txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
20050a4b5a24SKevin Hao }
20060a4b5a24SKevin Hao gfar_wmb();
20070a4b5a24SKevin Hao dev_kfree_skb_any(skb);
20080a4b5a24SKevin Hao return NETDEV_TX_OK;
2009ec21e2ecSJeff Kirsher }
2010ec21e2ecSJeff Kirsher
2011ec21e2ecSJeff Kirsher /* Changes the mac address if the controller is not running. */
2012ec21e2ecSJeff Kirsher static int gfar_set_mac_address(struct net_device *dev)
2013ec21e2ecSJeff Kirsher {
2014ec21e2ecSJeff Kirsher gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
2015ec21e2ecSJeff Kirsher
2016ec21e2ecSJeff Kirsher return 0;
2017ec21e2ecSJeff Kirsher }
2018ec21e2ecSJeff Kirsher
2019ec21e2ecSJeff Kirsher static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2020ec21e2ecSJeff Kirsher {
2021ec21e2ecSJeff Kirsher struct gfar_private *priv = netdev_priv(dev);
2022ec21e2ecSJeff Kirsher
20230851133bSClaudiu Manoil while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
20240851133bSClaudiu Manoil cpu_relax();
20250851133bSClaudiu Manoil
202688302648SClaudiu Manoil if (dev->flags & IFF_UP)
2027ec21e2ecSJeff Kirsher stop_gfar(dev);
2028ec21e2ecSJeff Kirsher
2029ec21e2ecSJeff Kirsher dev->mtu = new_mtu;
2030ec21e2ecSJeff Kirsher
203188302648SClaudiu Manoil if (dev->flags & IFF_UP)
2032ec21e2ecSJeff Kirsher startup_gfar(dev);
2033ec21e2ecSJeff Kirsher
20340851133bSClaudiu Manoil clear_bit_unlock(GFAR_RESETTING, &priv->state);
20350851133bSClaudiu Manoil
2036ec21e2ecSJeff Kirsher return 0;
2037ec21e2ecSJeff Kirsher }
2038ec21e2ecSJeff Kirsher
20399f5c44cfSYueHaibing static void reset_gfar(struct net_device *ndev)
20400851133bSClaudiu Manoil {
20410851133bSClaudiu Manoil struct gfar_private *priv = netdev_priv(ndev);
20420851133bSClaudiu Manoil
20430851133bSClaudiu Manoil while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
20440851133bSClaudiu Manoil cpu_relax();
20450851133bSClaudiu Manoil
20460851133bSClaudiu Manoil stop_gfar(ndev);
20470851133bSClaudiu Manoil startup_gfar(ndev);
20480851133bSClaudiu Manoil
20490851133bSClaudiu Manoil clear_bit_unlock(GFAR_RESETTING, &priv->state);
20500851133bSClaudiu Manoil }
20510851133bSClaudiu Manoil
2052ec21e2ecSJeff Kirsher /* gfar_reset_task gets scheduled when a packet has not been
2053ec21e2ecSJeff Kirsher * transmitted after a set amount of time.
2054ec21e2ecSJeff Kirsher * For now, assume that clearing out all the structures, and
2055ec21e2ecSJeff Kirsher * starting over will fix the problem.
2056ec21e2ecSJeff Kirsher */
2057ec21e2ecSJeff Kirsher static void gfar_reset_task(struct work_struct *work)
2058ec21e2ecSJeff Kirsher {
2059ec21e2ecSJeff Kirsher struct gfar_private *priv = container_of(work, struct gfar_private,
2060ec21e2ecSJeff Kirsher reset_task);
20610851133bSClaudiu Manoil reset_gfar(priv->ndev);
2062ec21e2ecSJeff Kirsher }
2063ec21e2ecSJeff Kirsher
20640290bd29SMichael S. Tsirkin static void gfar_timeout(struct net_device *dev, unsigned int txqueue)
2065ec21e2ecSJeff Kirsher {
2066ec21e2ecSJeff Kirsher struct gfar_private *priv = netdev_priv(dev);
2067ec21e2ecSJeff Kirsher
2068ec21e2ecSJeff Kirsher dev->stats.tx_errors++;
2069ec21e2ecSJeff Kirsher schedule_work(&priv->reset_task);
2070ec21e2ecSJeff Kirsher }
2071ec21e2ecSJeff Kirsher
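/* SIOCSHWTSTAMP handler: enable/disable hardware Tx and Rx timestamping.
 * A change to the Rx setting takes effect via a device reset.
 */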
20727d993c5fSArseny Solokha static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
20737d993c5fSArseny Solokha {
20747d993c5fSArseny Solokha struct hwtstamp_config config;
20757d993c5fSArseny Solokha struct gfar_private *priv = netdev_priv(netdev);
20767d993c5fSArseny Solokha
20777d993c5fSArseny Solokha if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
20787d993c5fSArseny Solokha return -EFAULT;
20797d993c5fSArseny Solokha
20807d993c5fSArseny Solokha switch (config.tx_type) {
20817d993c5fSArseny Solokha case HWTSTAMP_TX_OFF:
20827d993c5fSArseny Solokha priv->hwts_tx_en = 0;
20837d993c5fSArseny Solokha break;
20847d993c5fSArseny Solokha case HWTSTAMP_TX_ON:
20857d993c5fSArseny Solokha if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
20867d993c5fSArseny Solokha return -ERANGE;
20877d993c5fSArseny Solokha priv->hwts_tx_en = 1;
20887d993c5fSArseny Solokha break;
20897d993c5fSArseny Solokha default:
20907d993c5fSArseny Solokha return -ERANGE;
20917d993c5fSArseny Solokha }
20927d993c5fSArseny Solokha
20937d993c5fSArseny Solokha switch (config.rx_filter) {
20947d993c5fSArseny Solokha case HWTSTAMP_FILTER_NONE:
20957d993c5fSArseny Solokha if (priv->hwts_rx_en) {
20967d993c5fSArseny Solokha priv->hwts_rx_en = 0;
20977d993c5fSArseny Solokha reset_gfar(netdev);
20987d993c5fSArseny Solokha }
20997d993c5fSArseny Solokha break;
21007d993c5fSArseny Solokha default:
21017d993c5fSArseny Solokha if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
21027d993c5fSArseny Solokha return -ERANGE;
21037d993c5fSArseny Solokha if (!priv->hwts_rx_en) {
21047d993c5fSArseny Solokha priv->hwts_rx_en = 1;
21057d993c5fSArseny Solokha reset_gfar(netdev);
21067d993c5fSArseny Solokha }
21077d993c5fSArseny Solokha config.rx_filter = HWTSTAMP_FILTER_ALL;
21087d993c5fSArseny Solokha break;
21097d993c5fSArseny Solokha }
21107d993c5fSArseny Solokha
21117d993c5fSArseny Solokha return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
21127d993c5fSArseny Solokha -EFAULT : 0;
21137d993c5fSArseny Solokha }
21147d993c5fSArseny Solokha
21157d993c5fSArseny Solokha static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
21167d993c5fSArseny Solokha {
21177d993c5fSArseny Solokha struct hwtstamp_config config;
21187d993c5fSArseny Solokha struct gfar_private *priv = netdev_priv(netdev);
21197d993c5fSArseny Solokha
21207d993c5fSArseny Solokha config.flags = 0;
21217d993c5fSArseny Solokha config.tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
21227d993c5fSArseny Solokha config.rx_filter = (priv->hwts_rx_en ?
21237d993c5fSArseny Solokha HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
21247d993c5fSArseny Solokha
21257d993c5fSArseny Solokha return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
21267d993c5fSArseny Solokha -EFAULT : 0;
21277d993c5fSArseny Solokha }
21287d993c5fSArseny Solokha
21297d993c5fSArseny Solokha static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
21307d993c5fSArseny Solokha {
21317d993c5fSArseny Solokha struct phy_device *phydev = dev->phydev;
21327d993c5fSArseny Solokha
21337d993c5fSArseny Solokha if (!netif_running(dev))
21347d993c5fSArseny Solokha return -EINVAL;
21357d993c5fSArseny Solokha
21367d993c5fSArseny Solokha if (cmd == SIOCSHWTSTAMP)
21377d993c5fSArseny Solokha return gfar_hwtstamp_set(dev, rq);
21387d993c5fSArseny Solokha if (cmd == SIOCGHWTSTAMP)
21397d993c5fSArseny Solokha return gfar_hwtstamp_get(dev, rq);
21407d993c5fSArseny Solokha
21417d993c5fSArseny Solokha if (!phydev)
21427d993c5fSArseny Solokha return -ENODEV;
21437d993c5fSArseny Solokha
21447d993c5fSArseny Solokha return phy_mii_ioctl(phydev, rq, cmd);
21457d993c5fSArseny Solokha }
21467d993c5fSArseny Solokha
2147ec21e2ecSJeff Kirsher /* Reclaim resources for completed Tx frames (runs from NAPI Tx poll) */
2148c233cf40SClaudiu Manoil static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2149ec21e2ecSJeff Kirsher {
2150ec21e2ecSJeff Kirsher struct net_device *dev = tx_queue->dev;
2151d8a0f1b0SPaul Gortmaker struct netdev_queue *txq;
2152ec21e2ecSJeff Kirsher struct gfar_private *priv = netdev_priv(dev);
2153ec21e2ecSJeff Kirsher struct txbd8 *bdp, *next = NULL;
2154ec21e2ecSJeff Kirsher struct txbd8 *lbdp = NULL;
2155ec21e2ecSJeff Kirsher struct txbd8 *base = tx_queue->tx_bd_base;
2156ec21e2ecSJeff Kirsher struct sk_buff *skb;
2157ec21e2ecSJeff Kirsher int skb_dirtytx;
2158ec21e2ecSJeff Kirsher int tx_ring_size = tx_queue->tx_ring_size;
2159ec21e2ecSJeff Kirsher int frags = 0, nr_txbds = 0;
2160ec21e2ecSJeff Kirsher int i;
2161ec21e2ecSJeff Kirsher int howmany = 0;
2162d8a0f1b0SPaul Gortmaker int tqi = tx_queue->qindex;
2163d8a0f1b0SPaul Gortmaker unsigned int bytes_sent = 0;
2164ec21e2ecSJeff Kirsher u32 lstatus;
2165ec21e2ecSJeff Kirsher size_t buflen;
2166ec21e2ecSJeff Kirsher
2167d8a0f1b0SPaul Gortmaker txq = netdev_get_tx_queue(dev, tqi);
2168ec21e2ecSJeff Kirsher bdp = tx_queue->dirty_tx;
2169ec21e2ecSJeff Kirsher skb_dirtytx = tx_queue->skb_dirtytx;
2170ec21e2ecSJeff Kirsher
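/* Walk the ring from the last cleaned position (dirty_tx); each iteration
 * releases one completed skb together with all of its buffer descriptors:
 * one per fragment plus the head BD, and one more when the frame carried a
 * hardware Tx timestamp.
 */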
2171ec21e2ecSJeff Kirsher while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2172c26a2c2dSVladimir Oltean bool do_tstamp;
2173c26a2c2dSVladimir Oltean
2174c26a2c2dSVladimir Oltean do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2175c26a2c2dSVladimir Oltean priv->hwts_tx_en;
2176ec21e2ecSJeff Kirsher
2177ec21e2ecSJeff Kirsher frags = skb_shinfo(skb)->nr_frags;
2178ec21e2ecSJeff Kirsher
21790977f817SJan Ceuleers /* When time stamping, one additional TxBD must be freed.
2180ec21e2ecSJeff Kirsher * Also, we need to dma_unmap_single() the TxPAL.
2181ec21e2ecSJeff Kirsher */
2182c26a2c2dSVladimir Oltean if (unlikely(do_tstamp))
2183ec21e2ecSJeff Kirsher nr_txbds = frags + 2;
2184ec21e2ecSJeff Kirsher else
2185ec21e2ecSJeff Kirsher nr_txbds = frags + 1;
2186ec21e2ecSJeff Kirsher
2187ec21e2ecSJeff Kirsher lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2188ec21e2ecSJeff Kirsher
2189a7312d58SClaudiu Manoil lstatus = be32_to_cpu(lbdp->lstatus);
2190ec21e2ecSJeff Kirsher
2191ec21e2ecSJeff Kirsher /* Only clean completed frames */
2192ec21e2ecSJeff Kirsher if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2193ec21e2ecSJeff Kirsher (lstatus & BD_LENGTH_MASK))
2194ec21e2ecSJeff Kirsher break;
2195ec21e2ecSJeff Kirsher
2196c26a2c2dSVladimir Oltean if (unlikely(do_tstamp)) {
2197ec21e2ecSJeff Kirsher next = next_txbd(bdp, base, tx_ring_size);
2198a7312d58SClaudiu Manoil buflen = be16_to_cpu(next->length) +
2199a7312d58SClaudiu Manoil GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2200ec21e2ecSJeff Kirsher } else
2201a7312d58SClaudiu Manoil buflen = be16_to_cpu(bdp->length);
2202ec21e2ecSJeff Kirsher
2203a7312d58SClaudiu Manoil dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
2204ec21e2ecSJeff Kirsher buflen, DMA_TO_DEVICE);
2205ec21e2ecSJeff Kirsher
2206c26a2c2dSVladimir Oltean if (unlikely(do_tstamp)) {
2207ec21e2ecSJeff Kirsher struct skb_shared_hwtstamps shhwtstamps;
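/* The hardware writes the Tx timestamp into the FCB/TxPAL header that was
 * prepended to the frame; read the 8-byte-aligned 64-bit value before that
 * header is pulled off the skb below.
 */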
2208b4b67f26SScott Wood u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
2209b4b67f26SScott Wood ~0x7UL);
2210bc4598bcSJan Ceuleers
2211ec21e2ecSJeff Kirsher memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2212f54af12fSYangbo Lu shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
22139c4886e5SManfred Rudigier skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2214ec21e2ecSJeff Kirsher skb_tstamp_tx(skb, &shhwtstamps);
2215a7312d58SClaudiu Manoil gfar_clear_txbd_status(bdp);
2216ec21e2ecSJeff Kirsher bdp = next;
2217ec21e2ecSJeff Kirsher }
2218ec21e2ecSJeff Kirsher
2219a7312d58SClaudiu Manoil gfar_clear_txbd_status(bdp);
2220ec21e2ecSJeff Kirsher bdp = next_txbd(bdp, base, tx_ring_size);
2221ec21e2ecSJeff Kirsher
2222ec21e2ecSJeff Kirsher for (i = 0; i < frags; i++) {
2223a7312d58SClaudiu Manoil dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
2224a7312d58SClaudiu Manoil be16_to_cpu(bdp->length),
2225a7312d58SClaudiu Manoil DMA_TO_DEVICE);
2226a7312d58SClaudiu Manoil gfar_clear_txbd_status(bdp);
2227ec21e2ecSJeff Kirsher bdp = next_txbd(bdp, base, tx_ring_size);
2228ec21e2ecSJeff Kirsher }
2229ec21e2ecSJeff Kirsher
223050ad076bSClaudiu Manoil bytes_sent += GFAR_CB(skb)->bytes_sent;
2231d8a0f1b0SPaul Gortmaker
2232ec21e2ecSJeff Kirsher dev_kfree_skb_any(skb);
2233ec21e2ecSJeff Kirsher
2234ec21e2ecSJeff Kirsher tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2235ec21e2ecSJeff Kirsher
2236ec21e2ecSJeff Kirsher skb_dirtytx = (skb_dirtytx + 1) &
2237ec21e2ecSJeff Kirsher TX_RING_MOD_MASK(tx_ring_size);
2238ec21e2ecSJeff Kirsher
2239ec21e2ecSJeff Kirsher howmany++;
2240bc602280SClaudiu Manoil spin_lock(&tx_queue->txlock);
2241ec21e2ecSJeff Kirsher tx_queue->num_txbdfree += nr_txbds;
2242bc602280SClaudiu Manoil spin_unlock(&tx_queue->txlock);
2243ec21e2ecSJeff Kirsher }
2244ec21e2ecSJeff Kirsher
2245ec21e2ecSJeff Kirsher /* If we freed a buffer, we can restart transmission, if necessary */
22460851133bSClaudiu Manoil if (tx_queue->num_txbdfree &&
22470851133bSClaudiu Manoil netif_tx_queue_stopped(txq) &&
22480851133bSClaudiu Manoil !(test_bit(GFAR_DOWN, &priv->state)))
22490851133bSClaudiu Manoil netif_wake_subqueue(priv->ndev, tqi);
2250ec21e2ecSJeff Kirsher
2251ec21e2ecSJeff Kirsher /* Update dirty indicators */
2252ec21e2ecSJeff Kirsher tx_queue->skb_dirtytx = skb_dirtytx;
2253ec21e2ecSJeff Kirsher tx_queue->dirty_tx = bdp;
2254ec21e2ecSJeff Kirsher
2255d8a0f1b0SPaul Gortmaker netdev_tx_completed_queue(txq, howmany, bytes_sent);
2256ec21e2ecSJeff Kirsher }
2257ec21e2ecSJeff Kirsher
2258f23223f1SClaudiu Manoil static void count_errors(u32 lstatus, struct net_device *ndev)
2259ec21e2ecSJeff Kirsher {
2260f23223f1SClaudiu Manoil struct gfar_private *priv = netdev_priv(ndev);
2261f23223f1SClaudiu Manoil struct net_device_stats *stats = &ndev->stats;
2262ec21e2ecSJeff Kirsher struct gfar_extra_stats *estats = &priv->extra_stats;
2263ec21e2ecSJeff Kirsher
22640977f817SJan Ceuleers /* If the packet was truncated, none of the other errors matter */
2265f966082eSClaudiu Manoil if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
2266ec21e2ecSJeff Kirsher stats->rx_length_errors++;
2267ec21e2ecSJeff Kirsher
2268212079dfSPaul Gortmaker atomic64_inc(&estats->rx_trunc);
2269ec21e2ecSJeff Kirsher
2270ec21e2ecSJeff Kirsher return;
2271ec21e2ecSJeff Kirsher }
2272ec21e2ecSJeff Kirsher /* Count the errors, if there were any */
2273f966082eSClaudiu Manoil if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
2274ec21e2ecSJeff Kirsher stats->rx_length_errors++;
2275ec21e2ecSJeff Kirsher
2276f966082eSClaudiu Manoil if (lstatus & BD_LFLAG(RXBD_LARGE))
2277212079dfSPaul Gortmaker atomic64_inc(&estats->rx_large);
2278ec21e2ecSJeff Kirsher else
2279212079dfSPaul Gortmaker atomic64_inc(&estats->rx_short);
2280ec21e2ecSJeff Kirsher }
2281f966082eSClaudiu Manoil if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
2282ec21e2ecSJeff Kirsher stats->rx_frame_errors++;
2283212079dfSPaul Gortmaker atomic64_inc(&estats->rx_nonoctet);
2284ec21e2ecSJeff Kirsher }
2285f966082eSClaudiu Manoil if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
2286212079dfSPaul Gortmaker atomic64_inc(&estats->rx_crcerr);
2287ec21e2ecSJeff Kirsher stats->rx_crc_errors++;
2288ec21e2ecSJeff Kirsher }
2289f966082eSClaudiu Manoil if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
2290212079dfSPaul Gortmaker atomic64_inc(&estats->rx_overrun);
2291f966082eSClaudiu Manoil stats->rx_over_errors++;
2292ec21e2ecSJeff Kirsher }
2293ec21e2ecSJeff Kirsher }
2294ec21e2ecSJeff Kirsher
22957ad38784SArseny Solokha static irqreturn_t gfar_receive(int irq, void *grp_id)
2296ec21e2ecSJeff Kirsher {
2297aeb12c5eSClaudiu Manoil struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2298aeb12c5eSClaudiu Manoil unsigned long flags;
22993e905b80SClaudiu Manoil u32 imask, ievent;
23003e905b80SClaudiu Manoil
23013e905b80SClaudiu Manoil ievent = gfar_read(&grp->regs->ievent);
23023e905b80SClaudiu Manoil
23033e905b80SClaudiu Manoil if (unlikely(ievent & IEVENT_FGPI)) {
23043e905b80SClaudiu Manoil gfar_write(&grp->regs->ievent, IEVENT_FGPI);
23053e905b80SClaudiu Manoil return IRQ_HANDLED;
23063e905b80SClaudiu Manoil }
2307aeb12c5eSClaudiu Manoil
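/* Defer Rx processing to NAPI: mask the Rx interrupt sources in IMASK
 * (gfar_poll_rx_sq() re-enables them once polling completes under budget)
 * and schedule the Rx poll routine.
 */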
2308aeb12c5eSClaudiu Manoil if (likely(napi_schedule_prep(&grp->napi_rx))) {
2309aeb12c5eSClaudiu Manoil spin_lock_irqsave(&grp->grplock, flags);
2310aeb12c5eSClaudiu Manoil imask = gfar_read(&grp->regs->imask);
231114870b75SEsben Haabendal imask &= IMASK_RX_DISABLED | grp->priv->rmon_overflow.imask;
2312aeb12c5eSClaudiu Manoil gfar_write(&grp->regs->imask, imask);
2313aeb12c5eSClaudiu Manoil spin_unlock_irqrestore(&grp->grplock, flags);
2314aeb12c5eSClaudiu Manoil __napi_schedule(&grp->napi_rx);
2315aeb12c5eSClaudiu Manoil } else {
2316aeb12c5eSClaudiu Manoil /* Clear IEVENT, so interrupts aren't called again
2317aeb12c5eSClaudiu Manoil * because of the packets that have already arrived.
2318aeb12c5eSClaudiu Manoil */
2319aeb12c5eSClaudiu Manoil gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2320aeb12c5eSClaudiu Manoil }
2321aeb12c5eSClaudiu Manoil
2322aeb12c5eSClaudiu Manoil return IRQ_HANDLED;
2323aeb12c5eSClaudiu Manoil }
2324aeb12c5eSClaudiu Manoil
2325aeb12c5eSClaudiu Manoil /* Interrupt Handler for Transmit complete */
2326aeb12c5eSClaudiu Manoil static irqreturn_t gfar_transmit(int irq, void *grp_id)
2327aeb12c5eSClaudiu Manoil {
2328aeb12c5eSClaudiu Manoil struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2329aeb12c5eSClaudiu Manoil unsigned long flags;
2330aeb12c5eSClaudiu Manoil u32 imask;
2331aeb12c5eSClaudiu Manoil
2332aeb12c5eSClaudiu Manoil if (likely(napi_schedule_prep(&grp->napi_tx))) {
2333aeb12c5eSClaudiu Manoil spin_lock_irqsave(&grp->grplock, flags);
2334aeb12c5eSClaudiu Manoil imask = gfar_read(&grp->regs->imask);
233514870b75SEsben Haabendal imask &= IMASK_TX_DISABLED | grp->priv->rmon_overflow.imask;
2336aeb12c5eSClaudiu Manoil gfar_write(&grp->regs->imask, imask);
2337aeb12c5eSClaudiu Manoil spin_unlock_irqrestore(&grp->grplock, flags);
2338aeb12c5eSClaudiu Manoil __napi_schedule(&grp->napi_tx);
2339aeb12c5eSClaudiu Manoil } else {
2340aeb12c5eSClaudiu Manoil /* Clear IEVENT, so interrupts aren't called again
2341aeb12c5eSClaudiu Manoil * because of the packets that have already arrived.
2342aeb12c5eSClaudiu Manoil */
2343aeb12c5eSClaudiu Manoil gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2344aeb12c5eSClaudiu Manoil }
2345aeb12c5eSClaudiu Manoil
2346ec21e2ecSJeff Kirsher return IRQ_HANDLED;
2347ec21e2ecSJeff Kirsher }
2348ec21e2ecSJeff Kirsher
234975354148SClaudiu Manoil static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
235075354148SClaudiu Manoil struct sk_buff *skb, bool first)
235175354148SClaudiu Manoil {
2352202a0a70SAndy Spencer int size = lstatus & BD_LENGTH_MASK;
235375354148SClaudiu Manoil struct page *page = rxb->page;
235475354148SClaudiu Manoil
23556c389fc9SZefir Kurtisi if (likely(first)) {
235675354148SClaudiu Manoil skb_put(skb, size);
23576c389fc9SZefir Kurtisi } else {
23586c389fc9SZefir Kurtisi /* the last fragment's length contains the full frame length */
2359d903ec77SAndy Spencer if (lstatus & BD_LFLAG(RXBD_LAST))
23606c389fc9SZefir Kurtisi size -= skb->len;
23616c389fc9SZefir Kurtisi
2362d8861babSMichael Braun WARN(size < 0, "gianfar: rx fragment size underflow");
2363d8861babSMichael Braun if (size < 0)
2364d8861babSMichael Braun return false;
2365d8861babSMichael Braun
236675354148SClaudiu Manoil skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
236775354148SClaudiu Manoil rxb->page_offset + RXBUF_ALIGNMENT,
236875354148SClaudiu Manoil size, GFAR_RXB_TRUESIZE);
23696c389fc9SZefir Kurtisi }
237075354148SClaudiu Manoil
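/* Each Rx page is split into two GFAR_RXB_TRUESIZE halves. If nothing else
 * holds a reference to the page, flip page_offset to the other half and take
 * an extra reference so the page can be recycled for a later descriptor
 * instead of being unmapped and freed.
 */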
237175354148SClaudiu Manoil /* try reuse page */
237269fed99bSEric Dumazet if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
237375354148SClaudiu Manoil return false;
237475354148SClaudiu Manoil
237575354148SClaudiu Manoil /* change offset to the other half */
237675354148SClaudiu Manoil rxb->page_offset ^= GFAR_RXB_TRUESIZE;
237775354148SClaudiu Manoil
2378fe896d18SJoonsoo Kim page_ref_inc(page);
237975354148SClaudiu Manoil
238075354148SClaudiu Manoil return true;
238175354148SClaudiu Manoil }
238275354148SClaudiu Manoil
238375354148SClaudiu Manoil static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
238475354148SClaudiu Manoil struct gfar_rx_buff *old_rxb)
238575354148SClaudiu Manoil {
238675354148SClaudiu Manoil struct gfar_rx_buff *new_rxb;
238775354148SClaudiu Manoil u16 nta = rxq->next_to_alloc;
238875354148SClaudiu Manoil
238975354148SClaudiu Manoil new_rxb = &rxq->rx_buff[nta];
239075354148SClaudiu Manoil
239175354148SClaudiu Manoil /* find next buf that can reuse a page */
239275354148SClaudiu Manoil nta++;
239375354148SClaudiu Manoil rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
239475354148SClaudiu Manoil
239575354148SClaudiu Manoil /* copy page reference */
239675354148SClaudiu Manoil *new_rxb = *old_rxb;
239775354148SClaudiu Manoil
239875354148SClaudiu Manoil /* sync for use by the device */
239975354148SClaudiu Manoil dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
240075354148SClaudiu Manoil old_rxb->page_offset,
240175354148SClaudiu Manoil GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
240275354148SClaudiu Manoil }
240375354148SClaudiu Manoil
240475354148SClaudiu Manoil static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
240575354148SClaudiu Manoil u32 lstatus, struct sk_buff *skb)
240675354148SClaudiu Manoil {
240775354148SClaudiu Manoil struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
240875354148SClaudiu Manoil struct page *page = rxb->page;
240975354148SClaudiu Manoil bool first = false;
241075354148SClaudiu Manoil
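/* The first buffer of a frame becomes the skb head via build_skb(); any
 * further buffers of the same frame are attached as page fragments by
 * gfar_add_rx_frag() until the descriptor flagged RXBD_LAST completes it.
 */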
241175354148SClaudiu Manoil if (likely(!skb)) {
241275354148SClaudiu Manoil void *buff_addr = page_address(page) + rxb->page_offset;
241375354148SClaudiu Manoil
241475354148SClaudiu Manoil skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
241575354148SClaudiu Manoil if (unlikely(!skb)) {
241675354148SClaudiu Manoil gfar_rx_alloc_err(rx_queue);
241775354148SClaudiu Manoil return NULL;
241875354148SClaudiu Manoil }
241975354148SClaudiu Manoil skb_reserve(skb, RXBUF_ALIGNMENT);
242075354148SClaudiu Manoil first = true;
242175354148SClaudiu Manoil }
242275354148SClaudiu Manoil
242375354148SClaudiu Manoil dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
242475354148SClaudiu Manoil GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
242575354148SClaudiu Manoil
242675354148SClaudiu Manoil if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
242775354148SClaudiu Manoil /* reuse the free half of the page */
242875354148SClaudiu Manoil gfar_reuse_rx_page(rx_queue, rxb);
242975354148SClaudiu Manoil } else {
243075354148SClaudiu Manoil /* page cannot be reused, unmap it */
243175354148SClaudiu Manoil dma_unmap_page(rx_queue->dev, rxb->dma,
243275354148SClaudiu Manoil PAGE_SIZE, DMA_FROM_DEVICE);
243375354148SClaudiu Manoil }
243475354148SClaudiu Manoil
243575354148SClaudiu Manoil /* clear rxb content */
243675354148SClaudiu Manoil rxb->page = NULL;
243775354148SClaudiu Manoil
243875354148SClaudiu Manoil return skb;
243975354148SClaudiu Manoil }
244075354148SClaudiu Manoil
2441ec21e2ecSJeff Kirsher static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2442ec21e2ecSJeff Kirsher {
2443ec21e2ecSJeff Kirsher /* If valid headers were found, and valid sums
2444ec21e2ecSJeff Kirsher * were verified, then we tell the kernel that no
24450977f817SJan Ceuleers * checksumming is necessary. Otherwise, let the stack verify the checksum.
24460977f817SJan Ceuleers */
244726eb9374SClaudiu Manoil if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
244826eb9374SClaudiu Manoil (RXFCB_CIP | RXFCB_CTU))
2449ec21e2ecSJeff Kirsher skb->ip_summed = CHECKSUM_UNNECESSARY;
2450ec21e2ecSJeff Kirsher else
2451ec21e2ecSJeff Kirsher skb_checksum_none_assert(skb);
2452ec21e2ecSJeff Kirsher }
2453ec21e2ecSJeff Kirsher
24540977f817SJan Ceuleers /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
2455f23223f1SClaudiu Manoil static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
2456ec21e2ecSJeff Kirsher {
2457f23223f1SClaudiu Manoil struct gfar_private *priv = netdev_priv(ndev);
2458ec21e2ecSJeff Kirsher struct rxfcb *fcb = NULL;
2459ec21e2ecSJeff Kirsher
2460ec21e2ecSJeff Kirsher /* fcb is at the beginning if exists */
2461ec21e2ecSJeff Kirsher fcb = (struct rxfcb *)skb->data;
2462ec21e2ecSJeff Kirsher
24630977f817SJan Ceuleers /* Remove the FCB from the skb
24640977f817SJan Ceuleers * Remove the padded bytes, if there are any
24650977f817SJan Ceuleers */
2466f23223f1SClaudiu Manoil if (priv->uses_rxfcb)
246776f31e8bSClaudiu Manoil skb_pull(skb, GMAC_FCB_LEN);
2468ec21e2ecSJeff Kirsher
2469ec21e2ecSJeff Kirsher /* Get receive timestamp from the skb */
2470ec21e2ecSJeff Kirsher if (priv->hwts_rx_en) {
2471ec21e2ecSJeff Kirsher struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2472ec21e2ecSJeff Kirsher u64 *ns = (u64 *) skb->data;
2473bc4598bcSJan Ceuleers
2474ec21e2ecSJeff Kirsher memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2475f54af12fSYangbo Lu shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
2476ec21e2ecSJeff Kirsher }
2477ec21e2ecSJeff Kirsher
2478ec21e2ecSJeff Kirsher if (priv->padding)
2479ec21e2ecSJeff Kirsher skb_pull(skb, priv->padding);
2480ec21e2ecSJeff Kirsher
2481d903ec77SAndy Spencer /* Trim off the FCS */
2482d903ec77SAndy Spencer pskb_trim(skb, skb->len - ETH_FCS_LEN);
2483d903ec77SAndy Spencer
2484f23223f1SClaudiu Manoil if (ndev->features & NETIF_F_RXCSUM)
2485ec21e2ecSJeff Kirsher gfar_rx_checksum(skb, fcb);
2486ec21e2ecSJeff Kirsher
2487f646968fSPatrick McHardy /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
2488823dcd25SDavid S. Miller * Even if vlan rx accel is disabled, on some chips
2489823dcd25SDavid S. Miller * RXFCB_VLN is pseudo randomly set.
2490823dcd25SDavid S. Miller */
2491f23223f1SClaudiu Manoil if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
249226eb9374SClaudiu Manoil be16_to_cpu(fcb->flags) & RXFCB_VLN)
249326eb9374SClaudiu Manoil __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
249426eb9374SClaudiu Manoil be16_to_cpu(fcb->vlctl));
2495ec21e2ecSJeff Kirsher }
2496ec21e2ecSJeff Kirsher
2497ec21e2ecSJeff Kirsher /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2498ec21e2ecSJeff Kirsher * until the budget/quota has been reached. Returns the number
2499ec21e2ecSJeff Kirsher * of frames handled
2500ec21e2ecSJeff Kirsher */
25017ad38784SArseny Solokha static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
25027ad38784SArseny Solokha int rx_work_limit)
2503ec21e2ecSJeff Kirsher {
2504f23223f1SClaudiu Manoil struct net_device *ndev = rx_queue->ndev;
2505f23223f1SClaudiu Manoil struct gfar_private *priv = netdev_priv(ndev);
250675354148SClaudiu Manoil struct rxbd8 *bdp;
250775354148SClaudiu Manoil int i, howmany = 0;
250875354148SClaudiu Manoil struct sk_buff *skb = rx_queue->skb;
250975354148SClaudiu Manoil int cleaned_cnt = gfar_rxbd_unused(rx_queue);
251075354148SClaudiu Manoil unsigned int total_bytes = 0, total_pkts = 0;
2511ec21e2ecSJeff Kirsher
2512ec21e2ecSJeff Kirsher /* Get the first full descriptor */
251376f31e8bSClaudiu Manoil i = rx_queue->next_to_clean;
2514ec21e2ecSJeff Kirsher
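/* Process completed descriptors up to the NAPI budget (rx_work_limit),
 * replenishing the ring in batches of GFAR_RX_BUFF_ALLOC as buffers are
 * consumed.
 */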
251576f31e8bSClaudiu Manoil while (rx_work_limit--) {
2516f966082eSClaudiu Manoil u32 lstatus;
2517ec21e2ecSJeff Kirsher
251876f31e8bSClaudiu Manoil if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
251976f31e8bSClaudiu Manoil gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
252076f31e8bSClaudiu Manoil cleaned_cnt = 0;
252176f31e8bSClaudiu Manoil }
2522bc4598bcSJan Ceuleers
252376f31e8bSClaudiu Manoil bdp = &rx_queue->rx_bd_base[i];
2524f966082eSClaudiu Manoil lstatus = be32_to_cpu(bdp->lstatus);
2525f966082eSClaudiu Manoil if (lstatus & BD_LFLAG(RXBD_EMPTY))
252676f31e8bSClaudiu Manoil break;
252776f31e8bSClaudiu Manoil
2528d8861babSMichael Braun /* lost RXBD_LAST descriptor due to overrun */
2529d8861babSMichael Braun if (skb &&
2530d8861babSMichael Braun (lstatus & BD_LFLAG(RXBD_FIRST))) {
2531d8861babSMichael Braun /* discard faulty buffer */
2532d8861babSMichael Braun dev_kfree_skb(skb);
2533d8861babSMichael Braun skb = NULL;
2534d8861babSMichael Braun rx_queue->stats.rx_dropped++;
2535d8861babSMichael Braun
2536d8861babSMichael Braun /* can continue normally */
2537d8861babSMichael Braun }
2538d8861babSMichael Braun
253976f31e8bSClaudiu Manoil /* order rx buffer descriptor reads */
2540ec21e2ecSJeff Kirsher rmb();
2541ec21e2ecSJeff Kirsher
254276f31e8bSClaudiu Manoil /* fetch next to clean buffer from the ring */
254375354148SClaudiu Manoil skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
254475354148SClaudiu Manoil if (unlikely(!skb))
254575354148SClaudiu Manoil break;
2546ec21e2ecSJeff Kirsher
254775354148SClaudiu Manoil cleaned_cnt++;
254875354148SClaudiu Manoil howmany++;
2549ec21e2ecSJeff Kirsher
255075354148SClaudiu Manoil if (unlikely(++i == rx_queue->rx_ring_size))
255175354148SClaudiu Manoil i = 0;
2552ec21e2ecSJeff Kirsher
255375354148SClaudiu Manoil rx_queue->next_to_clean = i;
255475354148SClaudiu Manoil
255575354148SClaudiu Manoil /* fetch next buffer if not the last in frame */
255675354148SClaudiu Manoil if (!(lstatus & BD_LFLAG(RXBD_LAST)))
255775354148SClaudiu Manoil continue;
255875354148SClaudiu Manoil
255975354148SClaudiu Manoil if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
2560f23223f1SClaudiu Manoil count_errors(lstatus, ndev);
2561ec21e2ecSJeff Kirsher
256276f31e8bSClaudiu Manoil /* discard faulty buffer */
2563acb600deSEric Dumazet dev_kfree_skb(skb);
256475354148SClaudiu Manoil skb = NULL;
256575354148SClaudiu Manoil rx_queue->stats.rx_dropped++;
256675354148SClaudiu Manoil continue;
256775354148SClaudiu Manoil }
256876f31e8bSClaudiu Manoil
2569590399ddSClaudiu Manoil gfar_process_frame(ndev, skb);
2570590399ddSClaudiu Manoil
2571ec21e2ecSJeff Kirsher /* Increment the number of packets */
257275354148SClaudiu Manoil total_pkts++;
257375354148SClaudiu Manoil total_bytes += skb->len;
2574ec21e2ecSJeff Kirsher
2575ec21e2ecSJeff Kirsher skb_record_rx_queue(skb, rx_queue->qindex);
257675354148SClaudiu Manoil
2577590399ddSClaudiu Manoil skb->protocol = eth_type_trans(skb, ndev);
2578f23223f1SClaudiu Manoil
2579f23223f1SClaudiu Manoil /* Send the packet up the stack */
2580f23223f1SClaudiu Manoil napi_gro_receive(&rx_queue->grp->napi_rx, skb);
2581ec21e2ecSJeff Kirsher
258275354148SClaudiu Manoil skb = NULL;
2583ec21e2ecSJeff Kirsher }
2584ec21e2ecSJeff Kirsher
258575354148SClaudiu Manoil /* Store incomplete frames for completion */
258675354148SClaudiu Manoil rx_queue->skb = skb;
2587ec21e2ecSJeff Kirsher
258875354148SClaudiu Manoil rx_queue->stats.rx_packets += total_pkts;
258975354148SClaudiu Manoil rx_queue->stats.rx_bytes += total_bytes;
259076f31e8bSClaudiu Manoil
259176f31e8bSClaudiu Manoil if (cleaned_cnt)
259276f31e8bSClaudiu Manoil gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
259376f31e8bSClaudiu Manoil
259476f31e8bSClaudiu Manoil /* Update Last Free RxBD pointer for LFC */
259576f31e8bSClaudiu Manoil if (unlikely(priv->tx_actual_en)) {
2596b4b67f26SScott Wood u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
2597b4b67f26SScott Wood
2598b4b67f26SScott Wood gfar_write(rx_queue->rfbptr, bdp_dma);
259976f31e8bSClaudiu Manoil }
2600ec21e2ecSJeff Kirsher
2601ec21e2ecSJeff Kirsher return howmany;
2602ec21e2ecSJeff Kirsher }
2603ec21e2ecSJeff Kirsher
2604aeb12c5eSClaudiu Manoil static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
26055eaedf31SClaudiu Manoil {
26065eaedf31SClaudiu Manoil struct gfar_priv_grp *gfargrp =
2607aeb12c5eSClaudiu Manoil container_of(napi, struct gfar_priv_grp, napi_rx);
26085eaedf31SClaudiu Manoil struct gfar __iomem *regs = gfargrp->regs;
260971ff9e3dSClaudiu Manoil struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
26105eaedf31SClaudiu Manoil int work_done = 0;
26115eaedf31SClaudiu Manoil
26125eaedf31SClaudiu Manoil /* Clear IEVENT, so interrupts aren't called again
26135eaedf31SClaudiu Manoil * because of the packets that have already arrived
26145eaedf31SClaudiu Manoil */
2615aeb12c5eSClaudiu Manoil gfar_write(&regs->ievent, IEVENT_RX_MASK);
26165eaedf31SClaudiu Manoil
26175eaedf31SClaudiu Manoil work_done = gfar_clean_rx_ring(rx_queue, budget);
26185eaedf31SClaudiu Manoil
26195eaedf31SClaudiu Manoil if (work_done < budget) {
2620aeb12c5eSClaudiu Manoil u32 imask;
26216ad20165SEric Dumazet napi_complete_done(napi, work_done);
26225eaedf31SClaudiu Manoil /* Clear the halt bit in RSTAT */
26235eaedf31SClaudiu Manoil gfar_write(&regs->rstat, gfargrp->rstat);
26245eaedf31SClaudiu Manoil
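/* Finished under budget: re-enable the Rx interrupt sources that
 * gfar_receive() masked off.
 */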
2625aeb12c5eSClaudiu Manoil spin_lock_irq(&gfargrp->grplock);
2626aeb12c5eSClaudiu Manoil imask = gfar_read(&regs->imask);
2627aeb12c5eSClaudiu Manoil imask |= IMASK_RX_DEFAULT;
2628aeb12c5eSClaudiu Manoil gfar_write(&regs->imask, imask);
2629aeb12c5eSClaudiu Manoil spin_unlock_irq(&gfargrp->grplock);
26305eaedf31SClaudiu Manoil }
26315eaedf31SClaudiu Manoil
26325eaedf31SClaudiu Manoil return work_done;
26335eaedf31SClaudiu Manoil }
26345eaedf31SClaudiu Manoil
2635aeb12c5eSClaudiu Manoil static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
2636ec21e2ecSJeff Kirsher {
2637bc4598bcSJan Ceuleers struct gfar_priv_grp *gfargrp =
2638aeb12c5eSClaudiu Manoil container_of(napi, struct gfar_priv_grp, napi_tx);
2639aeb12c5eSClaudiu Manoil struct gfar __iomem *regs = gfargrp->regs;
264071ff9e3dSClaudiu Manoil struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2641aeb12c5eSClaudiu Manoil u32 imask;
2642aeb12c5eSClaudiu Manoil
2643aeb12c5eSClaudiu Manoil /* Clear IEVENT, so interrupts aren't called again
2644aeb12c5eSClaudiu Manoil * because of the packets that have already arrived
2645aeb12c5eSClaudiu Manoil */
2646aeb12c5eSClaudiu Manoil gfar_write(&regs->ievent, IEVENT_TX_MASK);
2647aeb12c5eSClaudiu Manoil
2648aeb12c5eSClaudiu Manoil /* run Tx cleanup to completion */
2649aeb12c5eSClaudiu Manoil if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2650aeb12c5eSClaudiu Manoil gfar_clean_tx_ring(tx_queue);
2651aeb12c5eSClaudiu Manoil
2652aeb12c5eSClaudiu Manoil napi_complete(napi);
2653aeb12c5eSClaudiu Manoil
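/* Re-enable the Tx interrupt sources that gfar_transmit() masked off. */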
2654aeb12c5eSClaudiu Manoil spin_lock_irq(&gfargrp->grplock);
2655aeb12c5eSClaudiu Manoil imask = gfar_read(&regs->imask);
2656aeb12c5eSClaudiu Manoil imask |= IMASK_TX_DEFAULT;
2657aeb12c5eSClaudiu Manoil gfar_write(&regs->imask, imask);
2658aeb12c5eSClaudiu Manoil spin_unlock_irq(&gfargrp->grplock);
2659aeb12c5eSClaudiu Manoil
2660aeb12c5eSClaudiu Manoil return 0;
2661aeb12c5eSClaudiu Manoil }
2662aeb12c5eSClaudiu Manoil
26637d993c5fSArseny Solokha /* GFAR error interrupt handler */
26647d993c5fSArseny Solokha static irqreturn_t gfar_error(int irq, void *grp_id)
26657d993c5fSArseny Solokha {
26667d993c5fSArseny Solokha struct gfar_priv_grp *gfargrp = grp_id;
26677d993c5fSArseny Solokha struct gfar __iomem *regs = gfargrp->regs;
26687d993c5fSArseny Solokha struct gfar_private *priv = gfargrp->priv;
26697d993c5fSArseny Solokha struct net_device *dev = priv->ndev;
26707d993c5fSArseny Solokha
26717d993c5fSArseny Solokha /* Save ievent for future reference */
26727d993c5fSArseny Solokha u32 events = gfar_read(&regs->ievent);
26737d993c5fSArseny Solokha
26747d993c5fSArseny Solokha /* Clear IEVENT */
26757d993c5fSArseny Solokha gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
26767d993c5fSArseny Solokha
26777d993c5fSArseny Solokha /* Magic Packet is not an error. */
26787d993c5fSArseny Solokha if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
26797d993c5fSArseny Solokha (events & IEVENT_MAG))
26807d993c5fSArseny Solokha events &= ~IEVENT_MAG;
26817d993c5fSArseny Solokha
26827d993c5fSArseny Solokha /* Log the error event and current interrupt mask for debugging */
26837d993c5fSArseny Solokha if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
26847d993c5fSArseny Solokha netdev_dbg(dev,
26857d993c5fSArseny Solokha "error interrupt (ievent=0x%08x imask=0x%08x)\n",
26867d993c5fSArseny Solokha events, gfar_read(&regs->imask));
26877d993c5fSArseny Solokha
26887d993c5fSArseny Solokha /* Update the error counters */
26897d993c5fSArseny Solokha if (events & IEVENT_TXE) {
26907d993c5fSArseny Solokha dev->stats.tx_errors++;
26917d993c5fSArseny Solokha
26927d993c5fSArseny Solokha if (events & IEVENT_LC)
26937d993c5fSArseny Solokha dev->stats.tx_window_errors++;
26947d993c5fSArseny Solokha if (events & IEVENT_CRL)
26957d993c5fSArseny Solokha dev->stats.tx_aborted_errors++;
26967d993c5fSArseny Solokha if (events & IEVENT_XFUN) {
26977d993c5fSArseny Solokha netif_dbg(priv, tx_err, dev,
26987d993c5fSArseny Solokha "TX FIFO underrun, packet dropped\n");
26997d993c5fSArseny Solokha dev->stats.tx_dropped++;
27007d993c5fSArseny Solokha atomic64_inc(&priv->extra_stats.tx_underrun);
27017d993c5fSArseny Solokha
27027d993c5fSArseny Solokha schedule_work(&priv->reset_task);
27037d993c5fSArseny Solokha }
27047d993c5fSArseny Solokha netif_dbg(priv, tx_err, dev, "Transmit Error\n");
27057d993c5fSArseny Solokha }
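/* RMON statistics counter carry (overflow) event: fold the Rx-drop carry
 * into the software accumulator and clear the write-1-to-clear carry bit.
 */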
270614870b75SEsben Haabendal if (events & IEVENT_MSRO) {
270714870b75SEsben Haabendal struct rmon_mib __iomem *rmon = &regs->rmon;
270814870b75SEsben Haabendal u32 car;
270914870b75SEsben Haabendal
271014870b75SEsben Haabendal spin_lock(&priv->rmon_overflow.lock);
271114870b75SEsben Haabendal car = gfar_read(&rmon->car1) & CAR1_C1RDR;
271214870b75SEsben Haabendal if (car) {
271314870b75SEsben Haabendal priv->rmon_overflow.rdrp++;
271414870b75SEsben Haabendal gfar_write(&rmon->car1, car);
271514870b75SEsben Haabendal }
271614870b75SEsben Haabendal spin_unlock(&priv->rmon_overflow.lock);
271714870b75SEsben Haabendal }
27187d993c5fSArseny Solokha if (events & IEVENT_BSY) {
27197d993c5fSArseny Solokha dev->stats.rx_over_errors++;
27207d993c5fSArseny Solokha atomic64_inc(&priv->extra_stats.rx_bsy);
27217d993c5fSArseny Solokha
27227d993c5fSArseny Solokha netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
27237d993c5fSArseny Solokha gfar_read(&regs->rstat));
27247d993c5fSArseny Solokha }
27257d993c5fSArseny Solokha if (events & IEVENT_BABR) {
27267d993c5fSArseny Solokha dev->stats.rx_errors++;
27277d993c5fSArseny Solokha atomic64_inc(&priv->extra_stats.rx_babr);
27287d993c5fSArseny Solokha
27297d993c5fSArseny Solokha netif_dbg(priv, rx_err, dev, "babbling RX error\n");
27307d993c5fSArseny Solokha }
27317d993c5fSArseny Solokha if (events & IEVENT_EBERR) {
27327d993c5fSArseny Solokha atomic64_inc(&priv->extra_stats.eberr);
27337d993c5fSArseny Solokha netif_dbg(priv, rx_err, dev, "bus error\n");
27347d993c5fSArseny Solokha }
27357d993c5fSArseny Solokha if (events & IEVENT_RXC)
27367d993c5fSArseny Solokha netif_dbg(priv, rx_status, dev, "control frame\n");
27377d993c5fSArseny Solokha
27387d993c5fSArseny Solokha if (events & IEVENT_BABT) {
27397d993c5fSArseny Solokha atomic64_inc(&priv->extra_stats.tx_babt);
27407d993c5fSArseny Solokha netif_dbg(priv, tx_err, dev, "babbling TX error\n");
27417d993c5fSArseny Solokha }
27427d993c5fSArseny Solokha return IRQ_HANDLED;
27437d993c5fSArseny Solokha }
27447d993c5fSArseny Solokha
27457d993c5fSArseny Solokha /* The interrupt handler for devices with one interrupt */
27467d993c5fSArseny Solokha static irqreturn_t gfar_interrupt(int irq, void *grp_id)
27477d993c5fSArseny Solokha {
27487d993c5fSArseny Solokha struct gfar_priv_grp *gfargrp = grp_id;
27497d993c5fSArseny Solokha
27507d993c5fSArseny Solokha /* Save ievent for future reference */
27517d993c5fSArseny Solokha u32 events = gfar_read(&gfargrp->regs->ievent);
27527d993c5fSArseny Solokha
27537d993c5fSArseny Solokha /* Check for reception */
27547d993c5fSArseny Solokha if (events & IEVENT_RX_MASK)
27557d993c5fSArseny Solokha gfar_receive(irq, grp_id);
27567d993c5fSArseny Solokha
27577d993c5fSArseny Solokha /* Check for transmit completion */
27587d993c5fSArseny Solokha if (events & IEVENT_TX_MASK)
27597d993c5fSArseny Solokha gfar_transmit(irq, grp_id);
27607d993c5fSArseny Solokha
27617d993c5fSArseny Solokha /* Check for errors */
27627d993c5fSArseny Solokha if (events & IEVENT_ERR_MASK)
27637d993c5fSArseny Solokha gfar_error(irq, grp_id);
27647d993c5fSArseny Solokha
27657d993c5fSArseny Solokha return IRQ_HANDLED;
27667d993c5fSArseny Solokha }
2767aeb12c5eSClaudiu Manoil
2768ec21e2ecSJeff Kirsher #ifdef CONFIG_NET_POLL_CONTROLLER
27690977f817SJan Ceuleers /* Polling 'interrupt' - used by things like netconsole to send skbs
2770ec21e2ecSJeff Kirsher * without having to re-enable interrupts. It's not called while
2771ec21e2ecSJeff Kirsher * the interrupt routine is executing.
2772ec21e2ecSJeff Kirsher */
2773ec21e2ecSJeff Kirsher static void gfar_netpoll(struct net_device *dev)
2774ec21e2ecSJeff Kirsher {
2775ec21e2ecSJeff Kirsher struct gfar_private *priv = netdev_priv(dev);
27763a2e16c8SJan Ceuleers int i;
2777ec21e2ecSJeff Kirsher
2778ec21e2ecSJeff Kirsher /* If the device has multiple interrupts, run tx/rx */
2779ec21e2ecSJeff Kirsher if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2780ec21e2ecSJeff Kirsher for (i = 0; i < priv->num_grps; i++) {
278162ed839dSPaul Gortmaker struct gfar_priv_grp *grp = &priv->gfargrp[i];
278262ed839dSPaul Gortmaker
278362ed839dSPaul Gortmaker disable_irq(gfar_irq(grp, TX)->irq);
278462ed839dSPaul Gortmaker disable_irq(gfar_irq(grp, RX)->irq);
278562ed839dSPaul Gortmaker disable_irq(gfar_irq(grp, ER)->irq);
278662ed839dSPaul Gortmaker gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
278762ed839dSPaul Gortmaker enable_irq(gfar_irq(grp, ER)->irq);
278862ed839dSPaul Gortmaker enable_irq(gfar_irq(grp, RX)->irq);
278962ed839dSPaul Gortmaker enable_irq(gfar_irq(grp, TX)->irq);
2790ec21e2ecSJeff Kirsher }
2791ec21e2ecSJeff Kirsher } else {
2792ec21e2ecSJeff Kirsher for (i = 0; i < priv->num_grps; i++) {
279362ed839dSPaul Gortmaker struct gfar_priv_grp *grp = &priv->gfargrp[i];
279462ed839dSPaul Gortmaker
279562ed839dSPaul Gortmaker disable_irq(gfar_irq(grp, TX)->irq);
279662ed839dSPaul Gortmaker gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
279762ed839dSPaul Gortmaker enable_irq(gfar_irq(grp, TX)->irq);
2798ec21e2ecSJeff Kirsher }
2799ec21e2ecSJeff Kirsher }
2800ec21e2ecSJeff Kirsher }
2801ec21e2ecSJeff Kirsher #endif
2802ec21e2ecSJeff Kirsher
28037d993c5fSArseny Solokha static void free_grp_irqs(struct gfar_priv_grp *grp)
2804ec21e2ecSJeff Kirsher {
28057d993c5fSArseny Solokha free_irq(gfar_irq(grp, TX)->irq, grp);
28067d993c5fSArseny Solokha free_irq(gfar_irq(grp, RX)->irq, grp);
28077d993c5fSArseny Solokha free_irq(gfar_irq(grp, ER)->irq, grp);
2808ec21e2ecSJeff Kirsher }
2809ec21e2ecSJeff Kirsher
28107d993c5fSArseny Solokha static int register_grp_irqs(struct gfar_priv_grp *grp)
28117d993c5fSArseny Solokha {
28127d993c5fSArseny Solokha struct gfar_private *priv = grp->priv;
28137d993c5fSArseny Solokha struct net_device *dev = priv->ndev;
28147d993c5fSArseny Solokha int err;
28157d993c5fSArseny Solokha
28167d993c5fSArseny Solokha /* If the device has multiple interrupts, register for
28177d993c5fSArseny Solokha * them. Otherwise, only register for the one
2818ec21e2ecSJeff Kirsher */
28197d993c5fSArseny Solokha if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
28207d993c5fSArseny Solokha /* Install our interrupt handlers for Error,
28217d993c5fSArseny Solokha * Transmit, and Receive
28227d993c5fSArseny Solokha */
28237d993c5fSArseny Solokha err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
28247d993c5fSArseny Solokha gfar_irq(grp, ER)->name, grp);
28257d993c5fSArseny Solokha if (err < 0) {
28267d993c5fSArseny Solokha netif_err(priv, intr, dev, "Can't get IRQ %d\n",
28277d993c5fSArseny Solokha gfar_irq(grp, ER)->irq);
28287d993c5fSArseny Solokha
28297d993c5fSArseny Solokha goto err_irq_fail;
28307d993c5fSArseny Solokha }
28317d993c5fSArseny Solokha enable_irq_wake(gfar_irq(grp, ER)->irq);
28327d993c5fSArseny Solokha
28337d993c5fSArseny Solokha err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
28347d993c5fSArseny Solokha gfar_irq(grp, TX)->name, grp);
28357d993c5fSArseny Solokha if (err < 0) {
28367d993c5fSArseny Solokha netif_err(priv, intr, dev, "Can't get IRQ %d\n",
28377d993c5fSArseny Solokha gfar_irq(grp, TX)->irq);
28387d993c5fSArseny Solokha goto tx_irq_fail;
28397d993c5fSArseny Solokha }
28407d993c5fSArseny Solokha err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
28417d993c5fSArseny Solokha gfar_irq(grp, RX)->name, grp);
28427d993c5fSArseny Solokha if (err < 0) {
28437d993c5fSArseny Solokha netif_err(priv, intr, dev, "Can't get IRQ %d\n",
28447d993c5fSArseny Solokha gfar_irq(grp, RX)->irq);
28457d993c5fSArseny Solokha goto rx_irq_fail;
28467d993c5fSArseny Solokha }
28477d993c5fSArseny Solokha enable_irq_wake(gfar_irq(grp, RX)->irq);
28487d993c5fSArseny Solokha
28497d993c5fSArseny Solokha } else {
28507d993c5fSArseny Solokha err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
28517d993c5fSArseny Solokha gfar_irq(grp, TX)->name, grp);
28527d993c5fSArseny Solokha if (err < 0) {
28537d993c5fSArseny Solokha netif_err(priv, intr, dev, "Can't get IRQ %d\n",
28547d993c5fSArseny Solokha gfar_irq(grp, TX)->irq);
28557d993c5fSArseny Solokha goto err_irq_fail;
28567d993c5fSArseny Solokha }
28577d993c5fSArseny Solokha enable_irq_wake(gfar_irq(grp, TX)->irq);
28587d993c5fSArseny Solokha }
28597d993c5fSArseny Solokha
28607d993c5fSArseny Solokha return 0;
28617d993c5fSArseny Solokha
28627d993c5fSArseny Solokha rx_irq_fail:
28637d993c5fSArseny Solokha free_irq(gfar_irq(grp, TX)->irq, grp);
28647d993c5fSArseny Solokha tx_irq_fail:
28657d993c5fSArseny Solokha free_irq(gfar_irq(grp, ER)->irq, grp);
28667d993c5fSArseny Solokha err_irq_fail:
28677d993c5fSArseny Solokha return err;
28687d993c5fSArseny Solokha
28697d993c5fSArseny Solokha }
28707d993c5fSArseny Solokha
28717d993c5fSArseny Solokha static void gfar_free_irq(struct gfar_private *priv)
28727d993c5fSArseny Solokha {
28737d993c5fSArseny Solokha int i;
28747d993c5fSArseny Solokha
28757d993c5fSArseny Solokha /* Free the IRQs */
28767d993c5fSArseny Solokha if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
28777d993c5fSArseny Solokha for (i = 0; i < priv->num_grps; i++)
28787d993c5fSArseny Solokha free_grp_irqs(&priv->gfargrp[i]);
28797d993c5fSArseny Solokha } else {
28807d993c5fSArseny Solokha for (i = 0; i < priv->num_grps; i++)
28817d993c5fSArseny Solokha free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
28827d993c5fSArseny Solokha &priv->gfargrp[i]);
28837d993c5fSArseny Solokha }
28847d993c5fSArseny Solokha }
28857d993c5fSArseny Solokha
28867d993c5fSArseny Solokha static int gfar_request_irq(struct gfar_private *priv)
28877d993c5fSArseny Solokha {
28887d993c5fSArseny Solokha int err, i, j;
28897d993c5fSArseny Solokha
28907d993c5fSArseny Solokha for (i = 0; i < priv->num_grps; i++) {
28917d993c5fSArseny Solokha err = register_grp_irqs(&priv->gfargrp[i]);
28927d993c5fSArseny Solokha if (err) {
28937d993c5fSArseny Solokha for (j = 0; j < i; j++)
28947d993c5fSArseny Solokha free_grp_irqs(&priv->gfargrp[j]);
28957d993c5fSArseny Solokha return err;
28967d993c5fSArseny Solokha }
28977d993c5fSArseny Solokha }
28987d993c5fSArseny Solokha
28997d993c5fSArseny Solokha return 0;
29007d993c5fSArseny Solokha }
29017d993c5fSArseny Solokha
29027d993c5fSArseny Solokha /* Called when something needs to use the ethernet device
29037d993c5fSArseny Solokha * Returns 0 for success.
29047d993c5fSArseny Solokha */
29057d993c5fSArseny Solokha static int gfar_enet_open(struct net_device *dev)
2906ec21e2ecSJeff Kirsher {
2907ec21e2ecSJeff Kirsher struct gfar_private *priv = netdev_priv(dev);
29087d993c5fSArseny Solokha int err;
2909ec21e2ecSJeff Kirsher
29107d993c5fSArseny Solokha err = init_phy(dev);
29117d993c5fSArseny Solokha if (err)
29127d993c5fSArseny Solokha return err;
29137d993c5fSArseny Solokha
29147d993c5fSArseny Solokha err = gfar_request_irq(priv);
29157d993c5fSArseny Solokha if (err)
29167d993c5fSArseny Solokha return err;
29177d993c5fSArseny Solokha
29187d993c5fSArseny Solokha err = startup_gfar(dev);
29197d993c5fSArseny Solokha if (err)
29207d993c5fSArseny Solokha return err;
29217d993c5fSArseny Solokha
29227d993c5fSArseny Solokha return err;
29237d993c5fSArseny Solokha }
29247d993c5fSArseny Solokha
29257d993c5fSArseny Solokha /* Stops the kernel queue, and halts the controller */
29267d993c5fSArseny Solokha static int gfar_close(struct net_device *dev)
29277d993c5fSArseny Solokha {
29287d993c5fSArseny Solokha struct gfar_private *priv = netdev_priv(dev);
29297d993c5fSArseny Solokha
29307d993c5fSArseny Solokha cancel_work_sync(&priv->reset_task);
29317d993c5fSArseny Solokha stop_gfar(dev);
29327d993c5fSArseny Solokha
29337d993c5fSArseny Solokha /* Disconnect from the PHY */
29347d993c5fSArseny Solokha phy_disconnect(dev->phydev);
29357d993c5fSArseny Solokha
29367d993c5fSArseny Solokha gfar_free_irq(priv);
29377d993c5fSArseny Solokha
29387d993c5fSArseny Solokha return 0;
29397d993c5fSArseny Solokha }
29407d993c5fSArseny Solokha
29417d993c5fSArseny Solokha /* Clears each of the exact match registers to zero, so they
29427d993c5fSArseny Solokha * don't interfere with normal reception
29437d993c5fSArseny Solokha */
29447d993c5fSArseny Solokha static void gfar_clear_exact_match(struct net_device *dev)
29457d993c5fSArseny Solokha {
29467d993c5fSArseny Solokha int idx;
29477d993c5fSArseny Solokha static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
29487d993c5fSArseny Solokha
29497d993c5fSArseny Solokha for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
29507d993c5fSArseny Solokha gfar_set_mac_for_addr(dev, idx, zero_arr);
2951ec21e2ecSJeff Kirsher }
2952ec21e2ecSJeff Kirsher
2953ec21e2ecSJeff Kirsher /* Update the hash table based on the current list of multicast
2954ec21e2ecSJeff Kirsher * addresses we subscribe to. Also, change the promiscuity of
2955ec21e2ecSJeff Kirsher * the device based on the flags (this function is called
29560977f817SJan Ceuleers * whenever dev->flags is changed).
29570977f817SJan Ceuleers */
2958ec21e2ecSJeff Kirsher static void gfar_set_multi(struct net_device *dev)
2959ec21e2ecSJeff Kirsher {
2960ec21e2ecSJeff Kirsher struct netdev_hw_addr *ha;
2961ec21e2ecSJeff Kirsher struct gfar_private *priv = netdev_priv(dev);
2962ec21e2ecSJeff Kirsher struct gfar __iomem *regs = priv->gfargrp[0].regs;
2963ec21e2ecSJeff Kirsher u32 tempval;
2964ec21e2ecSJeff Kirsher
2965ec21e2ecSJeff Kirsher if (dev->flags & IFF_PROMISC) {
2966ec21e2ecSJeff Kirsher /* Set RCTRL to PROM */
2967ec21e2ecSJeff Kirsher tempval = gfar_read(&regs->rctrl);
2968ec21e2ecSJeff Kirsher tempval |= RCTRL_PROM;
2969ec21e2ecSJeff Kirsher gfar_write(&regs->rctrl, tempval);
2970ec21e2ecSJeff Kirsher } else {
2971ec21e2ecSJeff Kirsher /* Set RCTRL to not PROM */
2972ec21e2ecSJeff Kirsher tempval = gfar_read(&regs->rctrl);
2973ec21e2ecSJeff Kirsher tempval &= ~(RCTRL_PROM);
2974ec21e2ecSJeff Kirsher gfar_write(&regs->rctrl, tempval);
2975ec21e2ecSJeff Kirsher }
2976ec21e2ecSJeff Kirsher
2977ec21e2ecSJeff Kirsher if (dev->flags & IFF_ALLMULTI) {
2978ec21e2ecSJeff Kirsher /* Set the hash to rx all multicast frames */
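/* igaddr0-7 and gaddr0-7 each form a 256-bit hash filter; filling them
 * with ones makes every hashed address match, i.e. all multicast frames
 * are accepted.
 */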
2979ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr0, 0xffffffff);
2980ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr1, 0xffffffff);
2981ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr2, 0xffffffff);
2982ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr3, 0xffffffff);
2983ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr4, 0xffffffff);
2984ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr5, 0xffffffff);
2985ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr6, 0xffffffff);
2986ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr7, 0xffffffff);
2987ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr0, 0xffffffff);
2988ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr1, 0xffffffff);
2989ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr2, 0xffffffff);
2990ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr3, 0xffffffff);
2991ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr4, 0xffffffff);
2992ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr5, 0xffffffff);
2993ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr6, 0xffffffff);
2994ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr7, 0xffffffff);
2995ec21e2ecSJeff Kirsher } else {
2996ec21e2ecSJeff Kirsher int em_num;
2997ec21e2ecSJeff Kirsher int idx;
2998ec21e2ecSJeff Kirsher
2999ec21e2ecSJeff Kirsher /* zero out the hash */
3000ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr0, 0x0);
3001ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr1, 0x0);
3002ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr2, 0x0);
3003ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr3, 0x0);
3004ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr4, 0x0);
3005ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr5, 0x0);
3006ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr6, 0x0);
3007ec21e2ecSJeff Kirsher gfar_write(&regs->igaddr7, 0x0);
3008ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr0, 0x0);
3009ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr1, 0x0);
3010ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr2, 0x0);
3011ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr3, 0x0);
3012ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr4, 0x0);
3013ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr5, 0x0);
3014ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr6, 0x0);
3015ec21e2ecSJeff Kirsher gfar_write(&regs->gaddr7, 0x0);
3016ec21e2ecSJeff Kirsher
3017ec21e2ecSJeff Kirsher /* If we have extended hash tables, we need to
3018ec21e2ecSJeff Kirsher * clear the exact match registers to prepare for
30190977f817SJan Ceuleers * setting them
30200977f817SJan Ceuleers */
3021ec21e2ecSJeff Kirsher if (priv->extended_hash) {
3022ec21e2ecSJeff Kirsher em_num = GFAR_EM_NUM + 1;
3023ec21e2ecSJeff Kirsher gfar_clear_exact_match(dev);
3024ec21e2ecSJeff Kirsher idx = 1;
3025ec21e2ecSJeff Kirsher } else {
3026ec21e2ecSJeff Kirsher idx = 0;
3027ec21e2ecSJeff Kirsher em_num = 0;
3028ec21e2ecSJeff Kirsher }
3029ec21e2ecSJeff Kirsher
3030ec21e2ecSJeff Kirsher if (netdev_mc_empty(dev))
3031ec21e2ecSJeff Kirsher return;
3032ec21e2ecSJeff Kirsher
3033ec21e2ecSJeff Kirsher /* Parse the list, and set the appropriate bits */
3034ec21e2ecSJeff Kirsher netdev_for_each_mc_addr(ha, dev) {
3035ec21e2ecSJeff Kirsher if (idx < em_num) {
3036ec21e2ecSJeff Kirsher gfar_set_mac_for_addr(dev, idx, ha->addr);
3037ec21e2ecSJeff Kirsher idx++;
3038ec21e2ecSJeff Kirsher } else
3039ec21e2ecSJeff Kirsher gfar_set_hash_for_addr(dev, ha->addr);
3040ec21e2ecSJeff Kirsher }
3041ec21e2ecSJeff Kirsher }
3042ec21e2ecSJeff Kirsher }
3043ec21e2ecSJeff Kirsher
30447d993c5fSArseny Solokha void gfar_mac_reset(struct gfar_private *priv)
30456ce29b0eSClaudiu Manoil {
30466ce29b0eSClaudiu Manoil struct gfar __iomem *regs = priv->gfargrp[0].regs;
30477d993c5fSArseny Solokha u32 tempval;
30486ce29b0eSClaudiu Manoil
30497d993c5fSArseny Solokha /* Reset MAC layer */
30507d993c5fSArseny Solokha gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
30516ce29b0eSClaudiu Manoil
30527d993c5fSArseny Solokha /* We need to delay at least 3 TX clocks */
30537d993c5fSArseny Solokha udelay(3);
30546ce29b0eSClaudiu Manoil
30557d993c5fSArseny Solokha /* the soft reset bit is not self-resetting, so we need to
30567d993c5fSArseny Solokha * clear it before resuming normal operation
30576ce29b0eSClaudiu Manoil */
30587d993c5fSArseny Solokha gfar_write(&regs->maccfg1, 0);
30596ce29b0eSClaudiu Manoil
30607d993c5fSArseny Solokha udelay(3);
30616ce29b0eSClaudiu Manoil
30627d993c5fSArseny Solokha gfar_rx_offload_en(priv);
30636ce29b0eSClaudiu Manoil
30647d993c5fSArseny Solokha /* Initialize the max receive frame/buffer lengths */
30657d993c5fSArseny Solokha gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
30667d993c5fSArseny Solokha gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
3067b4b67f26SScott Wood
30687d993c5fSArseny Solokha /* Initialize the Minimum Frame Length Register */
30697d993c5fSArseny Solokha gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
307045b679c9SMatei Pavaluca
30717d993c5fSArseny Solokha /* Initialize MACCFG2. */
30727d993c5fSArseny Solokha tempval = MACCFG2_INIT_SETTINGS;
307345b679c9SMatei Pavaluca
30747d993c5fSArseny Solokha /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
30757d993c5fSArseny Solokha * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1,
30767d993c5fSArseny Solokha * and by checking RxBD[LG] and discarding larger than MAXFRM.
30777d993c5fSArseny Solokha */
30787d993c5fSArseny Solokha if (gfar_has_errata(priv, GFAR_ERRATA_74))
30797d993c5fSArseny Solokha tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
308045b679c9SMatei Pavaluca
30816ce29b0eSClaudiu Manoil gfar_write(&regs->maccfg2, tempval);
30826ce29b0eSClaudiu Manoil
30837d993c5fSArseny Solokha /* Clear mac addr hash registers */
30847d993c5fSArseny Solokha gfar_write(&regs->igaddr0, 0);
30857d993c5fSArseny Solokha gfar_write(&regs->igaddr1, 0);
30867d993c5fSArseny Solokha gfar_write(&regs->igaddr2, 0);
30877d993c5fSArseny Solokha gfar_write(&regs->igaddr3, 0);
30887d993c5fSArseny Solokha gfar_write(&regs->igaddr4, 0);
30897d993c5fSArseny Solokha gfar_write(&regs->igaddr5, 0);
30907d993c5fSArseny Solokha gfar_write(&regs->igaddr6, 0);
30917d993c5fSArseny Solokha gfar_write(&regs->igaddr7, 0);
30926ce29b0eSClaudiu Manoil
30937d993c5fSArseny Solokha gfar_write(&regs->gaddr0, 0);
30947d993c5fSArseny Solokha gfar_write(&regs->gaddr1, 0);
30957d993c5fSArseny Solokha gfar_write(&regs->gaddr2, 0);
30967d993c5fSArseny Solokha gfar_write(&regs->gaddr3, 0);
30977d993c5fSArseny Solokha gfar_write(&regs->gaddr4, 0);
30987d993c5fSArseny Solokha gfar_write(&regs->gaddr5, 0);
30997d993c5fSArseny Solokha gfar_write(&regs->gaddr6, 0);
31007d993c5fSArseny Solokha gfar_write(&regs->gaddr7, 0);
31017d993c5fSArseny Solokha
31027d993c5fSArseny Solokha if (priv->extended_hash)
31037d993c5fSArseny Solokha gfar_clear_exact_match(priv->ndev);
31047d993c5fSArseny Solokha
31057d993c5fSArseny Solokha gfar_mac_rx_config(priv);
31067d993c5fSArseny Solokha
31077d993c5fSArseny Solokha gfar_mac_tx_config(priv);
31087d993c5fSArseny Solokha
31097d993c5fSArseny Solokha gfar_set_mac_address(priv->ndev);
31107d993c5fSArseny Solokha
31117d993c5fSArseny Solokha gfar_set_multi(priv->ndev);
31127d993c5fSArseny Solokha
31137d993c5fSArseny Solokha /* clear ievent and imask before configuring coalescing */
31147d993c5fSArseny Solokha gfar_ints_disable(priv);
31157d993c5fSArseny Solokha
31167d993c5fSArseny Solokha /* Configure the coalescing support */
31177d993c5fSArseny Solokha gfar_configure_coalescing_all(priv);
31187d993c5fSArseny Solokha }
31197d993c5fSArseny Solokha
31207d993c5fSArseny Solokha static void gfar_hw_init(struct gfar_private *priv)
31217d993c5fSArseny Solokha {
31227d993c5fSArseny Solokha struct gfar __iomem *regs = priv->gfargrp[0].regs;
31237d993c5fSArseny Solokha u32 attrs;
31247d993c5fSArseny Solokha
31257d993c5fSArseny Solokha /* Stop the DMA engine now, in case it was running before
31267d993c5fSArseny Solokha * (The firmware could have used it, and left it running).
31277d993c5fSArseny Solokha */
31287d993c5fSArseny Solokha gfar_halt(priv);
31297d993c5fSArseny Solokha
31307d993c5fSArseny Solokha gfar_mac_reset(priv);
31317d993c5fSArseny Solokha
31327d993c5fSArseny Solokha /* Zero out the rmon mib registers if it has them */
31337d993c5fSArseny Solokha if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
3134e2dbbbe5SEsben Haabendal memset_io(&regs->rmon, 0, offsetof(struct rmon_mib, car1));
31357d993c5fSArseny Solokha
31367d993c5fSArseny Solokha /* Mask off the CAM interrupts */
31377d993c5fSArseny Solokha gfar_write(&regs->rmon.cam1, 0xffffffff);
31387d993c5fSArseny Solokha gfar_write(&regs->rmon.cam2, 0xffffffff);
3139ef094874SEsben Haabendal /* Clear the CAR registers (w1c style) */
3140ef094874SEsben Haabendal gfar_write(&regs->rmon.car1, 0xffffffff);
3141ef094874SEsben Haabendal gfar_write(&regs->rmon.car2, 0xffffffff);
31427d993c5fSArseny Solokha }
31437d993c5fSArseny Solokha
31447d993c5fSArseny Solokha /* Initialize ECNTRL */
31457d993c5fSArseny Solokha gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
31467d993c5fSArseny Solokha
31477d993c5fSArseny Solokha /* Set the extraction length and index */
31487d993c5fSArseny Solokha attrs = ATTRELI_EL(priv->rx_stash_size) |
31497d993c5fSArseny Solokha ATTRELI_EI(priv->rx_stash_index);
31507d993c5fSArseny Solokha
31517d993c5fSArseny Solokha gfar_write(&regs->attreli, attrs);
31527d993c5fSArseny Solokha
31537d993c5fSArseny Solokha /* Start with defaults, and add stashing
31547d993c5fSArseny Solokha * depending on driver parameters
31557d993c5fSArseny Solokha */
31567d993c5fSArseny Solokha attrs = ATTR_INIT_SETTINGS;
31577d993c5fSArseny Solokha
31587d993c5fSArseny Solokha if (priv->bd_stash_en)
31597d993c5fSArseny Solokha attrs |= ATTR_BDSTASH;
31607d993c5fSArseny Solokha
31617d993c5fSArseny Solokha if (priv->rx_stash_size != 0)
31627d993c5fSArseny Solokha attrs |= ATTR_BUFSTASH;
31637d993c5fSArseny Solokha
31647d993c5fSArseny Solokha gfar_write(&regs->attr, attrs);
31657d993c5fSArseny Solokha
31667d993c5fSArseny Solokha /* FIFO configs */
31677d993c5fSArseny Solokha gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
31687d993c5fSArseny Solokha gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
31697d993c5fSArseny Solokha gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
31707d993c5fSArseny Solokha
31717d993c5fSArseny Solokha /* Program the interrupt steering regs, only for MG devices */
31727d993c5fSArseny Solokha if (priv->num_grps > 1)
31737d993c5fSArseny Solokha gfar_write_isrg(priv);
31747d993c5fSArseny Solokha }
31757d993c5fSArseny Solokha
31767d993c5fSArseny Solokha static const struct net_device_ops gfar_netdev_ops = {
31777d993c5fSArseny Solokha .ndo_open = gfar_enet_open,
31787d993c5fSArseny Solokha .ndo_start_xmit = gfar_start_xmit,
31797d993c5fSArseny Solokha .ndo_stop = gfar_close,
31807d993c5fSArseny Solokha .ndo_change_mtu = gfar_change_mtu,
31817d993c5fSArseny Solokha .ndo_set_features = gfar_set_features,
31827d993c5fSArseny Solokha .ndo_set_rx_mode = gfar_set_multi,
31837d993c5fSArseny Solokha .ndo_tx_timeout = gfar_timeout,
3184a7605370SArnd Bergmann .ndo_eth_ioctl = gfar_ioctl,
3185d59a24fdSEsben Haabendal .ndo_get_stats64 = gfar_get_stats64,
31867d993c5fSArseny Solokha .ndo_change_carrier = fixed_phy_change_carrier,
31877d993c5fSArseny Solokha .ndo_set_mac_address = gfar_set_mac_addr,
31887d993c5fSArseny Solokha .ndo_validate_addr = eth_validate_addr,
31897d993c5fSArseny Solokha #ifdef CONFIG_NET_POLL_CONTROLLER
31907d993c5fSArseny Solokha .ndo_poll_controller = gfar_netpoll,
31917d993c5fSArseny Solokha #endif
31927d993c5fSArseny Solokha };
31937d993c5fSArseny Solokha
31947d993c5fSArseny Solokha /* Set up the ethernet device structure, private data,
31957d993c5fSArseny Solokha * and anything else we need before we start
31967d993c5fSArseny Solokha */
31977d993c5fSArseny Solokha static int gfar_probe(struct platform_device *ofdev)
31987d993c5fSArseny Solokha {
31997d993c5fSArseny Solokha struct device_node *np = ofdev->dev.of_node;
32007d993c5fSArseny Solokha struct net_device *dev = NULL;
32017d993c5fSArseny Solokha struct gfar_private *priv = NULL;
32027d993c5fSArseny Solokha int err = 0, i;
32037d993c5fSArseny Solokha
32047d993c5fSArseny Solokha err = gfar_of_init(ofdev, &dev);
32057d993c5fSArseny Solokha
32067d993c5fSArseny Solokha if (err)
32077d993c5fSArseny Solokha return err;
32087d993c5fSArseny Solokha
32097d993c5fSArseny Solokha priv = netdev_priv(dev);
32107d993c5fSArseny Solokha priv->ndev = dev;
32117d993c5fSArseny Solokha priv->ofdev = ofdev;
32127d993c5fSArseny Solokha priv->dev = &ofdev->dev;
32137d993c5fSArseny Solokha SET_NETDEV_DEV(dev, &ofdev->dev);
32147d993c5fSArseny Solokha
32157d993c5fSArseny Solokha INIT_WORK(&priv->reset_task, gfar_reset_task);
32167d993c5fSArseny Solokha
32177d993c5fSArseny Solokha platform_set_drvdata(ofdev, priv);
32187d993c5fSArseny Solokha
32197d993c5fSArseny Solokha gfar_detect_errata(priv);
32207d993c5fSArseny Solokha
32217d993c5fSArseny Solokha /* Set the dev->base_addr to the gfar reg region */
32227d993c5fSArseny Solokha dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
32237d993c5fSArseny Solokha
32247d993c5fSArseny Solokha /* Fill in the dev structure */
32257d993c5fSArseny Solokha dev->watchdog_timeo = TX_TIMEOUT;
32267d993c5fSArseny Solokha /* MTU range: 50 - 9586 */
32277d993c5fSArseny Solokha dev->mtu = 1500;
32287d993c5fSArseny Solokha dev->min_mtu = 50;
32297d993c5fSArseny Solokha dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
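	/* i.e. GFAR_JUMBO_FRAME_SIZE (9600) minus the 14-byte Ethernet
	 * header, giving the 9586 noted above.
	 */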
32307d993c5fSArseny Solokha dev->netdev_ops = &gfar_netdev_ops;
32317d993c5fSArseny Solokha dev->ethtool_ops = &gfar_ethtool_ops;
32327d993c5fSArseny Solokha
32337d993c5fSArseny Solokha /* Register NAPI for each interrupt group */
32347d993c5fSArseny Solokha for (i = 0; i < priv->num_grps; i++) {
32357d993c5fSArseny Solokha netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
3236b48b89f9SJakub Kicinski gfar_poll_rx_sq);
32378d602e1aSJakub Kicinski netif_napi_add_tx_weight(dev, &priv->gfargrp[i].napi_tx,
32387d993c5fSArseny Solokha gfar_poll_tx_sq, 2);
32397d993c5fSArseny Solokha }
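	/* Note: the Rx NAPI contexts above use the default NAPI weight,
	 * while Tx completion NAPI is registered with a small weight (2),
	 * since it only has to reclaim completed descriptors.
	 */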
32407d993c5fSArseny Solokha
32417d993c5fSArseny Solokha if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
32427d993c5fSArseny Solokha dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
32437d993c5fSArseny Solokha NETIF_F_RXCSUM;
32447d993c5fSArseny Solokha dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
32457d993c5fSArseny Solokha NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
32467d993c5fSArseny Solokha }
32477d993c5fSArseny Solokha
32487d993c5fSArseny Solokha if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
32497d993c5fSArseny Solokha dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
32507d993c5fSArseny Solokha NETIF_F_HW_VLAN_CTAG_RX;
32517d993c5fSArseny Solokha dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
32527d993c5fSArseny Solokha }
32537d993c5fSArseny Solokha
32547d993c5fSArseny Solokha dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
32557d993c5fSArseny Solokha
32567d993c5fSArseny Solokha gfar_init_addr_hash_table(priv);
32577d993c5fSArseny Solokha
32587d993c5fSArseny Solokha /* Insert receive time stamps into the padding alignment bytes,
32597d993c5fSArseny Solokha * plus 2 bytes of padding to ensure CPU alignment.
32607d993c5fSArseny Solokha */
32617d993c5fSArseny Solokha if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
32627d993c5fSArseny Solokha priv->padding = 8 + DEFAULT_PADDING;
32637d993c5fSArseny Solokha
32647d993c5fSArseny Solokha if (dev->features & NETIF_F_IP_CSUM ||
32657d993c5fSArseny Solokha priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
3266d6a076d6SClaudiu Manoil dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
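	/* Reserve headroom for the Frame Control Block (plus Tx padding/
	 * alignment bytes) that gets prepended when checksum offload or Tx
	 * timestamping is in use.
	 */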
32677d993c5fSArseny Solokha
32687d993c5fSArseny Solokha /* Initialize some of the rx/tx queue-level parameters */
32697d993c5fSArseny Solokha for (i = 0; i < priv->num_tx_queues; i++) {
32707d993c5fSArseny Solokha priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
32717d993c5fSArseny Solokha priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
32727d993c5fSArseny Solokha priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
32737d993c5fSArseny Solokha priv->tx_queue[i]->txic = DEFAULT_TXIC;
32747d993c5fSArseny Solokha }
32757d993c5fSArseny Solokha
32767d993c5fSArseny Solokha for (i = 0; i < priv->num_rx_queues; i++) {
32777d993c5fSArseny Solokha priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
32787d993c5fSArseny Solokha priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
32797d993c5fSArseny Solokha priv->rx_queue[i]->rxic = DEFAULT_RXIC;
32807d993c5fSArseny Solokha }
32817d993c5fSArseny Solokha
32827d993c5fSArseny Solokha /* Always enable rx filer if available */
32837d993c5fSArseny Solokha priv->rx_filer_enable =
32847d993c5fSArseny Solokha (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
32857d993c5fSArseny Solokha /* Enable most messages by default */
32867d993c5fSArseny Solokha priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
32877d993c5fSArseny Solokha /* use priority h/w tx queue scheduling for single queue devices */
32887d993c5fSArseny Solokha if (priv->num_tx_queues == 1)
32897d993c5fSArseny Solokha priv->prio_sched_en = 1;
32907d993c5fSArseny Solokha
32917d993c5fSArseny Solokha set_bit(GFAR_DOWN, &priv->state);
32927d993c5fSArseny Solokha
32937d993c5fSArseny Solokha gfar_hw_init(priv);
32947d993c5fSArseny Solokha
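	/* With an RMON MIB block present, unmask the Rx-drop (RDRP) carry
	 * bit and enable the counter-overflow interrupt (MSRO), which the
	 * rmon_overflow state below is used to service so the 32-bit
	 * hardware counter can be extended in software.
	 */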
329514870b75SEsben Haabendal if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
329614870b75SEsben Haabendal struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon;
329714870b75SEsben Haabendal
329814870b75SEsben Haabendal spin_lock_init(&priv->rmon_overflow.lock);
329914870b75SEsben Haabendal priv->rmon_overflow.imask = IMASK_MSRO;
330014870b75SEsben Haabendal gfar_write(&rmon->cam1, gfar_read(&rmon->cam1) & ~CAM1_M1RDR);
330114870b75SEsben Haabendal }
330214870b75SEsben Haabendal
33037d993c5fSArseny Solokha /* Carrier starts down, phylib will bring it up */
33047d993c5fSArseny Solokha netif_carrier_off(dev);
33057d993c5fSArseny Solokha
33067d993c5fSArseny Solokha err = register_netdev(dev);
33077d993c5fSArseny Solokha
33087d993c5fSArseny Solokha if (err) {
33097d993c5fSArseny Solokha pr_err("%s: Cannot register net device, aborting\n", dev->name);
33107d993c5fSArseny Solokha goto register_fail;
33117d993c5fSArseny Solokha }
33127d993c5fSArseny Solokha
33137d993c5fSArseny Solokha if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
33147d993c5fSArseny Solokha priv->wol_supported |= GFAR_WOL_MAGIC;
33157d993c5fSArseny Solokha
33167d993c5fSArseny Solokha if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
33177d993c5fSArseny Solokha priv->rx_filer_enable)
33187d993c5fSArseny Solokha priv->wol_supported |= GFAR_WOL_FILER_UCAST;
33197d993c5fSArseny Solokha
33207d993c5fSArseny Solokha device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
33217d993c5fSArseny Solokha
33227d993c5fSArseny Solokha /* fill out IRQ number and name fields */
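	/* e.g. "eth0_g0_tx" / "eth0_g0_rx" / "eth0_g0_er" for group 0 on a
	 * multi-interrupt device; single-interrupt devices simply reuse the
	 * netdev name.
	 */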
33237d993c5fSArseny Solokha for (i = 0; i < priv->num_grps; i++) {
33247d993c5fSArseny Solokha struct gfar_priv_grp *grp = &priv->gfargrp[i];
33257d993c5fSArseny Solokha if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
33267d993c5fSArseny Solokha sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
33277d993c5fSArseny Solokha dev->name, "_g", '0' + i, "_tx");
33287d993c5fSArseny Solokha sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
33297d993c5fSArseny Solokha dev->name, "_g", '0' + i, "_rx");
33307d993c5fSArseny Solokha sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
33317d993c5fSArseny Solokha dev->name, "_g", '0' + i, "_er");
33327d993c5fSArseny Solokha } else
33337d993c5fSArseny Solokha strcpy(gfar_irq(grp, TX)->name, dev->name);
33347d993c5fSArseny Solokha }
33357d993c5fSArseny Solokha
33367d993c5fSArseny Solokha /* Initialize the filer table */
33377d993c5fSArseny Solokha gfar_init_filer_table(priv);
33387d993c5fSArseny Solokha
33397d993c5fSArseny Solokha /* Print out the device info */
33407d993c5fSArseny Solokha netdev_info(dev, "mac: %pM\n", dev->dev_addr);
33417d993c5fSArseny Solokha
33427d993c5fSArseny Solokha /* Even more device info helps when determining which kernel
33437d993c5fSArseny Solokha * provided which set of benchmarks.
33447d993c5fSArseny Solokha */
33457d993c5fSArseny Solokha netdev_info(dev, "Running with NAPI enabled\n");
33467d993c5fSArseny Solokha for (i = 0; i < priv->num_rx_queues; i++)
33477d993c5fSArseny Solokha netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
33487d993c5fSArseny Solokha i, priv->rx_queue[i]->rx_ring_size);
33497d993c5fSArseny Solokha for (i = 0; i < priv->num_tx_queues; i++)
33507d993c5fSArseny Solokha netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
33517d993c5fSArseny Solokha i, priv->tx_queue[i]->tx_ring_size);
33527d993c5fSArseny Solokha
33537d993c5fSArseny Solokha return 0;
33547d993c5fSArseny Solokha
33557d993c5fSArseny Solokha register_fail:
33567d993c5fSArseny Solokha if (of_phy_is_fixed_link(np))
33577d993c5fSArseny Solokha of_phy_deregister_fixed_link(np);
33587d993c5fSArseny Solokha unmap_group_regs(priv);
33597d993c5fSArseny Solokha gfar_free_rx_queues(priv);
33607d993c5fSArseny Solokha gfar_free_tx_queues(priv);
33617d993c5fSArseny Solokha of_node_put(priv->phy_node);
33627d993c5fSArseny Solokha of_node_put(priv->tbi_node);
33637d993c5fSArseny Solokha free_gfar_dev(priv);
33647d993c5fSArseny Solokha return err;
33657d993c5fSArseny Solokha }
33667d993c5fSArseny Solokha
33674be0ebc3SUwe Kleine-König static void gfar_remove(struct platform_device *ofdev)
33687d993c5fSArseny Solokha {
33697d993c5fSArseny Solokha struct gfar_private *priv = platform_get_drvdata(ofdev);
33707d993c5fSArseny Solokha struct device_node *np = ofdev->dev.of_node;
33717d993c5fSArseny Solokha
33727d993c5fSArseny Solokha of_node_put(priv->phy_node);
33737d993c5fSArseny Solokha of_node_put(priv->tbi_node);
33747d993c5fSArseny Solokha
33757d993c5fSArseny Solokha unregister_netdev(priv->ndev);
33767d993c5fSArseny Solokha
33777d993c5fSArseny Solokha if (of_phy_is_fixed_link(np))
33787d993c5fSArseny Solokha of_phy_deregister_fixed_link(np);
33797d993c5fSArseny Solokha
33807d993c5fSArseny Solokha unmap_group_regs(priv);
33817d993c5fSArseny Solokha gfar_free_rx_queues(priv);
33827d993c5fSArseny Solokha gfar_free_tx_queues(priv);
33837d993c5fSArseny Solokha free_gfar_dev(priv);
33847d993c5fSArseny Solokha }
33857d993c5fSArseny Solokha
33867d993c5fSArseny Solokha #ifdef CONFIG_PM
33877d993c5fSArseny Solokha
33887d993c5fSArseny Solokha static void __gfar_filer_disable(struct gfar_private *priv)
33897d993c5fSArseny Solokha {
33907d993c5fSArseny Solokha struct gfar __iomem *regs = priv->gfargrp[0].regs;
33917d993c5fSArseny Solokha u32 temp;
33927d993c5fSArseny Solokha
33937d993c5fSArseny Solokha temp = gfar_read(&regs->rctrl);
33947d993c5fSArseny Solokha temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
33957d993c5fSArseny Solokha gfar_write(&regs->rctrl, temp);
33967d993c5fSArseny Solokha }
33977d993c5fSArseny Solokha
33987d993c5fSArseny Solokha static void __gfar_filer_enable(struct gfar_private *priv)
33997d993c5fSArseny Solokha {
34007d993c5fSArseny Solokha struct gfar __iomem *regs = priv->gfargrp[0].regs;
34017d993c5fSArseny Solokha u32 temp;
34027d993c5fSArseny Solokha
34037d993c5fSArseny Solokha temp = gfar_read(&regs->rctrl);
34047d993c5fSArseny Solokha temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
34057d993c5fSArseny Solokha gfar_write(&regs->rctrl, temp);
34067d993c5fSArseny Solokha }
34077d993c5fSArseny Solokha
34087d993c5fSArseny Solokha /* Filer rules implementing wol capabilities */
34097d993c5fSArseny Solokha static void gfar_filer_config_wol(struct gfar_private *priv)
34107d993c5fSArseny Solokha {
34117d993c5fSArseny Solokha unsigned int i;
34127d993c5fSArseny Solokha u32 rqfcr;
34137d993c5fSArseny Solokha
34147d993c5fSArseny Solokha __gfar_filer_disable(priv);
34157d993c5fSArseny Solokha
34167d993c5fSArseny Solokha /* clear the filer table, reject any packet by default */
34177d993c5fSArseny Solokha rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
34187d993c5fSArseny Solokha for (i = 0; i <= MAX_FILER_IDX; i++)
34197d993c5fSArseny Solokha gfar_write_filer(priv, i, rqfcr, 0);
34207d993c5fSArseny Solokha
34217d993c5fSArseny Solokha i = 0;
34227d993c5fSArseny Solokha if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
34237d993c5fSArseny Solokha /* unicast packet, accept it */
34247d993c5fSArseny Solokha struct net_device *ndev = priv->ndev;
34257d993c5fSArseny Solokha /* get the default rx queue index */
34267d993c5fSArseny Solokha u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
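		/* The 6-byte destination MAC is matched with two exact-match
		 * rules: bytes 0-2 against the DAH property (AND-ed with the
		 * following rule) and bytes 3-5 against DAL.
		 */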
34277d993c5fSArseny Solokha u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
34287d993c5fSArseny Solokha (ndev->dev_addr[1] << 8) |
34297d993c5fSArseny Solokha ndev->dev_addr[2];
34307d993c5fSArseny Solokha
34317d993c5fSArseny Solokha rqfcr = (qindex << 10) | RQFCR_AND |
34327d993c5fSArseny Solokha RQFCR_CMP_EXACT | RQFCR_PID_DAH;
34337d993c5fSArseny Solokha
34347d993c5fSArseny Solokha gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
34357d993c5fSArseny Solokha
34367d993c5fSArseny Solokha dest_mac_addr = (ndev->dev_addr[3] << 16) |
34377d993c5fSArseny Solokha (ndev->dev_addr[4] << 8) |
34387d993c5fSArseny Solokha ndev->dev_addr[5];
34397d993c5fSArseny Solokha rqfcr = (qindex << 10) | RQFCR_GPI |
34407d993c5fSArseny Solokha RQFCR_CMP_EXACT | RQFCR_PID_DAL;
34417d993c5fSArseny Solokha gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
34427d993c5fSArseny Solokha }
34437d993c5fSArseny Solokha
34447d993c5fSArseny Solokha __gfar_filer_enable(priv);
34457d993c5fSArseny Solokha }
34467d993c5fSArseny Solokha
34477d993c5fSArseny Solokha static void gfar_filer_restore_table(struct gfar_private *priv)
34487d993c5fSArseny Solokha {
34497d993c5fSArseny Solokha u32 rqfcr, rqfpr;
34507d993c5fSArseny Solokha unsigned int i;
34517d993c5fSArseny Solokha
34527d993c5fSArseny Solokha __gfar_filer_disable(priv);
34537d993c5fSArseny Solokha
34547d993c5fSArseny Solokha for (i = 0; i <= MAX_FILER_IDX; i++) {
34557d993c5fSArseny Solokha rqfcr = priv->ftp_rqfcr[i];
34567d993c5fSArseny Solokha rqfpr = priv->ftp_rqfpr[i];
34577d993c5fSArseny Solokha gfar_write_filer(priv, i, rqfcr, rqfpr);
34587d993c5fSArseny Solokha }
34597d993c5fSArseny Solokha
34607d993c5fSArseny Solokha __gfar_filer_enable(priv);
34617d993c5fSArseny Solokha }
34627d993c5fSArseny Solokha
34637d993c5fSArseny Solokha /* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
34647d993c5fSArseny Solokha static void gfar_start_wol_filer(struct gfar_private *priv)
34657d993c5fSArseny Solokha {
34667d993c5fSArseny Solokha struct gfar __iomem *regs = priv->gfargrp[0].regs;
34677d993c5fSArseny Solokha u32 tempval;
34687d993c5fSArseny Solokha int i = 0;
34697d993c5fSArseny Solokha
34707d993c5fSArseny Solokha /* Enable Rx hw queues */
34717d993c5fSArseny Solokha gfar_write(&regs->rqueue, priv->rqueue);
34727d993c5fSArseny Solokha
34737d993c5fSArseny Solokha /* Initialize DMACTRL to have WWR and WOP */
34747d993c5fSArseny Solokha tempval = gfar_read(&regs->dmactrl);
34757d993c5fSArseny Solokha tempval |= DMACTRL_INIT_SETTINGS;
34767d993c5fSArseny Solokha gfar_write(&regs->dmactrl, tempval);
34777d993c5fSArseny Solokha
34787d993c5fSArseny Solokha /* Make sure we aren't stopped */
34797d993c5fSArseny Solokha tempval = gfar_read(&regs->dmactrl);
34807d993c5fSArseny Solokha tempval &= ~DMACTRL_GRS;
34817d993c5fSArseny Solokha gfar_write(&regs->dmactrl, tempval);
34827d993c5fSArseny Solokha
34837d993c5fSArseny Solokha for (i = 0; i < priv->num_grps; i++) {
34847d993c5fSArseny Solokha regs = priv->gfargrp[i].regs;
34857d993c5fSArseny Solokha /* Clear RHLT, so that the DMA starts polling now */
34867d993c5fSArseny Solokha gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
34877d993c5fSArseny Solokha /* enable the Filer General Purpose Interrupt */
34887d993c5fSArseny Solokha gfar_write(&regs->imask, IMASK_FGPI);
34897d993c5fSArseny Solokha }
34907d993c5fSArseny Solokha
34917d993c5fSArseny Solokha /* Enable Rx DMA */
34927d993c5fSArseny Solokha tempval = gfar_read(&regs->maccfg1);
34937d993c5fSArseny Solokha tempval |= MACCFG1_RX_EN;
34947d993c5fSArseny Solokha gfar_write(&regs->maccfg1, tempval);
34957d993c5fSArseny Solokha }
34967d993c5fSArseny Solokha
34977d993c5fSArseny Solokha static int gfar_suspend(struct device *dev)
34987d993c5fSArseny Solokha {
34997d993c5fSArseny Solokha struct gfar_private *priv = dev_get_drvdata(dev);
35007d993c5fSArseny Solokha struct net_device *ndev = priv->ndev;
35017d993c5fSArseny Solokha struct gfar __iomem *regs = priv->gfargrp[0].regs;
35027d993c5fSArseny Solokha u32 tempval;
35037d993c5fSArseny Solokha u16 wol = priv->wol_opts;
35047d993c5fSArseny Solokha
35057d993c5fSArseny Solokha if (!netif_running(ndev))
35067d993c5fSArseny Solokha return 0;
35077d993c5fSArseny Solokha
35087d993c5fSArseny Solokha disable_napi(priv);
35097d993c5fSArseny Solokha netif_tx_lock(ndev);
35107d993c5fSArseny Solokha netif_device_detach(ndev);
35117d993c5fSArseny Solokha netif_tx_unlock(ndev);
35127d993c5fSArseny Solokha
35137d993c5fSArseny Solokha gfar_halt(priv);
35147d993c5fSArseny Solokha
35157d993c5fSArseny Solokha if (wol & GFAR_WOL_MAGIC) {
35167d993c5fSArseny Solokha /* Enable interrupt on Magic Packet */
35177d993c5fSArseny Solokha gfar_write(&regs->imask, IMASK_MAG);
35187d993c5fSArseny Solokha
35197d993c5fSArseny Solokha /* Enable Magic Packet mode */
35207d993c5fSArseny Solokha tempval = gfar_read(&regs->maccfg2);
35217d993c5fSArseny Solokha tempval |= MACCFG2_MPEN;
35227d993c5fSArseny Solokha gfar_write(&regs->maccfg2, tempval);
35237d993c5fSArseny Solokha
35247d993c5fSArseny Solokha /* re-enable the Rx block */
35257d993c5fSArseny Solokha tempval = gfar_read(&regs->maccfg1);
35267d993c5fSArseny Solokha tempval |= MACCFG1_RX_EN;
35277d993c5fSArseny Solokha gfar_write(&regs->maccfg1, tempval);
35287d993c5fSArseny Solokha
35297d993c5fSArseny Solokha } else if (wol & GFAR_WOL_FILER_UCAST) {
35307d993c5fSArseny Solokha gfar_filer_config_wol(priv);
35317d993c5fSArseny Solokha gfar_start_wol_filer(priv);
35327d993c5fSArseny Solokha
35337d993c5fSArseny Solokha } else {
35347d993c5fSArseny Solokha phy_stop(ndev->phydev);
35357d993c5fSArseny Solokha }
35367d993c5fSArseny Solokha
35377d993c5fSArseny Solokha return 0;
35387d993c5fSArseny Solokha }
35397d993c5fSArseny Solokha
35407d993c5fSArseny Solokha static int gfar_resume(struct device *dev)
35417d993c5fSArseny Solokha {
35427d993c5fSArseny Solokha struct gfar_private *priv = dev_get_drvdata(dev);
35437d993c5fSArseny Solokha struct net_device *ndev = priv->ndev;
35447d993c5fSArseny Solokha struct gfar __iomem *regs = priv->gfargrp[0].regs;
35457d993c5fSArseny Solokha u32 tempval;
35467d993c5fSArseny Solokha u16 wol = priv->wol_opts;
35477d993c5fSArseny Solokha
35487d993c5fSArseny Solokha if (!netif_running(ndev))
35497d993c5fSArseny Solokha return 0;
35507d993c5fSArseny Solokha
35517d993c5fSArseny Solokha if (wol & GFAR_WOL_MAGIC) {
35527d993c5fSArseny Solokha /* Disable Magic Packet mode */
35537d993c5fSArseny Solokha tempval = gfar_read(&regs->maccfg2);
35547d993c5fSArseny Solokha tempval &= ~MACCFG2_MPEN;
35557d993c5fSArseny Solokha gfar_write(&regs->maccfg2, tempval);
35567d993c5fSArseny Solokha
35577d993c5fSArseny Solokha } else if (wol & GFAR_WOL_FILER_UCAST) {
35587d993c5fSArseny Solokha /* need to stop rx only, tx is already down */
35597d993c5fSArseny Solokha gfar_halt(priv);
35607d993c5fSArseny Solokha gfar_filer_restore_table(priv);
35617d993c5fSArseny Solokha
35627d993c5fSArseny Solokha } else {
35637d993c5fSArseny Solokha phy_start(ndev->phydev);
35647d993c5fSArseny Solokha }
35657d993c5fSArseny Solokha
35667d993c5fSArseny Solokha gfar_start(priv);
35677d993c5fSArseny Solokha
35687d993c5fSArseny Solokha netif_device_attach(ndev);
35697d993c5fSArseny Solokha enable_napi(priv);
35707d993c5fSArseny Solokha
35717d993c5fSArseny Solokha return 0;
35727d993c5fSArseny Solokha }
35737d993c5fSArseny Solokha
35747d993c5fSArseny Solokha static int gfar_restore(struct device *dev)
35757d993c5fSArseny Solokha {
35767d993c5fSArseny Solokha struct gfar_private *priv = dev_get_drvdata(dev);
35777d993c5fSArseny Solokha struct net_device *ndev = priv->ndev;
35787d993c5fSArseny Solokha
35797d993c5fSArseny Solokha if (!netif_running(ndev)) {
35807d993c5fSArseny Solokha netif_device_attach(ndev);
35817d993c5fSArseny Solokha
35827d993c5fSArseny Solokha return 0;
35837d993c5fSArseny Solokha }
35847d993c5fSArseny Solokha
35857d993c5fSArseny Solokha gfar_init_bds(ndev);
35867d993c5fSArseny Solokha
35877d993c5fSArseny Solokha gfar_mac_reset(priv);
35887d993c5fSArseny Solokha
35897d993c5fSArseny Solokha gfar_init_tx_rx_base(priv);
35907d993c5fSArseny Solokha
35917d993c5fSArseny Solokha gfar_start(priv);
35927d993c5fSArseny Solokha
35936ce29b0eSClaudiu Manoil priv->oldlink = 0;
35946ce29b0eSClaudiu Manoil priv->oldspeed = 0;
35956ce29b0eSClaudiu Manoil priv->oldduplex = -1;
35967d993c5fSArseny Solokha
35977d993c5fSArseny Solokha if (ndev->phydev)
35987d993c5fSArseny Solokha phy_start(ndev->phydev);
35997d993c5fSArseny Solokha
36007d993c5fSArseny Solokha netif_device_attach(ndev);
36017d993c5fSArseny Solokha enable_napi(priv);
36027d993c5fSArseny Solokha
36037d993c5fSArseny Solokha return 0;
36046ce29b0eSClaudiu Manoil }
36056ce29b0eSClaudiu Manoil
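/* suspend/resume also serve as hibernation freeze/thaw; restore (run after
 * a hibernation image has been loaded) additionally rebuilds the BD rings
 * and resets the MAC before restarting.
 */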
36067d993c5fSArseny Solokha static const struct dev_pm_ops gfar_pm_ops = {
36077d993c5fSArseny Solokha .suspend = gfar_suspend,
36087d993c5fSArseny Solokha .resume = gfar_resume,
36097d993c5fSArseny Solokha .freeze = gfar_suspend,
36107d993c5fSArseny Solokha .thaw = gfar_resume,
36117d993c5fSArseny Solokha .restore = gfar_restore,
36127d993c5fSArseny Solokha };
36137d993c5fSArseny Solokha
36147d993c5fSArseny Solokha #define GFAR_PM_OPS (&gfar_pm_ops)
36157d993c5fSArseny Solokha
36167d993c5fSArseny Solokha #else
36177d993c5fSArseny Solokha
36187d993c5fSArseny Solokha #define GFAR_PM_OPS NULL
36197d993c5fSArseny Solokha
36207d993c5fSArseny Solokha #endif
36216ce29b0eSClaudiu Manoil
362294e5a2a8SFabian Frederick static const struct of_device_id gfar_match[] =
3623ec21e2ecSJeff Kirsher {
3624ec21e2ecSJeff Kirsher {
3625ec21e2ecSJeff Kirsher .type = "network",
3626ec21e2ecSJeff Kirsher .compatible = "gianfar",
3627ec21e2ecSJeff Kirsher },
3628ec21e2ecSJeff Kirsher {
3629ec21e2ecSJeff Kirsher .compatible = "fsl,etsec2",
3630ec21e2ecSJeff Kirsher },
3631ec21e2ecSJeff Kirsher {},
3632ec21e2ecSJeff Kirsher };
3633ec21e2ecSJeff Kirsher MODULE_DEVICE_TABLE(of, gfar_match);
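/* Binding example (hypothetical device tree node; the unit address, reg
 * values and any omitted properties are illustrative only):
 *
 *	ethernet@24000 {
 *		device_type = "network";
 *		compatible = "gianfar";
 *		reg = <0x24000 0x1000>;
 *	};
 */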
3634ec21e2ecSJeff Kirsher
3635ec21e2ecSJeff Kirsher /* Structure for a device driver */
3636ec21e2ecSJeff Kirsher static struct platform_driver gfar_driver = {
3637ec21e2ecSJeff Kirsher .driver = {
3638ec21e2ecSJeff Kirsher .name = "fsl-gianfar",
3639ec21e2ecSJeff Kirsher .pm = GFAR_PM_OPS,
3640ec21e2ecSJeff Kirsher .of_match_table = gfar_match,
3641ec21e2ecSJeff Kirsher },
3642ec21e2ecSJeff Kirsher .probe = gfar_probe,
36434be0ebc3SUwe Kleine-König .remove_new = gfar_remove,
3644ec21e2ecSJeff Kirsher };
3645ec21e2ecSJeff Kirsher
3646db62f684SAxel Lin module_platform_driver(gfar_driver);
3647