xref: /openbmc/linux/drivers/net/ethernet/freescale/enetc/enetc.c (revision d4fd0404c1c95b17880f254ebfee3485693fa8ba)
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */

#include "enetc.h"
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/of_mdio.h>

/* ENETC overhead: optional extension BD + 1 BD gap */
#define ENETC_TXBDS_NEEDED(val)	((val) + 2)
/* max # of chained Tx BDs is 15, including head and extension BD */
#define ENETC_MAX_SKB_FRAGS	13
#define ENETC_TXBDS_MAX_NEEDED	ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
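/* Worked example of the worst case: a linear head plus ENETC_MAX_SKB_FRAGS
 * fragments needs 13 + 1 = 14 data BDs, so ENETC_TXBDS_MAX_NEEDED evaluates
 * to 14 + 2 = 16: one BD for the optional extension descriptor and one BD
 * kept as a gap so the producer index never catches up with the consumer.
 */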

static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb);

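/* Transmit entry point: selects the Tx ring from the skb queue mapping,
 * linearizes skbs with too many fragments, and stops the subqueue early
 * whenever fewer than ENETC_TXBDS_MAX_NEEDED descriptors remain, so the
 * next frame is guaranteed to fit without a mid-frame ring-full condition.
 */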
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_bdr *tx_ring;
	int count;

	tx_ring = priv->tx_ring[skb->queue_mapping];

	if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
		if (unlikely(skb_linearize(skb)))
			goto drop_packet_err;

	count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
		netif_stop_subqueue(ndev, tx_ring->index);
		return NETDEV_TX_BUSY;
	}

	count = enetc_map_tx_buffs(tx_ring, skb);
	if (unlikely(!count))
		goto drop_packet_err;

	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
		netif_stop_subqueue(ndev, tx_ring->index);

	return NETDEV_TX_OK;

drop_packet_err:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

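/* Parse the skb for Tx checksum offload. The hardware needs the L3 header
 * start/size and the L4 protocol, keyed off csum_offset; anything other
 * than TCP or UDP falls back to software checksumming via
 * skb_checksum_help() and leaves the BD offload fields untouched.
 */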
static bool enetc_tx_csum(struct sk_buff *skb, union enetc_tx_bd *txbd)
{
	int l3_start, l3_hsize;
	u16 l3_flags, l4_flags;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return false;

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		l4_flags = ENETC_TXBD_L4_TCP;
		break;
	case offsetof(struct udphdr, check):
		l4_flags = ENETC_TXBD_L4_UDP;
		break;
	default:
		skb_checksum_help(skb);
		return false;
	}

	l3_start = skb_network_offset(skb);
	l3_hsize = skb_network_header_len(skb);

	l3_flags = 0;
	if (skb->protocol == htons(ETH_P_IPV6))
		l3_flags = ENETC_TXBD_L3_IPV6;

	/* write BD fields */
	txbd->l3_csoff = enetc_txbd_l3_csoff(l3_start, l3_hsize, l3_flags);
	txbd->l4_csoff = l4_flags;

	return true;
}

static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
				struct enetc_tx_swbd *tx_swbd)
{
	if (tx_swbd->is_dma_page)
		dma_unmap_page(tx_ring->dev, tx_swbd->dma,
			       tx_swbd->len, DMA_TO_DEVICE);
	else
		dma_unmap_single(tx_ring->dev, tx_swbd->dma,
				 tx_swbd->len, DMA_TO_DEVICE);
	tx_swbd->dma = 0;
}

static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
			      struct enetc_tx_swbd *tx_swbd)
{
	if (tx_swbd->dma)
		enetc_unmap_tx_buff(tx_ring, tx_swbd);

	if (tx_swbd->skb) {
		dev_kfree_skb_any(tx_swbd->skb);
		tx_swbd->skb = NULL;
	}
}

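/* Build the BD chain for one frame: a head BD for the linear part, an
 * optional extension BD when VLAN insertion or Tx timestamping is requested,
 * then one BD per page fragment. The final BD gets the 'F' (final) flag, the
 * skb pointer is parked on its software BD for completion handling, and the
 * producer index write to tpir hands the chain to the hardware. Returns the
 * number of BDs used, or 0 on DMA mapping failure.
 */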
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
	struct enetc_tx_swbd *tx_swbd;
	struct skb_frag_struct *frag;
	int len = skb_headlen(skb);
	union enetc_tx_bd temp_bd;
	union enetc_tx_bd *txbd;
	bool do_vlan, do_tstamp;
	int i, count = 0;
	unsigned int f;
	dma_addr_t dma;
	u8 flags = 0;

	i = tx_ring->next_to_use;
	txbd = ENETC_TXBD(*tx_ring, i);
	prefetchw(txbd);

	dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
		goto dma_err;

	temp_bd.addr = cpu_to_le64(dma);
	temp_bd.buf_len = cpu_to_le16(len);
	temp_bd.lstatus = 0;

	tx_swbd = &tx_ring->tx_swbd[i];
	tx_swbd->dma = dma;
	tx_swbd->len = len;
	tx_swbd->is_dma_page = 0;
	count++;

	do_vlan = skb_vlan_tag_present(skb);
	do_tstamp = skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;

	if (do_vlan || do_tstamp)
		flags |= ENETC_TXBD_FLAGS_EX;

	if (enetc_tx_csum(skb, &temp_bd))
		flags |= ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS;

	/* first BD needs frm_len and offload flags set */
	temp_bd.frm_len = cpu_to_le16(skb->len);
	temp_bd.flags = flags;

	if (flags & ENETC_TXBD_FLAGS_EX) {
		u8 e_flags = 0;

		*txbd = temp_bd;
		enetc_clear_tx_bd(&temp_bd);

		/* add extension BD for VLAN and/or timestamping */
		flags = 0;
		tx_swbd++;
		txbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = tx_ring->tx_swbd;
			txbd = ENETC_TXBD(*tx_ring, 0);
		}
		prefetchw(txbd);

		if (do_vlan) {
			temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
			temp_bd.ext.tpid = 0; /* C-TAG */
			e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
		}

		if (do_tstamp) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
		}

		temp_bd.ext.e_flags = e_flags;
		count++;
	}

	frag = &skb_shinfo(skb)->frags[0];
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
		len = skb_frag_size(frag);
		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_err;

		*txbd = temp_bd;
		enetc_clear_tx_bd(&temp_bd);

		flags = 0;
		tx_swbd++;
		txbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = tx_ring->tx_swbd;
			txbd = ENETC_TXBD(*tx_ring, 0);
		}
		prefetchw(txbd);

		temp_bd.addr = cpu_to_le64(dma);
		temp_bd.buf_len = cpu_to_le16(len);

		tx_swbd->dma = dma;
		tx_swbd->len = len;
		tx_swbd->is_dma_page = 1;
		count++;
	}

	/* last BD needs 'F' bit set */
	flags |= ENETC_TXBD_FLAGS_F;
	temp_bd.flags = flags;
	*txbd = temp_bd;

	tx_ring->tx_swbd[i].skb = skb;

	enetc_bdr_idx_inc(tx_ring, &i);
	tx_ring->next_to_use = i;

	/* let H/W know BD ring has been updated */
	enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */

	return count;

dma_err:
	dev_err(tx_ring->dev, "DMA map error\n");

	do {
		tx_swbd = &tx_ring->tx_swbd[i];
		enetc_free_tx_skb(tx_ring, tx_swbd);
		if (i == 0)
			i = tx_ring->bd_count;
		i--;
	} while (count--);

	return 0;
}

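/* MSI-X handler: masks the Rx interrupt and every Tx interrupt owned by
 * this vector, then defers the real work to NAPI. The sources are
 * re-enabled in enetc_poll() once the budget is no longer being exhausted.
 */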
static irqreturn_t enetc_msix(int irq, void *data)
{
	struct enetc_int_vector	*v = data;
	int i;

	/* disable interrupts */
	enetc_wr_reg(v->rbier, 0);

	for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
		enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);

	napi_schedule_irqoff(&v->napi);

	return IRQ_HANDLED;
}

static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget);
static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
			       struct napi_struct *napi, int work_limit);

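/* NAPI poll: cleans every Tx ring owned by the vector and then its Rx ring.
 * Returning the full budget keeps polling scheduled; only when both
 * directions finished early does it call napi_complete_done() and re-enable
 * the interrupt sources that enetc_msix() masked.
 */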
static int enetc_poll(struct napi_struct *napi, int budget)
{
	struct enetc_int_vector
		*v = container_of(napi, struct enetc_int_vector, napi);
	bool complete = true;
	int work_done;
	int i;

	for (i = 0; i < v->count_tx_rings; i++)
		if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
			complete = false;

	work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget);
	if (work_done == budget)
		complete = false;

	if (!complete)
		return budget;

	napi_complete_done(napi, work_done);

	/* enable interrupts */
	enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);

	for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
		enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
			     ENETC_TBIER_TXTIE);

	return work_done;
}

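/* Number of BDs the hardware has completed: the distance from the software
 * index ci to the hardware consumer index read from tcir, modulo the ring
 * size. For example, with a hypothetical bd_count of 256, ci = 250 and a
 * hardware index of 4, this yields 256 - 250 + 4 = 10 completed BDs.
 */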
static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
{
	int pi = enetc_rd_reg(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;

	return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
}

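/* Tx completion: walks finished BDs, unmaps their buffers and frees the skb
 * on the last BD of each frame (detected by the skb pointer parked there at
 * map time). Wakes the subqueue once enough BDs are free again, and returns
 * true when it ran out of work before hitting ENETC_DEFAULT_TX_WORK.
 */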
static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
{
	struct net_device *ndev = tx_ring->ndev;
	int tx_frm_cnt = 0, tx_byte_cnt = 0;
	struct enetc_tx_swbd *tx_swbd;
	int i, bds_to_clean;

	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->tx_swbd[i];
	bds_to_clean = enetc_bd_ready_count(tx_ring, i);

	while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
		bool is_eof = !!tx_swbd->skb;

		enetc_unmap_tx_buff(tx_ring, tx_swbd);
		if (is_eof) {
			napi_consume_skb(tx_swbd->skb, napi_budget);
			tx_swbd->skb = NULL;
		}

		tx_byte_cnt += tx_swbd->len;

		bds_to_clean--;
		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = tx_ring->tx_swbd;
		}

		/* BD iteration loop end */
		if (is_eof) {
			tx_frm_cnt++;
			/* re-arm interrupt source */
			enetc_wr_reg(tx_ring->idr, BIT(tx_ring->index) |
				     BIT(16 + tx_ring->index));
		}

		if (unlikely(!bds_to_clean))
			bds_to_clean = enetc_bd_ready_count(tx_ring, i);
	}

	tx_ring->next_to_clean = i;
	tx_ring->stats.packets += tx_frm_cnt;
	tx_ring->stats.bytes += tx_byte_cnt;

	if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
		     __netif_subqueue_stopped(ndev, tx_ring->index) &&
		     (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
		netif_wake_subqueue(ndev, tx_ring->index);
	}

	return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
}

static bool enetc_new_page(struct enetc_bdr *rx_ring,
			   struct enetc_rx_swbd *rx_swbd)
{
	struct page *page;
	dma_addr_t addr;

	page = dev_alloc_page();
	if (unlikely(!page))
		return false;

	addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
		__free_page(page);

		return false;
	}

	rx_swbd->dma = addr;
	rx_swbd->page = page;
	rx_swbd->page_offset = ENETC_RXB_PAD;

	return true;
}

static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
{
	struct enetc_rx_swbd *rx_swbd;
	union enetc_rx_bd *rxbd;
	int i, j;

	i = rx_ring->next_to_use;
	rx_swbd = &rx_ring->rx_swbd[i];
	rxbd = ENETC_RXBD(*rx_ring, i);

	for (j = 0; j < buff_cnt; j++) {
		/* try reuse page */
		if (unlikely(!rx_swbd->page)) {
			if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
				rx_ring->stats.rx_alloc_errs++;
				break;
			}
		}

		/* update RxBD */
		rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
					   rx_swbd->page_offset);
		/* clear 'R' as well */
		rxbd->r.lstatus = 0;

		rx_swbd++;
		rxbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			rx_swbd = rx_ring->rx_swbd;
			rxbd = ENETC_RXBD(*rx_ring, 0);
		}
	}

	if (likely(j)) {
		rx_ring->next_to_alloc = i; /* keep track from page reuse */
		rx_ring->next_to_use = i;
		/* update ENETC's consumer index */
		enetc_wr_reg(rx_ring->rcir, i);
	}

	return j;
}

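/* Propagate Rx offload results from the BD into the skb. The inverted
 * inet_csum value from the BD is handed to the stack as CHECKSUM_COMPLETE,
 * i.e. a checksum over the whole received frame, and an extracted VLAN tag
 * is pushed into the skb (assumed here to carry a standard 802.1Q TPID).
 */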
static void enetc_get_offloads(struct enetc_bdr *rx_ring,
			       union enetc_rx_bd *rxbd, struct sk_buff *skb)
{
	/* TODO: add tstamp, hashing */
	if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
		u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);

		skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
		skb->ip_summed = CHECKSUM_COMPLETE;
	}

	/* copy VLAN to skb, if one is extracted, for now we assume it's a
	 * standard TPID, but HW also supports custom values
	 */
	if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(rxbd->r.vlan_opt));
}

static void enetc_process_skb(struct enetc_bdr *rx_ring,
			      struct sk_buff *skb)
{
	skb_record_rx_queue(skb, rx_ring->index);
	skb->protocol = eth_type_trans(skb, rx_ring->ndev);
}

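/* Rx buffer recycling: each page is split into two half-page buffers. While
 * the stack still holds a reference to one half, the driver flips
 * page_offset to the other half, bumps the refcount and requeues the page,
 * so a page is only unmapped and reallocated once it stops being reusable
 * (pfmemalloc pages, or a refcount above one).
 */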
static bool enetc_page_reusable(struct page *page)
{
	return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
}

static void enetc_reuse_page(struct enetc_bdr *rx_ring,
			     struct enetc_rx_swbd *old)
{
	struct enetc_rx_swbd *new;

	new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];

	/* next buf that may reuse a page */
	enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);

	/* copy page reference */
	*new = *old;
}

static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
					       int i, u16 size)
{
	struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];

	dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
				      rx_swbd->page_offset,
				      size, DMA_FROM_DEVICE);
	return rx_swbd;
}

static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
			      struct enetc_rx_swbd *rx_swbd)
{
	if (likely(enetc_page_reusable(rx_swbd->page))) {
		rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
		page_ref_inc(rx_swbd->page);

		enetc_reuse_page(rx_ring, rx_swbd);

		/* sync for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
						 rx_swbd->page_offset,
						 ENETC_RXB_DMA_SIZE,
						 DMA_FROM_DEVICE);
	} else {
		dma_unmap_page(rx_ring->dev, rx_swbd->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	rx_swbd->page = NULL;
}

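/* Build the skb head directly on top of the Rx buffer with build_skb(),
 * avoiding a copy: ENETC_RXB_PAD bytes of headroom precede the DMA area and
 * ENETC_RXB_TRUESIZE covers the half-page the buffer lives in. The consumed
 * buffer is then recycled via enetc_put_rx_buff().
 */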
static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
						int i, u16 size)
{
	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
	struct sk_buff *skb;
	void *ba;

	ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
	skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE);
	if (unlikely(!skb)) {
		rx_ring->stats.rx_alloc_errs++;
		return NULL;
	}

	skb_reserve(skb, ENETC_RXB_PAD);
	__skb_put(skb, size);

	enetc_put_rx_buff(rx_ring, rx_swbd);

	return skb;
}

static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
				     u16 size, struct sk_buff *skb)
{
	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
			rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);

	enetc_put_rx_buff(rx_ring, rx_swbd);
}

#define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */

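/* Rx path: refills the ring in bundles of ENETC_RXBD_BUNDLE BDs, then
 * consumes completed descriptors. A non-zero lstatus marks a ready BD;
 * dma_rmb() orders the status read against the other BD fields. Frames may
 * span several BDs (the 'F' bit marks the final one), errored frames are
 * dropped BD by BD, and good skbs are passed up via napi_gro_receive().
 */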
static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
			       struct napi_struct *napi, int work_limit)
{
	int rx_frm_cnt = 0, rx_byte_cnt = 0;
	int cleaned_cnt, i;

	cleaned_cnt = enetc_bd_unused(rx_ring);
	/* next descriptor to process */
	i = rx_ring->next_to_clean;

	while (likely(rx_frm_cnt < work_limit)) {
		union enetc_rx_bd *rxbd;
		struct sk_buff *skb;
		u32 bd_status;
		u16 size;

		if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
			int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);

			cleaned_cnt -= count;
		}

		rxbd = ENETC_RXBD(*rx_ring, i);
		bd_status = le32_to_cpu(rxbd->r.lstatus);
		if (!bd_status)
			break;

		enetc_wr_reg(rx_ring->idr, BIT(rx_ring->index));
		dma_rmb(); /* for reading other rxbd fields */
		size = le16_to_cpu(rxbd->r.buf_len);
		skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
		if (!skb)
			break;

		enetc_get_offloads(rx_ring, rxbd, skb);

		cleaned_cnt++;
		rxbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			rxbd = ENETC_RXBD(*rx_ring, 0);
		}

		if (unlikely(bd_status &
			     ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
			dev_kfree_skb(skb);
			while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
				dma_rmb();
				bd_status = le32_to_cpu(rxbd->r.lstatus);
				rxbd++;
				i++;
				if (unlikely(i == rx_ring->bd_count)) {
					i = 0;
					rxbd = ENETC_RXBD(*rx_ring, 0);
				}
			}

			rx_ring->ndev->stats.rx_dropped++;
			rx_ring->ndev->stats.rx_errors++;

			break;
		}

		/* not last BD in frame? */
		while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
			bd_status = le32_to_cpu(rxbd->r.lstatus);
			size = ENETC_RXB_DMA_SIZE;

			if (bd_status & ENETC_RXBD_LSTATUS_F) {
				dma_rmb();
				size = le16_to_cpu(rxbd->r.buf_len);
			}

			enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);

			cleaned_cnt++;
			rxbd++;
			i++;
			if (unlikely(i == rx_ring->bd_count)) {
				i = 0;
				rxbd = ENETC_RXBD(*rx_ring, 0);
			}
		}

		rx_byte_cnt += skb->len;

		enetc_process_skb(rx_ring, skb);

		napi_gro_receive(napi, skb);

		rx_frm_cnt++;
	}

	rx_ring->next_to_clean = i;

	rx_ring->stats.packets += rx_frm_cnt;
	rx_ring->stats.bytes += rx_byte_cnt;

	return rx_frm_cnt;
}

/* Probing and Init */
void enetc_get_si_caps(struct enetc_si *si)
{
	struct enetc_hw *hw = &si->hw;
	u32 val;

	/* find out how many of various resources we have to work with */
	val = enetc_rd(hw, ENETC_SICAPR0);
	si->num_rx_rings = (val >> 16) & 0xff;
	si->num_tx_rings = val & 0xff;
}

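/* Allocate the coherent DMA memory backing a BD ring. The hardware requires
 * the ring base to be 128-byte aligned; dma_alloc_coherent() normally
 * returns at least page-aligned memory, so the IS_ALIGNED() check below is
 * a defensive sanity test rather than an expected failure path.
 */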
static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
{
	r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
					&r->bd_dma_base, GFP_KERNEL);
	if (!r->bd_base)
		return -ENOMEM;

	/* h/w requires 128B alignment */
	if (!IS_ALIGNED(r->bd_dma_base, 128)) {
		dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
				  r->bd_dma_base);
		return -EINVAL;
	}

	return 0;
}

static int enetc_alloc_txbdr(struct enetc_bdr *txr)
{
	int err;

	txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
	if (!txr->tx_swbd)
		return -ENOMEM;

	err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
	if (err) {
		vfree(txr->tx_swbd);
		return err;
	}

	txr->next_to_clean = 0;
	txr->next_to_use = 0;

	return 0;
}

static void enetc_free_txbdr(struct enetc_bdr *txr)
{
	int size, i;

	for (i = 0; i < txr->bd_count; i++)
		enetc_free_tx_skb(txr, &txr->tx_swbd[i]);

	size = txr->bd_count * sizeof(union enetc_tx_bd);

	dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
	txr->bd_base = NULL;

	vfree(txr->tx_swbd);
	txr->tx_swbd = NULL;
}

static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
{
	int i, err;

	for (i = 0; i < priv->num_tx_rings; i++) {
		err = enetc_alloc_txbdr(priv->tx_ring[i]);

		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i-- > 0)
		enetc_free_txbdr(priv->tx_ring[i]);

	return err;
}

static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_free_txbdr(priv->tx_ring[i]);
}

static int enetc_alloc_rxbdr(struct enetc_bdr *rxr)
{
	int err;

	rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
	if (!rxr->rx_swbd)
		return -ENOMEM;

	err = enetc_dma_alloc_bdr(rxr, sizeof(union enetc_rx_bd));
	if (err) {
		vfree(rxr->rx_swbd);
		return err;
	}

	rxr->next_to_clean = 0;
	rxr->next_to_use = 0;
	rxr->next_to_alloc = 0;

	return 0;
}

static void enetc_free_rxbdr(struct enetc_bdr *rxr)
{
	int size;

	size = rxr->bd_count * sizeof(union enetc_rx_bd);

	dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
	rxr->bd_base = NULL;

	vfree(rxr->rx_swbd);
	rxr->rx_swbd = NULL;
}

static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
{
	int i, err;

	for (i = 0; i < priv->num_rx_rings; i++) {
		err = enetc_alloc_rxbdr(priv->rx_ring[i]);

		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i-- > 0)
		enetc_free_rxbdr(priv->rx_ring[i]);

	return err;
}

static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_rings; i++)
		enetc_free_rxbdr(priv->rx_ring[i]);
}

static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
{
	int i;

	if (!tx_ring->tx_swbd)
		return;

	for (i = 0; i < tx_ring->bd_count; i++) {
		struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];

		enetc_free_tx_skb(tx_ring, tx_swbd);
	}

	tx_ring->next_to_clean = 0;
	tx_ring->next_to_use = 0;
}

static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
{
	int i;

	if (!rx_ring->rx_swbd)
		return;

	for (i = 0; i < rx_ring->bd_count; i++) {
		struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];

		if (!rx_swbd->page)
			continue;

		dma_unmap_page(rx_ring->dev, rx_swbd->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(rx_swbd->page);
		rx_swbd->page = NULL;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->next_to_alloc = 0;
}

static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_rings; i++)
		enetc_free_rx_ring(priv->rx_ring[i]);

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_free_tx_ring(priv->tx_ring[i]);
}

static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
{
	int size = cbdr->bd_count * sizeof(struct enetc_cbd);

	cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
					   GFP_KERNEL);
	if (!cbdr->bd_base)
		return -ENOMEM;

	/* h/w requires 128B alignment */
	if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
		dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
		return -EINVAL;
	}

	cbdr->next_to_clean = 0;
	cbdr->next_to_use = 0;

	return 0;
}

static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
{
	int size = cbdr->bd_count * sizeof(struct enetc_cbd);

	dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
	cbdr->bd_base = NULL;
}

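/* Program the control BD ring (CBDR) into the station interface: base
 * address split across two 32-bit registers, ring length, producer/consumer
 * indexes reset to zero, then the enable bit (BIT(31) of SICBDRMR). The pir
 * and cir pointers are cached so later command BD submissions can ring the
 * doorbell without recomputing register offsets.
 */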
static void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
{
	/* set CBDR cache attributes */
	enetc_wr(hw, ENETC_SICAR2,
		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);

	enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
	enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
	enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));

	enetc_wr(hw, ENETC_SICBDRPIR, 0);
	enetc_wr(hw, ENETC_SICBDRCIR, 0);

	/* enable ring */
	enetc_wr(hw, ENETC_SICBDRMR, BIT(31));

	cbdr->pir = hw->reg + ENETC_SICBDRPIR;
	cbdr->cir = hw->reg + ENETC_SICBDRCIR;
}

static void enetc_clear_cbdr(struct enetc_hw *hw)
{
	enetc_wr(hw, ENETC_SICBDRMR, 0);
}

static int enetc_configure_si(struct enetc_ndev_priv *priv)
{
	struct enetc_si *si = priv->si;
	struct enetc_hw *hw = &si->hw;

	enetc_setup_cbdr(hw, &si->cbd_ring);
	/* set SI cache attributes */
	enetc_wr(hw, ENETC_SICAR0,
		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
	enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
	/* enable SI */
	enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);

	return 0;
}

void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
{
	struct enetc_si *si = priv->si;
	int cpus = num_online_cpus();

	priv->tx_bd_count = ENETC_BDR_DEFAULT_SIZE;
	priv->rx_bd_count = ENETC_BDR_DEFAULT_SIZE;

	/* Enable all available TX rings in order to configure as many
	 * priorities as possible, when needed.
	 * TODO: Make # of TX rings run-time configurable
	 */
	priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
	priv->num_tx_rings = si->num_tx_rings;
	priv->bdr_int_num = cpus;

	/* SI specific */
	si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
}

int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
{
	struct enetc_si *si = priv->si;
	int err;

	err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring);
	if (err)
		return err;

	err = enetc_configure_si(priv);
	if (err)
		goto err_config_si;

	return 0;

err_config_si:
	enetc_clear_cbdr(&si->hw);
	enetc_free_cbdr(priv->dev, &si->cbd_ring);

	return err;
}

void enetc_free_si_resources(struct enetc_ndev_priv *priv)
{
	struct enetc_si *si = priv->si;

	enetc_clear_cbdr(&si->hw);
	enetc_free_cbdr(priv->dev, &si->cbd_ring);
}

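/* Per-ring hardware setup. Tx rings cannot have their producer/consumer
 * index registers cleared, so the software indexes are resynchronized from
 * the hardware instead; interrupt coalescing is configured with a packet
 * threshold of 1, i.e. an interrupt per completed frame.
 */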
static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
	int idx = tx_ring->index;
	u32 tbmr;

	enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
		       lower_32_bits(tx_ring->bd_dma_base));

	enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
		       upper_32_bits(tx_ring->bd_dma_base));

	WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
	enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
		       ENETC_RTBLENR_LEN(tx_ring->bd_count));

	/* clearing PI/CI registers for Tx not supported, adjust sw indexes */
	tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
	tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);

	/* enable Tx ints by setting pkt thr to 1 */
	enetc_txbdr_wr(hw, idx, ENETC_TBICIR0, ENETC_TBICIR0_ICEN | 0x1);

	tbmr = ENETC_TBMR_EN;
	if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tbmr |= ENETC_TBMR_VIH;

	/* enable ring */
	enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);

	tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
	tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
	tx_ring->idr = hw->reg + ENETC_SITXIDR;
}

static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
{
	int idx = rx_ring->index;
	u32 rbmr;

	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
		       lower_32_bits(rx_ring->bd_dma_base));

	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
		       upper_32_bits(rx_ring->bd_dma_base));

	WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
	enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
		       ENETC_RTBLENR_LEN(rx_ring->bd_count));

	enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);

	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);

	/* enable Rx ints by setting pkt thr to 1 */
	enetc_rxbdr_wr(hw, idx, ENETC_RBICIR0, ENETC_RBICIR0_ICEN | 0x1);

	rbmr = ENETC_RBMR_EN;
	if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rbmr |= ENETC_RBMR_VTE;

	rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
	rx_ring->idr = hw->reg + ENETC_SIRXIDR;

	enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));

	/* enable ring */
	enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
}

static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);

	for (i = 0; i < priv->num_rx_rings; i++)
		enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
}

static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
{
	int idx = rx_ring->index;

	/* disable EN bit on ring */
	enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
}

static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
	int delay = 8, timeout = 100;
	int idx = tx_ring->index;

	/* disable EN bit on ring */
	enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);

	/* wait for busy to clear */
	while (delay < timeout &&
	       enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
		msleep(delay);
		delay *= 2;
	}

	if (delay >= timeout)
		netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n",
			    idx);
}

static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);

	for (i = 0; i < priv->num_rx_rings; i++)
		enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);

	udelay(1);
}

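/* Hook up the MSI-X vectors: one "rxtx" vector per interrupt index, each
 * steering one Rx ring (SIMSIRRV) plus a set of Tx rings (SIMSITRV) to the
 * same table entry, with the IRQ affinity hinted to CPUs round-robin. On
 * failure, the already requested IRQs are unwound in reverse order.
 */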
1088*d4fd0404SClaudiu Manoil static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
1089*d4fd0404SClaudiu Manoil {
1090*d4fd0404SClaudiu Manoil 	struct pci_dev *pdev = priv->si->pdev;
1091*d4fd0404SClaudiu Manoil 	cpumask_t cpu_mask;
1092*d4fd0404SClaudiu Manoil 	int i, j, err;
1093*d4fd0404SClaudiu Manoil 
1094*d4fd0404SClaudiu Manoil 	for (i = 0; i < priv->bdr_int_num; i++) {
1095*d4fd0404SClaudiu Manoil 		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1096*d4fd0404SClaudiu Manoil 		struct enetc_int_vector *v = priv->int_vector[i];
1097*d4fd0404SClaudiu Manoil 		int entry = ENETC_BDR_INT_BASE_IDX + i;
1098*d4fd0404SClaudiu Manoil 		struct enetc_hw *hw = &priv->si->hw;
1099*d4fd0404SClaudiu Manoil 
1100*d4fd0404SClaudiu Manoil 		snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
1101*d4fd0404SClaudiu Manoil 			 priv->ndev->name, i);
1102*d4fd0404SClaudiu Manoil 		err = request_irq(irq, enetc_msix, 0, v->name, v);
1103*d4fd0404SClaudiu Manoil 		if (err) {
1104*d4fd0404SClaudiu Manoil 			dev_err(priv->dev, "request_irq() failed!\n");
1105*d4fd0404SClaudiu Manoil 			goto irq_err;
1106*d4fd0404SClaudiu Manoil 		}
1107*d4fd0404SClaudiu Manoil 
1108*d4fd0404SClaudiu Manoil 		v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
1109*d4fd0404SClaudiu Manoil 		v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
1110*d4fd0404SClaudiu Manoil 
1111*d4fd0404SClaudiu Manoil 		enetc_wr(hw, ENETC_SIMSIRRV(i), entry);
1112*d4fd0404SClaudiu Manoil 
1113*d4fd0404SClaudiu Manoil 		for (j = 0; j < v->count_tx_rings; j++) {
1114*d4fd0404SClaudiu Manoil 			int idx = v->tx_ring[j].index;
1115*d4fd0404SClaudiu Manoil 
1116*d4fd0404SClaudiu Manoil 			enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
1117*d4fd0404SClaudiu Manoil 		}
1118*d4fd0404SClaudiu Manoil 		cpumask_clear(&cpu_mask);
1119*d4fd0404SClaudiu Manoil 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
1120*d4fd0404SClaudiu Manoil 		irq_set_affinity_hint(irq, &cpu_mask);
1121*d4fd0404SClaudiu Manoil 	}
1122*d4fd0404SClaudiu Manoil 
1123*d4fd0404SClaudiu Manoil 	return 0;
1124*d4fd0404SClaudiu Manoil 
1125*d4fd0404SClaudiu Manoil irq_err:
1126*d4fd0404SClaudiu Manoil 	while (i--) {
1127*d4fd0404SClaudiu Manoil 		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1128*d4fd0404SClaudiu Manoil 
1129*d4fd0404SClaudiu Manoil 		irq_set_affinity_hint(irq, NULL);
1130*d4fd0404SClaudiu Manoil 		free_irq(irq, priv->int_vector[i]);
1131*d4fd0404SClaudiu Manoil 	}
1132*d4fd0404SClaudiu Manoil 
1133*d4fd0404SClaudiu Manoil 	return err;
1134*d4fd0404SClaudiu Manoil }
1135*d4fd0404SClaudiu Manoil 
1136*d4fd0404SClaudiu Manoil static void enetc_free_irqs(struct enetc_ndev_priv *priv)
1137*d4fd0404SClaudiu Manoil {
1138*d4fd0404SClaudiu Manoil 	struct pci_dev *pdev = priv->si->pdev;
1139*d4fd0404SClaudiu Manoil 	int i;
1140*d4fd0404SClaudiu Manoil 
1141*d4fd0404SClaudiu Manoil 	for (i = 0; i < priv->bdr_int_num; i++) {
1142*d4fd0404SClaudiu Manoil 		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1143*d4fd0404SClaudiu Manoil 
1144*d4fd0404SClaudiu Manoil 		irq_set_affinity_hint(irq, NULL);
1145*d4fd0404SClaudiu Manoil 		free_irq(irq, priv->int_vector[i]);
1146*d4fd0404SClaudiu Manoil 	}
1147*d4fd0404SClaudiu Manoil }
1148*d4fd0404SClaudiu Manoil 
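/* RBIER_RXTIE and TBIER_TXTIE appear to be the per-ring Rx/Tx event
 * interrupt enable bits; enetc_disable_interrupts() writes 0 to the same
 * registers to mask all ring interrupts again.
 */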
1149*d4fd0404SClaudiu Manoil static void enetc_enable_interrupts(struct enetc_ndev_priv *priv)
1150*d4fd0404SClaudiu Manoil {
1151*d4fd0404SClaudiu Manoil 	int i;
1152*d4fd0404SClaudiu Manoil 
1153*d4fd0404SClaudiu Manoil 	/* enable Tx & Rx event indication */
1154*d4fd0404SClaudiu Manoil 	for (i = 0; i < priv->num_rx_rings; i++) {
1155*d4fd0404SClaudiu Manoil 		enetc_rxbdr_wr(&priv->si->hw, i,
1156*d4fd0404SClaudiu Manoil 			       ENETC_RBIER, ENETC_RBIER_RXTIE);
1157*d4fd0404SClaudiu Manoil 	}
1158*d4fd0404SClaudiu Manoil 
1159*d4fd0404SClaudiu Manoil 	for (i = 0; i < priv->num_tx_rings; i++) {
1160*d4fd0404SClaudiu Manoil 		enetc_txbdr_wr(&priv->si->hw, i,
1161*d4fd0404SClaudiu Manoil 			       ENETC_TBIER, ENETC_TBIER_TXTIE);
1162*d4fd0404SClaudiu Manoil 	}
1163*d4fd0404SClaudiu Manoil }
1164*d4fd0404SClaudiu Manoil 
1165*d4fd0404SClaudiu Manoil static void enetc_disable_interrupts(struct enetc_ndev_priv *priv)
1166*d4fd0404SClaudiu Manoil {
1167*d4fd0404SClaudiu Manoil 	int i;
1168*d4fd0404SClaudiu Manoil 
1169*d4fd0404SClaudiu Manoil 	for (i = 0; i < priv->num_tx_rings; i++)
1170*d4fd0404SClaudiu Manoil 		enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);
1171*d4fd0404SClaudiu Manoil 
1172*d4fd0404SClaudiu Manoil 	for (i = 0; i < priv->num_rx_rings; i++)
1173*d4fd0404SClaudiu Manoil 		enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
1174*d4fd0404SClaudiu Manoil }
1175*d4fd0404SClaudiu Manoil 
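/* phylib link-change callback: no MAC reconfiguration is done on link
 * changes here, so this only logs the new link state.
 */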
1176*d4fd0404SClaudiu Manoil static void adjust_link(struct net_device *ndev)
1177*d4fd0404SClaudiu Manoil {
1178*d4fd0404SClaudiu Manoil 	struct phy_device *phydev = ndev->phydev;
1179*d4fd0404SClaudiu Manoil 
1180*d4fd0404SClaudiu Manoil 	phy_print_status(phydev);
1181*d4fd0404SClaudiu Manoil }
1182*d4fd0404SClaudiu Manoil 
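/* Attach to the PHY described by the phy-handle DT node, if any. Without
 * one the port runs phy-less and enetc_open() forces the carrier on
 * instead of waiting for a PHY link event.
 */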
1183*d4fd0404SClaudiu Manoil static int enetc_phy_connect(struct net_device *ndev)
1184*d4fd0404SClaudiu Manoil {
1185*d4fd0404SClaudiu Manoil 	struct enetc_ndev_priv *priv = netdev_priv(ndev);
1186*d4fd0404SClaudiu Manoil 	struct phy_device *phydev;
1187*d4fd0404SClaudiu Manoil 
1188*d4fd0404SClaudiu Manoil 	if (!priv->phy_node)
1189*d4fd0404SClaudiu Manoil 		return 0; /* phy-less mode */
1190*d4fd0404SClaudiu Manoil 
1191*d4fd0404SClaudiu Manoil 	phydev = of_phy_connect(ndev, priv->phy_node, &adjust_link,
1192*d4fd0404SClaudiu Manoil 				0, priv->if_mode);
1193*d4fd0404SClaudiu Manoil 	if (!phydev) {
1194*d4fd0404SClaudiu Manoil 		dev_err(&ndev->dev, "could not attach to PHY\n");
1195*d4fd0404SClaudiu Manoil 		return -ENODEV;
1196*d4fd0404SClaudiu Manoil 	}
1197*d4fd0404SClaudiu Manoil 
1198*d4fd0404SClaudiu Manoil 	phy_attached_info(phydev);
1199*d4fd0404SClaudiu Manoil 
1200*d4fd0404SClaudiu Manoil 	return 0;
1201*d4fd0404SClaudiu Manoil }
1202*d4fd0404SClaudiu Manoil 
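/* ndo_open: bring-up order is IRQs, PHY, Tx/Rx ring resources, h/w ring
 * setup, real queue counts, NAPI, ring interrupts, carrier/PHY start,
 * then Tx queues. The error labels unwind the completed steps in reverse.
 */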
1203*d4fd0404SClaudiu Manoil int enetc_open(struct net_device *ndev)
1204*d4fd0404SClaudiu Manoil {
1205*d4fd0404SClaudiu Manoil 	struct enetc_ndev_priv *priv = netdev_priv(ndev);
1206*d4fd0404SClaudiu Manoil 	int i, err;
1207*d4fd0404SClaudiu Manoil 
1208*d4fd0404SClaudiu Manoil 	err = enetc_setup_irqs(priv);
1209*d4fd0404SClaudiu Manoil 	if (err)
1210*d4fd0404SClaudiu Manoil 		return err;
1211*d4fd0404SClaudiu Manoil 
1212*d4fd0404SClaudiu Manoil 	err = enetc_phy_connect(ndev);
1213*d4fd0404SClaudiu Manoil 	if (err)
1214*d4fd0404SClaudiu Manoil 		goto err_phy_connect;
1215*d4fd0404SClaudiu Manoil 
1216*d4fd0404SClaudiu Manoil 	err = enetc_alloc_tx_resources(priv);
1217*d4fd0404SClaudiu Manoil 	if (err)
1218*d4fd0404SClaudiu Manoil 		goto err_alloc_tx;
1219*d4fd0404SClaudiu Manoil 
1220*d4fd0404SClaudiu Manoil 	err = enetc_alloc_rx_resources(priv);
1221*d4fd0404SClaudiu Manoil 	if (err)
1222*d4fd0404SClaudiu Manoil 		goto err_alloc_rx;
1223*d4fd0404SClaudiu Manoil 
1224*d4fd0404SClaudiu Manoil 	enetc_setup_bdrs(priv);
1225*d4fd0404SClaudiu Manoil 
1226*d4fd0404SClaudiu Manoil 	err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
1227*d4fd0404SClaudiu Manoil 	if (err)
1228*d4fd0404SClaudiu Manoil 		goto err_set_queues;
1229*d4fd0404SClaudiu Manoil 
1230*d4fd0404SClaudiu Manoil 	err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
1231*d4fd0404SClaudiu Manoil 	if (err)
1232*d4fd0404SClaudiu Manoil 		goto err_set_queues;
1233*d4fd0404SClaudiu Manoil 
1234*d4fd0404SClaudiu Manoil 	for (i = 0; i < priv->bdr_int_num; i++)
1235*d4fd0404SClaudiu Manoil 		napi_enable(&priv->int_vector[i]->napi);
1236*d4fd0404SClaudiu Manoil 
1237*d4fd0404SClaudiu Manoil 	enetc_enable_interrupts(priv);
1238*d4fd0404SClaudiu Manoil 
1239*d4fd0404SClaudiu Manoil 	if (ndev->phydev)
1240*d4fd0404SClaudiu Manoil 		phy_start(ndev->phydev);
1241*d4fd0404SClaudiu Manoil 	else
1242*d4fd0404SClaudiu Manoil 		netif_carrier_on(ndev);
1243*d4fd0404SClaudiu Manoil 
1244*d4fd0404SClaudiu Manoil 	netif_tx_start_all_queues(ndev);
1245*d4fd0404SClaudiu Manoil 
1246*d4fd0404SClaudiu Manoil 	return 0;
1247*d4fd0404SClaudiu Manoil 
1248*d4fd0404SClaudiu Manoil err_set_queues:
1249*d4fd0404SClaudiu Manoil 	enetc_free_rx_resources(priv);
1250*d4fd0404SClaudiu Manoil err_alloc_rx:
1251*d4fd0404SClaudiu Manoil 	enetc_free_tx_resources(priv);
1252*d4fd0404SClaudiu Manoil err_alloc_tx:
1253*d4fd0404SClaudiu Manoil 	if (ndev->phydev)
1254*d4fd0404SClaudiu Manoil 		phy_disconnect(ndev->phydev);
1255*d4fd0404SClaudiu Manoil err_phy_connect:
1256*d4fd0404SClaudiu Manoil 	enetc_free_irqs(priv);
1257*d4fd0404SClaudiu Manoil 
1258*d4fd0404SClaudiu Manoil 	return err;
1259*d4fd0404SClaudiu Manoil }
1260*d4fd0404SClaudiu Manoil 
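/* ndo_stop: tears down in roughly the reverse order of enetc_open(),
 * stopping the Tx queues and the PHY first so no new work arrives while
 * NAPI is drained and the rings are disabled and freed.
 */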
1261*d4fd0404SClaudiu Manoil int enetc_close(struct net_device *ndev)
1262*d4fd0404SClaudiu Manoil {
1263*d4fd0404SClaudiu Manoil 	struct enetc_ndev_priv *priv = netdev_priv(ndev);
1264*d4fd0404SClaudiu Manoil 	int i;
1265*d4fd0404SClaudiu Manoil 
1266*d4fd0404SClaudiu Manoil 	netif_tx_stop_all_queues(ndev);
1267*d4fd0404SClaudiu Manoil 
1268*d4fd0404SClaudiu Manoil 	if (ndev->phydev) {
1269*d4fd0404SClaudiu Manoil 		phy_stop(ndev->phydev);
1270*d4fd0404SClaudiu Manoil 		phy_disconnect(ndev->phydev);
1271*d4fd0404SClaudiu Manoil 	} else {
1272*d4fd0404SClaudiu Manoil 		netif_carrier_off(ndev);
1273*d4fd0404SClaudiu Manoil 	}
1274*d4fd0404SClaudiu Manoil 
1275*d4fd0404SClaudiu Manoil 	for (i = 0; i < priv->bdr_int_num; i++) {
1276*d4fd0404SClaudiu Manoil 		napi_synchronize(&priv->int_vector[i]->napi);
1277*d4fd0404SClaudiu Manoil 		napi_disable(&priv->int_vector[i]->napi);
1278*d4fd0404SClaudiu Manoil 	}
1279*d4fd0404SClaudiu Manoil 
1280*d4fd0404SClaudiu Manoil 	enetc_disable_interrupts(priv);
1281*d4fd0404SClaudiu Manoil 	enetc_clear_bdrs(priv);
1282*d4fd0404SClaudiu Manoil 
1283*d4fd0404SClaudiu Manoil 	enetc_free_rxtx_rings(priv);
1284*d4fd0404SClaudiu Manoil 	enetc_free_rx_resources(priv);
1285*d4fd0404SClaudiu Manoil 	enetc_free_tx_resources(priv);
1286*d4fd0404SClaudiu Manoil 	enetc_free_irqs(priv);
1287*d4fd0404SClaudiu Manoil 
1288*d4fd0404SClaudiu Manoil 	return 0;
1289*d4fd0404SClaudiu Manoil }
1290*d4fd0404SClaudiu Manoil 
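/* ndo_get_stats: sums the per-ring software counters into ndev->stats.
 * No locking is taken against the datapath, so the totals are a
 * best-effort snapshot.
 */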
1291*d4fd0404SClaudiu Manoil struct net_device_stats *enetc_get_stats(struct net_device *ndev)
1292*d4fd0404SClaudiu Manoil {
1293*d4fd0404SClaudiu Manoil 	struct enetc_ndev_priv *priv = netdev_priv(ndev);
1294*d4fd0404SClaudiu Manoil 	struct net_device_stats *stats = &ndev->stats;
1295*d4fd0404SClaudiu Manoil 	unsigned long packets = 0, bytes = 0;
1296*d4fd0404SClaudiu Manoil 	int i;
1297*d4fd0404SClaudiu Manoil 
1298*d4fd0404SClaudiu Manoil 	for (i = 0; i < priv->num_rx_rings; i++) {
1299*d4fd0404SClaudiu Manoil 		packets += priv->rx_ring[i]->stats.packets;
1300*d4fd0404SClaudiu Manoil 		bytes	+= priv->rx_ring[i]->stats.bytes;
1301*d4fd0404SClaudiu Manoil 	}
1302*d4fd0404SClaudiu Manoil 
1303*d4fd0404SClaudiu Manoil 	stats->rx_packets = packets;
1304*d4fd0404SClaudiu Manoil 	stats->rx_bytes = bytes;
1305*d4fd0404SClaudiu Manoil 	bytes = 0;
1306*d4fd0404SClaudiu Manoil 	packets = 0;
1307*d4fd0404SClaudiu Manoil 
1308*d4fd0404SClaudiu Manoil 	for (i = 0; i < priv->num_tx_rings; i++) {
1309*d4fd0404SClaudiu Manoil 		packets += priv->tx_ring[i]->stats.packets;
1310*d4fd0404SClaudiu Manoil 		bytes	+= priv->tx_ring[i]->stats.bytes;
1311*d4fd0404SClaudiu Manoil 	}
1312*d4fd0404SClaudiu Manoil 
1313*d4fd0404SClaudiu Manoil 	stats->tx_packets = packets;
1314*d4fd0404SClaudiu Manoil 	stats->tx_bytes = bytes;
1315*d4fd0404SClaudiu Manoil 
1316*d4fd0404SClaudiu Manoil 	return stats;
1317*d4fd0404SClaudiu Manoil }
1318*d4fd0404SClaudiu Manoil 
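/* Each interrupt vector is a single allocation: struct enetc_int_vector
 * followed by v_tx_rings struct enetc_bdr entries (its Tx rings); the Rx
 * ring is embedded in the vector itself. Illustrative mapping, assuming
 * ENETC_MAX_BDR_INT == 2 (per the "2 CPUs" comment below) and 8 Tx rings:
 * v_tx_rings is 4, vector 0 gets Tx rings {0, 2, 4, 6} and vector 1 gets
 * {1, 3, 5, 7}, from idx = 2 * j + i; otherwise rings are assigned to
 * vectors in contiguous blocks of v_tx_rings.
 */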
1319*d4fd0404SClaudiu Manoil int enetc_alloc_msix(struct enetc_ndev_priv *priv)
1320*d4fd0404SClaudiu Manoil {
1321*d4fd0404SClaudiu Manoil 	struct pci_dev *pdev = priv->si->pdev;
1322*d4fd0404SClaudiu Manoil 	int size, v_tx_rings;
1323*d4fd0404SClaudiu Manoil 	int i, n, err, nvec;
1324*d4fd0404SClaudiu Manoil 
1325*d4fd0404SClaudiu Manoil 	nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
1326*d4fd0404SClaudiu Manoil 	/* allocate MSI-X for both messaging and Rx/Tx interrupts */
1327*d4fd0404SClaudiu Manoil 	n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
1328*d4fd0404SClaudiu Manoil 
1329*d4fd0404SClaudiu Manoil 	if (n < 0)
1330*d4fd0404SClaudiu Manoil 		return n;
1331*d4fd0404SClaudiu Manoil 
1332*d4fd0404SClaudiu Manoil 	if (n != nvec)
1333*d4fd0404SClaudiu Manoil 		return -EPERM;
1334*d4fd0404SClaudiu Manoil 
1335*d4fd0404SClaudiu Manoil 	/* # of tx rings per int vector */
1336*d4fd0404SClaudiu Manoil 	v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
1337*d4fd0404SClaudiu Manoil 	size = sizeof(struct enetc_int_vector) +
1338*d4fd0404SClaudiu Manoil 	       sizeof(struct enetc_bdr) * v_tx_rings;
1339*d4fd0404SClaudiu Manoil 
1340*d4fd0404SClaudiu Manoil 	for (i = 0; i < priv->bdr_int_num; i++) {
1341*d4fd0404SClaudiu Manoil 		struct enetc_int_vector *v;
1342*d4fd0404SClaudiu Manoil 		struct enetc_bdr *bdr;
1343*d4fd0404SClaudiu Manoil 		int j;
1344*d4fd0404SClaudiu Manoil 
1345*d4fd0404SClaudiu Manoil 		v = kzalloc(size, GFP_KERNEL);
1346*d4fd0404SClaudiu Manoil 		if (!v) {
1347*d4fd0404SClaudiu Manoil 			err = -ENOMEM;
1348*d4fd0404SClaudiu Manoil 			goto fail;
1349*d4fd0404SClaudiu Manoil 		}
1350*d4fd0404SClaudiu Manoil 
1351*d4fd0404SClaudiu Manoil 		priv->int_vector[i] = v;
1352*d4fd0404SClaudiu Manoil 
1353*d4fd0404SClaudiu Manoil 		netif_napi_add(priv->ndev, &v->napi, enetc_poll,
1354*d4fd0404SClaudiu Manoil 			       NAPI_POLL_WEIGHT);
1355*d4fd0404SClaudiu Manoil 		v->count_tx_rings = v_tx_rings;
1356*d4fd0404SClaudiu Manoil 
1357*d4fd0404SClaudiu Manoil 		for (j = 0; j < v_tx_rings; j++) {
1358*d4fd0404SClaudiu Manoil 			int idx;
1359*d4fd0404SClaudiu Manoil 
1360*d4fd0404SClaudiu Manoil 			/* default tx ring mapping policy */
1361*d4fd0404SClaudiu Manoil 			if (priv->bdr_int_num == ENETC_MAX_BDR_INT)
1362*d4fd0404SClaudiu Manoil 				idx = 2 * j + i; /* 2 CPUs */
1363*d4fd0404SClaudiu Manoil 			else
1364*d4fd0404SClaudiu Manoil 				idx = j + i * v_tx_rings; /* default */
1365*d4fd0404SClaudiu Manoil 
1366*d4fd0404SClaudiu Manoil 			__set_bit(idx, &v->tx_rings_map);
1367*d4fd0404SClaudiu Manoil 			bdr = &v->tx_ring[j];
1368*d4fd0404SClaudiu Manoil 			bdr->index = idx;
1369*d4fd0404SClaudiu Manoil 			bdr->ndev = priv->ndev;
1370*d4fd0404SClaudiu Manoil 			bdr->dev = priv->dev;
1371*d4fd0404SClaudiu Manoil 			bdr->bd_count = priv->tx_bd_count;
1372*d4fd0404SClaudiu Manoil 			priv->tx_ring[idx] = bdr;
1373*d4fd0404SClaudiu Manoil 		}
1374*d4fd0404SClaudiu Manoil 
1375*d4fd0404SClaudiu Manoil 		bdr = &v->rx_ring;
1376*d4fd0404SClaudiu Manoil 		bdr->index = i;
1377*d4fd0404SClaudiu Manoil 		bdr->ndev = priv->ndev;
1378*d4fd0404SClaudiu Manoil 		bdr->dev = priv->dev;
1379*d4fd0404SClaudiu Manoil 		bdr->bd_count = priv->rx_bd_count;
1380*d4fd0404SClaudiu Manoil 		priv->rx_ring[i] = bdr;
1381*d4fd0404SClaudiu Manoil 	}
1382*d4fd0404SClaudiu Manoil 
1383*d4fd0404SClaudiu Manoil 	return 0;
1384*d4fd0404SClaudiu Manoil 
1385*d4fd0404SClaudiu Manoil fail:
1386*d4fd0404SClaudiu Manoil 	while (i--) {
1387*d4fd0404SClaudiu Manoil 		netif_napi_del(&priv->int_vector[i]->napi);
1388*d4fd0404SClaudiu Manoil 		kfree(priv->int_vector[i]);
1389*d4fd0404SClaudiu Manoil 	}
1390*d4fd0404SClaudiu Manoil 
1391*d4fd0404SClaudiu Manoil 	pci_free_irq_vectors(pdev);
1392*d4fd0404SClaudiu Manoil 
1393*d4fd0404SClaudiu Manoil 	return err;
1394*d4fd0404SClaudiu Manoil }
1395*d4fd0404SClaudiu Manoil 
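/* The Tx/Rx rings live inside the vector allocations, so the ring pointer
 * arrays are only NULLed here; kfree() of each vector releases the ring
 * memory along with it, after its NAPI instance is deleted.
 */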
1396*d4fd0404SClaudiu Manoil void enetc_free_msix(struct enetc_ndev_priv *priv)
1397*d4fd0404SClaudiu Manoil {
1398*d4fd0404SClaudiu Manoil 	int i;
1399*d4fd0404SClaudiu Manoil 
1400*d4fd0404SClaudiu Manoil 	for (i = 0; i < priv->bdr_int_num; i++) {
1401*d4fd0404SClaudiu Manoil 		struct enetc_int_vector *v = priv->int_vector[i];
1402*d4fd0404SClaudiu Manoil 
1403*d4fd0404SClaudiu Manoil 		netif_napi_del(&v->napi);
1404*d4fd0404SClaudiu Manoil 	}
1405*d4fd0404SClaudiu Manoil 
1406*d4fd0404SClaudiu Manoil 	for (i = 0; i < priv->num_rx_rings; i++)
1407*d4fd0404SClaudiu Manoil 		priv->rx_ring[i] = NULL;
1408*d4fd0404SClaudiu Manoil 
1409*d4fd0404SClaudiu Manoil 	for (i = 0; i < priv->num_tx_rings; i++)
1410*d4fd0404SClaudiu Manoil 		priv->tx_ring[i] = NULL;
1411*d4fd0404SClaudiu Manoil 
1412*d4fd0404SClaudiu Manoil 	for (i = 0; i < priv->bdr_int_num; i++) {
1413*d4fd0404SClaudiu Manoil 		kfree(priv->int_vector[i]);
1414*d4fd0404SClaudiu Manoil 		priv->int_vector[i] = NULL;
1415*d4fd0404SClaudiu Manoil 	}
1416*d4fd0404SClaudiu Manoil 
1417*d4fd0404SClaudiu Manoil 	/* disable all MSIX for this device */
1418*d4fd0404SClaudiu Manoil 	pci_free_irq_vectors(priv->si->pdev);
1419*d4fd0404SClaudiu Manoil }
1420*d4fd0404SClaudiu Manoil 
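/* enetc_pci_probe() over-allocates by ENETC_SI_ALIGN - 1 bytes and aligns
 * the si pointer with PTR_ALIGN(), recording the offset in si->pad; this
 * helper walks back by that pad to free the pointer kzalloc() returned.
 */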
1421*d4fd0404SClaudiu Manoil static void enetc_kfree_si(struct enetc_si *si)
1422*d4fd0404SClaudiu Manoil {
1423*d4fd0404SClaudiu Manoil 	char *p = (char *)si - si->pad;
1424*d4fd0404SClaudiu Manoil 
1425*d4fd0404SClaudiu Manoil 	kfree(p);
1426*d4fd0404SClaudiu Manoil }
1427*d4fd0404SClaudiu Manoil 
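/* Rev 1 silicon carries a set of known errata; the flags set here are
 * presumably checked elsewhere in the driver to disable or work around
 * the affected features (Tx checksum offload among them, judging by
 * ENETC_ERR_TXCSUM).
 */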
1428*d4fd0404SClaudiu Manoil static void enetc_detect_errata(struct enetc_si *si)
1429*d4fd0404SClaudiu Manoil {
1430*d4fd0404SClaudiu Manoil 	if (si->pdev->revision == ENETC_REV1)
1431*d4fd0404SClaudiu Manoil 		si->errata = ENETC_ERR_TXCSUM | ENETC_ERR_VLAN_ISOL |
1432*d4fd0404SClaudiu Manoil 			     ENETC_ERR_UCMCSWP;
1433*d4fd0404SClaudiu Manoil }
1434*d4fd0404SClaudiu Manoil 
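/* Shared PCI probe helper (the PF and VF drivers both appear to call it):
 * issue a function-level reset, enable the device, prefer a 64-bit DMA
 * mask with a 32-bit fallback, map the register BAR, and expose the
 * optional port/global register blocks only when the BAR is large enough
 * to contain them. sizeof_priv extra bytes are reserved after the
 * 32B-aligned struct enetc_si for the caller's private data.
 */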
1435*d4fd0404SClaudiu Manoil int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
1436*d4fd0404SClaudiu Manoil {
1437*d4fd0404SClaudiu Manoil 	struct enetc_si *si, *p;
1438*d4fd0404SClaudiu Manoil 	struct enetc_hw *hw;
1439*d4fd0404SClaudiu Manoil 	size_t alloc_size;
1440*d4fd0404SClaudiu Manoil 	int err, len;
1441*d4fd0404SClaudiu Manoil 
1442*d4fd0404SClaudiu Manoil 	pcie_flr(pdev);
1443*d4fd0404SClaudiu Manoil 	err = pci_enable_device_mem(pdev);
1444*d4fd0404SClaudiu Manoil 	if (err) {
1445*d4fd0404SClaudiu Manoil 		dev_err(&pdev->dev, "device enable failed\n");
1446*d4fd0404SClaudiu Manoil 		return err;
1447*d4fd0404SClaudiu Manoil 	}
1448*d4fd0404SClaudiu Manoil 
1449*d4fd0404SClaudiu Manoil 	/* set up for high or low DMA */
1450*d4fd0404SClaudiu Manoil 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1451*d4fd0404SClaudiu Manoil 	if (err) {
1452*d4fd0404SClaudiu Manoil 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1453*d4fd0404SClaudiu Manoil 		if (err) {
1454*d4fd0404SClaudiu Manoil 			dev_err(&pdev->dev,
1455*d4fd0404SClaudiu Manoil 				"DMA configuration failed: 0x%x\n", err);
1456*d4fd0404SClaudiu Manoil 			goto err_dma;
1457*d4fd0404SClaudiu Manoil 		}
1458*d4fd0404SClaudiu Manoil 	}
1459*d4fd0404SClaudiu Manoil 
1460*d4fd0404SClaudiu Manoil 	err = pci_request_mem_regions(pdev, name);
1461*d4fd0404SClaudiu Manoil 	if (err) {
1462*d4fd0404SClaudiu Manoil 		dev_err(&pdev->dev, "pci_request_mem_regions failed, err=%d\n", err);
1463*d4fd0404SClaudiu Manoil 		goto err_pci_mem_reg;
1464*d4fd0404SClaudiu Manoil 	}
1465*d4fd0404SClaudiu Manoil 
1466*d4fd0404SClaudiu Manoil 	pci_set_master(pdev);
1467*d4fd0404SClaudiu Manoil 
1468*d4fd0404SClaudiu Manoil 	alloc_size = sizeof(struct enetc_si);
1469*d4fd0404SClaudiu Manoil 	if (sizeof_priv) {
1470*d4fd0404SClaudiu Manoil 		/* align priv to 32B */
1471*d4fd0404SClaudiu Manoil 		alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
1472*d4fd0404SClaudiu Manoil 		alloc_size += sizeof_priv;
1473*d4fd0404SClaudiu Manoil 	}
1474*d4fd0404SClaudiu Manoil 	/* force 32B alignment for enetc_si */
1475*d4fd0404SClaudiu Manoil 	alloc_size += ENETC_SI_ALIGN - 1;
1476*d4fd0404SClaudiu Manoil 
1477*d4fd0404SClaudiu Manoil 	p = kzalloc(alloc_size, GFP_KERNEL);
1478*d4fd0404SClaudiu Manoil 	if (!p) {
1479*d4fd0404SClaudiu Manoil 		err = -ENOMEM;
1480*d4fd0404SClaudiu Manoil 		goto err_alloc_si;
1481*d4fd0404SClaudiu Manoil 	}
1482*d4fd0404SClaudiu Manoil 
1483*d4fd0404SClaudiu Manoil 	si = PTR_ALIGN(p, ENETC_SI_ALIGN);
1484*d4fd0404SClaudiu Manoil 	si->pad = (char *)si - (char *)p;
1485*d4fd0404SClaudiu Manoil 
1486*d4fd0404SClaudiu Manoil 	pci_set_drvdata(pdev, si);
1487*d4fd0404SClaudiu Manoil 	si->pdev = pdev;
1488*d4fd0404SClaudiu Manoil 	hw = &si->hw;
1489*d4fd0404SClaudiu Manoil 
1490*d4fd0404SClaudiu Manoil 	len = pci_resource_len(pdev, ENETC_BAR_REGS);
1491*d4fd0404SClaudiu Manoil 	hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
1492*d4fd0404SClaudiu Manoil 	if (!hw->reg) {
1493*d4fd0404SClaudiu Manoil 		err = -ENXIO;
1494*d4fd0404SClaudiu Manoil 		dev_err(&pdev->dev, "ioremap() failed\n");
1495*d4fd0404SClaudiu Manoil 		goto err_ioremap;
1496*d4fd0404SClaudiu Manoil 	}
1497*d4fd0404SClaudiu Manoil 	if (len > ENETC_PORT_BASE)
1498*d4fd0404SClaudiu Manoil 		hw->port = hw->reg + ENETC_PORT_BASE;
1499*d4fd0404SClaudiu Manoil 	if (len > ENETC_GLOBAL_BASE)
1500*d4fd0404SClaudiu Manoil 		hw->global = hw->reg + ENETC_GLOBAL_BASE;
1501*d4fd0404SClaudiu Manoil 
1502*d4fd0404SClaudiu Manoil 	enetc_detect_errata(si);
1503*d4fd0404SClaudiu Manoil 
1504*d4fd0404SClaudiu Manoil 	return 0;
1505*d4fd0404SClaudiu Manoil 
1506*d4fd0404SClaudiu Manoil err_ioremap:
1507*d4fd0404SClaudiu Manoil 	enetc_kfree_si(si);
1508*d4fd0404SClaudiu Manoil err_alloc_si:
1509*d4fd0404SClaudiu Manoil 	pci_release_mem_regions(pdev);
1510*d4fd0404SClaudiu Manoil err_pci_mem_reg:
1511*d4fd0404SClaudiu Manoil err_dma:
1512*d4fd0404SClaudiu Manoil 	pci_disable_device(pdev);
1513*d4fd0404SClaudiu Manoil 
1514*d4fd0404SClaudiu Manoil 	return err;
1515*d4fd0404SClaudiu Manoil }
1516*d4fd0404SClaudiu Manoil 
1517*d4fd0404SClaudiu Manoil void enetc_pci_remove(struct pci_dev *pdev)
1518*d4fd0404SClaudiu Manoil {
1519*d4fd0404SClaudiu Manoil 	struct enetc_si *si = pci_get_drvdata(pdev);
1520*d4fd0404SClaudiu Manoil 	struct enetc_hw *hw = &si->hw;
1521*d4fd0404SClaudiu Manoil 
1522*d4fd0404SClaudiu Manoil 	iounmap(hw->reg);
1523*d4fd0404SClaudiu Manoil 	enetc_kfree_si(si);
1524*d4fd0404SClaudiu Manoil 	pci_release_mem_regions(pdev);
1525*d4fd0404SClaudiu Manoil 	pci_disable_device(pdev);
1526*d4fd0404SClaudiu Manoil }