1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2017-2019 NXP */
3 
4 #include "enetc.h"
5 #include <linux/tcp.h>
6 #include <linux/udp.h>
7 #include <linux/of_mdio.h>
8 #include <linux/vmalloc.h>
9 
10 /* ENETC overhead: optional extension BD + 1 BD gap */
11 #define ENETC_TXBDS_NEEDED(val)	((val) + 2)
12 /* max # of chained Tx BDs is 15, including head and extension BD */
13 #define ENETC_MAX_SKB_FRAGS	13
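/* Worst case BD usage for one frame: up to ENETC_MAX_SKB_FRAGS fragments
 * plus the head buffer, plus the ENETC_TXBDS_NEEDED() overhead above
 * (optional extension BD + 1 BD gap), i.e. (13 + 1) + 2 = 16 BDs.
 */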
14 #define ENETC_TXBDS_MAX_NEEDED	ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
15 
16 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb);
17 
18 netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
19 {
20 	struct enetc_ndev_priv *priv = netdev_priv(ndev);
21 	struct enetc_bdr *tx_ring;
22 	int count;
23 
24 	tx_ring = priv->tx_ring[skb->queue_mapping];
25 
26 	if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
27 		if (unlikely(skb_linearize(skb)))
28 			goto drop_packet_err;
29 
30 	count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
31 	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
32 		netif_stop_subqueue(ndev, tx_ring->index);
33 		return NETDEV_TX_BUSY;
34 	}
35 
36 	count = enetc_map_tx_buffs(tx_ring, skb);
37 	if (unlikely(!count))
38 		goto drop_packet_err;
39 
40 	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
41 		netif_stop_subqueue(ndev, tx_ring->index);
42 
43 	return NETDEV_TX_OK;
44 
45 drop_packet_err:
46 	dev_kfree_skb_any(skb);
47 	return NETDEV_TX_OK;
48 }
49 
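/* Set up the Tx BD checksum offload fields for TCP or UDP packets that
 * request CHECKSUM_PARTIAL, recording the L3 header offset/size and
 * flagging IPv6 where applicable.  Other L4 protocols fall back to
 * software checksumming via skb_checksum_help().  Returns true if the
 * hardware will compute the checksum.
 */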
50 static bool enetc_tx_csum(struct sk_buff *skb, union enetc_tx_bd *txbd)
51 {
52 	int l3_start, l3_hsize;
53 	u16 l3_flags, l4_flags;
54 
55 	if (skb->ip_summed != CHECKSUM_PARTIAL)
56 		return false;
57 
58 	switch (skb->csum_offset) {
59 	case offsetof(struct tcphdr, check):
60 		l4_flags = ENETC_TXBD_L4_TCP;
61 		break;
62 	case offsetof(struct udphdr, check):
63 		l4_flags = ENETC_TXBD_L4_UDP;
64 		break;
65 	default:
66 		skb_checksum_help(skb);
67 		return false;
68 	}
69 
70 	l3_start = skb_network_offset(skb);
71 	l3_hsize = skb_network_header_len(skb);
72 
73 	l3_flags = 0;
74 	if (skb->protocol == htons(ETH_P_IPV6))
75 		l3_flags = ENETC_TXBD_L3_IPV6;
76 
77 	/* write BD fields */
78 	txbd->l3_csoff = enetc_txbd_l3_csoff(l3_start, l3_hsize, l3_flags);
79 	txbd->l4_csoff = l4_flags;
80 
81 	return true;
82 }
83 
84 static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
85 				struct enetc_tx_swbd *tx_swbd)
86 {
87 	if (tx_swbd->is_dma_page)
88 		dma_unmap_page(tx_ring->dev, tx_swbd->dma,
89 			       tx_swbd->len, DMA_TO_DEVICE);
90 	else
91 		dma_unmap_single(tx_ring->dev, tx_swbd->dma,
92 				 tx_swbd->len, DMA_TO_DEVICE);
93 	tx_swbd->dma = 0;
94 }
95 
96 static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
97 			      struct enetc_tx_swbd *tx_swbd)
98 {
99 	if (tx_swbd->dma)
100 		enetc_unmap_tx_buff(tx_ring, tx_swbd);
101 
102 	if (tx_swbd->skb) {
103 		dev_kfree_skb_any(tx_swbd->skb);
104 		tx_swbd->skb = NULL;
105 	}
106 }
107 
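/* Map the skb head and fragments to consecutive Tx BDs, optionally adding
 * one extension BD for VLAN insertion and/or a Tx timestamp request.  The
 * last BD gets the 'F' (final) bit and the producer index (tpir) is then
 * written to notify the hardware.  Returns the number of BDs used, or 0 on
 * DMA mapping error, in which case the already mapped buffers are unwound.
 */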
108 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
109 {
110 	struct enetc_tx_swbd *tx_swbd;
111 	struct skb_frag_struct *frag;
112 	int len = skb_headlen(skb);
113 	union enetc_tx_bd temp_bd;
114 	union enetc_tx_bd *txbd;
115 	bool do_vlan, do_tstamp;
116 	int i, count = 0;
117 	unsigned int f;
118 	dma_addr_t dma;
119 	u8 flags = 0;
120 
121 	i = tx_ring->next_to_use;
122 	txbd = ENETC_TXBD(*tx_ring, i);
123 	prefetchw(txbd);
124 
125 	dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
126 	if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
127 		goto dma_err;
128 
129 	temp_bd.addr = cpu_to_le64(dma);
130 	temp_bd.buf_len = cpu_to_le16(len);
131 	temp_bd.lstatus = 0;
132 
133 	tx_swbd = &tx_ring->tx_swbd[i];
134 	tx_swbd->dma = dma;
135 	tx_swbd->len = len;
136 	tx_swbd->is_dma_page = 0;
137 	count++;
138 
139 	do_vlan = skb_vlan_tag_present(skb);
140 	do_tstamp = skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
141 
142 	if (do_vlan || do_tstamp)
143 		flags |= ENETC_TXBD_FLAGS_EX;
144 
145 	if (enetc_tx_csum(skb, &temp_bd))
146 		flags |= ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS;
147 
148 	/* first BD needs frm_len and offload flags set */
149 	temp_bd.frm_len = cpu_to_le16(skb->len);
150 	temp_bd.flags = flags;
151 
152 	if (flags & ENETC_TXBD_FLAGS_EX) {
153 		u8 e_flags = 0;
154 		*txbd = temp_bd;
155 		enetc_clear_tx_bd(&temp_bd);
156 
157 		/* add extension BD for VLAN and/or timestamping */
158 		flags = 0;
159 		tx_swbd++;
160 		txbd++;
161 		i++;
162 		if (unlikely(i == tx_ring->bd_count)) {
163 			i = 0;
164 			tx_swbd = tx_ring->tx_swbd;
165 			txbd = ENETC_TXBD(*tx_ring, 0);
166 		}
167 		prefetchw(txbd);
168 
169 		if (do_vlan) {
170 			temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
171 			temp_bd.ext.tpid = 0; /* 0 = standard C-TAG */
172 			e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
173 		}
174 
175 		if (do_tstamp) {
176 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
177 			e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
178 		}
179 
180 		temp_bd.ext.e_flags = e_flags;
181 		count++;
182 	}
183 
184 	frag = &skb_shinfo(skb)->frags[0];
185 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
186 		len = skb_frag_size(frag);
187 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
188 				       DMA_TO_DEVICE);
189 		if (dma_mapping_error(tx_ring->dev, dma))
190 			goto dma_err;
191 
192 		*txbd = temp_bd;
193 		enetc_clear_tx_bd(&temp_bd);
194 
195 		flags = 0;
196 		tx_swbd++;
197 		txbd++;
198 		i++;
199 		if (unlikely(i == tx_ring->bd_count)) {
200 			i = 0;
201 			tx_swbd = tx_ring->tx_swbd;
202 			txbd = ENETC_TXBD(*tx_ring, 0);
203 		}
204 		prefetchw(txbd);
205 
206 		temp_bd.addr = cpu_to_le64(dma);
207 		temp_bd.buf_len = cpu_to_le16(len);
208 
209 		tx_swbd->dma = dma;
210 		tx_swbd->len = len;
211 		tx_swbd->is_dma_page = 1;
212 		count++;
213 	}
214 
215 	/* last BD needs 'F' bit set */
216 	flags |= ENETC_TXBD_FLAGS_F;
217 	temp_bd.flags = flags;
218 	*txbd = temp_bd;
219 
220 	tx_ring->tx_swbd[i].skb = skb;
221 
222 	enetc_bdr_idx_inc(tx_ring, &i);
223 	tx_ring->next_to_use = i;
224 
225 	/* let H/W know BD ring has been updated */
226 	enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */
227 
228 	return count;
229 
230 dma_err:
231 	dev_err(tx_ring->dev, "DMA map error\n");
232 
233 	do {
234 		tx_swbd = &tx_ring->tx_swbd[i];
235 		enetc_free_tx_skb(tx_ring, tx_swbd);
236 		if (i == 0)
237 			i = tx_ring->bd_count;
238 		i--;
239 	} while (count--);
240 
241 	return 0;
242 }
243 
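/* MSI-X handler: mask this vector's Rx and Tx ring interrupts and defer
 * the actual work to NAPI.  The bits set in tx_rings_map are global Tx
 * ring indices, assigned by the mapping policy in enetc_alloc_msix().
 */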
244 static irqreturn_t enetc_msix(int irq, void *data)
245 {
246 	struct enetc_int_vector	*v = data;
247 	int i;
248 
249 	/* disable interrupts */
250 	enetc_wr_reg(v->rbier, 0);
251 
252 	for_each_set_bit(i, &v->tx_rings_map, BITS_PER_LONG)
253 		enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
254 
255 	napi_schedule_irqoff(&v->napi);
256 
257 	return IRQ_HANDLED;
258 }
259 
260 static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget);
261 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
262 			       struct napi_struct *napi, int work_limit);
263 
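/* NAPI poll: clean this vector's Tx rings and its Rx ring.  Interrupts are
 * re-enabled only if all Tx rings were fully cleaned and the Rx ring used
 * less than the full budget; otherwise keep polling by returning budget.
 */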
264 static int enetc_poll(struct napi_struct *napi, int budget)
265 {
266 	struct enetc_int_vector
267 		*v = container_of(napi, struct enetc_int_vector, napi);
268 	bool complete = true;
269 	int work_done;
270 	int i;
271 
272 	for (i = 0; i < v->count_tx_rings; i++)
273 		if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
274 			complete = false;
275 
276 	work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget);
277 	if (work_done == budget)
278 		complete = false;
279 
280 	if (!complete)
281 		return budget;
282 
283 	napi_complete_done(napi, work_done);
284 
285 	/* enable interrupts */
286 	enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);
287 
288 	for_each_set_bit(i, &v->tx_rings_map, BITS_PER_LONG)
289 		enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
290 			     ENETC_TBIER_TXTIE);
291 
292 	return work_done;
293 }
294 
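/* Number of Tx BDs the hardware has completed, i.e. the distance from the
 * software clean index (ci) to the hardware consumer index read from tcir,
 * accounting for ring wrap-around.  E.g., assuming bd_count = 256, ci = 250
 * and a hardware index of 4, there are 256 - 250 + 4 = 10 BDs to clean.
 */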
295 static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
296 {
297 	int pi = enetc_rd_reg(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
298 
299 	return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
300 }
301 
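/* Reclaim completed Tx BDs: unmap buffers, free skbs on end-of-frame,
 * update stats and wake the Tx queue if enough BDs were freed.  Returns
 * true if the ring was cleaned within the ENETC_DEFAULT_TX_WORK budget.
 */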
302 static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
303 {
304 	struct net_device *ndev = tx_ring->ndev;
305 	int tx_frm_cnt = 0, tx_byte_cnt = 0;
306 	struct enetc_tx_swbd *tx_swbd;
307 	int i, bds_to_clean;
308 
309 	i = tx_ring->next_to_clean;
310 	tx_swbd = &tx_ring->tx_swbd[i];
311 	bds_to_clean = enetc_bd_ready_count(tx_ring, i);
312 
313 	while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
314 		bool is_eof = !!tx_swbd->skb;
315 
316 		if (likely(tx_swbd->dma))
			enetc_unmap_tx_buff(tx_ring, tx_swbd);
317 		if (is_eof) {
318 			napi_consume_skb(tx_swbd->skb, napi_budget);
319 			tx_swbd->skb = NULL;
320 		}
321 
322 		tx_byte_cnt += tx_swbd->len;
323 
324 		bds_to_clean--;
325 		tx_swbd++;
326 		i++;
327 		if (unlikely(i == tx_ring->bd_count)) {
328 			i = 0;
329 			tx_swbd = tx_ring->tx_swbd;
330 		}
331 
332 		/* BD iteration loop end */
333 		if (is_eof) {
334 			tx_frm_cnt++;
335 			/* re-arm interrupt source */
336 			enetc_wr_reg(tx_ring->idr, BIT(tx_ring->index) |
337 				     BIT(16 + tx_ring->index));
338 		}
339 
340 		if (unlikely(!bds_to_clean))
341 			bds_to_clean = enetc_bd_ready_count(tx_ring, i);
342 	}
343 
344 	tx_ring->next_to_clean = i;
345 	tx_ring->stats.packets += tx_frm_cnt;
346 	tx_ring->stats.bytes += tx_byte_cnt;
347 
348 	if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
349 		     __netif_subqueue_stopped(ndev, tx_ring->index) &&
350 		     (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
351 		netif_wake_subqueue(ndev, tx_ring->index);
352 	}
353 
354 	return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
355 }
356 
357 static bool enetc_new_page(struct enetc_bdr *rx_ring,
358 			   struct enetc_rx_swbd *rx_swbd)
359 {
360 	struct page *page;
361 	dma_addr_t addr;
362 
363 	page = dev_alloc_page();
364 	if (unlikely(!page))
365 		return false;
366 
367 	addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
368 	if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
369 		__free_page(page);
370 
371 		return false;
372 	}
373 
374 	rx_swbd->dma = addr;
375 	rx_swbd->page = page;
376 	rx_swbd->page_offset = ENETC_RXB_PAD;
377 
378 	return true;
379 }
380 
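/* Refill up to buff_cnt Rx BDs: reuse the page already attached to the
 * software BD when possible, otherwise allocate and map a fresh page, then
 * write back the buffer address and clear the BD status word.  The ring's
 * consumer index register (rcir) is updated once at the end.  Returns the
 * number of buffers actually refilled.
 */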
381 static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
382 {
383 	struct enetc_rx_swbd *rx_swbd;
384 	union enetc_rx_bd *rxbd;
385 	int i, j;
386 
387 	i = rx_ring->next_to_use;
388 	rx_swbd = &rx_ring->rx_swbd[i];
389 	rxbd = ENETC_RXBD(*rx_ring, i);
390 
391 	for (j = 0; j < buff_cnt; j++) {
392 		/* try reuse page */
393 		if (unlikely(!rx_swbd->page)) {
394 			if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
395 				rx_ring->stats.rx_alloc_errs++;
396 				break;
397 			}
398 		}
399 
400 		/* update RxBD */
401 		rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
402 					   rx_swbd->page_offset);
403 		/* clear lstatus, including the 'R' bit */
404 		rxbd->r.lstatus = 0;
405 
406 		rx_swbd++;
407 		rxbd++;
408 		i++;
409 		if (unlikely(i == rx_ring->bd_count)) {
410 			i = 0;
411 			rx_swbd = rx_ring->rx_swbd;
412 			rxbd = ENETC_RXBD(*rx_ring, 0);
413 		}
414 	}
415 
416 	if (likely(j)) {
417 		rx_ring->next_to_alloc = i; /* keep track for page reuse */
418 		rx_ring->next_to_use = i;
419 		/* update ENETC's consumer index */
420 		enetc_wr_reg(rx_ring->rcir, i);
421 	}
422 
423 	return j;
424 }
425 
426 static void enetc_get_offloads(struct enetc_bdr *rx_ring,
427 			       union enetc_rx_bd *rxbd, struct sk_buff *skb)
428 {
429 	/* TODO: add tstamp, hashing */
430 	if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
431 		u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);
432 
433 		skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
434 		skb->ip_summed = CHECKSUM_COMPLETE;
435 	}
436 
437 	/* if a VLAN tag was extracted, copy it to the skb; for now we assume
438 	 * the standard TPID, but the HW also supports custom values
439 	 */
440 	if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN)
441 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
442 				       le16_to_cpu(rxbd->r.vlan_opt));
443 }
444 
445 static void enetc_process_skb(struct enetc_bdr *rx_ring,
446 			      struct sk_buff *skb)
447 {
448 	skb_record_rx_queue(skb, rx_ring->index);
449 	skb->protocol = eth_type_trans(skb, rx_ring->ndev);
450 }
451 
452 static bool enetc_page_reusable(struct page *page)
453 {
454 	return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
455 }
456 
457 static void enetc_reuse_page(struct enetc_bdr *rx_ring,
458 			     struct enetc_rx_swbd *old)
459 {
460 	struct enetc_rx_swbd *new;
461 
462 	new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
463 
464 	/* next buf that may reuse a page */
465 	enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
466 
467 	/* copy page reference */
468 	*new = *old;
469 }
470 
471 static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
472 					       int i, u16 size)
473 {
474 	struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
475 
476 	dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
477 				      rx_swbd->page_offset,
478 				      size, DMA_FROM_DEVICE);
479 	return rx_swbd;
480 }
481 
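/* Page-reuse scheme: each Rx buffer is a slice of a page, and page_offset
 * is toggled by ENETC_RXB_TRUESIZE so two buffers can alternate within the
 * same page.  If the driver still holds the only reference to the page,
 * flip the offset, take an extra reference and recycle the buffer into the
 * next_to_alloc slot; otherwise just unmap it, leaving the remaining page
 * reference with the skb that consumed the buffer.
 */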
482 static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
483 			      struct enetc_rx_swbd *rx_swbd)
484 {
485 	if (likely(enetc_page_reusable(rx_swbd->page))) {
486 		rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
487 		page_ref_inc(rx_swbd->page);
488 
489 		enetc_reuse_page(rx_ring, rx_swbd);
490 
491 		/* sync for use by the device */
492 		dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
493 						 rx_swbd->page_offset,
494 						 ENETC_RXB_DMA_SIZE,
495 						 DMA_FROM_DEVICE);
496 	} else {
497 		dma_unmap_page(rx_ring->dev, rx_swbd->dma,
498 			       PAGE_SIZE, DMA_FROM_DEVICE);
499 	}
500 
501 	rx_swbd->page = NULL;
502 }
503 
504 static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
505 						int i, u16 size)
506 {
507 	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
508 	struct sk_buff *skb;
509 	void *ba;
510 
511 	ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
512 	skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE);
513 	if (unlikely(!skb)) {
514 		rx_ring->stats.rx_alloc_errs++;
515 		return NULL;
516 	}
517 
518 	skb_reserve(skb, ENETC_RXB_PAD);
519 	__skb_put(skb, size);
520 
521 	enetc_put_rx_buff(rx_ring, rx_swbd);
522 
523 	return skb;
524 }
525 
526 static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
527 				     u16 size, struct sk_buff *skb)
528 {
529 	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
530 
531 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
532 			rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);
533 
534 	enetc_put_rx_buff(rx_ring, rx_swbd);
535 }
536 
537 #define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
538 
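/* Rx processing loop: refill free BDs whenever at least ENETC_RXBD_BUNDLE
 * of them have accumulated, build an skb from the first BD of each frame,
 * append subsequent BDs as page fragments until the final (F) bit is seen,
 * then parse offloads and hand the skb to napi_gro_receive().  Frames with
 * an error status are dropped along with the rest of their BD chain.
 * Returns the number of frames processed, at most work_limit.
 */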
539 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
540 			       struct napi_struct *napi, int work_limit)
541 {
542 	int rx_frm_cnt = 0, rx_byte_cnt = 0;
543 	int cleaned_cnt, i;
544 
545 	cleaned_cnt = enetc_bd_unused(rx_ring);
546 	/* next descriptor to process */
547 	i = rx_ring->next_to_clean;
548 
549 	while (likely(rx_frm_cnt < work_limit)) {
550 		union enetc_rx_bd *rxbd;
551 		struct sk_buff *skb;
552 		u32 bd_status;
553 		u16 size;
554 
555 		if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
556 			int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
557 
558 			cleaned_cnt -= count;
559 		}
560 
561 		rxbd = ENETC_RXBD(*rx_ring, i);
562 		bd_status = le32_to_cpu(rxbd->r.lstatus);
563 		if (!bd_status)
564 			break;
565 
566 		enetc_wr_reg(rx_ring->idr, BIT(rx_ring->index));
567 		dma_rmb(); /* for reading other rxbd fields */
568 		size = le16_to_cpu(rxbd->r.buf_len);
569 		skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
570 		if (!skb)
571 			break;
572 
573 		enetc_get_offloads(rx_ring, rxbd, skb);
574 
575 		cleaned_cnt++;
576 		rxbd++;
577 		i++;
578 		if (unlikely(i == rx_ring->bd_count)) {
579 			i = 0;
580 			rxbd = ENETC_RXBD(*rx_ring, 0);
581 		}
582 
583 		if (unlikely(bd_status &
584 			     ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
585 			dev_kfree_skb(skb);
586 			while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
587 				dma_rmb();
588 				bd_status = le32_to_cpu(rxbd->r.lstatus);
589 				rxbd++;
590 				i++;
591 				if (unlikely(i == rx_ring->bd_count)) {
592 					i = 0;
593 					rxbd = ENETC_RXBD(*rx_ring, 0);
594 				}
595 			}
596 
597 			rx_ring->ndev->stats.rx_dropped++;
598 			rx_ring->ndev->stats.rx_errors++;
599 
600 			break;
601 		}
602 
603 		/* not last BD in frame? */
604 		while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
605 			bd_status = le32_to_cpu(rxbd->r.lstatus);
606 			size = ENETC_RXB_DMA_SIZE;
607 
608 			if (bd_status & ENETC_RXBD_LSTATUS_F) {
609 				dma_rmb();
610 				size = le16_to_cpu(rxbd->r.buf_len);
611 			}
612 
613 			enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);
614 
615 			cleaned_cnt++;
616 			rxbd++;
617 			i++;
618 			if (unlikely(i == rx_ring->bd_count)) {
619 				i = 0;
620 				rxbd = ENETC_RXBD(*rx_ring, 0);
621 			}
622 		}
623 
624 		rx_byte_cnt += skb->len;
625 
626 		enetc_process_skb(rx_ring, skb);
627 
628 		napi_gro_receive(napi, skb);
629 
630 		rx_frm_cnt++;
631 	}
632 
633 	rx_ring->next_to_clean = i;
634 
635 	rx_ring->stats.packets += rx_frm_cnt;
636 	rx_ring->stats.bytes += rx_byte_cnt;
637 
638 	return rx_frm_cnt;
639 }
640 
641 /* Probing and Init */
642 #define ENETC_MAX_RFS_SIZE 64
643 void enetc_get_si_caps(struct enetc_si *si)
644 {
645 	struct enetc_hw *hw = &si->hw;
646 	u32 val;
647 
648 	/* find out how many of various resources we have to work with */
649 	val = enetc_rd(hw, ENETC_SICAPR0);
650 	si->num_rx_rings = (val >> 16) & 0xff;
651 	si->num_tx_rings = val & 0xff;
652 
653 	val = enetc_rd(hw, ENETC_SIRFSCAPR);
654 	si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
655 	si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
656 
657 	si->num_rss = 0;
658 	val = enetc_rd(hw, ENETC_SIPCAPR0);
659 	if (val & ENETC_SIPCAPR0_RSS) {
660 		val = enetc_rd(hw, ENETC_SIRSSCAPR);
661 		si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(val);
662 	}
663 }
664 
665 static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
666 {
667 	r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
668 					&r->bd_dma_base, GFP_KERNEL);
669 	if (!r->bd_base)
670 		return -ENOMEM;
671 
672 	/* h/w requires 128B alignment */
673 	if (!IS_ALIGNED(r->bd_dma_base, 128)) {
674 		dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
675 				  r->bd_dma_base);
676 		return -EINVAL;
677 	}
678 
679 	return 0;
680 }
681 
682 static int enetc_alloc_txbdr(struct enetc_bdr *txr)
683 {
684 	int err;
685 
686 	txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
687 	if (!txr->tx_swbd)
688 		return -ENOMEM;
689 
690 	err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
691 	if (err) {
692 		vfree(txr->tx_swbd);
693 		return err;
694 	}
695 
696 	txr->next_to_clean = 0;
697 	txr->next_to_use = 0;
698 
699 	return 0;
700 }
701 
702 static void enetc_free_txbdr(struct enetc_bdr *txr)
703 {
704 	int size, i;
705 
706 	for (i = 0; i < txr->bd_count; i++)
707 		enetc_free_tx_skb(txr, &txr->tx_swbd[i]);
708 
709 	size = txr->bd_count * sizeof(union enetc_tx_bd);
710 
711 	dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
712 	txr->bd_base = NULL;
713 
714 	vfree(txr->tx_swbd);
715 	txr->tx_swbd = NULL;
716 }
717 
718 static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
719 {
720 	int i, err;
721 
722 	for (i = 0; i < priv->num_tx_rings; i++) {
723 		err = enetc_alloc_txbdr(priv->tx_ring[i]);
724 
725 		if (err)
726 			goto fail;
727 	}
728 
729 	return 0;
730 
731 fail:
732 	while (i-- > 0)
733 		enetc_free_txbdr(priv->tx_ring[i]);
734 
735 	return err;
736 }
737 
738 static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
739 {
740 	int i;
741 
742 	for (i = 0; i < priv->num_tx_rings; i++)
743 		enetc_free_txbdr(priv->tx_ring[i]);
744 }
745 
746 static int enetc_alloc_rxbdr(struct enetc_bdr *rxr)
747 {
748 	int err;
749 
750 	rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
751 	if (!rxr->rx_swbd)
752 		return -ENOMEM;
753 
754 	err = enetc_dma_alloc_bdr(rxr, sizeof(union enetc_rx_bd));
755 	if (err) {
756 		vfree(rxr->rx_swbd);
757 		return err;
758 	}
759 
760 	rxr->next_to_clean = 0;
761 	rxr->next_to_use = 0;
762 	rxr->next_to_alloc = 0;
763 
764 	return 0;
765 }
766 
767 static void enetc_free_rxbdr(struct enetc_bdr *rxr)
768 {
769 	int size;
770 
771 	size = rxr->bd_count * sizeof(union enetc_rx_bd);
772 
773 	dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
774 	rxr->bd_base = NULL;
775 
776 	vfree(rxr->rx_swbd);
777 	rxr->rx_swbd = NULL;
778 }
779 
780 static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
781 {
782 	int i, err;
783 
784 	for (i = 0; i < priv->num_rx_rings; i++) {
785 		err = enetc_alloc_rxbdr(priv->rx_ring[i]);
786 
787 		if (err)
788 			goto fail;
789 	}
790 
791 	return 0;
792 
793 fail:
794 	while (i-- > 0)
795 		enetc_free_rxbdr(priv->rx_ring[i]);
796 
797 	return err;
798 }
799 
800 static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
801 {
802 	int i;
803 
804 	for (i = 0; i < priv->num_rx_rings; i++)
805 		enetc_free_rxbdr(priv->rx_ring[i]);
806 }
807 
808 static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
809 {
810 	int i;
811 
812 	if (!tx_ring->tx_swbd)
813 		return;
814 
815 	for (i = 0; i < tx_ring->bd_count; i++) {
816 		struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
817 
818 		enetc_free_tx_skb(tx_ring, tx_swbd);
819 	}
820 
821 	tx_ring->next_to_clean = 0;
822 	tx_ring->next_to_use = 0;
823 }
824 
825 static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
826 {
827 	int i;
828 
829 	if (!rx_ring->rx_swbd)
830 		return;
831 
832 	for (i = 0; i < rx_ring->bd_count; i++) {
833 		struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
834 
835 		if (!rx_swbd->page)
836 			continue;
837 
838 		dma_unmap_page(rx_ring->dev, rx_swbd->dma,
839 			       PAGE_SIZE, DMA_FROM_DEVICE);
840 		__free_page(rx_swbd->page);
841 		rx_swbd->page = NULL;
842 	}
843 
844 	rx_ring->next_to_clean = 0;
845 	rx_ring->next_to_use = 0;
846 	rx_ring->next_to_alloc = 0;
847 }
848 
849 static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
850 {
851 	int i;
852 
853 	for (i = 0; i < priv->num_rx_rings; i++)
854 		enetc_free_rx_ring(priv->rx_ring[i]);
855 
856 	for (i = 0; i < priv->num_tx_rings; i++)
857 		enetc_free_tx_ring(priv->tx_ring[i]);
858 }
859 
860 static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
861 {
862 	int size = cbdr->bd_count * sizeof(struct enetc_cbd);
863 
864 	cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
865 					   GFP_KERNEL);
866 	if (!cbdr->bd_base)
867 		return -ENOMEM;
868 
869 	/* h/w requires 128B alignment */
870 	if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
871 		dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
872 		return -EINVAL;
873 	}
874 
875 	cbdr->next_to_clean = 0;
876 	cbdr->next_to_use = 0;
877 
878 	return 0;
879 }
880 
881 static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
882 {
883 	int size = cbdr->bd_count * sizeof(struct enetc_cbd);
884 
885 	dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
886 	cbdr->bd_base = NULL;
887 }
888 
889 static void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
890 {
891 	/* set CBDR cache attributes */
892 	enetc_wr(hw, ENETC_SICAR2,
893 		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
894 
895 	enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
896 	enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
897 	enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));
898 
899 	enetc_wr(hw, ENETC_SICBDRPIR, 0);
900 	enetc_wr(hw, ENETC_SICBDRCIR, 0);
901 
902 	/* enable ring */
903 	enetc_wr(hw, ENETC_SICBDRMR, BIT(31));
904 
905 	cbdr->pir = hw->reg + ENETC_SICBDRPIR;
906 	cbdr->cir = hw->reg + ENETC_SICBDRCIR;
907 }
908 
909 static void enetc_clear_cbdr(struct enetc_hw *hw)
910 {
911 	enetc_wr(hw, ENETC_SICBDRMR, 0);
912 }
913 
914 static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
915 {
916 	int *rss_table;
917 	int i;
918 
919 	rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL);
920 	if (!rss_table)
921 		return -ENOMEM;
922 
923 	/* Set up RSS table defaults */
924 	for (i = 0; i < si->num_rss; i++)
925 		rss_table[i] = i % num_groups;
926 
927 	enetc_set_rss_table(si, rss_table, si->num_rss);
928 
929 	kfree(rss_table);
930 
931 	return 0;
932 }
933 
934 static int enetc_configure_si(struct enetc_ndev_priv *priv)
935 {
936 	struct enetc_si *si = priv->si;
937 	struct enetc_hw *hw = &si->hw;
938 	int err;
939 
940 	enetc_setup_cbdr(hw, &si->cbd_ring);
941 	/* set SI cache attributes */
942 	enetc_wr(hw, ENETC_SICAR0,
943 		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
944 	enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
945 	/* enable SI */
946 	enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
947 
948 	if (si->num_rss) {
949 		err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
950 		if (err)
951 			return err;
952 	}
953 
954 	return 0;
955 }
956 
957 void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
958 {
959 	struct enetc_si *si = priv->si;
960 	int cpus = num_online_cpus();
961 
962 	priv->tx_bd_count = ENETC_BDR_DEFAULT_SIZE;
963 	priv->rx_bd_count = ENETC_BDR_DEFAULT_SIZE;
964 
965 	/* Enable all available TX rings in order to configure as many
966 	 * priorities as possible, when needed.
967 	 * TODO: Make # of TX rings run-time configurable
968 	 */
969 	priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
970 	priv->num_tx_rings = si->num_tx_rings;
971 	priv->bdr_int_num = cpus;
972 
973 	/* SI specific */
974 	si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
975 }
976 
977 int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
978 {
979 	struct enetc_si *si = priv->si;
980 	int err;
981 
982 	err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring);
983 	if (err)
984 		return err;
985 
986 	priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
987 				  GFP_KERNEL);
988 	if (!priv->cls_rules) {
989 		err = -ENOMEM;
990 		goto err_alloc_cls;
991 	}
992 
993 	err = enetc_configure_si(priv);
994 	if (err)
995 		goto err_config_si;
996 
997 	return 0;
998 
999 err_config_si:
1000 	kfree(priv->cls_rules);
1001 err_alloc_cls:
1002 	enetc_clear_cbdr(&si->hw);
1003 	enetc_free_cbdr(priv->dev, &si->cbd_ring);
1004 
1005 	return err;
1006 }
1007 
1008 void enetc_free_si_resources(struct enetc_ndev_priv *priv)
1009 {
1010 	struct enetc_si *si = priv->si;
1011 
1012 	enetc_clear_cbdr(&si->hw);
1013 	enetc_free_cbdr(priv->dev, &si->cbd_ring);
1014 
1015 	kfree(priv->cls_rules);
1016 }
1017 
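/* Program one Tx BD ring in hardware: base address, length (must be a
 * multiple of 64), an interrupt threshold of one packet, and the
 * ENETC_TBMR_VIH bit when Tx VLAN offload is enabled, then enable the
 * ring.  The producer/consumer index registers cannot be reset here, so
 * the software next_to_use/next_to_clean indices are synced from hardware
 * instead.  The doorbell (tpir), consumer (tcir) and interrupt detect
 * (idr) register addresses are cached for the hot path.
 */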
1018 static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
1019 {
1020 	int idx = tx_ring->index;
1021 	u32 tbmr;
1022 
1023 	enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
1024 		       lower_32_bits(tx_ring->bd_dma_base));
1025 
1026 	enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
1027 		       upper_32_bits(tx_ring->bd_dma_base));
1028 
1029 	WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
1030 	enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
1031 		       ENETC_RTBLENR_LEN(tx_ring->bd_count));
1032 
1033 	/* clearing PI/CI registers for Tx not supported, adjust sw indexes */
1034 	tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
1035 	tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);
1036 
1037 	/* enable Tx ints by setting pkt thr to 1 */
1038 	enetc_txbdr_wr(hw, idx, ENETC_TBICIR0, ENETC_TBICIR0_ICEN | 0x1);
1039 
1040 	tbmr = ENETC_TBMR_EN;
1041 	if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
1042 		tbmr |= ENETC_TBMR_VIH;
1043 
1044 	/* enable ring */
1045 	enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
1046 
1047 	tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
1048 	tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
1049 	tx_ring->idr = hw->reg + ENETC_SITXIDR;
1050 }
1051 
1052 static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
1053 {
1054 	int idx = rx_ring->index;
1055 	u32 rbmr;
1056 
1057 	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
1058 		       lower_32_bits(rx_ring->bd_dma_base));
1059 
1060 	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
1061 		       upper_32_bits(rx_ring->bd_dma_base));
1062 
1063 	WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
1064 	enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
1065 		       ENETC_RTBLENR_LEN(rx_ring->bd_count));
1066 
1067 	enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
1068 
1069 	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
1070 
1071 	/* enable Rx ints by setting pkt thr to 1 */
1072 	enetc_rxbdr_wr(hw, idx, ENETC_RBICIR0, ENETC_RBICIR0_ICEN | 0x1);
1073 
1074 	rbmr = ENETC_RBMR_EN;
1075 	if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1076 		rbmr |= ENETC_RBMR_VTE;
1077 
1078 	rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
1079 	rx_ring->idr = hw->reg + ENETC_SIRXIDR;
1080 
1081 	enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
1082 
1083 	/* enable ring */
1084 	enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
1085 }
1086 
1087 static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
1088 {
1089 	int i;
1090 
1091 	for (i = 0; i < priv->num_tx_rings; i++)
1092 		enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);
1093 
1094 	for (i = 0; i < priv->num_rx_rings; i++)
1095 		enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
1096 }
1097 
1098 static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
1099 {
1100 	int idx = rx_ring->index;
1101 
1102 	/* disable EN bit on ring */
1103 	enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
1104 }
1105 
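/* Disable one Tx BD ring and wait for the hardware to go idle: poll the
 * TBSR busy flag with exponential backoff (8, 16, 32, 64 ms), giving up
 * once the per-iteration delay reaches the 100 ms timeout.
 */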
1106 static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
1107 {
1108 	int delay = 8, timeout = 100;
1109 	int idx = tx_ring->index;
1110 
1111 	/* disable EN bit on ring */
1112 	enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
1113 
1114 	/* wait for busy to clear */
1115 	while (delay < timeout &&
1116 	       enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
1117 		msleep(delay);
1118 		delay *= 2;
1119 	}
1120 
1121 	if (delay >= timeout)
1122 		netdev_warn(tx_ring->ndev,
1123 			    "timed out waiting for tx ring #%d to stop\n", idx);
1124 }
1125 
1126 static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
1127 {
1128 	int i;
1129 
1130 	for (i = 0; i < priv->num_tx_rings; i++)
1131 		enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);
1132 
1133 	for (i = 0; i < priv->num_rx_rings; i++)
1134 		enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);
1135 
1136 	udelay(1);
1137 }
1138 
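/* Request one IRQ per Rx/Tx interrupt vector, program the MSI-X entry
 * routing for the Rx ring (SIMSIRRV) and for each Tx ring served by the
 * vector (SIMSITRV), and spread the vectors across online CPUs via
 * affinity hints.  On failure, already requested IRQs are released.
 */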
1139 static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
1140 {
1141 	struct pci_dev *pdev = priv->si->pdev;
1142 	cpumask_t cpu_mask;
1143 	int i, j, err;
1144 
1145 	for (i = 0; i < priv->bdr_int_num; i++) {
1146 		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1147 		struct enetc_int_vector *v = priv->int_vector[i];
1148 		int entry = ENETC_BDR_INT_BASE_IDX + i;
1149 		struct enetc_hw *hw = &priv->si->hw;
1150 
1151 		snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
1152 			 priv->ndev->name, i);
1153 		err = request_irq(irq, enetc_msix, 0, v->name, v);
1154 		if (err) {
1155 			dev_err(priv->dev, "request_irq() failed!\n");
1156 			goto irq_err;
1157 		}
1158 
1159 		v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
1160 		v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
1161 
1162 		enetc_wr(hw, ENETC_SIMSIRRV(i), entry);
1163 
1164 		for (j = 0; j < v->count_tx_rings; j++) {
1165 			int idx = v->tx_ring[j].index;
1166 
1167 			enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
1168 		}
1169 		cpumask_clear(&cpu_mask);
1170 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
1171 		irq_set_affinity_hint(irq, &cpu_mask);
1172 	}
1173 
1174 	return 0;
1175 
1176 irq_err:
1177 	while (i--) {
1178 		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1179 
1180 		irq_set_affinity_hint(irq, NULL);
1181 		free_irq(irq, priv->int_vector[i]);
1182 	}
1183 
1184 	return err;
1185 }
1186 
1187 static void enetc_free_irqs(struct enetc_ndev_priv *priv)
1188 {
1189 	struct pci_dev *pdev = priv->si->pdev;
1190 	int i;
1191 
1192 	for (i = 0; i < priv->bdr_int_num; i++) {
1193 		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1194 
1195 		irq_set_affinity_hint(irq, NULL);
1196 		free_irq(irq, priv->int_vector[i]);
1197 	}
1198 }
1199 
1200 static void enetc_enable_interrupts(struct enetc_ndev_priv *priv)
1201 {
1202 	int i;
1203 
1204 	/* enable Tx & Rx event indication */
1205 	for (i = 0; i < priv->num_rx_rings; i++) {
1206 		enetc_rxbdr_wr(&priv->si->hw, i,
1207 			       ENETC_RBIER, ENETC_RBIER_RXTIE);
1208 	}
1209 
1210 	for (i = 0; i < priv->num_tx_rings; i++) {
1211 		enetc_txbdr_wr(&priv->si->hw, i,
1212 			       ENETC_TBIER, ENETC_TBIER_TXTIE);
1213 	}
1214 }
1215 
1216 static void enetc_disable_interrupts(struct enetc_ndev_priv *priv)
1217 {
1218 	int i;
1219 
1220 	for (i = 0; i < priv->num_tx_rings; i++)
1221 		enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);
1222 
1223 	for (i = 0; i < priv->num_rx_rings; i++)
1224 		enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
1225 }
1226 
1227 static void adjust_link(struct net_device *ndev)
1228 {
1229 	struct phy_device *phydev = ndev->phydev;
1230 
1231 	phy_print_status(phydev);
1232 }
1233 
1234 static int enetc_phy_connect(struct net_device *ndev)
1235 {
1236 	struct enetc_ndev_priv *priv = netdev_priv(ndev);
1237 	struct phy_device *phydev;
1238 
1239 	if (!priv->phy_node)
1240 		return 0; /* phy-less mode */
1241 
1242 	phydev = of_phy_connect(ndev, priv->phy_node, &adjust_link,
1243 				0, priv->if_mode);
1244 	if (!phydev) {
1245 		dev_err(&ndev->dev, "could not attach to PHY\n");
1246 		return -ENODEV;
1247 	}
1248 
1249 	phy_attached_info(phydev);
1250 
1251 	return 0;
1252 }
1253 
1254 int enetc_open(struct net_device *ndev)
1255 {
1256 	struct enetc_ndev_priv *priv = netdev_priv(ndev);
1257 	int i, err;
1258 
1259 	err = enetc_setup_irqs(priv);
1260 	if (err)
1261 		return err;
1262 
1263 	err = enetc_phy_connect(ndev);
1264 	if (err)
1265 		goto err_phy_connect;
1266 
1267 	err = enetc_alloc_tx_resources(priv);
1268 	if (err)
1269 		goto err_alloc_tx;
1270 
1271 	err = enetc_alloc_rx_resources(priv);
1272 	if (err)
1273 		goto err_alloc_rx;
1274 
1275 	enetc_setup_bdrs(priv);
1276 
1277 	err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
1278 	if (err)
1279 		goto err_set_queues;
1280 
1281 	err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
1282 	if (err)
1283 		goto err_set_queues;
1284 
1285 	for (i = 0; i < priv->bdr_int_num; i++)
1286 		napi_enable(&priv->int_vector[i]->napi);
1287 
1288 	enetc_enable_interrupts(priv);
1289 
1290 	if (ndev->phydev)
1291 		phy_start(ndev->phydev);
1292 	else
1293 		netif_carrier_on(ndev);
1294 
1295 	netif_tx_start_all_queues(ndev);
1296 
1297 	return 0;
1298 
1299 err_set_queues:
1300 	enetc_free_rx_resources(priv);
1301 err_alloc_rx:
1302 	enetc_free_tx_resources(priv);
1303 err_alloc_tx:
1304 	if (ndev->phydev)
1305 		phy_disconnect(ndev->phydev);
1306 err_phy_connect:
1307 	enetc_free_irqs(priv);
1308 
1309 	return err;
1310 }
1311 
1312 int enetc_close(struct net_device *ndev)
1313 {
1314 	struct enetc_ndev_priv *priv = netdev_priv(ndev);
1315 	int i;
1316 
1317 	netif_tx_stop_all_queues(ndev);
1318 
1319 	if (ndev->phydev) {
1320 		phy_stop(ndev->phydev);
1321 		phy_disconnect(ndev->phydev);
1322 	} else {
1323 		netif_carrier_off(ndev);
1324 	}
1325 
1326 	for (i = 0; i < priv->bdr_int_num; i++) {
1327 		napi_synchronize(&priv->int_vector[i]->napi);
1328 		napi_disable(&priv->int_vector[i]->napi);
1329 	}
1330 
1331 	enetc_disable_interrupts(priv);
1332 	enetc_clear_bdrs(priv);
1333 
1334 	enetc_free_rxtx_rings(priv);
1335 	enetc_free_rx_resources(priv);
1336 	enetc_free_tx_resources(priv);
1337 	enetc_free_irqs(priv);
1338 
1339 	return 0;
1340 }
1341 
1342 struct net_device_stats *enetc_get_stats(struct net_device *ndev)
1343 {
1344 	struct enetc_ndev_priv *priv = netdev_priv(ndev);
1345 	struct net_device_stats *stats = &ndev->stats;
1346 	unsigned long packets = 0, bytes = 0;
1347 	int i;
1348 
1349 	for (i = 0; i < priv->num_rx_rings; i++) {
1350 		packets += priv->rx_ring[i]->stats.packets;
1351 		bytes	+= priv->rx_ring[i]->stats.bytes;
1352 	}
1353 
1354 	stats->rx_packets = packets;
1355 	stats->rx_bytes = bytes;
1356 	bytes = 0;
1357 	packets = 0;
1358 
1359 	for (i = 0; i < priv->num_tx_rings; i++) {
1360 		packets += priv->tx_ring[i]->stats.packets;
1361 		bytes	+= priv->tx_ring[i]->stats.bytes;
1362 	}
1363 
1364 	stats->tx_packets = packets;
1365 	stats->tx_bytes = bytes;
1366 
1367 	return stats;
1368 }
1369 
1370 static int enetc_set_rss(struct net_device *ndev, int en)
1371 {
1372 	struct enetc_ndev_priv *priv = netdev_priv(ndev);
1373 	struct enetc_hw *hw = &priv->si->hw;
1374 	u32 reg;
1375 
1376 	enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
1377 
1378 	reg = enetc_rd(hw, ENETC_SIMR);
1379 	reg &= ~ENETC_SIMR_RSSE;
1380 	reg |= (en) ? ENETC_SIMR_RSSE : 0;
1381 	enetc_wr(hw, ENETC_SIMR, reg);
1382 
1383 	return 0;
1384 }
1385 
1386 int enetc_set_features(struct net_device *ndev,
1387 		       netdev_features_t features)
1388 {
1389 	netdev_features_t changed = ndev->features ^ features;
1390 
1391 	if (changed & NETIF_F_RXHASH)
1392 		enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
1393 
1394 	return 0;
1395 }
1396 
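/* Allocate the MSI-X vectors (the first ENETC_BDR_INT_BASE_IDX entries are
 * reserved for messaging, plus one per Rx/Tx interrupt vector) and build
 * the per-vector structures, each embedding its Tx rings and one Rx ring.
 * Tx rings are either interleaved across vectors (the "2 CPUs" case below)
 * or assigned in contiguous blocks, and the priv->tx_ring[] / rx_ring[]
 * lookup tables are filled in accordingly.
 */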
1397 int enetc_alloc_msix(struct enetc_ndev_priv *priv)
1398 {
1399 	struct pci_dev *pdev = priv->si->pdev;
1400 	int size, v_tx_rings;
1401 	int i, n, err, nvec;
1402 
1403 	nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
1404 	/* allocate MSIX for both messaging and Rx/Tx interrupts */
1405 	n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
1406 
1407 	if (n < 0)
1408 		return n;
1409 
1410 	if (n != nvec)
1411 		return -EPERM;
1412 
1413 	/* # of tx rings per int vector */
1414 	v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
1415 	size = sizeof(struct enetc_int_vector) +
1416 	       sizeof(struct enetc_bdr) * v_tx_rings;
1417 
1418 	for (i = 0; i < priv->bdr_int_num; i++) {
1419 		struct enetc_int_vector *v;
1420 		struct enetc_bdr *bdr;
1421 		int j;
1422 
1423 		v = kzalloc(size, GFP_KERNEL);
1424 		if (!v) {
1425 			err = -ENOMEM;
1426 			goto fail;
1427 		}
1428 
1429 		priv->int_vector[i] = v;
1430 
1431 		netif_napi_add(priv->ndev, &v->napi, enetc_poll,
1432 			       NAPI_POLL_WEIGHT);
1433 		v->count_tx_rings = v_tx_rings;
1434 
1435 		for (j = 0; j < v_tx_rings; j++) {
1436 			int idx;
1437 
1438 			/* default tx ring mapping policy */
1439 			if (priv->bdr_int_num == ENETC_MAX_BDR_INT)
1440 				idx = 2 * j + i; /* 2 CPUs */
1441 			else
1442 				idx = j + i * v_tx_rings; /* default */
1443 
1444 			__set_bit(idx, &v->tx_rings_map);
1445 			bdr = &v->tx_ring[j];
1446 			bdr->index = idx;
1447 			bdr->ndev = priv->ndev;
1448 			bdr->dev = priv->dev;
1449 			bdr->bd_count = priv->tx_bd_count;
1450 			priv->tx_ring[idx] = bdr;
1451 		}
1452 
1453 		bdr = &v->rx_ring;
1454 		bdr->index = i;
1455 		bdr->ndev = priv->ndev;
1456 		bdr->dev = priv->dev;
1457 		bdr->bd_count = priv->rx_bd_count;
1458 		priv->rx_ring[i] = bdr;
1459 	}
1460 
1461 	return 0;
1462 
1463 fail:
1464 	while (i--) {
1465 		netif_napi_del(&priv->int_vector[i]->napi);
1466 		kfree(priv->int_vector[i]);
1467 	}
1468 
1469 	pci_free_irq_vectors(pdev);
1470 
1471 	return err;
1472 }
1473 
1474 void enetc_free_msix(struct enetc_ndev_priv *priv)
1475 {
1476 	int i;
1477 
1478 	for (i = 0; i < priv->bdr_int_num; i++) {
1479 		struct enetc_int_vector *v = priv->int_vector[i];
1480 
1481 		netif_napi_del(&v->napi);
1482 	}
1483 
1484 	for (i = 0; i < priv->num_rx_rings; i++)
1485 		priv->rx_ring[i] = NULL;
1486 
1487 	for (i = 0; i < priv->num_tx_rings; i++)
1488 		priv->tx_ring[i] = NULL;
1489 
1490 	for (i = 0; i < priv->bdr_int_num; i++) {
1491 		kfree(priv->int_vector[i]);
1492 		priv->int_vector[i] = NULL;
1493 	}
1494 
1495 	/* disable all MSIX for this device */
1496 	pci_free_irq_vectors(priv->si->pdev);
1497 }
1498 
1499 static void enetc_kfree_si(struct enetc_si *si)
1500 {
1501 	char *p = (char *)si - si->pad;
1502 
1503 	kfree(p);
1504 }
1505 
1506 static void enetc_detect_errata(struct enetc_si *si)
1507 {
1508 	if (si->pdev->revision == ENETC_REV1)
1509 		si->errata = ENETC_ERR_TXCSUM | ENETC_ERR_VLAN_ISOL |
1510 			     ENETC_ERR_UCMCSWP;
1511 }
1512 
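/* Common PCI probe: reset the function (FLR), enable the device, set up a
 * 64-bit DMA mask (falling back to 32-bit), request the register regions
 * and map the ENETC_BAR_REGS BAR.  The enetc_si structure is allocated
 * oversized and aligned to ENETC_SI_ALIGN by hand; the padding is recorded
 * in si->pad so enetc_kfree_si() can recover the original pointer.
 */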
1513 int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
1514 {
1515 	struct enetc_si *si, *p;
1516 	struct enetc_hw *hw;
1517 	size_t alloc_size;
1518 	int err, len;
1519 
1520 	pcie_flr(pdev);
1521 	err = pci_enable_device_mem(pdev);
1522 	if (err) {
1523 		dev_err(&pdev->dev, "device enable failed\n");
1524 		return err;
1525 	}
1526 
1527 	/* set up for high or low dma */
1528 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1529 	if (err) {
1530 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1531 		if (err) {
1532 			dev_err(&pdev->dev,
1533 				"DMA configuration failed: 0x%x\n", err);
1534 			goto err_dma;
1535 		}
1536 	}
1537 
1538 	err = pci_request_mem_regions(pdev, name);
1539 	if (err) {
1540 		dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
1541 		goto err_pci_mem_reg;
1542 	}
1543 
1544 	pci_set_master(pdev);
1545 
1546 	alloc_size = sizeof(struct enetc_si);
1547 	if (sizeof_priv) {
1548 		/* align priv to 32B */
1549 		alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
1550 		alloc_size += sizeof_priv;
1551 	}
1552 	/* force 32B alignment for enetc_si */
1553 	alloc_size += ENETC_SI_ALIGN - 1;
1554 
1555 	p = kzalloc(alloc_size, GFP_KERNEL);
1556 	if (!p) {
1557 		err = -ENOMEM;
1558 		goto err_alloc_si;
1559 	}
1560 
1561 	si = PTR_ALIGN(p, ENETC_SI_ALIGN);
1562 	si->pad = (char *)si - (char *)p;
1563 
1564 	pci_set_drvdata(pdev, si);
1565 	si->pdev = pdev;
1566 	hw = &si->hw;
1567 
1568 	len = pci_resource_len(pdev, ENETC_BAR_REGS);
1569 	hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
1570 	if (!hw->reg) {
1571 		err = -ENXIO;
1572 		dev_err(&pdev->dev, "ioremap() failed\n");
1573 		goto err_ioremap;
1574 	}
1575 	if (len > ENETC_PORT_BASE)
1576 		hw->port = hw->reg + ENETC_PORT_BASE;
1577 	if (len > ENETC_GLOBAL_BASE)
1578 		hw->global = hw->reg + ENETC_GLOBAL_BASE;
1579 
1580 	enetc_detect_errata(si);
1581 
1582 	return 0;
1583 
1584 err_ioremap:
1585 	enetc_kfree_si(si);
1586 err_alloc_si:
1587 	pci_release_mem_regions(pdev);
1588 err_pci_mem_reg:
1589 err_dma:
1590 	pci_disable_device(pdev);
1591 
1592 	return err;
1593 }
1594 
1595 void enetc_pci_remove(struct pci_dev *pdev)
1596 {
1597 	struct enetc_si *si = pci_get_drvdata(pdev);
1598 	struct enetc_hw *hw = &si->hw;
1599 
1600 	iounmap(hw->reg);
1601 	enetc_kfree_si(si);
1602 	pci_release_mem_regions(pdev);
1603 	pci_disable_device(pdev);
1604 }
1605