// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt)			"bcmasp_intf: " fmt

#include <asm/byteorder.h>
#include <linux/brcmphy.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/ptp_classify.h>
#include <linux/platform_device.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmasp.h"
#include "bcmasp_intf_defs.h"

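/* Advance a descriptor ring index by one entry, wrapping back to zero
 * when the end of the ring is reached.
 */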
static int incr_ring(int index, int ring_count)
{
	index++;
	if (index == ring_count)
		return 0;

	return index;
}

/* Points to last byte of descriptor */
static dma_addr_t incr_last_byte(dma_addr_t addr, dma_addr_t beg,
				 int ring_count)
{
	dma_addr_t end = beg + (ring_count * DESC_SIZE);

	addr += DESC_SIZE;
	if (addr > end)
		return beg + DESC_SIZE - 1;

	return addr;
}

/* Points to first byte of descriptor */
static dma_addr_t incr_first_byte(dma_addr_t addr, dma_addr_t beg,
				  int ring_count)
{
	dma_addr_t end = beg + (ring_count * DESC_SIZE);

	addr += DESC_SIZE;
	if (addr >= end)
		return beg;

	return addr;
}

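/* Enable or disable the per-port TX SPB and EPKT transmit blocks */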
static void bcmasp_enable_tx(struct bcmasp_intf *intf, int en)
{
	if (en) {
		tx_spb_ctrl_wl(intf, TX_SPB_CTRL_ENABLE_EN, TX_SPB_CTRL_ENABLE);
		tx_epkt_core_wl(intf, (TX_EPKT_C_CFG_MISC_EN |
				TX_EPKT_C_CFG_MISC_PT |
				(intf->port << TX_EPKT_C_CFG_MISC_PS_SHIFT)),
				TX_EPKT_C_CFG_MISC);
	} else {
		tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
		tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
	}
}

static void bcmasp_enable_rx(struct bcmasp_intf *intf, int en)
{
	if (en)
		rx_edpkt_cfg_wl(intf, RX_EDPKT_CFG_ENABLE_EN,
				RX_EDPKT_CFG_ENABLE);
	else
		rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);
}

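/* Program the broadcast, unicast and multicast MDA filters for this port,
 * falling back to promiscuous mode when the hardware filters are exhausted.
 */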
static void bcmasp_set_rx_mode(struct net_device *dev)
{
	unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	spin_lock_bh(&intf->parent->mda_lock);

	bcmasp_disable_all_filters(intf);

	if (dev->flags & IFF_PROMISC)
		goto set_promisc;

	bcmasp_set_promisc(intf, 0);

	bcmasp_set_broad(intf, 1);

	bcmasp_set_oaddr(intf, dev->dev_addr, 1);

	if (dev->flags & IFF_ALLMULTI) {
		bcmasp_set_allmulti(intf, 1);
	} else {
		bcmasp_set_allmulti(intf, 0);

		netdev_for_each_mc_addr(ha, dev) {
			ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
			if (ret) {
				intf->mib.mc_filters_full_cnt++;
				goto set_promisc;
			}
		}
	}

	netdev_for_each_uc_addr(ha, dev) {
		ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
		if (ret) {
			intf->mib.uc_filters_full_cnt++;
			goto set_promisc;
		}
	}

	spin_unlock_bh(&intf->parent->mda_lock);
	return;

set_promisc:
	bcmasp_set_promisc(intf, 1);
	intf->mib.promisc_filters_cnt++;

	/* disable all filters used by this port */
	bcmasp_disable_all_filters(intf);

	spin_unlock_bh(&intf->parent->mda_lock);
}

static void bcmasp_clean_txcb(struct bcmasp_intf *intf, int index)
{
	struct bcmasp_tx_cb *txcb = &intf->tx_cbs[index];

	txcb->skb = NULL;
	dma_unmap_addr_set(txcb, dma_addr, 0);
	dma_unmap_len_set(txcb, dma_len, 0);
	txcb->last = false;
}

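/* Return 1 if the TX SPB ring does not have room for cnt more descriptors */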
static int tx_spb_ring_full(struct bcmasp_intf *intf, int cnt)
{
	int next_index, i;

	/* Check if we have enough room for cnt descriptors */
	next_index = intf->tx_spb_index;
	for (i = 0; i < cnt; i++) {
		next_index = incr_ring(next_index, DESC_RING_COUNT);
		if (next_index == intf->tx_spb_clean_index)
			return 1;
	}

	return 0;
}

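/* Prepend the in-band packet offload descriptor used for hardware checksum
 * offload. Unsupported protocols fall back to skb_checksum_help().
 */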
static struct sk_buff *bcmasp_csum_offload(struct net_device *dev,
					   struct sk_buff *skb,
					   bool *csum_hw)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	u32 header = 0, header2 = 0, epkt = 0;
	struct bcmasp_pkt_offload *offload;
	unsigned int header_cnt = 0;
	u8 ip_proto;
	int ret;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return skb;

	ret = skb_cow_head(skb, sizeof(*offload));
	if (ret < 0) {
		intf->mib.tx_realloc_offload_failed++;
		goto help;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		header |= PKT_OFFLOAD_HDR_SIZE_2((ip_hdrlen(skb) >> 8) & 0xf);
		header2 |= PKT_OFFLOAD_HDR2_SIZE_2(ip_hdrlen(skb) & 0xff);
		epkt |= PKT_OFFLOAD_EPKT_IP(0) | PKT_OFFLOAD_EPKT_CSUM_L2;
		ip_proto = ip_hdr(skb)->protocol;
		header_cnt += 2;
		break;
	case htons(ETH_P_IPV6):
		header |= PKT_OFFLOAD_HDR_SIZE_2((IP6_HLEN >> 8) & 0xf);
		header2 |= PKT_OFFLOAD_HDR2_SIZE_2(IP6_HLEN & 0xff);
		epkt |= PKT_OFFLOAD_EPKT_IP(1) | PKT_OFFLOAD_EPKT_CSUM_L2;
		ip_proto = ipv6_hdr(skb)->nexthdr;
		header_cnt += 2;
		break;
	default:
		goto help;
	}

	switch (ip_proto) {
	case IPPROTO_TCP:
		header2 |= PKT_OFFLOAD_HDR2_SIZE_3(tcp_hdrlen(skb));
		epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L3;
		header_cnt++;
		break;
	case IPPROTO_UDP:
		header2 |= PKT_OFFLOAD_HDR2_SIZE_3(UDP_HLEN);
		epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L3;
		header_cnt++;
		break;
	default:
		goto help;
	}

	offload = (struct bcmasp_pkt_offload *)skb_push(skb, sizeof(*offload));

	header |= PKT_OFFLOAD_HDR_OP | PKT_OFFLOAD_HDR_COUNT(header_cnt) |
		  PKT_OFFLOAD_HDR_SIZE_1(ETH_HLEN);
	epkt |= PKT_OFFLOAD_EPKT_OP;

	offload->nop = htonl(PKT_OFFLOAD_NOP);
	offload->header = htonl(header);
	offload->header2 = htonl(header2);
	offload->epkt = htonl(epkt);
	offload->end = htonl(PKT_OFFLOAD_END_OP);
	*csum_hw = true;

	return skb;

help:
	skb_checksum_help(skb);

	return skb;
}

static unsigned long bcmasp_rx_edpkt_dma_rq(struct bcmasp_intf *intf)
{
	return rx_edpkt_dma_rq(intf, RX_EDPKT_DMA_VALID);
}

static void bcmasp_rx_edpkt_cfg_wq(struct bcmasp_intf *intf, dma_addr_t addr)
{
	rx_edpkt_cfg_wq(intf, addr, RX_EDPKT_RING_BUFFER_READ);
}

static void bcmasp_rx_edpkt_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
{
	rx_edpkt_dma_wq(intf, addr, RX_EDPKT_DMA_READ);
}

static unsigned long bcmasp_tx_spb_dma_rq(struct bcmasp_intf *intf)
{
	return tx_spb_dma_rq(intf, TX_SPB_DMA_READ);
}

static void bcmasp_tx_spb_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
{
	tx_spb_dma_wq(intf, addr, TX_SPB_DMA_VALID);
}

static const struct bcmasp_intf_ops bcmasp_intf_ops = {
	.rx_desc_read = bcmasp_rx_edpkt_dma_rq,
	.rx_buffer_write = bcmasp_rx_edpkt_cfg_wq,
	.rx_desc_write = bcmasp_rx_edpkt_dma_wq,
	.tx_read = bcmasp_tx_spb_dma_rq,
	.tx_write = bcmasp_tx_spb_dma_wq,
};

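/* Main transmit path: map the head and all fragments, fill one descriptor
 * per mapping and hand the updated valid pointer to the TX SPB DMA.
 */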
static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	unsigned int total_bytes, size;
	int spb_index, nr_frags, i, j;
	struct bcmasp_tx_cb *txcb;
	dma_addr_t mapping, valid;
	struct bcmasp_desc *desc;
	bool csum_hw = false;
	struct device *kdev;
	skb_frag_t *frag;

	kdev = &intf->parent->pdev->dev;

	nr_frags = skb_shinfo(skb)->nr_frags;

	if (tx_spb_ring_full(intf, nr_frags + 1)) {
		netif_stop_queue(dev);
		if (net_ratelimit())
			netdev_err(dev, "Tx Ring Full!\n");
		return NETDEV_TX_BUSY;
	}

	/* Save skb len before adding csum offload header */
	total_bytes = skb->len;
	skb = bcmasp_csum_offload(dev, skb, &csum_hw);
	if (!skb)
		return NETDEV_TX_OK;

	spb_index = intf->tx_spb_index;
	valid = intf->tx_spb_dma_valid;
	for (i = 0; i <= nr_frags; i++) {
		if (!i) {
			size = skb_headlen(skb);
			if (!nr_frags && size < (ETH_ZLEN + ETH_FCS_LEN)) {
				if (skb_put_padto(skb, ETH_ZLEN + ETH_FCS_LEN))
					return NETDEV_TX_OK;
				size = skb->len;
			}
			mapping = dma_map_single(kdev, skb->data, size,
						 DMA_TO_DEVICE);
		} else {
			frag = &skb_shinfo(skb)->frags[i - 1];
			size = skb_frag_size(frag);
			mapping = skb_frag_dma_map(kdev, frag, 0, size,
						   DMA_TO_DEVICE);
		}

		if (dma_mapping_error(kdev, mapping)) {
			intf->mib.tx_dma_failed++;
			spb_index = intf->tx_spb_index;
			for (j = 0; j < i; j++) {
				bcmasp_clean_txcb(intf, spb_index);
				spb_index = incr_ring(spb_index,
						      DESC_RING_COUNT);
			}
			/* Rewind so we do not have a hole */
			spb_index = intf->tx_spb_index;
			/* The skb was not handed to hardware; free it so it
			 * is not leaked when reporting the frame as handled.
			 */
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		txcb = &intf->tx_cbs[spb_index];
		desc = &intf->tx_spb_cpu[spb_index];
		memset(desc, 0, sizeof(*desc));
		txcb->skb = skb;
		txcb->bytes_sent = total_bytes;
		dma_unmap_addr_set(txcb, dma_addr, mapping);
		dma_unmap_len_set(txcb, dma_len, size);
		if (!i) {
			desc->flags |= DESC_SOF;
			if (csum_hw)
				desc->flags |= DESC_EPKT_CMD;
		}

		if (i == nr_frags) {
			desc->flags |= DESC_EOF;
			txcb->last = true;
		}

		desc->buf = mapping;
		desc->size = size;
		desc->flags |= DESC_INT_EN;

		netif_dbg(intf, tx_queued, dev,
			  "%s dma_buf=%pad dma_len=0x%x flags=0x%x index=0x%x\n",
			  __func__, &mapping, desc->size, desc->flags,
			  spb_index);

		spb_index = incr_ring(spb_index, DESC_RING_COUNT);
		valid = incr_last_byte(valid, intf->tx_spb_dma_addr,
				       DESC_RING_COUNT);
	}

	/* Ensure all descriptors have been written to DRAM for the
	 * hardware to see up-to-date contents.
	 */
	wmb();

	intf->tx_spb_index = spb_index;
	intf->tx_spb_dma_valid = valid;
	bcmasp_intf_tx_write(intf, intf->tx_spb_dma_valid);

	if (tx_spb_ring_full(intf, MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
}

static void bcmasp_netif_start(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	bcmasp_set_rx_mode(dev);
	napi_enable(&intf->tx_napi);
	napi_enable(&intf->rx_napi);

	bcmasp_enable_rx_irq(intf, 1);
	bcmasp_enable_tx_irq(intf, 1);

	phy_start(dev->phydev);
}

static void umac_reset(struct bcmasp_intf *intf)
{
	umac_wl(intf, 0x0, UMC_CMD);
	umac_wl(intf, UMC_CMD_SW_RESET, UMC_CMD);
	usleep_range(10, 100);
	/* We hold the umac in reset and bring it out of
	 * reset when phy link is up.
	 */
}

static void umac_set_hw_addr(struct bcmasp_intf *intf,
			     const unsigned char *addr)
{
	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
		    addr[3];
	u32 mac1 = (addr[4] << 8) | addr[5];

	umac_wl(intf, mac0, UMC_MAC0);
	umac_wl(intf, mac1, UMC_MAC1);
}

static void umac_enable_set(struct bcmasp_intf *intf, u32 mask,
			    unsigned int enable)
{
	u32 reg;

	reg = umac_rl(intf, UMC_CMD);
	if (reg & UMC_CMD_SW_RESET)
		return;
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	umac_wl(intf, reg, UMC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static void umac_init(struct bcmasp_intf *intf)
{
	umac_wl(intf, 0x800, UMC_FRM_LEN);
	umac_wl(intf, 0xffff, UMC_PAUSE_CNTRL);
	umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
}

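/* Reclaim completed TX descriptors: unmap buffers, free skbs, update
 * statistics and return the number of descriptors released.
 */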
static int bcmasp_tx_reclaim(struct bcmasp_intf *intf)
{
	struct bcmasp_intf_stats64 *stats = &intf->stats64;
	struct device *kdev = &intf->parent->pdev->dev;
	unsigned long read, released = 0;
	struct bcmasp_tx_cb *txcb;
	struct bcmasp_desc *desc;
	dma_addr_t mapping;

	read = bcmasp_intf_tx_read(intf);
	while (intf->tx_spb_dma_read != read) {
		txcb = &intf->tx_cbs[intf->tx_spb_clean_index];
		mapping = dma_unmap_addr(txcb, dma_addr);

		dma_unmap_single(kdev, mapping,
				 dma_unmap_len(txcb, dma_len),
				 DMA_TO_DEVICE);

		if (txcb->last) {
			dev_consume_skb_any(txcb->skb);

			u64_stats_update_begin(&stats->syncp);
			u64_stats_inc(&stats->tx_packets);
			u64_stats_add(&stats->tx_bytes, txcb->bytes_sent);
			u64_stats_update_end(&stats->syncp);
		}

		desc = &intf->tx_spb_cpu[intf->tx_spb_clean_index];

		netif_dbg(intf, tx_done, intf->ndev,
			  "%s dma_buf=%pad dma_len=0x%x flags=0x%x c_index=0x%x\n",
			  __func__, &mapping, desc->size, desc->flags,
			  intf->tx_spb_clean_index);

		bcmasp_clean_txcb(intf, intf->tx_spb_clean_index);
		released++;

		intf->tx_spb_clean_index = incr_ring(intf->tx_spb_clean_index,
						     DESC_RING_COUNT);
		intf->tx_spb_dma_read = incr_first_byte(intf->tx_spb_dma_read,
							intf->tx_spb_dma_addr,
							DESC_RING_COUNT);
	}

	return released;
}

static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmasp_intf *intf =
		container_of(napi, struct bcmasp_intf, tx_napi);
	int released = 0;

	released = bcmasp_tx_reclaim(intf);

	napi_complete(&intf->tx_napi);

	bcmasp_enable_tx_irq(intf, 1);

	if (released)
		netif_wake_queue(intf->ndev);

	return 0;
}

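/* NAPI receive poll: copy completed frames out of the RX ring buffer,
 * pass them to the stack and advance the hardware read pointers.
 */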
static int bcmasp_rx_poll(struct napi_struct *napi, int budget)
{
	struct bcmasp_intf *intf =
		container_of(napi, struct bcmasp_intf, rx_napi);
	struct bcmasp_intf_stats64 *stats = &intf->stats64;
	struct device *kdev = &intf->parent->pdev->dev;
	unsigned long processed = 0;
	struct bcmasp_desc *desc;
	struct sk_buff *skb;
	dma_addr_t valid;
	void *data;
	u64 flags;
	u32 len;

	valid = bcmasp_intf_rx_desc_read(intf) + 1;
	if (valid == intf->rx_edpkt_dma_addr + DESC_RING_SIZE)
		valid = intf->rx_edpkt_dma_addr;

	while ((processed < budget) && (valid != intf->rx_edpkt_dma_read)) {
		desc = &intf->rx_edpkt_cpu[intf->rx_edpkt_index];

		/* Ensure that descriptor has been fully written to DRAM by
		 * hardware before reading by the CPU
		 */
		rmb();

		/* Calculate virt addr by offsetting from physical addr */
		data = intf->rx_ring_cpu +
			(DESC_ADDR(desc->buf) - intf->rx_ring_dma);

		flags = DESC_FLAGS(desc->buf);
		if (unlikely(flags & (DESC_CRC_ERR | DESC_RX_SYM_ERR))) {
			if (net_ratelimit()) {
				netif_err(intf, rx_status, intf->ndev,
					  "flags=0x%llx\n", flags);
			}

			u64_stats_update_begin(&stats->syncp);
			if (flags & DESC_CRC_ERR)
				u64_stats_inc(&stats->rx_crc_errs);
			if (flags & DESC_RX_SYM_ERR)
				u64_stats_inc(&stats->rx_sym_errs);
			u64_stats_update_end(&stats->syncp);

			goto next;
		}

		dma_sync_single_for_cpu(kdev, DESC_ADDR(desc->buf), desc->size,
					DMA_FROM_DEVICE);

		len = desc->size;

		skb = napi_alloc_skb(napi, len);
		if (!skb) {
			u64_stats_update_begin(&stats->syncp);
			u64_stats_inc(&stats->rx_dropped);
			u64_stats_update_end(&stats->syncp);
			intf->mib.alloc_rx_skb_failed++;

			goto next;
		}

		skb_put(skb, len);
		memcpy(skb->data, data, len);

		skb_pull(skb, 2);
		len -= 2;
		if (likely(intf->crc_fwd)) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		if ((intf->ndev->features & NETIF_F_RXCSUM) &&
		    (desc->buf & DESC_CHKSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		skb->protocol = eth_type_trans(skb, intf->ndev);

		napi_gro_receive(napi, skb);

		u64_stats_update_begin(&stats->syncp);
		u64_stats_inc(&stats->rx_packets);
		u64_stats_add(&stats->rx_bytes, len);
		u64_stats_update_end(&stats->syncp);

next:
		bcmasp_intf_rx_buffer_write(intf, (DESC_ADDR(desc->buf) +
					    desc->size));

		processed++;
		intf->rx_edpkt_dma_read =
			incr_first_byte(intf->rx_edpkt_dma_read,
					intf->rx_edpkt_dma_addr,
					DESC_RING_COUNT);
		intf->rx_edpkt_index = incr_ring(intf->rx_edpkt_index,
						 DESC_RING_COUNT);
	}

	bcmasp_intf_rx_desc_write(intf, intf->rx_edpkt_dma_read);

	if (processed < budget) {
		napi_complete_done(&intf->rx_napi, processed);
		bcmasp_enable_rx_irq(intf, 1);
	}

	return processed;
}

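/* PHY link change callback: reprogram UniMAC speed, duplex and pause
 * settings and reflect the link state in the RGMII out-of-band control.
 */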
static void bcmasp_adj_link(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	u32 cmd_bits = 0, reg;
	int changed = 0;

	if (intf->old_link != phydev->link) {
		changed = 1;
		intf->old_link = phydev->link;
	}

	if (intf->old_duplex != phydev->duplex) {
		changed = 1;
		intf->old_duplex = phydev->duplex;
	}

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = UMC_CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = UMC_CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = UMC_CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = UMC_CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= UMC_CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= UMC_CMD_HD_EN;

	if (intf->old_pause != phydev->pause) {
		changed = 1;
		intf->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= UMC_CMD_RX_PAUSE_IGNORE | UMC_CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_rl(intf, UMC_CMD);
		reg &= ~((UMC_CMD_SPEED_MASK << UMC_CMD_SPEED_SHIFT) |
			UMC_CMD_HD_EN | UMC_CMD_RX_PAUSE_IGNORE |
			UMC_CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		if (reg & UMC_CMD_SW_RESET) {
			reg &= ~UMC_CMD_SW_RESET;
			umac_wl(intf, reg, UMC_CMD);
			udelay(2);
			reg |= UMC_CMD_TX_EN | UMC_CMD_RX_EN | UMC_CMD_PROMISC;
		}
		umac_wl(intf, reg, UMC_CMD);

		intf->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
		bcmasp_eee_enable_set(intf, intf->eee.eee_active);
	}

	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
	if (phydev->link)
		reg |= RGMII_LINK;
	else
		reg &= ~RGMII_LINK;
	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);

	if (changed)
		phy_print_status(phydev);
}

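/* Allocate the RX ring buffer and EDPKT descriptor ring and program the
 * hardware base/end/valid pointers for this interface.
 */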
static int bcmasp_init_rx(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;
	struct page *buffer_pg;
	dma_addr_t dma;
	void *p;
	u32 reg;
	int ret;

	intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
	buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
	if (!buffer_pg)
		return -ENOMEM;

	dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
			   DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, dma)) {
		__free_pages(buffer_pg, intf->rx_buf_order);
		return -ENOMEM;
	}
	intf->rx_ring_cpu = page_to_virt(buffer_pg);
	intf->rx_ring_dma = dma;
	intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;

	p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->rx_edpkt_dma_addr,
			       GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto free_rx_ring;
	}
	intf->rx_edpkt_cpu = p;

	netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);

	intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr;
	intf->rx_edpkt_index = 0;

	/* Make sure channels are disabled */
	rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);

	/* Rx SPB */
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_READ);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_WRITE);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_BASE);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
			RX_EDPKT_RING_BUFFER_END);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
			RX_EDPKT_RING_BUFFER_VALID);

	/* EDPKT */
	rx_edpkt_cfg_wl(intf, (RX_EDPKT_CFG_CFG0_RBUF_4K <<
			RX_EDPKT_CFG_CFG0_DBUF_SHIFT) |
		       (RX_EDPKT_CFG_CFG0_64_ALN <<
			RX_EDPKT_CFG_CFG0_BALN_SHIFT) |
		       (RX_EDPKT_CFG_CFG0_EFRM_STUF),
			RX_EDPKT_CFG_CFG0);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1),
			RX_EDPKT_DMA_END);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1),
			RX_EDPKT_DMA_VALID);

	reg = UMAC2FB_CFG_DEFAULT_EN |
	      ((intf->channel + 11) << UMAC2FB_CFG_CHID_SHIFT);
	reg |= (0xd << UMAC2FB_CFG_OK_SEND_SHIFT);
	umac2fb_wl(intf, reg, UMAC2FB_CFG);

	return 0;

free_rx_ring:
	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
		       DMA_FROM_DEVICE);
	__free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);

	return ret;
}

static void bcmasp_reclaim_free_all_rx(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;

	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
			  intf->rx_edpkt_dma_addr);
	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
		       DMA_FROM_DEVICE);
	__free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
}

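/* Allocate the TX SPB descriptor ring and control blocks and program the
 * hardware base/end/valid pointers for this interface.
 */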
static int bcmasp_init_tx(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;
	void *p;
	int ret;

	p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->tx_spb_dma_addr,
			       GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	intf->tx_spb_cpu = p;
	intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1;
	intf->tx_spb_dma_read = intf->tx_spb_dma_addr;

	intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
			       GFP_KERNEL);
	if (!intf->tx_cbs) {
		ret = -ENOMEM;
		goto free_tx_spb;
	}

	intf->tx_spb_index = 0;
	intf->tx_spb_clean_index = 0;
	memset(intf->tx_cbs, 0, sizeof(struct bcmasp_tx_cb) * DESC_RING_COUNT);

	netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);

	/* Make sure channels are disabled */
	tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
	tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);

	/* Tx SPB */
	tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT),
		       TX_SPB_CTRL_XF_CTRL2);
	tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR);
	tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT);
	tx_spb_top_wl(intf, 0x0, TX_SPB_TOP_SPRE_BW_CTRL);

	tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ);
	tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END);
	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);

	return 0;

free_tx_spb:
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
			  intf->tx_spb_dma_addr);

	return ret;
}

static void bcmasp_reclaim_free_all_tx(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;

	/* Free descriptors */
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
			  intf->tx_spb_dma_addr);

	/* Free cbs */
	kfree(intf->tx_cbs);
}

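/* Power the internal EPHY up or down via the RGMII control registers */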
static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable)
{
	u32 mask = RGMII_EPHY_CFG_IDDQ_BIAS | RGMII_EPHY_CFG_EXT_PWRDOWN |
		   RGMII_EPHY_CFG_IDDQ_GLOBAL;
	u32 reg;

	reg = rgmii_rl(intf, RGMII_EPHY_CNTRL);
	if (enable) {
		reg &= ~RGMII_EPHY_CK25_DIS;
		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
		mdelay(1);

		reg &= ~mask;
		reg |= RGMII_EPHY_RESET;
		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
		mdelay(1);

		reg &= ~RGMII_EPHY_RESET;
	} else {
		reg |= mask | RGMII_EPHY_RESET;
		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
		mdelay(1);
		reg |= RGMII_EPHY_CK25_DIS;
	}
	rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
	mdelay(1);

	/* Set or clear the LED control override to avoid lighting up LEDs
	 * while the EPHY is powered off and drawing unnecessary current.
	 */
	reg = rgmii_rl(intf, RGMII_SYS_LED_CNTRL);
	if (enable)
		reg &= ~RGMII_SYS_LED_CNTRL_LINK_OVRD;
	else
		reg |= RGMII_SYS_LED_CNTRL_LINK_OVRD;
	rgmii_wl(intf, reg, RGMII_SYS_LED_CNTRL);
}

static void bcmasp_rgmii_mode_en_set(struct bcmasp_intf *intf, bool enable)
{
	u32 reg;

	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
	reg &= ~RGMII_OOB_DIS;
	if (enable)
		reg |= RGMII_MODE_EN;
	else
		reg &= ~RGMII_MODE_EN;
	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
}

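/* Tear down the datapath: stop NAPI, flush and disable TX/RX and free the
 * descriptor rings.
 */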
static void bcmasp_netif_deinit(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	u32 reg, timeout = 1000;

	napi_disable(&intf->tx_napi);

	bcmasp_enable_tx(intf, 0);

	/* Flush any TX packets in the pipe */
	tx_spb_dma_wl(intf, TX_SPB_DMA_FIFO_FLUSH, TX_SPB_DMA_FIFO_CTRL);
	do {
		reg = tx_spb_dma_rl(intf, TX_SPB_DMA_FIFO_STATUS);
		if (!(reg & TX_SPB_DMA_FIFO_FLUSH))
			break;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);
	tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);

	bcmasp_tx_reclaim(intf);

	umac_enable_set(intf, UMC_CMD_TX_EN, 0);

	phy_stop(dev->phydev);

	umac_enable_set(intf, UMC_CMD_RX_EN, 0);

	bcmasp_flush_rx_port(intf);
	usleep_range(1000, 2000);
	bcmasp_enable_rx(intf, 0);

	napi_disable(&intf->rx_napi);

	/* Disable interrupts */
	bcmasp_enable_tx_irq(intf, 0);
	bcmasp_enable_rx_irq(intf, 0);

	netif_napi_del(&intf->tx_napi);
	bcmasp_reclaim_free_all_tx(intf);

	netif_napi_del(&intf->rx_napi);
	bcmasp_reclaim_free_all_rx(intf);
}

static int bcmasp_stop(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	netif_dbg(intf, ifdown, dev, "bcmasp stop\n");

	/* Stop tx from updating HW */
	netif_tx_disable(dev);

	bcmasp_netif_deinit(dev);

	phy_disconnect(dev->phydev);

	/* Disable internal EPHY or external PHY */
	if (intf->internal_phy)
		bcmasp_ephy_enable_set(intf, false);
	else
		bcmasp_rgmii_mode_en_set(intf, false);

	/* Disable the interface clocks */
	bcmasp_core_clock_set_intf(intf, false);

	clk_disable_unprepare(intf->parent->clk);

	return 0;
}

static void bcmasp_configure_port(struct bcmasp_intf *intf)
{
	u32 reg, id_mode_dis = 0;

	reg = rgmii_rl(intf, RGMII_PORT_CNTRL);
	reg &= ~RGMII_PORT_MODE_MASK;

	switch (intf->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
		/* RGMII_NO_ID: TXC transitions at the same time as TXD
		 *		(requires PCB or receiver-side delay)
		 * RGMII:	Add 2ns delay on TXC (90 degree shift)
		 *
		 * ID is implicitly disabled for 100Mbps (RG)MII operation.
		 */
		id_mode_dis = RGMII_ID_MODE_DIS;
		fallthrough;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		reg |= RGMII_PORT_MODE_EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		reg |= RGMII_PORT_MODE_EXT_EPHY;
		break;
	default:
		break;
	}

	if (intf->internal_phy)
		reg |= RGMII_PORT_MODE_EPHY;

	rgmii_wl(intf, reg, RGMII_PORT_CNTRL);

	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
	reg &= ~RGMII_ID_MODE_DIS;
	reg |= id_mode_dis;
	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
}

static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	phy_interface_t phy_iface = intf->phy_interface;
	u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE |
			PHY_BRCM_DIS_TXCRXC_NOENRGY |
			PHY_BRCM_IDDQ_SUSPEND;
	struct phy_device *phydev = NULL;
	int ret;

	/* Always enable interface clocks */
	bcmasp_core_clock_set_intf(intf, true);

	/* Enable internal PHY or external PHY before any MAC activity */
	if (intf->internal_phy)
		bcmasp_ephy_enable_set(intf, true);
	else
		bcmasp_rgmii_mode_en_set(intf, true);
	bcmasp_configure_port(intf);

	/* This is an ugly quirk but we have not been correctly
	 * interpreting the phy_interface values and we have done that
	 * across different drivers, so at least we are consistent in
	 * our mistakes.
	 *
	 * When the Generic PHY driver is in use either the PHY has
	 * been strapped or programmed correctly by the boot loader so
	 * we should stick to our incorrect interpretation since we
	 * have validated it.
	 *
	 * Now when a dedicated PHY driver is in use, we need to
	 * reverse the meaning of the phy_interface_mode values to
	 * something that the PHY driver will interpret and act on such
	 * that we have two mistakes canceling themselves so to speak.
	 * We only do this for the two modes that GENET driver
	 * officially supports on Broadcom STB chips:
	 * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID.
	 * Other modes are not *officially* supported with the boot
	 * loader and the scripted environment generating Device Tree
	 * blobs for those platforms.
	 *
	 * Note that internal PHY and fixed-link configurations are not
	 * affected because they use different phy_interface_t values
	 * or the Generic PHY driver.
	 */
	switch (phy_iface) {
	case PHY_INTERFACE_MODE_RGMII:
		phy_iface = PHY_INTERFACE_MODE_RGMII_ID;
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phy_iface = PHY_INTERFACE_MODE_RGMII_RXID;
		break;
	default:
		break;
	}

	if (phy_connect) {
		phydev = of_phy_connect(dev, intf->phy_dn,
					bcmasp_adj_link, phy_flags,
					phy_iface);
		if (!phydev) {
			ret = -ENODEV;
			netdev_err(dev, "could not attach to PHY\n");
			goto err_phy_disable;
		}

		/* Indicate that the MAC is responsible for PHY PM */
		phydev->mac_managed_pm = true;
	} else if (!intf->wolopts) {
		ret = phy_resume(dev->phydev);
		if (ret)
			goto err_phy_disable;
	}

	umac_reset(intf);

	umac_init(intf);

	umac_set_hw_addr(intf, dev->dev_addr);

	intf->old_duplex = -1;
	intf->old_link = -1;
	intf->old_pause = -1;

	ret = bcmasp_init_tx(intf);
	if (ret)
		goto err_phy_disconnect;

	/* Turn on asp */
	bcmasp_enable_tx(intf, 1);

	ret = bcmasp_init_rx(intf);
	if (ret)
		goto err_reclaim_tx;

	bcmasp_enable_rx(intf, 1);

	intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD);

	bcmasp_netif_start(dev);

	netif_start_queue(dev);

	return 0;

err_reclaim_tx:
	bcmasp_reclaim_free_all_tx(intf);
err_phy_disconnect:
	if (phydev)
		phy_disconnect(phydev);
err_phy_disable:
	if (intf->internal_phy)
		bcmasp_ephy_enable_set(intf, false);
	else
		bcmasp_rgmii_mode_en_set(intf, false);
	return ret;
}

static int bcmasp_open(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	int ret;

	netif_dbg(intf, ifup, dev, "bcmasp open\n");

	ret = clk_prepare_enable(intf->parent->clk);
	if (ret)
		return ret;

	ret = bcmasp_netif_init(dev, true);
	if (ret)
		clk_disable_unprepare(intf->parent->clk);

	return ret;
}

static void bcmasp_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	netif_dbg(intf, tx_err, dev, "transmit timeout!\n");
	intf->mib.tx_timeout_cnt++;
}

static int bcmasp_get_phys_port_name(struct net_device *dev,
				     char *name, size_t len)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	if (snprintf(name, len, "p%d", intf->port) >= len)
		return -EINVAL;

	return 0;
}

static void bcmasp_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *stats)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct bcmasp_intf_stats64 *lstats;
	unsigned int start;

	lstats = &intf->stats64;

	do {
		start = u64_stats_fetch_begin(&lstats->syncp);
		stats->rx_packets = u64_stats_read(&lstats->rx_packets);
		stats->rx_bytes = u64_stats_read(&lstats->rx_bytes);
		stats->rx_dropped = u64_stats_read(&lstats->rx_dropped);
		stats->rx_crc_errors = u64_stats_read(&lstats->rx_crc_errs);
		stats->rx_frame_errors = u64_stats_read(&lstats->rx_sym_errs);
		stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;

		stats->tx_packets = u64_stats_read(&lstats->tx_packets);
		stats->tx_bytes = u64_stats_read(&lstats->tx_bytes);
	} while (u64_stats_fetch_retry(&lstats->syncp, start));
}

static const struct net_device_ops bcmasp_netdev_ops = {
	.ndo_open		= bcmasp_open,
	.ndo_stop		= bcmasp_stop,
	.ndo_start_xmit		= bcmasp_xmit,
	.ndo_tx_timeout		= bcmasp_tx_timeout,
	.ndo_set_rx_mode	= bcmasp_set_rx_mode,
	.ndo_get_phys_port_name	= bcmasp_get_phys_port_name,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_get_stats64	= bcmasp_get_stats64,
};

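/* Compute the per-port and per-channel register block addresses used by
 * this interface.
 */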
static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf)
{
	/* Per port */
	intf->res.umac = priv->base + UMC_OFFSET(intf);
	intf->res.umac2fb = priv->base + (priv->hw_info->umac2fb +
					  (intf->port * 0x4));
	intf->res.rgmii = priv->base + RGMII_OFFSET(intf);

	/* Per ch */
	intf->tx_spb_dma = priv->base + TX_SPB_DMA_OFFSET(intf);
	intf->res.tx_spb_ctrl = priv->base + TX_SPB_CTRL_OFFSET(intf);
	intf->res.tx_spb_top = priv->base + TX_SPB_TOP_OFFSET(intf);
	intf->res.tx_epkt_core = priv->base + TX_EPKT_C_OFFSET(intf);
	intf->res.tx_pause_ctrl = priv->base + TX_PAUSE_CTRL_OFFSET(intf);

	intf->rx_edpkt_dma = priv->base + RX_EDPKT_DMA_OFFSET(intf);
	intf->rx_edpkt_cfg = priv->base + RX_EDPKT_CFG_OFFSET(intf);
}

#define MAX_IRQ_STR_LEN		64
struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
					    struct device_node *ndev_dn, int i)
{
	struct device *dev = &priv->pdev->dev;
	struct bcmasp_intf *intf;
	struct net_device *ndev;
	int ch, port, ret;

	if (of_property_read_u32(ndev_dn, "reg", &port)) {
		dev_warn(dev, "%s: invalid port number\n", ndev_dn->name);
		goto err;
	}

	if (of_property_read_u32(ndev_dn, "brcm,channel", &ch)) {
		dev_warn(dev, "%s: invalid ch number\n", ndev_dn->name);
		goto err;
	}

	ndev = alloc_etherdev(sizeof(struct bcmasp_intf));
	if (!ndev) {
		dev_warn(dev, "%s: unable to alloc ndev\n", ndev_dn->name);
		goto err;
	}
	intf = netdev_priv(ndev);

	intf->parent = priv;
	intf->ndev = ndev;
	intf->channel = ch;
	intf->port = port;
	intf->ndev_dn = ndev_dn;
	intf->index = i;

	ret = of_get_phy_mode(ndev_dn, &intf->phy_interface);
	if (ret < 0) {
		dev_err(dev, "invalid PHY mode property\n");
		goto err_free_netdev;
	}

	if (intf->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
		intf->internal_phy = true;

	intf->phy_dn = of_parse_phandle(ndev_dn, "phy-handle", 0);
	if (!intf->phy_dn && of_phy_is_fixed_link(ndev_dn)) {
		ret = of_phy_register_fixed_link(ndev_dn);
		if (ret) {
			dev_warn(dev, "%s: failed to register fixed PHY\n",
				 ndev_dn->name);
			goto err_free_netdev;
		}
		intf->phy_dn = ndev_dn;
	}

	/* Map resource */
	bcmasp_map_res(priv, intf);

	if ((!phy_interface_mode_is_rgmii(intf->phy_interface) &&
	     intf->phy_interface != PHY_INTERFACE_MODE_MII &&
	     intf->phy_interface != PHY_INTERFACE_MODE_INTERNAL) ||
	    (intf->port != 1 && intf->internal_phy)) {
		netdev_err(intf->ndev, "invalid PHY mode: %s for port %d\n",
			   phy_modes(intf->phy_interface), intf->port);
		ret = -EINVAL;
		goto err_free_netdev;
	}

	ret = of_get_ethdev_address(ndev_dn, ndev);
	if (ret) {
		netdev_warn(ndev, "using random Ethernet MAC\n");
		eth_hw_addr_random(ndev);
	}

	SET_NETDEV_DEV(ndev, dev);
	intf->ops = &bcmasp_intf_ops;
	ndev->netdev_ops = &bcmasp_netdev_ops;
	ndev->ethtool_ops = &bcmasp_ethtool_ops;
	intf->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV |
					  NETIF_MSG_PROBE |
					  NETIF_MSG_LINK);
	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			  NETIF_F_RXCSUM;
	ndev->hw_features |= ndev->features;
	ndev->needed_headroom += sizeof(struct bcmasp_pkt_offload);

	return intf;

err_free_netdev:
	free_netdev(ndev);
err:
	return NULL;
}

void bcmasp_interface_destroy(struct bcmasp_intf *intf)
{
	if (intf->ndev->reg_state == NETREG_REGISTERED)
		unregister_netdev(intf->ndev);
	if (of_phy_is_fixed_link(intf->ndev_dn))
		of_phy_deregister_fixed_link(intf->ndev_dn);
	free_netdev(intf->ndev);
}

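/* Prepare the UniMAC for Wake-on-LAN: program magic packet and SecureOn
 * settings and leave the receiver enabled while suspended.
 */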
static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
{
	struct net_device *ndev = intf->ndev;
	u32 reg;

	reg = umac_rl(intf, UMC_MPD_CTRL);
	if (intf->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
		reg |= UMC_MPD_CTRL_MPD_EN;
	reg &= ~UMC_MPD_CTRL_PSW_EN;
	if (intf->wolopts & WAKE_MAGICSECURE) {
		/* Program the SecureOn password */
		umac_wl(intf, get_unaligned_be16(&intf->sopass[0]),
			UMC_PSW_MS);
		umac_wl(intf, get_unaligned_be32(&intf->sopass[2]),
			UMC_PSW_LS);
		reg |= UMC_MPD_CTRL_PSW_EN;
	}
	umac_wl(intf, reg, UMC_MPD_CTRL);

	if (intf->wolopts & WAKE_FILTER)
		bcmasp_netfilt_suspend(intf);

	/* Bring UniMAC out of reset if needed and enable RX */
	reg = umac_rl(intf, UMC_CMD);
	if (reg & UMC_CMD_SW_RESET)
		reg &= ~UMC_CMD_SW_RESET;

	reg |= UMC_CMD_RX_EN | UMC_CMD_PROMISC;
	umac_wl(intf, reg, UMC_CMD);

	umac_enable_set(intf, UMC_CMD_RX_EN, 1);

	if (intf->parent->wol_irq > 0) {
		wakeup_intr2_core_wl(intf->parent, 0xffffffff,
				     ASP_WAKEUP_INTR2_MASK_CLEAR);
	}

	netif_dbg(intf, wol, ndev, "entered WOL mode\n");
}

int bcmasp_interface_suspend(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;
	struct net_device *dev = intf->ndev;
	int ret = 0;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);

	bcmasp_netif_deinit(dev);

	if (!intf->wolopts) {
		ret = phy_suspend(dev->phydev);
		if (ret)
			goto out;

		if (intf->internal_phy)
			bcmasp_ephy_enable_set(intf, false);
		else
			bcmasp_rgmii_mode_en_set(intf, false);

		/* If Wake-on-LAN is disabled, we can safely
		 * disable the network interface clocks.
		 */
		bcmasp_core_clock_set_intf(intf, false);
	}

	if (device_may_wakeup(kdev) && intf->wolopts)
		bcmasp_suspend_to_wol(intf);

	clk_disable_unprepare(intf->parent->clk);

	return ret;

out:
	bcmasp_netif_init(dev, false);
	return ret;
}

static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
{
	u32 reg;

	reg = umac_rl(intf, UMC_MPD_CTRL);
	reg &= ~UMC_MPD_CTRL_MPD_EN;
	umac_wl(intf, reg, UMC_MPD_CTRL);

	if (intf->parent->wol_irq > 0) {
		wakeup_intr2_core_wl(intf->parent, 0xffffffff,
				     ASP_WAKEUP_INTR2_MASK_SET);
	}
}

int bcmasp_interface_resume(struct bcmasp_intf *intf)
{
	struct net_device *dev = intf->ndev;
	int ret;

	if (!netif_running(dev))
		return 0;

	ret = clk_prepare_enable(intf->parent->clk);
	if (ret)
		return ret;

	ret = bcmasp_netif_init(dev, false);
	if (ret)
		goto out;

	bcmasp_resume_from_wol(intf);

	if (intf->eee.eee_enabled)
		bcmasp_eee_enable_set(intf, true);

	netif_device_attach(dev);

	return 0;

out:
	clk_disable_unprepare(intf->parent->clk);
	return ret;
}