// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Rafał Miłecki <rafal@milecki.pl>
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "bcm4908_enet.h"
#include "unimac.h"

#define ENET_DMA_CH_RX_CFG			ENET_DMA_CH0_CFG
#define ENET_DMA_CH_TX_CFG			ENET_DMA_CH1_CFG
#define ENET_DMA_CH_RX_STATE_RAM		ENET_DMA_CH0_STATE_RAM
#define ENET_DMA_CH_TX_STATE_RAM		ENET_DMA_CH1_STATE_RAM

#define ENET_TX_BDS_NUM				200
#define ENET_RX_BDS_NUM				200
#define ENET_RX_BDS_NUM_MAX			8192

#define ENET_DMA_INT_DEFAULTS			(ENET_DMA_CH_CFG_INT_DONE | \
						 ENET_DMA_CH_CFG_INT_NO_DESC | \
						 ENET_DMA_CH_CFG_INT_BUFF_DONE)
#define ENET_DMA_MAX_BURST_LEN			8 /* in 64 bit words */

#define ENET_MTU_MIN				60
#define ENET_MTU_MAX				1500 /* Is it possible to support 2044? */
#define ENET_MTU_MAX_EXTRA_SIZE			32 /* L2 */

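/*
 * Hardware DMA buffer descriptor: a control/length word followed by the
 * buffer address, both little endian as seen by the DMA engine.
 */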
struct bcm4908_enet_dma_ring_bd {
	__le32 ctl;
	__le32 addr;
} __packed;

struct bcm4908_enet_dma_ring_slot {
	struct sk_buff *skb;
	unsigned int len;
	dma_addr_t dma_addr;
};

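/* One DMA ring (TX or RX): hardware buffer descriptors plus per-slot bookkeeping */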
struct bcm4908_enet_dma_ring {
	int is_tx;
	int read_idx;
	int write_idx;
	int length;
	u16 cfg_block;
	u16 st_ram_block;

	union {
		void *cpu_addr;
		struct bcm4908_enet_dma_ring_bd *buf_desc;
	};
	dma_addr_t dma_addr;

	struct bcm4908_enet_dma_ring_slot *slots;
};

struct bcm4908_enet {
	struct device *dev;
	struct net_device *netdev;
	struct napi_struct napi;
	void __iomem *base;

	struct bcm4908_enet_dma_ring tx_ring;
	struct bcm4908_enet_dma_ring rx_ring;
};

/***
 * R/W ops
 */

static u32 enet_read(struct bcm4908_enet *enet, u16 offset)
{
	return readl(enet->base + offset);
}

static void enet_write(struct bcm4908_enet *enet, u16 offset, u32 value)
{
	writel(value, enet->base + offset);
}

static void enet_maskset(struct bcm4908_enet *enet, u16 offset, u32 mask, u32 set)
{
	u32 val;

	WARN_ON(set & ~mask);

	val = enet_read(enet, offset);
	val = (val & ~mask) | (set & mask);
	enet_write(enet, offset, val);
}

static void enet_set(struct bcm4908_enet *enet, u16 offset, u32 set)
{
	enet_maskset(enet, offset, set, set);
}

static u32 enet_umac_read(struct bcm4908_enet *enet, u16 offset)
{
	return enet_read(enet, ENET_UNIMAC + offset);
}

static void enet_umac_write(struct bcm4908_enet *enet, u16 offset, u32 value)
{
	enet_write(enet, ENET_UNIMAC + offset, value);
}

static void enet_umac_set(struct bcm4908_enet *enet, u16 offset, u32 set)
{
	enet_set(enet, ENET_UNIMAC + offset, set);
}

/***
 * Helpers
 */

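/*
 * The driver uses a single "rx" interrupt; interrupt masking and
 * acknowledging is done through the RX DMA channel registers.
 */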
static void bcm4908_enet_intrs_on(struct bcm4908_enet *enet)
{
	enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, ENET_DMA_INT_DEFAULTS);
}

static void bcm4908_enet_intrs_off(struct bcm4908_enet *enet)
{
	enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, 0);
}

static void bcm4908_enet_intrs_ack(struct bcm4908_enet *enet)
{
	enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_STAT, ENET_DMA_INT_DEFAULTS);
}

/***
 * DMA
 */

static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet,
				       struct bcm4908_enet_dma_ring *ring)
{
	int size = ring->length * sizeof(struct bcm4908_enet_dma_ring_bd);
	struct device *dev = enet->dev;

	ring->cpu_addr = dma_alloc_coherent(dev, size, &ring->dma_addr, GFP_KERNEL);
	if (!ring->cpu_addr)
		return -ENOMEM;

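	/* The hardware expects the descriptor ring to be 64-byte aligned */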
	if (((uintptr_t)ring->cpu_addr) & (0x40 - 1)) {
		dev_err(dev, "Invalid DMA ring alignment\n");
		goto err_free_buf_descs;
	}

	ring->slots = kcalloc(ring->length, sizeof(*ring->slots), GFP_KERNEL);
	if (!ring->slots)
		goto err_free_buf_descs;

	ring->read_idx = 0;
	ring->write_idx = 0;

	return 0;

err_free_buf_descs:
	dma_free_coherent(dev, size, ring->cpu_addr, ring->dma_addr);
	ring->cpu_addr = NULL;
	return -ENOMEM;
}

static void bcm4908_enet_dma_free(struct bcm4908_enet *enet)
{
	struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
	struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
	struct device *dev = enet->dev;
	int size;

	size = rx_ring->length * sizeof(struct bcm4908_enet_dma_ring_bd);
	if (rx_ring->cpu_addr)
		dma_free_coherent(dev, size, rx_ring->cpu_addr, rx_ring->dma_addr);
	kfree(rx_ring->slots);

	size = tx_ring->length * sizeof(struct bcm4908_enet_dma_ring_bd);
	if (tx_ring->cpu_addr)
		dma_free_coherent(dev, size, tx_ring->cpu_addr, tx_ring->dma_addr);
	kfree(tx_ring->slots);
}

static int bcm4908_enet_dma_alloc(struct bcm4908_enet *enet)
{
	struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
	struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
	struct device *dev = enet->dev;
	int err;

	tx_ring->length = ENET_TX_BDS_NUM;
	tx_ring->is_tx = 1;
	tx_ring->cfg_block = ENET_DMA_CH_TX_CFG;
	tx_ring->st_ram_block = ENET_DMA_CH_TX_STATE_RAM;
	err = bcm4908_dma_alloc_buf_descs(enet, tx_ring);
	if (err) {
		dev_err(dev, "Failed to alloc TX buf descriptors: %d\n", err);
		return err;
	}

	rx_ring->length = ENET_RX_BDS_NUM;
	rx_ring->is_tx = 0;
	rx_ring->cfg_block = ENET_DMA_CH_RX_CFG;
	rx_ring->st_ram_block = ENET_DMA_CH_RX_STATE_RAM;
	err = bcm4908_dma_alloc_buf_descs(enet, rx_ring);
	if (err) {
		dev_err(dev, "Failed to alloc RX buf descriptors: %d\n", err);
		bcm4908_enet_dma_free(enet);
		return err;
	}

	return 0;
}

static void bcm4908_enet_dma_reset(struct bcm4908_enet *enet)
{
	struct bcm4908_enet_dma_ring *rings[] = { &enet->rx_ring, &enet->tx_ring };
	int i;

	/* Disable the DMA controller and channel */
	for (i = 0; i < ARRAY_SIZE(rings); i++)
		enet_write(enet, rings[i]->cfg_block + ENET_DMA_CH_CFG, 0);
	enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN, 0);

	/* Reset channels state */
	for (i = 0; i < ARRAY_SIZE(rings); i++) {
		struct bcm4908_enet_dma_ring *ring = rings[i];

		enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR, 0);
		enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_STATE_DATA, 0);
		enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_DESC_LEN_STATUS, 0);
		enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_DESC_BASE_BUFPTR, 0);
	}
}

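/* Allocate and map a new RX skb and hand its descriptor back to the hardware */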
static int bcm4908_enet_dma_alloc_rx_buf(struct bcm4908_enet *enet, unsigned int idx)
{
	struct bcm4908_enet_dma_ring_bd *buf_desc = &enet->rx_ring.buf_desc[idx];
	struct bcm4908_enet_dma_ring_slot *slot = &enet->rx_ring.slots[idx];
	struct device *dev = enet->dev;
	u32 tmp;
	int err;

	slot->len = ENET_MTU_MAX + ENET_MTU_MAX_EXTRA_SIZE;

	slot->skb = netdev_alloc_skb(enet->netdev, slot->len);
	if (!slot->skb)
		return -ENOMEM;

	slot->dma_addr = dma_map_single(dev, slot->skb->data, slot->len, DMA_FROM_DEVICE);
	err = dma_mapping_error(dev, slot->dma_addr);
	if (err) {
		dev_err(dev, "Failed to map DMA buffer: %d\n", err);
		kfree_skb(slot->skb);
		slot->skb = NULL;
		return err;
	}

	tmp = slot->len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
	tmp |= DMA_CTL_STATUS_OWN;
	if (idx == enet->rx_ring.length - 1)
		tmp |= DMA_CTL_STATUS_WRAP;
	buf_desc->ctl = cpu_to_le32(tmp);
	buf_desc->addr = cpu_to_le32(slot->dma_addr);

	return 0;
}

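/* Reset a DMA channel and program the ring's base descriptor pointer */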
static void bcm4908_enet_dma_ring_init(struct bcm4908_enet *enet,
				       struct bcm4908_enet_dma_ring *ring)
{
	int reset_channel = 0; /* We support only 1 main channel (with TX and RX) */
	int reset_subch = ring->is_tx ? 1 : 0;

	/* Reset the DMA channel */
	enet_write(enet, ENET_DMA_CTRL_CHANNEL_RESET, BIT(reset_channel * 2 + reset_subch));
	enet_write(enet, ENET_DMA_CTRL_CHANNEL_RESET, 0);

	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, 0);
	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_MAX_BURST, ENET_DMA_MAX_BURST_LEN);
	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, 0);

	enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR,
		   (uint32_t)ring->dma_addr);
}

static void bcm4908_enet_dma_uninit(struct bcm4908_enet *enet)
{
	struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
	struct bcm4908_enet_dma_ring_slot *slot;
	struct device *dev = enet->dev;
	int i;

	for (i = rx_ring->length - 1; i >= 0; i--) {
		slot = &rx_ring->slots[i];
		if (!slot->skb)
			continue;
		dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_FROM_DEVICE);
		kfree_skb(slot->skb);
		slot->skb = NULL;
	}
}

static int bcm4908_enet_dma_init(struct bcm4908_enet *enet)
{
	struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
	struct device *dev = enet->dev;
	int err;
	int i;

	for (i = 0; i < rx_ring->length; i++) {
		err = bcm4908_enet_dma_alloc_rx_buf(enet, i);
		if (err) {
			dev_err(dev, "Failed to alloc RX buffer: %d\n", err);
			bcm4908_enet_dma_uninit(enet);
			return err;
		}
	}

	bcm4908_enet_dma_ring_init(enet, &enet->tx_ring);
	bcm4908_enet_dma_ring_init(enet, &enet->rx_ring);

	return 0;
}

static void bcm4908_enet_dma_tx_ring_enable(struct bcm4908_enet *enet,
					    struct bcm4908_enet_dma_ring *ring)
{
	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE);
}

static void bcm4908_enet_dma_tx_ring_disable(struct bcm4908_enet *enet,
					     struct bcm4908_enet_dma_ring *ring)
{
	enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, 0);
}

static void bcm4908_enet_dma_rx_ring_enable(struct bcm4908_enet *enet,
					    struct bcm4908_enet_dma_ring *ring)
{
	enet_set(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE);
}

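/* Disabling the RX channel may not take effect immediately; retry for up to 2 ms */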
static void bcm4908_enet_dma_rx_ring_disable(struct bcm4908_enet *enet,
					     struct bcm4908_enet_dma_ring *ring)
{
	unsigned long deadline;
	u32 tmp;

	enet_maskset(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE, 0);

	deadline = jiffies + usecs_to_jiffies(2000);
	do {
		tmp = enet_read(enet, ring->cfg_block + ENET_DMA_CH_CFG);
		if (!(tmp & ENET_DMA_CH_CFG_ENABLE))
			return;
		enet_maskset(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE, 0);
		usleep_range(10, 30);
	} while (!time_after_eq(jiffies, deadline));

	dev_warn(enet->dev, "Timeout waiting for DMA RX stop\n");
}

/***
 * Ethernet driver
 */

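/* Basic UniMAC setup: soft reset, flush FIFOs, clear MIB counters, force a 1 Gbit/s link */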
static void bcm4908_enet_gmac_init(struct bcm4908_enet *enet)
{
	u32 cmd;

	cmd = enet_umac_read(enet, UMAC_CMD);
	enet_umac_write(enet, UMAC_CMD, cmd | CMD_SW_RESET);
	enet_umac_write(enet, UMAC_CMD, cmd & ~CMD_SW_RESET);

	enet_set(enet, ENET_FLUSH, ENET_FLUSH_RXFIFO_FLUSH | ENET_FLUSH_TXFIFO_FLUSH);
	enet_maskset(enet, ENET_FLUSH, ENET_FLUSH_RXFIFO_FLUSH | ENET_FLUSH_TXFIFO_FLUSH, 0);

	enet_set(enet, ENET_MIB_CTRL, ENET_MIB_CTRL_CLR_MIB);
	enet_maskset(enet, ENET_MIB_CTRL, ENET_MIB_CTRL_CLR_MIB, 0);

	cmd = enet_umac_read(enet, UMAC_CMD);
	cmd &= ~(CMD_SPEED_MASK << CMD_SPEED_SHIFT);
	cmd &= ~CMD_TX_EN;
	cmd &= ~CMD_RX_EN;
	cmd |= CMD_SPEED_1000 << CMD_SPEED_SHIFT;
	enet_umac_write(enet, UMAC_CMD, cmd);

	enet_maskset(enet, ENET_GMAC_STATUS,
		     ENET_GMAC_STATUS_ETH_SPEED_MASK |
		     ENET_GMAC_STATUS_HD |
		     ENET_GMAC_STATUS_AUTO_CFG_EN |
		     ENET_GMAC_STATUS_LINK_UP,
		     ENET_GMAC_STATUS_ETH_SPEED_1000 |
		     ENET_GMAC_STATUS_AUTO_CFG_EN |
		     ENET_GMAC_STATUS_LINK_UP);
}

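/* RX interrupt: mask and ack further interrupts, then defer the work to NAPI */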
static irqreturn_t bcm4908_enet_irq_handler(int irq, void *dev_id)
{
	struct bcm4908_enet *enet = dev_id;

	bcm4908_enet_intrs_off(enet);
	bcm4908_enet_intrs_ack(enet);

	napi_schedule(&enet->napi);

	return IRQ_HANDLED;
}

static int bcm4908_enet_open(struct net_device *netdev)
{
	struct bcm4908_enet *enet = netdev_priv(netdev);
	struct device *dev = enet->dev;
	int err;

	err = request_irq(netdev->irq, bcm4908_enet_irq_handler, 0, "enet", enet);
	if (err) {
		dev_err(dev, "Failed to request IRQ %d: %d\n", netdev->irq, err);
		return err;
	}

	bcm4908_enet_gmac_init(enet);
	bcm4908_enet_dma_reset(enet);
	err = bcm4908_enet_dma_init(enet);
	if (err) {
		free_irq(netdev->irq, enet);
		return err;
	}

	enet_umac_set(enet, UMAC_CMD, CMD_TX_EN | CMD_RX_EN);

	enet_set(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN);
	enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_FLOWC_CH1_EN, 0);
	bcm4908_enet_dma_rx_ring_enable(enet, &enet->rx_ring);

	napi_enable(&enet->napi);
	netif_carrier_on(netdev);
	netif_start_queue(netdev);

	bcm4908_enet_intrs_ack(enet);
	bcm4908_enet_intrs_on(enet);

	return 0;
}

static int bcm4908_enet_stop(struct net_device *netdev)
{
	struct bcm4908_enet *enet = netdev_priv(netdev);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	napi_disable(&enet->napi);

	bcm4908_enet_dma_rx_ring_disable(enet, &enet->rx_ring);
	bcm4908_enet_dma_tx_ring_disable(enet, &enet->tx_ring);

	bcm4908_enet_dma_uninit(enet);

	free_irq(enet->netdev->irq, enet);

	return 0;
}

static int bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct bcm4908_enet *enet = netdev_priv(netdev);
	struct bcm4908_enet_dma_ring *ring = &enet->tx_ring;
	struct bcm4908_enet_dma_ring_slot *slot;
	struct device *dev = enet->dev;
	struct bcm4908_enet_dma_ring_bd *buf_desc;
	int free_buf_descs;
	u32 tmp;

	/* Free transmitted skbs */
	while (ring->read_idx != ring->write_idx) {
		buf_desc = &ring->buf_desc[ring->read_idx];
		if (le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)
			break;
		slot = &ring->slots[ring->read_idx];

		dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
		dev_kfree_skb(slot->skb);
		if (++ring->read_idx == ring->length)
			ring->read_idx = 0;
	}

	/* Don't use the last empty buf descriptor */
	if (ring->read_idx <= ring->write_idx)
		free_buf_descs = ring->read_idx - ring->write_idx + ring->length;
	else
		free_buf_descs = ring->read_idx - ring->write_idx;
	if (free_buf_descs < 2)
		return NETDEV_TX_BUSY;

	/* Hardware removes OWN bit after sending data */
	buf_desc = &ring->buf_desc[ring->write_idx];
	if (unlikely(le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)) {
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	slot = &ring->slots[ring->write_idx];
	slot->skb = skb;
	slot->len = skb->len;
	slot->dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, slot->dma_addr))) {
		/* Mapping errors aren't recoverable by retrying: drop the packet */
		dev_kfree_skb(skb);
		netdev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	tmp = skb->len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
	tmp |= DMA_CTL_STATUS_OWN;
	tmp |= DMA_CTL_STATUS_SOP;
	tmp |= DMA_CTL_STATUS_EOP;
	tmp |= DMA_CTL_STATUS_APPEND_CRC;
	if (ring->write_idx + 1 == ring->length - 1)
		tmp |= DMA_CTL_STATUS_WRAP;

	buf_desc->addr = cpu_to_le32((uint32_t)slot->dma_addr);
	buf_desc->ctl = cpu_to_le32(tmp);

	bcm4908_enet_dma_tx_ring_enable(enet, &enet->tx_ring);

	if (++ring->write_idx == ring->length - 1)
		ring->write_idx = 0;
	enet->netdev->stats.tx_bytes += skb->len;
	enet->netdev->stats.tx_packets++;

	return NETDEV_TX_OK;
}

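/* NAPI poll: pass received frames up the stack and refill the RX ring */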
static int bcm4908_enet_poll(struct napi_struct *napi, int weight)
{
	struct bcm4908_enet *enet = container_of(napi, struct bcm4908_enet, napi);
	struct device *dev = enet->dev;
	int handled = 0;

	while (handled < weight) {
		struct bcm4908_enet_dma_ring_bd *buf_desc;
		struct bcm4908_enet_dma_ring_slot slot;
		u32 ctl;
		int len;
		int err;

		buf_desc = &enet->rx_ring.buf_desc[enet->rx_ring.read_idx];
		ctl = le32_to_cpu(buf_desc->ctl);
		if (ctl & DMA_CTL_STATUS_OWN)
			break;

		slot = enet->rx_ring.slots[enet->rx_ring.read_idx];

		/* Provide new buffer before unpinning the old one */
		err = bcm4908_enet_dma_alloc_rx_buf(enet, enet->rx_ring.read_idx);
		if (err)
			break;

		if (++enet->rx_ring.read_idx == enet->rx_ring.length)
			enet->rx_ring.read_idx = 0;

		len = (ctl & DMA_CTL_LEN_DESC_BUFLENGTH) >> DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;

		if (len < ENET_MTU_MIN ||
		    (ctl & (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) != (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) {
			dma_unmap_single(dev, slot.dma_addr, slot.len, DMA_FROM_DEVICE);
			kfree_skb(slot.skb);
			enet->netdev->stats.rx_dropped++;
			break;
		}

		dma_unmap_single(dev, slot.dma_addr, slot.len, DMA_FROM_DEVICE);

		skb_put(slot.skb, len - ETH_FCS_LEN);
		slot.skb->protocol = eth_type_trans(slot.skb, enet->netdev);
		netif_receive_skb(slot.skb);

		enet->netdev->stats.rx_packets++;
		enet->netdev->stats.rx_bytes += len;

		handled++;
	}

	if (handled < weight) {
		napi_complete_done(napi, handled);
		bcm4908_enet_intrs_on(enet);
	}

	return handled;
}

static const struct net_device_ops bcm4908_enet_netdev_ops = {
	.ndo_open = bcm4908_enet_open,
	.ndo_stop = bcm4908_enet_stop,
	.ndo_start_xmit = bcm4908_enet_start_xmit,
	.ndo_set_mac_address = eth_mac_addr,
};

static int bcm4908_enet_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct bcm4908_enet *enet;
	int err;

	netdev = devm_alloc_etherdev(dev, sizeof(*enet));
	if (!netdev)
		return -ENOMEM;

	enet = netdev_priv(netdev);
	enet->dev = dev;
	enet->netdev = netdev;

	enet->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(enet->base)) {
		dev_err(dev, "Failed to map registers: %ld\n", PTR_ERR(enet->base));
		return PTR_ERR(enet->base);
	}

	netdev->irq = platform_get_irq_byname(pdev, "rx");
	if (netdev->irq < 0)
		return netdev->irq;

	err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (err)
		return err;

	err = bcm4908_enet_dma_alloc(enet);
	if (err)
		return err;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	eth_hw_addr_random(netdev);
	netdev->netdev_ops = &bcm4908_enet_netdev_ops;
	netdev->min_mtu = ETH_ZLEN;
	netdev->mtu = ENET_MTU_MAX;
	netdev->max_mtu = ENET_MTU_MAX;
	netif_napi_add(netdev, &enet->napi, bcm4908_enet_poll, 64);

	err = register_netdev(netdev);
	if (err) {
		bcm4908_enet_dma_free(enet);
		return err;
	}

	platform_set_drvdata(pdev, enet);

	return 0;
}

static int bcm4908_enet_remove(struct platform_device *pdev)
{
	struct bcm4908_enet *enet = platform_get_drvdata(pdev);

	unregister_netdev(enet->netdev);
	netif_napi_del(&enet->napi);
	bcm4908_enet_dma_free(enet);

	return 0;
}

static const struct of_device_id bcm4908_enet_of_match[] = {
	{ .compatible = "brcm,bcm4908-enet" },
	{},
};

static struct platform_driver bcm4908_enet_driver = {
	.driver = {
		.name = "bcm4908_enet",
		.of_match_table = bcm4908_enet_of_match,
	},
	.probe	= bcm4908_enet_probe,
	.remove = bcm4908_enet_remove,
};
module_platform_driver(bcm4908_enet_driver);

MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, bcm4908_enet_of_match);