1 /*
2  * Driver for (BCM4706)? GBit MAC core on BCMA bus.
3  *
4  * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
5  *
6  * Licensed under the GNU/GPL. See COPYING for details.
7  */
8 
9 #include "bgmac.h"
10 
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/delay.h>
14 #include <linux/etherdevice.h>
15 #include <linux/mii.h>
16 #include <linux/phy.h>
17 #include <linux/interrupt.h>
18 #include <linux/dma-mapping.h>
19 #include <bcm47xx_nvram.h>
20 
/* BCMA match table: bind to both the BCM4706 GBit MAC variant and the
 * standard GBit MAC core, any revision, any class.
 */
static const struct bcma_device_id bgmac_bcma_tbl[] = {
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
	BCMA_CORETABLE_END
};
MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
27 
28 static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
29 			     u32 value, int timeout)
30 {
31 	u32 val;
32 	int i;
33 
34 	for (i = 0; i < timeout / 10; i++) {
35 		val = bcma_read32(core, reg);
36 		if ((val & mask) == value)
37 			return true;
38 		udelay(10);
39 	}
40 	pr_err("Timeout waiting for reg 0x%X\n", reg);
41 	return false;
42 }
43 
44 /**************************************************
45  * DMA
46  **************************************************/
47 
/* Stop the TX DMA engine for @ring: suspend it, wait for it to settle, then
 * clear the control register and wait for the "disabled" status.  Failures
 * are logged but not returned -- this is best-effort hardware teardown.
 */
static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	u32 val;
	int i;

	/* Ring was never assigned a register block */
	if (!ring->mmio_base)
		return;

	/* Suspend DMA TX ring first.
	 * bgmac_wait_value doesn't support waiting for any of few values, so
	 * implement whole loop here.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
		    BGMAC_DMA_TX_SUSPEND);
	for (i = 0; i < 10000 / 10; i++) {
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		val &= BGMAC_DMA_TX_STAT;
		if (val == BGMAC_DMA_TX_STAT_DISABLED ||
		    val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
		    val == BGMAC_DMA_TX_STAT_STOPPED) {
			/* i == 0 below signals "suspended in time" */
			i = 0;
			break;
		}
		udelay(10);
	}
	if (i)
		bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
			  ring->mmio_base, val);

	/* Remove SUSPEND bit */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
	if (!bgmac_wait_value(bgmac->core,
			      ring->mmio_base + BGMAC_DMA_TX_STATUS,
			      BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
			      10000)) {
		bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
			   ring->mmio_base);
		/* One extra grace period before declaring the reset failed */
		udelay(300);
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
			bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n",
				  ring->mmio_base);
	}
}
92 
93 static void bgmac_dma_tx_enable(struct bgmac *bgmac,
94 				struct bgmac_dma_ring *ring)
95 {
96 	u32 ctl;
97 
98 	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
99 	ctl |= BGMAC_DMA_TX_ENABLE;
100 	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
101 	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
102 }
103 
104 static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
105 				    struct bgmac_dma_ring *ring,
106 				    struct sk_buff *skb)
107 {
108 	struct device *dma_dev = bgmac->core->dma_dev;
109 	struct net_device *net_dev = bgmac->net_dev;
110 	struct bgmac_dma_desc *dma_desc;
111 	struct bgmac_slot_info *slot;
112 	u32 ctl0, ctl1;
113 	int free_slots;
114 
115 	if (skb->len > BGMAC_DESC_CTL1_LEN) {
116 		bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
117 		goto err_stop_drop;
118 	}
119 
120 	if (ring->start <= ring->end)
121 		free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS;
122 	else
123 		free_slots = ring->start - ring->end;
124 	if (free_slots == 1) {
125 		bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
126 		netif_stop_queue(net_dev);
127 		return NETDEV_TX_BUSY;
128 	}
129 
130 	slot = &ring->slots[ring->end];
131 	slot->skb = skb;
132 	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len,
133 					DMA_TO_DEVICE);
134 	if (dma_mapping_error(dma_dev, slot->dma_addr)) {
135 		bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
136 			  ring->mmio_base);
137 		goto err_stop_drop;
138 	}
139 
140 	ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF;
141 	if (ring->end == ring->num_slots - 1)
142 		ctl0 |= BGMAC_DESC_CTL0_EOT;
143 	ctl1 = skb->len & BGMAC_DESC_CTL1_LEN;
144 
145 	dma_desc = ring->cpu_base;
146 	dma_desc += ring->end;
147 	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
148 	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
149 	dma_desc->ctl0 = cpu_to_le32(ctl0);
150 	dma_desc->ctl1 = cpu_to_le32(ctl1);
151 
152 	wmb();
153 
154 	/* Increase ring->end to point empty slot. We tell hardware the first
155 	 * slot it should *not* read.
156 	 */
157 	if (++ring->end >= BGMAC_TX_RING_SLOTS)
158 		ring->end = 0;
159 	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
160 		    ring->index_base +
161 		    ring->end * sizeof(struct bgmac_dma_desc));
162 
163 	/* Always keep one slot free to allow detecting bugged calls. */
164 	if (--free_slots == 1)
165 		netif_stop_queue(net_dev);
166 
167 	return NETDEV_TX_OK;
168 
169 err_stop_drop:
170 	netif_stop_queue(net_dev);
171 	dev_kfree_skb(skb);
172 	return NETDEV_TX_OK;
173 }
174 
/* Free transmitted packets.
 * Reads the hardware's current descriptor pointer to find the first slot it
 * has not consumed yet, then unmaps and frees every skb from ring->start up
 * to (but excluding) that slot.  Wakes the TX queue if anything was freed.
 */
static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	int empty_slot;
	bool freed = false;

	/* The last slot that hardware didn't consume yet */
	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	/* Convert the byte offset (relative to index_base) to a slot index */
	empty_slot -= ring->index_base;
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != empty_slot) {
		struct bgmac_slot_info *slot = &ring->slots[ring->start];

		if (slot->skb) {
			/* Unmap no longer used buffer */
			dma_unmap_single(dma_dev, slot->dma_addr,
					 slot->skb->len, DMA_TO_DEVICE);
			slot->dma_addr = 0;

			/* Free memory! :) */
			dev_kfree_skb(slot->skb);
			slot->skb = NULL;
		} else {
			bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! End of ring: %d\n",
				  ring->start, ring->end);
		}

		if (++ring->start >= BGMAC_TX_RING_SLOTS)
			ring->start = 0;
		freed = true;
	}

	if (freed && netif_queue_stopped(bgmac->net_dev))
		netif_wake_queue(bgmac->net_dev);
}
214 
215 static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
216 {
217 	if (!ring->mmio_base)
218 		return;
219 
220 	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
221 	if (!bgmac_wait_value(bgmac->core,
222 			      ring->mmio_base + BGMAC_DMA_RX_STATUS,
223 			      BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
224 			      10000))
225 		bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n",
226 			  ring->mmio_base);
227 }
228 
229 static void bgmac_dma_rx_enable(struct bgmac *bgmac,
230 				struct bgmac_dma_ring *ring)
231 {
232 	u32 ctl;
233 
234 	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
235 	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
236 	ctl |= BGMAC_DMA_RX_ENABLE;
237 	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
238 	ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
239 	ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
240 	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
241 }
242 
243 static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
244 				     struct bgmac_slot_info *slot)
245 {
246 	struct device *dma_dev = bgmac->core->dma_dev;
247 	struct bgmac_rx_header *rx;
248 
249 	/* Alloc skb */
250 	slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
251 	if (!slot->skb)
252 		return -ENOMEM;
253 
254 	/* Poison - if everything goes fine, hardware will overwrite it */
255 	rx = (struct bgmac_rx_header *)slot->skb->data;
256 	rx->len = cpu_to_le16(0xdead);
257 	rx->flags = cpu_to_le16(0xbeef);
258 
259 	/* Map skb for the DMA */
260 	slot->dma_addr = dma_map_single(dma_dev, slot->skb->data,
261 					BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
262 	if (dma_mapping_error(dma_dev, slot->dma_addr)) {
263 		bgmac_err(bgmac, "DMA mapping error\n");
264 		return -ENOMEM;
265 	}
266 	if (slot->dma_addr & 0xC0000000)
267 		bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
268 
269 	return 0;
270 }
271 
272 static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
273 			     int weight)
274 {
275 	u32 end_slot;
276 	int handled = 0;
277 
278 	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
279 	end_slot &= BGMAC_DMA_RX_STATDPTR;
280 	end_slot -= ring->index_base;
281 	end_slot &= BGMAC_DMA_RX_STATDPTR;
282 	end_slot /= sizeof(struct bgmac_dma_desc);
283 
284 	ring->end = end_slot;
285 
286 	while (ring->start != ring->end) {
287 		struct device *dma_dev = bgmac->core->dma_dev;
288 		struct bgmac_slot_info *slot = &ring->slots[ring->start];
289 		struct sk_buff *skb = slot->skb;
290 		struct sk_buff *new_skb;
291 		struct bgmac_rx_header *rx;
292 		u16 len, flags;
293 
294 		/* Unmap buffer to make it accessible to the CPU */
295 		dma_sync_single_for_cpu(dma_dev, slot->dma_addr,
296 					BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
297 
298 		/* Get info from the header */
299 		rx = (struct bgmac_rx_header *)skb->data;
300 		len = le16_to_cpu(rx->len);
301 		flags = le16_to_cpu(rx->flags);
302 
303 		/* Check for poison and drop or pass the packet */
304 		if (len == 0xdead && flags == 0xbeef) {
305 			bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
306 				  ring->start);
307 		} else {
308 			/* Omit CRC. */
309 			len -= ETH_FCS_LEN;
310 
311 			new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
312 			if (new_skb) {
313 				skb_put(new_skb, len);
314 				skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
315 								 new_skb->data,
316 								 len);
317 				skb_checksum_none_assert(skb);
318 				new_skb->protocol =
319 					eth_type_trans(new_skb, bgmac->net_dev);
320 				netif_receive_skb(new_skb);
321 				handled++;
322 			} else {
323 				bgmac->net_dev->stats.rx_dropped++;
324 				bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n");
325 			}
326 
327 			/* Poison the old skb */
328 			rx->len = cpu_to_le16(0xdead);
329 			rx->flags = cpu_to_le16(0xbeef);
330 		}
331 
332 		/* Make it back accessible to the hardware */
333 		dma_sync_single_for_device(dma_dev, slot->dma_addr,
334 					   BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
335 
336 		if (++ring->start >= BGMAC_RX_RING_SLOTS)
337 			ring->start = 0;
338 
339 		if (handled >= weight) /* Should never be greater */
340 			break;
341 	}
342 
343 	return handled;
344 }
345 
346 /* Does ring support unaligned addressing? */
347 static bool bgmac_dma_unaligned(struct bgmac *bgmac,
348 				struct bgmac_dma_ring *ring,
349 				enum bgmac_dma_ring_type ring_type)
350 {
351 	switch (ring_type) {
352 	case BGMAC_DMA_RING_TX:
353 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
354 			    0xff0);
355 		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
356 			return true;
357 		break;
358 	case BGMAC_DMA_RING_RX:
359 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
360 			    0xff0);
361 		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
362 			return true;
363 		break;
364 	}
365 	return false;
366 }
367 
/* Release all skbs and the descriptor array of @ring.
 * NOTE(review): buffers are unmapped with DMA_TO_DEVICE and slot->skb->len,
 * which matches how TX slots were mapped; RX slots were mapped with
 * DMA_FROM_DEVICE and BGMAC_RX_BUF_SIZE -- confirm whether RX teardown
 * needs its own unmap parameters.
 */
static void bgmac_dma_ring_free(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_slot_info *slot;
	int size;
	int i;

	for (i = 0; i < ring->num_slots; i++) {
		slot = &ring->slots[i];
		if (slot->skb) {
			if (slot->dma_addr)
				dma_unmap_single(dma_dev, slot->dma_addr,
						 slot->skb->len, DMA_TO_DEVICE);
			dev_kfree_skb(slot->skb);
		}
	}

	if (ring->cpu_base) {
		/* Free ring of descriptors */
		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
		dma_free_coherent(dma_dev, size, ring->cpu_base,
				  ring->dma_base);
	}
}
393 
394 static void bgmac_dma_free(struct bgmac *bgmac)
395 {
396 	int i;
397 
398 	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
399 		bgmac_dma_ring_free(bgmac, &bgmac->tx_ring[i]);
400 	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
401 		bgmac_dma_ring_free(bgmac, &bgmac->rx_ring[i]);
402 }
403 
404 static int bgmac_dma_alloc(struct bgmac *bgmac)
405 {
406 	struct device *dma_dev = bgmac->core->dma_dev;
407 	struct bgmac_dma_ring *ring;
408 	static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
409 					 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
410 	int size; /* ring size: different for Tx and Rx */
411 	int err;
412 	int i;
413 
414 	BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
415 	BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));
416 
417 	if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) {
418 		bgmac_err(bgmac, "Core does not report 64-bit DMA\n");
419 		return -ENOTSUPP;
420 	}
421 
422 	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
423 		ring = &bgmac->tx_ring[i];
424 		ring->num_slots = BGMAC_TX_RING_SLOTS;
425 		ring->mmio_base = ring_base[i];
426 
427 		/* Alloc ring of descriptors */
428 		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
429 		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
430 						     &ring->dma_base,
431 						     GFP_KERNEL);
432 		if (!ring->cpu_base) {
433 			bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n",
434 				  ring->mmio_base);
435 			goto err_dma_free;
436 		}
437 		if (ring->dma_base & 0xC0000000)
438 			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
439 
440 		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
441 						      BGMAC_DMA_RING_TX);
442 		if (ring->unaligned)
443 			ring->index_base = lower_32_bits(ring->dma_base);
444 		else
445 			ring->index_base = 0;
446 
447 		/* No need to alloc TX slots yet */
448 	}
449 
450 	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
451 		int j;
452 
453 		ring = &bgmac->rx_ring[i];
454 		ring->num_slots = BGMAC_RX_RING_SLOTS;
455 		ring->mmio_base = ring_base[i];
456 
457 		/* Alloc ring of descriptors */
458 		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
459 		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
460 						     &ring->dma_base,
461 						     GFP_KERNEL);
462 		if (!ring->cpu_base) {
463 			bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n",
464 				  ring->mmio_base);
465 			err = -ENOMEM;
466 			goto err_dma_free;
467 		}
468 		if (ring->dma_base & 0xC0000000)
469 			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
470 
471 		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
472 						      BGMAC_DMA_RING_RX);
473 		if (ring->unaligned)
474 			ring->index_base = lower_32_bits(ring->dma_base);
475 		else
476 			ring->index_base = 0;
477 
478 		/* Alloc RX slots */
479 		for (j = 0; j < ring->num_slots; j++) {
480 			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
481 			if (err) {
482 				bgmac_err(bgmac, "Can't allocate skb for slot in RX ring\n");
483 				goto err_dma_free;
484 			}
485 		}
486 	}
487 
488 	return 0;
489 
490 err_dma_free:
491 	bgmac_dma_free(bgmac);
492 	return -ENOMEM;
493 }
494 
/* Program descriptor ring addresses and initial descriptors into hardware.
 * Must run after bgmac_dma_alloc().  The enable happens before the base
 * address is programmed for aligned rings and after it for rings flagged
 * ring->unaligned (the flag set during bgmac_dma_unaligned() probing).
 */
static void bgmac_dma_init(struct bgmac *bgmac)
{
	struct bgmac_dma_ring *ring;
	struct bgmac_dma_desc *dma_desc;
	u32 ctl0, ctl1;
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;	/* Points the slot that should *not* be read */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);

		/* Point every RX descriptor at its preallocated buffer */
		for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
		     j++, dma_desc++) {
			ctl0 = ctl1 = 0;

			if (j == ring->num_slots - 1)
				ctl0 |= BGMAC_DESC_CTL0_EOT;
			ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
			/* Is there any BGMAC device that requires extension? */
			/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
			 * B43_DMA64_DCTL1_ADDREXT_MASK;
			 */

			dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[j].dma_addr));
			dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[j].dma_addr));
			dma_desc->ctl0 = cpu_to_le32(ctl0);
			dma_desc->ctl1 = cpu_to_le32(ctl1);
		}

		/* Index one past the last descriptor: all slots are handed
		 * to the hardware.
		 */
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
			    ring->index_base +
			    ring->num_slots * sizeof(struct bgmac_dma_desc));

		ring->start = 0;
		ring->end = 0;
	}
}
558 
559 /**************************************************
560  * PHY ops
561  **************************************************/
562 
/* Read a 16-bit PHY register via the PHY access interface.
 * On the BCM4706 variant the access/control registers live in the shared
 * GMAC common core; elsewhere they are in the MAC core itself.
 * Returns the register value, or 0xffff on timeout.
 */
static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
{
	struct bcma_device *core;
	u16 phy_access_addr;
	u16 phy_ctl_addr;
	u32 tmp;

	/* The BGMAC_* and BCMA_GMAC_CMN_* field layouts must agree, because
	 * the code below uses the BGMAC_* definitions against either block.
	 */
	BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK);
	BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK);
	BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT);
	BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK);
	BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT);
	BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE);
	BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START);
	BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK);
	BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK);
	BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
	BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
		core = bgmac->core->bus->drv_gmac_cmn.core;
		phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
		phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
	} else {
		core = bgmac->core;
		phy_access_addr = BGMAC_PHY_ACCESS;
		phy_ctl_addr = BGMAC_PHY_CNTL;
	}

	/* Select the PHY address in the control register */
	tmp = bcma_read32(core, phy_ctl_addr);
	tmp &= ~BGMAC_PC_EPA_MASK;
	tmp |= phyaddr;
	bcma_write32(core, phy_ctl_addr, tmp);

	/* Kick off the read; the START bit clears when it completes */
	tmp = BGMAC_PA_START;
	tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
	tmp |= reg << BGMAC_PA_REG_SHIFT;
	bcma_write32(core, phy_access_addr, tmp);

	if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
		bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n",
			  phyaddr, reg);
		return 0xffff;
	}

	return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK;
}
610 
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
/* Write a 16-bit PHY register via the PHY access interface.
 * Register block selection mirrors bgmac_phy_read().
 * Returns 0 on success or -ETIMEDOUT.
 */
static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
{
	struct bcma_device *core;
	u16 phy_access_addr;
	u16 phy_ctl_addr;
	u32 tmp;

	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
		core = bgmac->core->bus->drv_gmac_cmn.core;
		phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
		phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
	} else {
		core = bgmac->core;
		phy_access_addr = BGMAC_PHY_ACCESS;
		phy_ctl_addr = BGMAC_PHY_CNTL;
	}

	/* Select the PHY address in the control register */
	tmp = bcma_read32(core, phy_ctl_addr);
	tmp &= ~BGMAC_PC_EPA_MASK;
	tmp |= phyaddr;
	bcma_write32(core, phy_ctl_addr, tmp);

	/* Ack any pending MDIO interrupt; warn if it cannot be cleared */
	bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
		bgmac_warn(bgmac, "Error setting MDIO int\n");

	/* Kick off the write; the START bit clears when it completes */
	tmp = BGMAC_PA_START;
	tmp |= BGMAC_PA_WRITE;
	tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
	tmp |= reg << BGMAC_PA_REG_SHIFT;
	tmp |= value;
	bcma_write32(core, phy_access_addr, tmp);

	if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
		bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n",
			  phyaddr, reg);
		return -ETIMEDOUT;
	}

	return 0;
}
653 
654 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyforce */
655 static void bgmac_phy_force(struct bgmac *bgmac)
656 {
657 	u16 ctl;
658 	u16 mask = ~(BGMAC_PHY_CTL_SPEED | BGMAC_PHY_CTL_SPEED_MSB |
659 		     BGMAC_PHY_CTL_ANENAB | BGMAC_PHY_CTL_DUPLEX);
660 
661 	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
662 		return;
663 
664 	if (bgmac->autoneg)
665 		return;
666 
667 	ctl = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL);
668 	ctl &= mask;
669 	if (bgmac->full_duplex)
670 		ctl |= BGMAC_PHY_CTL_DUPLEX;
671 	if (bgmac->speed == BGMAC_SPEED_100)
672 		ctl |= BGMAC_PHY_CTL_SPEED_100;
673 	else if (bgmac->speed == BGMAC_SPEED_1000)
674 		ctl |= BGMAC_PHY_CTL_SPEED_1000;
675 	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL, ctl);
676 }
677 
678 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyadvertise */
679 static void bgmac_phy_advertise(struct bgmac *bgmac)
680 {
681 	u16 adv;
682 
683 	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
684 		return;
685 
686 	if (!bgmac->autoneg)
687 		return;
688 
689 	/* Adv selected 10/100 speeds */
690 	adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV);
691 	adv &= ~(BGMAC_PHY_ADV_10HALF | BGMAC_PHY_ADV_10FULL |
692 		 BGMAC_PHY_ADV_100HALF | BGMAC_PHY_ADV_100FULL);
693 	if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
694 		adv |= BGMAC_PHY_ADV_10HALF;
695 	if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
696 		adv |= BGMAC_PHY_ADV_100HALF;
697 	if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
698 		adv |= BGMAC_PHY_ADV_10FULL;
699 	if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
700 		adv |= BGMAC_PHY_ADV_100FULL;
701 	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV, adv);
702 
703 	/* Adv selected 1000 speeds */
704 	adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2);
705 	adv &= ~(BGMAC_PHY_ADV2_1000HALF | BGMAC_PHY_ADV2_1000FULL);
706 	if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
707 		adv |= BGMAC_PHY_ADV2_1000HALF;
708 	if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
709 		adv |= BGMAC_PHY_ADV2_1000FULL;
710 	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2, adv);
711 
712 	/* Restart */
713 	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
714 			bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) |
715 			BGMAC_PHY_CTL_RESTART);
716 }
717 
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
/* Chip-specific PHY workarounds, applied to PHY addresses 0-4.  The
 * register/value pairs are opaque vendor magic taken from the specs above --
 * don't "clean them up" without hardware to verify against.
 */
static void bgmac_phy_init(struct bgmac *bgmac)
{
	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
	struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
	u8 i;

	if (ci->id == BCMA_CHIP_ID_BCM5356) {
		for (i = 0; i < 5; i++) {
			bgmac_phy_write(bgmac, i, 0x1f, 0x008b);
			bgmac_phy_write(bgmac, i, 0x15, 0x0100);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x12, 0x2aaa);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
		}
	}
	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
		/* Clear two chip-control bits first, then poke the PHYs */
		bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
		bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
		for (i = 0; i < 5; i++) {
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x16, 0x5284);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
			bgmac_phy_write(bgmac, i, 0x17, 0x0010);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
			bgmac_phy_write(bgmac, i, 0x16, 0x5296);
			bgmac_phy_write(bgmac, i, 0x17, 0x1073);
			bgmac_phy_write(bgmac, i, 0x17, 0x9073);
			bgmac_phy_write(bgmac, i, 0x16, 0x52b6);
			bgmac_phy_write(bgmac, i, 0x17, 0x9273);
			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
		}
	}
}
754 
755 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
756 static void bgmac_phy_reset(struct bgmac *bgmac)
757 {
758 	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
759 		return;
760 
761 	bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
762 			BGMAC_PHY_CTL_RESET);
763 	udelay(100);
764 	if (bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) &
765 	    BGMAC_PHY_CTL_RESET)
766 		bgmac_err(bgmac, "PHY reset failed\n");
767 	bgmac_phy_init(bgmac);
768 }
769 
770 /**************************************************
771  * Chip ops
772  **************************************************/
773 
/* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
 * there is nothing to change? Try it after stabilizing the driver.
 */
/* Read-modify-write BGMAC_CMDCFG with the MAC held in software reset:
 * set SR, write (cmdcfg & mask) | set (skipped if unchanged, unless @force),
 * then clear SR again, with a 2us delay after each SR change.
 */
static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
				 bool force)
{
	u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	u32 new_val = (cmdcfg & mask) | set;

	/* Enter software reset before touching the register */
	bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR);
	udelay(2);

	if (new_val != cmdcfg || force)
		bgmac_write(bgmac, BGMAC_CMDCFG, new_val);

	/* Leave software reset */
	bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR);
	udelay(2);
}
792 
793 static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
794 {
795 	u32 tmp;
796 
797 	tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
798 	bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
799 	tmp = (addr[4] << 8) | addr[5];
800 	bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
801 }
802 
803 static void bgmac_set_rx_mode(struct net_device *net_dev)
804 {
805 	struct bgmac *bgmac = netdev_priv(net_dev);
806 
807 	if (net_dev->flags & IFF_PROMISC)
808 		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
809 	else
810 		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
811 }
812 
#if 0 /* We don't use that regs yet */
/* Snapshot all MIB TX/RX counters into bgmac->mib_*_regs.  Skipped on
 * BCM4706, which has no such register block here.  Currently compiled out.
 */
static void bgmac_chip_stats_update(struct bgmac *bgmac)
{
	int i;

	if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) {
		for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
			bgmac->mib_tx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_TX_GOOD_OCTETS + (i * 4));
		for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
			bgmac->mib_rx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_RX_GOOD_OCTETS + (i * 4));
	}

	/* TODO: what else? how to handle BCM4706? Specs are needed */
}
#endif
832 
833 static void bgmac_clear_mib(struct bgmac *bgmac)
834 {
835 	int i;
836 
837 	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT)
838 		return;
839 
840 	bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
841 	for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
842 		bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
843 	for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
844 		bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
845 }
846 
847 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
848 static void bgmac_speed(struct bgmac *bgmac, int speed)
849 {
850 	u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
851 	u32 set = 0;
852 
853 	if (speed & BGMAC_SPEED_10)
854 		set |= BGMAC_CMDCFG_ES_10;
855 	if (speed & BGMAC_SPEED_100)
856 		set |= BGMAC_CMDCFG_ES_100;
857 	if (speed & BGMAC_SPEED_1000)
858 		set |= BGMAC_CMDCFG_ES_1000;
859 	if (!bgmac->full_duplex)
860 		set |= BGMAC_CMDCFG_HD;
861 	bgmac_cmdcfg_maskset(bgmac, mask, set, true);
862 }
863 
864 static void bgmac_miiconfig(struct bgmac *bgmac)
865 {
866 	u8 imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
867 			BGMAC_DS_MM_SHIFT;
868 	if (imode == 0 || imode == 1) {
869 		if (bgmac->autoneg)
870 			bgmac_speed(bgmac, BGMAC_SPEED_100);
871 		else
872 			bgmac_speed(bgmac, bgmac->speed);
873 	}
874 }
875 
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
/* Full MAC/core reset: quiesce DMA if the core is running, re-enable the
 * BCMA core, apply chip-specific switch strapping, reprogram CMDCFG to
 * defaults and re-run MII/PHY init.  Statement order follows the vendor
 * sequence -- do not reorder.
 */
static void bgmac_chip_reset(struct bgmac *bgmac)
{
	struct bcma_device *core = bgmac->core;
	struct bcma_bus *bus = core->bus;
	struct bcma_chipinfo *ci = &bus->chipinfo;
	u32 flags = 0;
	u32 iost;
	int i;

	if (bcma_core_is_enabled(core)) {
		/* Core already running: grab stats once, then stop all DMA */
		if (!bgmac->stats_grabbed) {
			/* bgmac_chip_stats_update(bgmac); */
			bgmac->stats_grabbed = true;
		}

		for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
			bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);

		/* Set BGMAC_CMDCFG_ML before resetting RX rings
		 * (NOTE(review): presumably a loopback mode -- confirm)
		 */
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
		udelay(1);

		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
			bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);

		/* TODO: Clear software multicast filter list */
	}

	iost = bcma_aread32(core, BCMA_IOST);
	/* Treat these chip/package combos as not having the switch attached
	 * regardless of what the strap bit says
	 */
	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9))
		iost &= ~BGMAC_BCMA_IOST_ATTACHED;

	if (iost & BGMAC_BCMA_IOST_ATTACHED) {
		flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
		if (!bgmac->has_robosw)
			flags |= BGMAC_BCMA_IOCTL_SW_RESET;
	}

	bcma_core_enable(core, flags);

	if (core->id.rev > 2) {
		/* Set bit 8 in CLKCTLST and wait for bit 24 to come up
		 * (NOTE(review): magic bits, presumably a clock request/
		 * available pair -- confirm against BCMA docs)
		 */
		bgmac_set(bgmac, BCMA_CLKCTLST, 1 << 8);
		bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, 1 << 24, 1 << 24,
				 1000);
	}

	if (ci->id == BCMA_CHIP_ID_BCM5357 || ci->id == BCMA_CHIP_ID_BCM4749 ||
	    ci->id == BCMA_CHIP_ID_BCM53572) {
		struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
		u8 et_swtype = 0;
		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
		char buf[4];

		/* NVRAM "et_swtype" overrides the default switch/if type */
		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
					  buf);
			et_swtype &= 0x0f;
			et_swtype <<= 4;
			sw_type = et_swtype;
		} else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 9) {
			sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
		} else if ((ci->id != BCMA_CHIP_ID_BCM53572 && ci->pkg == 10) ||
			   (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9)) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
		}
		bcma_chipco_chipctl_maskset(cc, 1,
					    ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
					      BGMAC_CHIPCTL_1_SW_TYPE_MASK),
					    sw_type);
	}

	/* Release the external switch from the reset we may have held above */
	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
		bcma_awrite32(core, BCMA_IOCTL,
			      bcma_aread32(core, BCMA_IOCTL) &
			      ~BGMAC_BCMA_IOCTL_SW_RESET);

	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
	 * Specs don't say about using BGMAC_CMDCFG_SR, but in this routine
	 * BGMAC_CMDCFG is read _after_ putting chip in a reset. So it has to
	 * be kept until taking MAC out of the reset.
	 */
	bgmac_cmdcfg_maskset(bgmac,
			     ~(BGMAC_CMDCFG_TE |
			       BGMAC_CMDCFG_RE |
			       BGMAC_CMDCFG_RPI |
			       BGMAC_CMDCFG_TAI |
			       BGMAC_CMDCFG_HD |
			       BGMAC_CMDCFG_ML |
			       BGMAC_CMDCFG_CFE |
			       BGMAC_CMDCFG_RL |
			       BGMAC_CMDCFG_RED |
			       BGMAC_CMDCFG_PE |
			       BGMAC_CMDCFG_TPI |
			       BGMAC_CMDCFG_PAD_EN |
			       BGMAC_CMDCFG_PF),
			     BGMAC_CMDCFG_PROM |
			     BGMAC_CMDCFG_NLC |
			     BGMAC_CMDCFG_CFE |
			     BGMAC_CMDCFG_SR,
			     false);

	bgmac_clear_mib(bgmac);
	/* Enable PC_MTE -- lives in the GMAC common core on BCM4706 */
	if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
		bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0,
			       BCMA_GMAC_CMN_PC_MTE);
	else
		bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
	bgmac_miiconfig(bgmac);
	bgmac_phy_init(bgmac);

	bgmac->int_status = 0;
}
993 
/* Unmask the interrupt sources the driver cares about (bgmac->int_mask). */
static void bgmac_chip_intrs_on(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
}
998 
/* Mask all interrupt sources.
 * The discarded read-back of BGMAC_INT_MASK flushes the posted write so the
 * new mask has reached the hardware before we return.
 */
static void bgmac_chip_intrs_off(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, 0);
	bgmac_read(bgmac, BGMAC_INT_MASK);
}
1004 
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable
 * Take the MAC out of software reset and enable the transmitter and
 * receiver, applying chip-specific clock and flow-control settings.
 */
static void bgmac_enable(struct bgmac *bgmac)
{
	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
	u32 cmdcfg;
	u32 mode;
	u32 rxq_ctl;
	u32 fl_ctl;
	u16 bp_clk;
	u8 mdp;

	/* Latch the current command config, pulse the software-reset bit with
	 * TE/RE cleared, then restore the config with TE/RE (tx/rx enable) set.
	 */
	cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
			     BGMAC_CMDCFG_SR, true);
	udelay(2);
	cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
	bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);

	/* Interface mode, as reported by the device status register */
	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
		BGMAC_DS_MM_SHIFT;
	if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0)
		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
	if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2)
		bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0,
					    BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);

	/* Chip-specific flow-control thresholds and pause timer */
	switch (ci->id) {
	case BCMA_CHIP_ID_BCM5357:
	case BCMA_CHIP_ID_BCM4749:
	case BCMA_CHIP_ID_BCM53572:
	case BCMA_CHIP_ID_BCM4716:
	case BCMA_CHIP_ID_BCM47162:
		fl_ctl = 0x03cb04cb;
		if (ci->id == BCMA_CHIP_ID_BCM5357 ||
		    ci->id == BCMA_CHIP_ID_BCM4749 ||
		    ci->id == BCMA_CHIP_ID_BCM53572)
			fl_ctl = 0x2300e1;
		bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
		bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
		break;
	}

	/* Derive the RXQ_CTL MDP field from the backplane clock in MHz */
	rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
	rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
	bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / 1000000;
	mdp = (bp_clk * 128 / 1000) - 3;
	rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
	bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
}
1054 
1055 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
1056 static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
1057 {
1058 	struct bgmac_dma_ring *ring;
1059 	int i;
1060 
1061 	/* 1 interrupt per received frame */
1062 	bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);
1063 
1064 	/* Enable 802.3x tx flow control (honor received PAUSE frames) */
1065 	bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);
1066 
1067 	bgmac_set_rx_mode(bgmac->net_dev);
1068 
1069 	bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);
1070 
1071 	if (bgmac->loopback)
1072 		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
1073 	else
1074 		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);
1075 
1076 	bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
1077 
1078 	if (!bgmac->autoneg) {
1079 		bgmac_speed(bgmac, bgmac->speed);
1080 		bgmac_phy_force(bgmac);
1081 	} else if (bgmac->speed) { /* if there is anything to adv */
1082 		bgmac_phy_advertise(bgmac);
1083 	}
1084 
1085 	if (full_init) {
1086 		bgmac_dma_init(bgmac);
1087 		if (1) /* FIXME: is there any case we don't want IRQs? */
1088 			bgmac_chip_intrs_on(bgmac);
1089 	} else {
1090 		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
1091 			ring = &bgmac->rx_ring[i];
1092 			bgmac_dma_rx_enable(bgmac, ring);
1093 		}
1094 	}
1095 
1096 	bgmac_enable(bgmac);
1097 }
1098 
/* Hardware interrupt handler: acknowledge the pending causes, mask further
 * interrupts and defer the actual work to NAPI (bgmac_poll).
 */
static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
{
	struct bgmac *bgmac = netdev_priv(dev_id);

	u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
	int_status &= bgmac->int_mask;

	/* Nothing we care about - likely another device on the shared line */
	if (!int_status)
		return IRQ_NONE;

	/* Ack */
	bgmac_write(bgmac, BGMAC_INT_STATUS, int_status);

	/* Disable new interrupts until handling existing ones */
	bgmac_chip_intrs_off(bgmac);

	/* Remember the causes for bgmac_poll() to process */
	bgmac->int_status = int_status;

	napi_schedule(&bgmac->napi);

	return IRQ_HANDLED;
}
1121 
1122 static int bgmac_poll(struct napi_struct *napi, int weight)
1123 {
1124 	struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
1125 	struct bgmac_dma_ring *ring;
1126 	int handled = 0;
1127 
1128 	if (bgmac->int_status & BGMAC_IS_TX0) {
1129 		ring = &bgmac->tx_ring[0];
1130 		bgmac_dma_tx_free(bgmac, ring);
1131 		bgmac->int_status &= ~BGMAC_IS_TX0;
1132 	}
1133 
1134 	if (bgmac->int_status & BGMAC_IS_RX) {
1135 		ring = &bgmac->rx_ring[0];
1136 		handled += bgmac_dma_rx_read(bgmac, ring, weight);
1137 		bgmac->int_status &= ~BGMAC_IS_RX;
1138 	}
1139 
1140 	if (bgmac->int_status) {
1141 		bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", bgmac->int_status);
1142 		bgmac->int_status = 0;
1143 	}
1144 
1145 	if (handled < weight)
1146 		napi_complete(napi);
1147 
1148 	bgmac_chip_intrs_on(bgmac);
1149 
1150 	return handled;
1151 }
1152 
1153 /**************************************************
1154  * net_device_ops
1155  **************************************************/
1156 
1157 static int bgmac_open(struct net_device *net_dev)
1158 {
1159 	struct bgmac *bgmac = netdev_priv(net_dev);
1160 	int err = 0;
1161 
1162 	bgmac_chip_reset(bgmac);
1163 	/* Specs say about reclaiming rings here, but we do that in DMA init */
1164 	bgmac_chip_init(bgmac, true);
1165 
1166 	err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED,
1167 			  KBUILD_MODNAME, net_dev);
1168 	if (err < 0) {
1169 		bgmac_err(bgmac, "IRQ request error: %d!\n", err);
1170 		goto err_out;
1171 	}
1172 	napi_enable(&bgmac->napi);
1173 
1174 	netif_carrier_on(net_dev);
1175 
1176 err_out:
1177 	return err;
1178 }
1179 
/* ndo_stop: tear down in reverse order of bgmac_open() - stop NAPI, mask
 * chip interrupts, release the IRQ, then put the chip back into reset.
 * Always returns 0.
 */
static int bgmac_stop(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	netif_carrier_off(net_dev);

	napi_disable(&bgmac->napi);
	bgmac_chip_intrs_off(bgmac);
	free_irq(bgmac->core->irq, net_dev);

	bgmac_chip_reset(bgmac);

	return 0;
}
1194 
1195 static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
1196 				    struct net_device *net_dev)
1197 {
1198 	struct bgmac *bgmac = netdev_priv(net_dev);
1199 	struct bgmac_dma_ring *ring;
1200 
1201 	/* No QOS support yet */
1202 	ring = &bgmac->tx_ring[0];
1203 	return bgmac_dma_tx_add(bgmac, ring, skb);
1204 }
1205 
1206 static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
1207 {
1208 	struct bgmac *bgmac = netdev_priv(net_dev);
1209 	int ret;
1210 
1211 	ret = eth_prepare_mac_addr_change(net_dev, addr);
1212 	if (ret < 0)
1213 		return ret;
1214 	bgmac_write_mac_address(bgmac, (u8 *)addr);
1215 	eth_commit_mac_addr_change(net_dev, addr);
1216 	return 0;
1217 }
1218 
/* ndo_do_ioctl: legacy MII ioctls (used e.g. by mii-tool).
 * SIOCGMIIPHY fills in our PHY address and deliberately falls through to
 * SIOCGMIIREG to read the requested register. Register access requires the
 * interface to be running; -EAGAIN is returned otherwise.
 */
static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bgmac->phyaddr;
		/* fallthru */
	case SIOCGMIIREG:
		if (!netif_running(net_dev))
			return -EAGAIN;
		data->val_out = bgmac_phy_read(bgmac, data->phy_id,
					       data->reg_num & 0x1f);
		return 0;
	case SIOCSMIIREG:
		if (!netif_running(net_dev))
			return -EAGAIN;
		bgmac_phy_write(bgmac, data->phy_id, data->reg_num & 0x1f,
				data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
1244 
/* net_device callbacks implemented by this driver */
static const struct net_device_ops bgmac_netdev_ops = {
	.ndo_open		= bgmac_open,
	.ndo_stop		= bgmac_stop,
	.ndo_start_xmit		= bgmac_start_xmit,
	.ndo_set_rx_mode	= bgmac_set_rx_mode,
	.ndo_set_mac_address	= bgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl           = bgmac_ioctl,
};
1254 
1255 /**************************************************
1256  * ethtool_ops
1257  **************************************************/
1258 
/* ethtool get_settings: report all supported link modes and either the
 * currently advertised modes (autoneg on) or the forced speed (autoneg off),
 * plus the duplex setting. Always returns 0.
 */
static int bgmac_get_settings(struct net_device *net_dev,
			      struct ethtool_cmd *cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	cmd->supported = SUPPORTED_10baseT_Half |
			 SUPPORTED_10baseT_Full |
			 SUPPORTED_100baseT_Half |
			 SUPPORTED_100baseT_Full |
			 SUPPORTED_1000baseT_Half |
			 SUPPORTED_1000baseT_Full |
			 SUPPORTED_Autoneg;

	if (bgmac->autoneg) {
		/* Sanity check: we expect to start from an empty mask
		 * (presumably the ethtool core zeroes cmd - TODO confirm)
		 */
		WARN_ON(cmd->advertising);
		if (bgmac->full_duplex) {
			if (bgmac->speed & BGMAC_SPEED_10)
				cmd->advertising |= ADVERTISED_10baseT_Full;
			if (bgmac->speed & BGMAC_SPEED_100)
				cmd->advertising |= ADVERTISED_100baseT_Full;
			if (bgmac->speed & BGMAC_SPEED_1000)
				cmd->advertising |= ADVERTISED_1000baseT_Full;
		} else {
			if (bgmac->speed & BGMAC_SPEED_10)
				cmd->advertising |= ADVERTISED_10baseT_Half;
			if (bgmac->speed & BGMAC_SPEED_100)
				cmd->advertising |= ADVERTISED_100baseT_Half;
			if (bgmac->speed & BGMAC_SPEED_1000)
				cmd->advertising |= ADVERTISED_1000baseT_Half;
		}
	} else {
		/* Forced mode: bgmac->speed holds exactly one speed flag */
		switch (bgmac->speed) {
		case BGMAC_SPEED_10:
			ethtool_cmd_speed_set(cmd, SPEED_10);
			break;
		case BGMAC_SPEED_100:
			ethtool_cmd_speed_set(cmd, SPEED_100);
			break;
		case BGMAC_SPEED_1000:
			ethtool_cmd_speed_set(cmd, SPEED_1000);
			break;
		}
	}

	cmd->duplex = bgmac->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;

	cmd->autoneg = bgmac->autoneg;

	return 0;
}
1309 
#if 0
/* Disabled stub: changing link settings via ethtool is not implemented yet,
 * so this is kept out of bgmac_ethtool_ops until it does something useful.
 */
static int bgmac_set_settings(struct net_device *net_dev,
			      struct ethtool_cmd *cmd)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	return -1;
}
#endif
1319 
/* ethtool get_drvinfo: report driver name and the bus it sits on */
static void bgmac_get_drvinfo(struct net_device *net_dev,
			      struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info));
}
1326 
/* ethtool callbacks; set_settings is not implemented (see #if 0 above) */
static const struct ethtool_ops bgmac_ethtool_ops = {
	.get_settings		= bgmac_get_settings,
	.get_drvinfo		= bgmac_get_drvinfo,
};
1331 
1332 /**************************************************
1333  * MII
1334  **************************************************/
1335 
/* mii_bus read callback; bus->priv holds our struct bgmac */
static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum)
{
	return bgmac_phy_read(bus->priv, mii_id, regnum);
}
1340 
/* mii_bus write callback; bus->priv holds our struct bgmac */
static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	return bgmac_phy_write(bus->priv, mii_id, regnum, value);
}
1346 
1347 static int bgmac_mii_register(struct bgmac *bgmac)
1348 {
1349 	struct mii_bus *mii_bus;
1350 	int i, err = 0;
1351 
1352 	mii_bus = mdiobus_alloc();
1353 	if (!mii_bus)
1354 		return -ENOMEM;
1355 
1356 	mii_bus->name = "bgmac mii bus";
1357 	sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num,
1358 		bgmac->core->core_unit);
1359 	mii_bus->priv = bgmac;
1360 	mii_bus->read = bgmac_mii_read;
1361 	mii_bus->write = bgmac_mii_write;
1362 	mii_bus->parent = &bgmac->core->dev;
1363 	mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
1364 
1365 	mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
1366 	if (!mii_bus->irq) {
1367 		err = -ENOMEM;
1368 		goto err_free_bus;
1369 	}
1370 	for (i = 0; i < PHY_MAX_ADDR; i++)
1371 		mii_bus->irq[i] = PHY_POLL;
1372 
1373 	err = mdiobus_register(mii_bus);
1374 	if (err) {
1375 		bgmac_err(bgmac, "Registration of mii bus failed\n");
1376 		goto err_free_irq;
1377 	}
1378 
1379 	bgmac->mii_bus = mii_bus;
1380 
1381 	return err;
1382 
1383 err_free_irq:
1384 	kfree(mii_bus->irq);
1385 err_free_bus:
1386 	mdiobus_free(mii_bus);
1387 	return err;
1388 }
1389 
/* Undo bgmac_mii_register(): unregister and free the MDIO bus and its
 * irq table, in reverse order of allocation.
 */
static void bgmac_mii_unregister(struct bgmac *bgmac)
{
	struct mii_bus *mii_bus = bgmac->mii_bus;

	mdiobus_unregister(mii_bus);
	kfree(mii_bus->irq);
	mdiobus_free(mii_bus);
}
1398 
1399 /**************************************************
1400  * BCMA bus ops
1401  **************************************************/
1402 
1403 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
1404 static int bgmac_probe(struct bcma_device *core)
1405 {
1406 	struct net_device *net_dev;
1407 	struct bgmac *bgmac;
1408 	struct ssb_sprom *sprom = &core->bus->sprom;
1409 	u8 *mac = core->core_unit ? sprom->et1mac : sprom->et0mac;
1410 	int err;
1411 
1412 	/* We don't support 2nd, 3rd, ... units, SPROM has to be adjusted */
1413 	if (core->core_unit > 1) {
1414 		pr_err("Unsupported core_unit %d\n", core->core_unit);
1415 		return -ENOTSUPP;
1416 	}
1417 
1418 	if (!is_valid_ether_addr(mac)) {
1419 		dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac);
1420 		eth_random_addr(mac);
1421 		dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
1422 	}
1423 
1424 	/* Allocation and references */
1425 	net_dev = alloc_etherdev(sizeof(*bgmac));
1426 	if (!net_dev)
1427 		return -ENOMEM;
1428 	net_dev->netdev_ops = &bgmac_netdev_ops;
1429 	net_dev->irq = core->irq;
1430 	SET_ETHTOOL_OPS(net_dev, &bgmac_ethtool_ops);
1431 	bgmac = netdev_priv(net_dev);
1432 	bgmac->net_dev = net_dev;
1433 	bgmac->core = core;
1434 	bcma_set_drvdata(core, bgmac);
1435 
1436 	/* Defaults */
1437 	bgmac->autoneg = true;
1438 	bgmac->full_duplex = true;
1439 	bgmac->speed = BGMAC_SPEED_10 | BGMAC_SPEED_100 | BGMAC_SPEED_1000;
1440 	memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);
1441 
1442 	/* On BCM4706 we need common core to access PHY */
1443 	if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
1444 	    !core->bus->drv_gmac_cmn.core) {
1445 		bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n");
1446 		err = -ENODEV;
1447 		goto err_netdev_free;
1448 	}
1449 	bgmac->cmn = core->bus->drv_gmac_cmn.core;
1450 
1451 	bgmac->phyaddr = core->core_unit ? sprom->et1phyaddr :
1452 			 sprom->et0phyaddr;
1453 	bgmac->phyaddr &= BGMAC_PHY_MASK;
1454 	if (bgmac->phyaddr == BGMAC_PHY_MASK) {
1455 		bgmac_err(bgmac, "No PHY found\n");
1456 		err = -ENODEV;
1457 		goto err_netdev_free;
1458 	}
1459 	bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr,
1460 		   bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");
1461 
1462 	if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
1463 		bgmac_err(bgmac, "PCI setup not implemented\n");
1464 		err = -ENOTSUPP;
1465 		goto err_netdev_free;
1466 	}
1467 
1468 	bgmac_chip_reset(bgmac);
1469 
1470 	err = bgmac_dma_alloc(bgmac);
1471 	if (err) {
1472 		bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
1473 		goto err_netdev_free;
1474 	}
1475 
1476 	bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
1477 	if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
1478 		bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
1479 
1480 	/* TODO: reset the external phy. Specs are needed */
1481 	bgmac_phy_reset(bgmac);
1482 
1483 	bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
1484 			       BGMAC_BFL_ENETROBO);
1485 	if (bgmac->has_robosw)
1486 		bgmac_warn(bgmac, "Support for Roboswitch not implemented\n");
1487 
1488 	if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
1489 		bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
1490 
1491 	err = bgmac_mii_register(bgmac);
1492 	if (err) {
1493 		bgmac_err(bgmac, "Cannot register MDIO\n");
1494 		err = -ENOTSUPP;
1495 		goto err_dma_free;
1496 	}
1497 
1498 	err = register_netdev(bgmac->net_dev);
1499 	if (err) {
1500 		bgmac_err(bgmac, "Cannot register net device\n");
1501 		err = -ENOTSUPP;
1502 		goto err_mii_unregister;
1503 	}
1504 
1505 	netif_carrier_off(net_dev);
1506 
1507 	netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
1508 
1509 	return 0;
1510 
1511 err_mii_unregister:
1512 	bgmac_mii_unregister(bgmac);
1513 err_dma_free:
1514 	bgmac_dma_free(bgmac);
1515 
1516 err_netdev_free:
1517 	bcma_set_drvdata(core, NULL);
1518 	free_netdev(net_dev);
1519 
1520 	return err;
1521 }
1522 
1523 static void bgmac_remove(struct bcma_device *core)
1524 {
1525 	struct bgmac *bgmac = bcma_get_drvdata(core);
1526 
1527 	netif_napi_del(&bgmac->napi);
1528 	unregister_netdev(bgmac->net_dev);
1529 	bgmac_mii_unregister(bgmac);
1530 	bgmac_dma_free(bgmac);
1531 	bcma_set_drvdata(core, NULL);
1532 	free_netdev(bgmac->net_dev);
1533 }
1534 
/* BCMA driver binding for the core IDs listed in bgmac_bcma_tbl */
static struct bcma_driver bgmac_bcma_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= bgmac_bcma_tbl,
	.probe		= bgmac_probe,
	.remove		= bgmac_remove,
};
1541 
1542 static int __init bgmac_init(void)
1543 {
1544 	int err;
1545 
1546 	err = bcma_driver_register(&bgmac_bcma_driver);
1547 	if (err)
1548 		return err;
1549 	pr_info("Broadcom 47xx GBit MAC driver loaded\n");
1550 
1551 	return 0;
1552 }
1553 
/* Module exit: unregister the BCMA driver (triggers bgmac_remove()). */
static void __exit bgmac_exit(void)
{
	bcma_driver_unregister(&bgmac_bcma_driver);
}
1558 
/* Module entry/exit points and metadata */
module_init(bgmac_init)
module_exit(bgmac_exit)

MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");
1564