// SPDX-License-Identifier: GPL-2.0-only
/*
 * ks8842.c timberdale KS8842 ethernet driver
 * Copyright (c) 2009 Intel Corporation
 */

/* Supports:
 * The Micrel KS8842 behind the timberdale FPGA
 * The genuine Micrel KS8841/42 device with ISA 16/32bit bus interface
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/ks8842.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#define DRV_NAME "ks8842"

/* Timberdale specific Registers */
#define REG_TIMB_RST		0x1c
#define REG_TIMB_FIFO		0x20
#define REG_TIMB_ISR		0x24
#define REG_TIMB_IER		0x28
#define REG_TIMB_IAR		0x2C
#define REQ_TIMB_DMA_RESUME	0x30

/* KS8842 registers */

#define REG_SELECT_BANK 0x0e

/* bank 0 registers */
#define REG_QRFCR	0x04

/* bank 2 registers */
#define REG_MARL	0x00
#define REG_MARM	0x02
#define REG_MARH	0x04

/* bank 3 registers */
#define REG_GRR		0x06

/* bank 16 registers */
#define REG_TXCR	0x00
#define REG_TXSR	0x02
#define REG_RXCR	0x04
#define REG_TXMIR	0x08
#define REG_RXMIR	0x0A

/* bank 17 registers */
#define REG_TXQCR	0x00
#define REG_RXQCR	0x02
#define REG_TXFDPR	0x04
#define REG_RXFDPR	0x06
#define REG_QMU_DATA_LO 0x08
#define REG_QMU_DATA_HI 0x0A

/* bank 18 registers */
#define REG_IER		0x00
#define IRQ_LINK_CHANGE	0x8000
#define IRQ_TX		0x4000
#define IRQ_RX		0x2000
#define IRQ_RX_OVERRUN	0x0800
#define IRQ_TX_STOPPED	0x0200
#define IRQ_RX_STOPPED	0x0100
#define IRQ_RX_ERROR	0x0080
#define ENABLED_IRQS	(IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \
		IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
/* When running via timberdale in DMA mode, the RX interrupt should be
   enabled in the KS8842, but not in the FPGA IP, since the IP handles
   RX DMA internally.
   TX interrupts are not needed either: TX is handled by the FPGA and the
   driver is notified via DMA callbacks.
*/
#define ENABLED_IRQS_DMA_IP	(IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \
	IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
#define ENABLED_IRQS_DMA	(ENABLED_IRQS_DMA_IP | IRQ_RX)
#define REG_ISR		0x02
#define REG_RXSR	0x04
#define RXSR_VALID	0x8000
#define RXSR_BROADCAST	0x80
#define RXSR_MULTICAST	0x40
#define RXSR_UNICAST	0x20
#define RXSR_FRAMETYPE	0x08
#define RXSR_TOO_LONG	0x04
#define RXSR_RUNT	0x02
#define RXSR_CRC_ERROR	0x01
#define RXSR_ERROR	(RXSR_TOO_LONG | RXSR_RUNT | RXSR_CRC_ERROR)

/* bank 32 registers */
#define REG_SW_ID_AND_ENABLE	0x00
#define REG_SGCR1		0x02
#define REG_SGCR2		0x04
#define REG_SGCR3		0x06

/* bank 39 registers */
#define REG_MACAR1		0x00
#define REG_MACAR2		0x02
#define REG_MACAR3		0x04

/* bank 45 registers */
#define REG_P1MBCR		0x00
#define REG_P1MBSR		0x02

/* bank 46 registers */
#define REG_P2MBCR		0x00
#define REG_P2MBSR		0x02

/* bank 48 registers */
#define REG_P1CR2		0x02

/* bank 49 registers */
#define REG_P1CR4		0x02
#define REG_P1SR		0x04

/* flags passed by platform_device for configuration */
#define	MICREL_KS884X		0x01	/* 0=Timberdale(FPGA), 1=Micrel */
#define	KS884X_16BIT		0x02	/* 1=16bit, 0=32bit */

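/* One frame is transferred per DMA descriptor: TX frames are first copied
 * into a 2 KiB bounce buffer, while each RX transfer lands in a freshly
 * allocated skb of the same size.
 */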
#define DMA_BUFFER_SIZE		2048

struct ks8842_tx_dma_ctl {
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *adesc;
	void *buf;
	struct scatterlist sg;
	int channel;
};

struct ks8842_rx_dma_ctl {
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *adesc;
	struct sk_buff  *skb;
	struct scatterlist sg;
	struct tasklet_struct tasklet;
	int channel;
};

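/* DMA is used only when both a TX and an RX channel have been assigned;
 * a channel number of -1 means the driver falls back to PIO.
 */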
#define KS8842_USE_DMA(adapter) (((adapter)->dma_tx.channel != -1) && \
	 ((adapter)->dma_rx.channel != -1))

struct ks8842_adapter {
	void __iomem	*hw_addr;
	int		irq;
	unsigned long	conf_flags;	/* copy of platform_device config */
	struct tasklet_struct	tasklet;
	spinlock_t	lock; /* spinlock to be interrupt safe */
	struct work_struct timeout_work;
	struct net_device *netdev;
	struct device *dev;
	struct ks8842_tx_dma_ctl	dma_tx;
	struct ks8842_rx_dma_ctl	dma_rx;
};

static void ks8842_dma_rx_cb(void *data);
static void ks8842_dma_tx_cb(void *data);

static inline void ks8842_resume_dma(struct ks8842_adapter *adapter)
{
	iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME);
}

static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank)
{
	iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK);
}

static inline void ks8842_write8(struct ks8842_adapter *adapter, u16 bank,
	u8 value, int offset)
{
	ks8842_select_bank(adapter, bank);
	iowrite8(value, adapter->hw_addr + offset);
}

static inline void ks8842_write16(struct ks8842_adapter *adapter, u16 bank,
	u16 value, int offset)
{
	ks8842_select_bank(adapter, bank);
	iowrite16(value, adapter->hw_addr + offset);
}

static inline void ks8842_enable_bits(struct ks8842_adapter *adapter, u16 bank,
	u16 bits, int offset)
{
	u16 reg;
	ks8842_select_bank(adapter, bank);
	reg = ioread16(adapter->hw_addr + offset);
	reg |= bits;
	iowrite16(reg, adapter->hw_addr + offset);
}

static inline void ks8842_clear_bits(struct ks8842_adapter *adapter, u16 bank,
	u16 bits, int offset)
{
	u16 reg;
	ks8842_select_bank(adapter, bank);
	reg = ioread16(adapter->hw_addr + offset);
	reg &= ~bits;
	iowrite16(reg, adapter->hw_addr + offset);
}

static inline void ks8842_write32(struct ks8842_adapter *adapter, u16 bank,
	u32 value, int offset)
{
	ks8842_select_bank(adapter, bank);
	iowrite32(value, adapter->hw_addr + offset);
}

static inline u8 ks8842_read8(struct ks8842_adapter *adapter, u16 bank,
	int offset)
{
	ks8842_select_bank(adapter, bank);
	return ioread8(adapter->hw_addr + offset);
}

static inline u16 ks8842_read16(struct ks8842_adapter *adapter, u16 bank,
	int offset)
{
	ks8842_select_bank(adapter, bank);
	return ioread16(adapter->hw_addr + offset);
}

static inline u32 ks8842_read32(struct ks8842_adapter *adapter, u16 bank,
	int offset)
{
	ks8842_select_bank(adapter, bank);
	return ioread32(adapter->hw_addr + offset);
}

static void ks8842_reset(struct ks8842_adapter *adapter)
{
	if (adapter->conf_flags & MICREL_KS884X) {
		ks8842_write16(adapter, 3, 1, REG_GRR);
		msleep(10);
		iowrite16(0, adapter->hw_addr + REG_GRR);
	} else {
		/* The KS8842 goes haywire when doing a software reset,
		 * so a workaround in the timberdale IP is implemented to
		 * do a hardware reset instead.
		ks8842_write16(adapter, 3, 1, REG_GRR);
		msleep(10);
		iowrite16(0, adapter->hw_addr + REG_GRR);
		*/
		iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST);
		msleep(20);
	}
}

static void ks8842_update_link_status(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	/* check the status of the link */
	if (ks8842_read16(adapter, 45, REG_P1MBSR) & 0x4) {
		netif_carrier_on(netdev);
		netif_wake_queue(netdev);
	} else {
		netif_stop_queue(netdev);
		netif_carrier_off(netdev);
	}
}

static void ks8842_enable_tx(struct ks8842_adapter *adapter)
{
	ks8842_enable_bits(adapter, 16, 0x01, REG_TXCR);
}

static void ks8842_disable_tx(struct ks8842_adapter *adapter)
{
	ks8842_clear_bits(adapter, 16, 0x01, REG_TXCR);
}

static void ks8842_enable_rx(struct ks8842_adapter *adapter)
{
	ks8842_enable_bits(adapter, 16, 0x01, REG_RXCR);
}

static void ks8842_disable_rx(struct ks8842_adapter *adapter)
{
	ks8842_clear_bits(adapter, 16, 0x01, REG_RXCR);
}

static void ks8842_reset_hw(struct ks8842_adapter *adapter)
{
	/* reset the HW */
	ks8842_reset(adapter);

	/* Enable QMU Transmit flow control / transmit padding / Transmit CRC */
	ks8842_write16(adapter, 16, 0x000E, REG_TXCR);

	/* enable the receiver, uni + multi + broadcast + flow ctrl
		+ crc strip */
	ks8842_write16(adapter, 16, 0x8 | 0x20 | 0x40 | 0x80 | 0x400,
		REG_RXCR);

	/* TX frame pointer autoincrement */
	ks8842_write16(adapter, 17, 0x4000, REG_TXFDPR);

	/* RX frame pointer autoincrement */
	ks8842_write16(adapter, 17, 0x4000, REG_RXFDPR);

	/* RX 2 kb high watermark */
	ks8842_write16(adapter, 0, 0x1000, REG_QRFCR);

	/* aggressive back off in half duplex */
	ks8842_enable_bits(adapter, 32, 1 << 8, REG_SGCR1);

	/* enable no excessive collision drop */
	ks8842_enable_bits(adapter, 32, 1 << 3, REG_SGCR2);

	/* Enable port 1 force flow control / back pressure / transmit / recv */
	ks8842_write16(adapter, 48, 0x1E07, REG_P1CR2);

	/* restart port auto-negotiation */
	ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4);

	/* Enable the transmitter */
	ks8842_enable_tx(adapter);

	/* Enable the receiver */
	ks8842_enable_rx(adapter);

	/* clear all interrupts */
	ks8842_write16(adapter, 18, 0xffff, REG_ISR);

	/* enable interrupts */
	if (KS8842_USE_DMA(adapter)) {
		/* When running in DMA mode the RX interrupt is not enabled in
		   timberdale because RX data is received via DMA callbacks;
		   it must still be enabled in the KS8842 because it signals
		   to timberdale when there is RX data for its DMA FIFOs */
		iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER);
		ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
	} else {
		if (!(adapter->conf_flags & MICREL_KS884X))
			iowrite16(ENABLED_IRQS,
				adapter->hw_addr + REG_TIMB_IER);
		ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
	}
	/* enable the switch */
	ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE);
}

static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
{
	int i;
	u16 mac;

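	/* The MAC address is stored in reverse byte order in the
	 * MARL/MARM/MARH registers, hence the reversed indexing.
	 */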
	for (i = 0; i < ETH_ALEN; i++)
		dest[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i);

	if (adapter->conf_flags & MICREL_KS884X) {
		/*
		on the genuine Micrel device the MAC address words are
		mirrored to the switch in reverse order
		(MARL/MARM/MARH -> MACAR3/MACAR2/MACAR1).
		*/

		mac = ks8842_read16(adapter, 2, REG_MARL);
		ks8842_write16(adapter, 39, mac, REG_MACAR3);
		mac = ks8842_read16(adapter, 2, REG_MARM);
		ks8842_write16(adapter, 39, mac, REG_MACAR2);
		mac = ks8842_read16(adapter, 2, REG_MARH);
		ks8842_write16(adapter, 39, mac, REG_MACAR1);
	} else {

		/* make sure the switch port uses the same MAC as the QMU */
		mac = ks8842_read16(adapter, 2, REG_MARL);
		ks8842_write16(adapter, 39, mac, REG_MACAR1);
		mac = ks8842_read16(adapter, 2, REG_MARM);
		ks8842_write16(adapter, 39, mac, REG_MACAR2);
		mac = ks8842_read16(adapter, 2, REG_MARH);
		ks8842_write16(adapter, 39, mac, REG_MACAR3);
	}
}

static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
{
	unsigned long flags;
	unsigned i;

	spin_lock_irqsave(&adapter->lock, flags);
	for (i = 0; i < ETH_ALEN; i++) {
		ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
		if (!(adapter->conf_flags & MICREL_KS884X))
			ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
				REG_MACAR1 + i);
	}

	if (adapter->conf_flags & MICREL_KS884X) {
		/*
		on the genuine Micrel device the MAC address words are
		mirrored to the switch in reverse order
		(MARL/MARM/MARH -> MACAR3/MACAR2/MACAR1).
		*/

		u16 mac;

		mac = ks8842_read16(adapter, 2, REG_MARL);
		ks8842_write16(adapter, 39, mac, REG_MACAR3);
		mac = ks8842_read16(adapter, 2, REG_MARM);
		ks8842_write16(adapter, 39, mac, REG_MACAR2);
		mac = ks8842_read16(adapter, 2, REG_MARH);
		ks8842_write16(adapter, 39, mac, REG_MACAR1);
	}
	spin_unlock_irqrestore(&adapter->lock, flags);
}

static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter)
{
	return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
}

static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx;
	u8 *buf = ctl->buf;

	if (ctl->adesc) {
		netdev_dbg(netdev, "%s: TX ongoing\n", __func__);
		/* transfer ongoing */
		return NETDEV_TX_BUSY;
	}

	sg_dma_len(&ctl->sg) = skb->len + sizeof(u32);

	/* copy data to the TX buffer */
	/* the control word, enable IRQ, port 1 and the length */
	*buf++ = 0x00;
	*buf++ = 0x01; /* Port 1 */
	*buf++ = skb->len & 0xff;
	*buf++ = (skb->len >> 8) & 0xff;
	skb_copy_from_linear_data(skb, buf, skb->len);

	dma_sync_single_range_for_device(adapter->dev,
		sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg),
		DMA_TO_DEVICE);

	/* make sure the length is a multiple of 4 */
	if (sg_dma_len(&ctl->sg) % 4)
		sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;

	ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
		&ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!ctl->adesc)
		return NETDEV_TX_BUSY;

	ctl->adesc->callback_param = netdev;
	ctl->adesc->callback = ks8842_dma_tx_cb;
	ctl->adesc->tx_submit(ctl->adesc);

	netdev->stats.tx_bytes += skb->len;

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	int len = skb->len;

	netdev_dbg(netdev, "%s: len %u head %p data %p tail %p end %p\n",
		__func__, skb->len, skb->head, skb->data,
		skb_tail_pointer(skb), skb_end_pointer(skb));

	/* check FIFO buffer space, we need space for CRC and command bits */
	if (ks8842_tx_fifo_space(adapter) < len + 8)
		return NETDEV_TX_BUSY;

	if (adapter->conf_flags & KS884X_16BIT) {
		u16 *ptr16 = (u16 *)skb->data;
		ks8842_write16(adapter, 17, 0x8000 | 0x100, REG_QMU_DATA_LO);
		ks8842_write16(adapter, 17, (u16)len, REG_QMU_DATA_HI);
		netdev->stats.tx_bytes += len;

		/* copy buffer */
		while (len > 0) {
			iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_LO);
			iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_HI);
			len -= sizeof(u32);
		}
	} else {

		u32 *ptr = (u32 *)skb->data;
		u32 ctrl;
		/* the control word, enable IRQ, port 1 and the length */
		ctrl = 0x8000 | 0x100 | (len << 16);
		ks8842_write32(adapter, 17, ctrl, REG_QMU_DATA_LO);

		netdev->stats.tx_bytes += len;

		/* copy buffer */
		while (len > 0) {
			iowrite32(*ptr, adapter->hw_addr + REG_QMU_DATA_LO);
			len -= sizeof(u32);
			ptr++;
		}
	}

	/* enqueue packet */
	ks8842_write16(adapter, 17, 1, REG_TXQCR);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static void ks8842_update_rx_err_counters(struct net_device *netdev, u32 status)
{
	netdev_dbg(netdev, "RX error, status: %x\n", status);

	netdev->stats.rx_errors++;
	if (status & RXSR_TOO_LONG)
		netdev->stats.rx_length_errors++;
	if (status & RXSR_CRC_ERROR)
		netdev->stats.rx_crc_errors++;
	if (status & RXSR_RUNT)
		netdev->stats.rx_frame_errors++;
}

static void ks8842_update_rx_counters(struct net_device *netdev, u32 status,
	int len)
{
	netdev_dbg(netdev, "RX packet, len: %d\n", len);

	netdev->stats.rx_packets++;
	netdev->stats.rx_bytes += len;
	if (status & RXSR_MULTICAST)
		netdev->stats.multicast++;
}

static int __ks8842_start_new_rx_dma(struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
	struct scatterlist *sg = &ctl->sg;
	int err;

	ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE);
	if (ctl->skb) {
		sg_init_table(sg, 1);
		sg_dma_address(sg) = dma_map_single(adapter->dev,
			ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(adapter->dev, sg_dma_address(sg))) {
			err = -ENOMEM;
			sg_dma_address(sg) = 0;
			goto out;
		}

		sg_dma_len(sg) = DMA_BUFFER_SIZE;

		ctl->adesc = dmaengine_prep_slave_sg(ctl->chan,
			sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);

		if (!ctl->adesc) {
			err = -ENOMEM;
			goto out;
		}

		ctl->adesc->callback_param = netdev;
		ctl->adesc->callback = ks8842_dma_rx_cb;
		ctl->adesc->tx_submit(ctl->adesc);
	} else {
		err = -ENOMEM;
		sg_dma_address(sg) = 0;
		goto out;
	}

	return 0;
out:
	if (sg_dma_address(sg))
		dma_unmap_single(adapter->dev, sg_dma_address(sg),
			DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
	sg_dma_address(sg) = 0;
	if (ctl->skb)
		dev_kfree_skb(ctl->skb);

	ctl->skb = NULL;

	printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err);
	return err;
}

static void ks8842_rx_frame_dma_tasklet(unsigned long arg)
{
	struct net_device *netdev = (struct net_device *)arg;
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
	struct sk_buff *skb = ctl->skb;
	dma_addr_t addr = sg_dma_address(&ctl->sg);
	u32 status;

	ctl->adesc = NULL;

	/* kick next transfer going */
	__ks8842_start_new_rx_dma(netdev);

	/* now handle the data we got */
	dma_unmap_single(adapter->dev, addr, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);

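	/* The received buffer starts with a 4 byte status word: the low
	 * 16 bits hold the RX status flags and bits 16..26 the frame length.
	 */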
	status = *((u32 *)skb->data);

	netdev_dbg(netdev, "%s - rx_data: status: %x\n",
		__func__, status & 0xffff);

	/* check the status */
	if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
		int len = (status >> 16) & 0x7ff;

		ks8842_update_rx_counters(netdev, status, len);

		/* reserve 4 bytes which is the status word */
		skb_reserve(skb, 4);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, netdev);
		netif_rx(skb);
	} else {
		ks8842_update_rx_err_counters(netdev, status);
		dev_kfree_skb(skb);
	}
}

static void ks8842_rx_frame(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	u32 status;
	int len;

	if (adapter->conf_flags & KS884X_16BIT) {
		status = ks8842_read16(adapter, 17, REG_QMU_DATA_LO);
		len = ks8842_read16(adapter, 17, REG_QMU_DATA_HI);
		netdev_dbg(netdev, "%s - rx_data: status: %x\n",
			   __func__, status);
	} else {
		status = ks8842_read32(adapter, 17, REG_QMU_DATA_LO);
		len = (status >> 16) & 0x7ff;
		status &= 0xffff;
		netdev_dbg(netdev, "%s - rx_data: status: %x\n",
			   __func__, status);
	}

	/* check the status */
	if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
		struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len + 3);

		if (skb) {

			ks8842_update_rx_counters(netdev, status, len);

			if (adapter->conf_flags & KS884X_16BIT) {
				u16 *data16 = skb_put(skb, len);
				ks8842_select_bank(adapter, 17);
				while (len > 0) {
					*data16++ = ioread16(adapter->hw_addr +
						REG_QMU_DATA_LO);
					*data16++ = ioread16(adapter->hw_addr +
						REG_QMU_DATA_HI);
					len -= sizeof(u32);
				}
			} else {
				u32 *data = skb_put(skb, len);

				ks8842_select_bank(adapter, 17);
				while (len > 0) {
					*data++ = ioread32(adapter->hw_addr +
						REG_QMU_DATA_LO);
					len -= sizeof(u32);
				}
			}
			skb->protocol = eth_type_trans(skb, netdev);
			netif_rx(skb);
		} else
			netdev->stats.rx_dropped++;
	} else
		ks8842_update_rx_err_counters(netdev, status);

	/* set high watermark to 3K */
	ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR);

	/* release the frame */
	ks8842_write16(adapter, 17, 0x01, REG_RXQCR);

	/* set high watermark to 2K */
	ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR);
}

static void ks8842_handle_rx(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
	netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data);
	while (rx_data) {
		ks8842_rx_frame(netdev, adapter);
		rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
	}
}

static void ks8842_handle_tx(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	u16 sr = ks8842_read16(adapter, 16, REG_TXSR);
	netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr);
	netdev->stats.tx_packets++;
	if (netif_queue_stopped(netdev))
		netif_wake_queue(netdev);
}

static void ks8842_handle_rx_overrun(struct net_device *netdev,
	struct ks8842_adapter *adapter)
{
	netdev_dbg(netdev, "%s: entry\n", __func__);
	netdev->stats.rx_errors++;
	netdev->stats.rx_fifo_errors++;
}

static void ks8842_tasklet(unsigned long arg)
{
	struct net_device *netdev = (struct net_device *)arg;
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	u16 isr;
	unsigned long flags;
	u16 entry_bank;

	/* read current bank to be able to set it back */
	spin_lock_irqsave(&adapter->lock, flags);
	entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK);
	spin_unlock_irqrestore(&adapter->lock, flags);

	isr = ks8842_read16(adapter, 18, REG_ISR);
	netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);

	/* when running in DMA mode, do not ack RX interrupts; they are handled
	   internally by timberdale, otherwise its DMA FIFOs would stop
	*/
	if (KS8842_USE_DMA(adapter))
		isr &= ~IRQ_RX;

	/* Ack */
	ks8842_write16(adapter, 18, isr, REG_ISR);

	if (!(adapter->conf_flags & MICREL_KS884X))
		/* Ack in the timberdale IP as well */
		iowrite32(0x1, adapter->hw_addr + REG_TIMB_IAR);

	if (!netif_running(netdev))
		return;

	if (isr & IRQ_LINK_CHANGE)
		ks8842_update_link_status(netdev, adapter);

	/* should not get IRQ_RX when running DMA mode */
	if (isr & (IRQ_RX | IRQ_RX_ERROR) && !KS8842_USE_DMA(adapter))
		ks8842_handle_rx(netdev, adapter);

	/* should only happen when in PIO mode */
	if (isr & IRQ_TX)
		ks8842_handle_tx(netdev, adapter);

	if (isr & IRQ_RX_OVERRUN)
		ks8842_handle_rx_overrun(netdev, adapter);

	if (isr & IRQ_TX_STOPPED) {
		ks8842_disable_tx(adapter);
		ks8842_enable_tx(adapter);
	}

	if (isr & IRQ_RX_STOPPED) {
		ks8842_disable_rx(adapter);
		ks8842_enable_rx(adapter);
	}

	/* re-enable interrupts, put back the bank selection register */
	spin_lock_irqsave(&adapter->lock, flags);
	if (KS8842_USE_DMA(adapter))
		ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
	else
		ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
	iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);

	/* Make sure timberdale continues DMA operations; they are stopped
	   while we are handling the ks8842 because we might change bank */
	if (KS8842_USE_DMA(adapter))
		ks8842_resume_dma(adapter);

	spin_unlock_irqrestore(&adapter->lock, flags);
}

static irqreturn_t ks8842_irq(int irq, void *devid)
{
	struct net_device *netdev = devid;
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	u16 isr;
	u16 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK);
	irqreturn_t ret = IRQ_NONE;

	isr = ks8842_read16(adapter, 18, REG_ISR);
	netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);

	if (isr) {
		if (KS8842_USE_DMA(adapter))
			/* disable all but RX IRQ, since the FPGA relies on it */
			ks8842_write16(adapter, 18, IRQ_RX, REG_IER);
		else
			/* disable IRQ */
			ks8842_write16(adapter, 18, 0x00, REG_IER);

		/* schedule tasklet */
		tasklet_schedule(&adapter->tasklet);

		ret = IRQ_HANDLED;
	}

	iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);

	/* After an interrupt, tell timberdale to continue DMA operations.
	   DMA is disabled while we are handling the ks8842 because we might
	   change bank */
	ks8842_resume_dma(adapter);

	return ret;
}

static void ks8842_dma_rx_cb(void *data)
{
	struct net_device	*netdev = data;
	struct ks8842_adapter	*adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "RX DMA finished\n");
	/* schedule tasklet */
	if (adapter->dma_rx.adesc)
		tasklet_schedule(&adapter->dma_rx.tasklet);
}

static void ks8842_dma_tx_cb(void *data)
{
	struct net_device		*netdev = data;
	struct ks8842_adapter		*adapter = netdev_priv(netdev);
	struct ks8842_tx_dma_ctl	*ctl = &adapter->dma_tx;

	netdev_dbg(netdev, "TX DMA finished\n");

	if (!ctl->adesc)
		return;

	netdev->stats.tx_packets++;
	ctl->adesc = NULL;

	if (netif_queue_stopped(netdev))
		netif_wake_queue(netdev);
}

static void ks8842_stop_dma(struct ks8842_adapter *adapter)
{
	struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
	struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;

	tx_ctl->adesc = NULL;
	if (tx_ctl->chan)
		dmaengine_terminate_all(tx_ctl->chan);

	rx_ctl->adesc = NULL;
	if (rx_ctl->chan)
		dmaengine_terminate_all(rx_ctl->chan);

	if (sg_dma_address(&rx_ctl->sg))
		dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg),
			DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
	sg_dma_address(&rx_ctl->sg) = 0;

	dev_kfree_skb(rx_ctl->skb);
	rx_ctl->skb = NULL;
}

static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter)
{
	struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
	struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;

	ks8842_stop_dma(adapter);

	if (tx_ctl->chan)
		dma_release_channel(tx_ctl->chan);
	tx_ctl->chan = NULL;

	if (rx_ctl->chan)
		dma_release_channel(rx_ctl->chan);
	rx_ctl->chan = NULL;

	tasklet_kill(&rx_ctl->tasklet);

	if (sg_dma_address(&tx_ctl->sg))
		dma_unmap_single(adapter->dev, sg_dma_address(&tx_ctl->sg),
			DMA_BUFFER_SIZE, DMA_TO_DEVICE);
	sg_dma_address(&tx_ctl->sg) = 0;

	kfree(tx_ctl->buf);
	tx_ctl->buf = NULL;
}

static bool ks8842_dma_filter_fn(struct dma_chan *chan, void *filter_param)
{
	return chan->chan_id == (long)filter_param;
}

static int ks8842_alloc_dma_bufs(struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
	struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
	int err;

	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_PRIVATE, mask);

	sg_init_table(&tx_ctl->sg, 1);

	tx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
					   (void *)(long)tx_ctl->channel);
	if (!tx_ctl->chan) {
		err = -ENODEV;
		goto err;
	}

	/* allocate DMA buffer */
	tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
	if (!tx_ctl->buf) {
		err = -ENOMEM;
		goto err;
	}

	sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
		tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) {
		err = -ENOMEM;
		sg_dma_address(&tx_ctl->sg) = 0;
		goto err;
	}

	rx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
					   (void *)(long)rx_ctl->channel);
	if (!rx_ctl->chan) {
		err = -ENODEV;
		goto err;
	}

	tasklet_init(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet,
		(unsigned long)netdev);

	return 0;
err:
	ks8842_dealloc_dma_bufs(adapter);
	return err;
}

/* Netdevice operations */

static int ks8842_open(struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	int err;

	netdev_dbg(netdev, "%s - entry\n", __func__);

	if (KS8842_USE_DMA(adapter)) {
		err = ks8842_alloc_dma_bufs(netdev);

		if (!err) {
			/* start RX dma */
			err = __ks8842_start_new_rx_dma(netdev);
			if (err)
				ks8842_dealloc_dma_bufs(adapter);
		}

		if (err) {
			printk(KERN_WARNING DRV_NAME
				": Failed to initiate DMA, running PIO\n");
			ks8842_dealloc_dma_bufs(adapter);
			adapter->dma_rx.channel = -1;
			adapter->dma_tx.channel = -1;
		}
	}

	/* reset the HW */
	ks8842_reset_hw(adapter);

	ks8842_write_mac_addr(adapter, netdev->dev_addr);

	ks8842_update_link_status(netdev, adapter);

	err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME,
		netdev);
	if (err) {
		pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err);
		return err;
	}

	return 0;
}

static int ks8842_close(struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "%s - entry\n", __func__);

	cancel_work_sync(&adapter->timeout_work);

	if (KS8842_USE_DMA(adapter))
		ks8842_dealloc_dma_bufs(adapter);

	/* free the irq */
	free_irq(adapter->irq, netdev);

	/* disable the switch */
	ks8842_write16(adapter, 32, 0x0, REG_SW_ID_AND_ENABLE);

	return 0;
}

static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb,
				     struct net_device *netdev)
{
	int ret;
	struct ks8842_adapter *adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "%s: entry\n", __func__);

	if (KS8842_USE_DMA(adapter)) {
		unsigned long flags;
		ret = ks8842_tx_frame_dma(skb, netdev);
		/* for now only allow one transfer at a time */
		spin_lock_irqsave(&adapter->lock, flags);
		if (adapter->dma_tx.adesc)
			netif_stop_queue(netdev);
		spin_unlock_irqrestore(&adapter->lock, flags);
		return ret;
	}

	ret = ks8842_tx_frame(skb, netdev);

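	/* Stop the queue if another full-sized frame (plus the CRC and
	 * command word overhead, see ks8842_tx_frame()) would no longer
	 * fit in the TX FIFO.
	 */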
	if (ks8842_tx_fifo_space(adapter) < netdev->mtu + 8)
		netif_stop_queue(netdev);

	return ret;
}

static int ks8842_set_mac(struct net_device *netdev, void *p)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	char *mac = (u8 *)addr->sa_data;

	netdev_dbg(netdev, "%s: entry\n", __func__);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, mac, netdev->addr_len);

	ks8842_write_mac_addr(adapter, mac);
	return 0;
}

static void ks8842_tx_timeout_work(struct work_struct *work)
{
	struct ks8842_adapter *adapter =
		container_of(work, struct ks8842_adapter, timeout_work);
	struct net_device *netdev = adapter->netdev;
	unsigned long flags;

	netdev_dbg(netdev, "%s: entry\n", __func__);

	spin_lock_irqsave(&adapter->lock, flags);

	if (KS8842_USE_DMA(adapter))
		ks8842_stop_dma(adapter);

	/* disable interrupts */
	ks8842_write16(adapter, 18, 0, REG_IER);
	ks8842_write16(adapter, 18, 0xFFFF, REG_ISR);

	netif_stop_queue(netdev);

	spin_unlock_irqrestore(&adapter->lock, flags);

	ks8842_reset_hw(adapter);

	ks8842_write_mac_addr(adapter, netdev->dev_addr);

	ks8842_update_link_status(netdev, adapter);

	if (KS8842_USE_DMA(adapter))
		__ks8842_start_new_rx_dma(netdev);
}

static void ks8842_tx_timeout(struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);

	netdev_dbg(netdev, "%s: entry\n", __func__);

	schedule_work(&adapter->timeout_work);
}

static const struct net_device_ops ks8842_netdev_ops = {
	.ndo_open		= ks8842_open,
	.ndo_stop		= ks8842_close,
	.ndo_start_xmit		= ks8842_xmit_frame,
	.ndo_set_mac_address	= ks8842_set_mac,
	.ndo_tx_timeout		= ks8842_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr
};

static const struct ethtool_ops ks8842_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
};

static int ks8842_probe(struct platform_device *pdev)
{
	int err = -ENOMEM;
	struct resource *iomem;
	struct net_device *netdev;
	struct ks8842_adapter *adapter;
	struct ks8842_platform_data *pdata = dev_get_platdata(&pdev->dev);
	u16 id;
	unsigned i;

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME))
		goto err_mem_region;

	netdev = alloc_etherdev(sizeof(struct ks8842_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	INIT_WORK(&adapter->timeout_work, ks8842_tx_timeout_work);
	adapter->hw_addr = ioremap(iomem->start, resource_size(iomem));
	adapter->conf_flags = iomem->flags;

	if (!adapter->hw_addr)
		goto err_ioremap;

	adapter->irq = platform_get_irq(pdev, 0);
	if (adapter->irq < 0) {
		err = adapter->irq;
		goto err_get_irq;
	}

	adapter->dev = (pdev->dev.parent) ? pdev->dev.parent : &pdev->dev;

	/* DMA is only supported when accessed via timberdale */
	if (!(adapter->conf_flags & MICREL_KS884X) && pdata &&
		(pdata->tx_dma_channel != -1) &&
		(pdata->rx_dma_channel != -1)) {
		adapter->dma_rx.channel = pdata->rx_dma_channel;
		adapter->dma_tx.channel = pdata->tx_dma_channel;
	} else {
		adapter->dma_rx.channel = -1;
		adapter->dma_tx.channel = -1;
	}

	tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev);
	spin_lock_init(&adapter->lock);

	netdev->netdev_ops = &ks8842_netdev_ops;
	netdev->ethtool_ops = &ks8842_ethtool_ops;

	/* Check if a mac address was given */
	i = netdev->addr_len;
	if (pdata) {
		for (i = 0; i < netdev->addr_len; i++)
			if (pdata->macaddr[i] != 0)
				break;

		if (i < netdev->addr_len)
			/* an address was passed, use it */
			memcpy(netdev->dev_addr, pdata->macaddr,
				netdev->addr_len);
	}

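	/* i == netdev->addr_len here means platform data supplied no usable
	 * (non-zero) address, so fall back to what the chip already holds,
	 * or a random address.
	 */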
	if (i == netdev->addr_len) {
		ks8842_read_mac_addr(adapter, netdev->dev_addr);

		if (!is_valid_ether_addr(netdev->dev_addr))
			eth_hw_addr_random(netdev);
	}

	id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	platform_set_drvdata(pdev, netdev);

	pr_info("Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
		(id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);

	return 0;

err_register:
err_get_irq:
	iounmap(adapter->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	release_mem_region(iomem->start, resource_size(iomem));
err_mem_region:
	return err;
}

static int ks8842_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	unregister_netdev(netdev);
	tasklet_kill(&adapter->tasklet);
	iounmap(adapter->hw_addr);
	free_netdev(netdev);
	release_mem_region(iomem->start, resource_size(iomem));
	return 0;
}


static struct platform_driver ks8842_platform_driver = {
	.driver = {
		.name	= DRV_NAME,
	},
	.probe		= ks8842_probe,
	.remove		= ks8842_remove,
};

module_platform_driver(ks8842_platform_driver);

MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver");
MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ks8842");