/*
 * PXA168 ethernet driver.
 * Most of the code is derived from mv643xx ethernet driver.
 *
 * Copyright (C) 2010 Marvell International Ltd.
 *		Sachin Sanap <ssanap@marvell.com>
 *		Zhangfei Gao <zgao6@marvell.com>
 *		Philip Rakity <prakity@marvell.com>
 *		Mark Brown <markb@marvell.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/pxa168_eth.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/udp.h>
#include <linux/workqueue.h>

#include <asm/pgtable.h>
#include <asm/cacheflush.h>

#define DRIVER_NAME	"pxa168-eth"
#define DRIVER_VERSION	"0.3"

/*
 * Registers
 */

#define PHY_ADDRESS		0x0000
#define SMI			0x0010
#define PORT_CONFIG		0x0400
#define PORT_CONFIG_EXT		0x0408
#define PORT_COMMAND		0x0410
#define PORT_STATUS		0x0418
#define HTPR			0x0428
#define MAC_ADDR_LOW		0x0430
#define MAC_ADDR_HIGH		0x0438
#define SDMA_CONFIG		0x0440
#define SDMA_CMD		0x0448
#define INT_CAUSE		0x0450
#define INT_W_CLEAR		0x0454
#define INT_MASK		0x0458
#define ETH_F_RX_DESC_0		0x0480
#define ETH_C_RX_DESC_0		0x04A0
#define ETH_C_TX_DESC_1		0x04E4

/* smi register */
#define SMI_BUSY		(1 << 28)	/* 0 - Write, 1 - Read  */
#define SMI_R_VALID		(1 << 27)	/* 0 - Write, 1 - Read  */
#define SMI_OP_W		(0 << 26)	/* Write operation      */
#define SMI_OP_R		(1 << 26)	/* Read operation */

#define PHY_WAIT_ITERATIONS	10

#define PXA168_ETH_PHY_ADDR_DEFAULT	0
/* RX & TX descriptor command */
#define BUF_OWNED_BY_DMA	(1 << 31)

/* RX descriptor status */
#define RX_EN_INT		(1 << 23)
#define RX_FIRST_DESC		(1 << 17)
#define RX_LAST_DESC		(1 << 16)
#define RX_ERROR		(1 << 15)

/* TX descriptor command */
#define TX_EN_INT		(1 << 23)
#define TX_GEN_CRC		(1 << 22)
#define TX_ZERO_PADDING		(1 << 18)
#define TX_FIRST_DESC		(1 << 17)
#define TX_LAST_DESC		(1 << 16)
#define TX_ERROR		(1 << 15)

/* SDMA_CMD */
#define SDMA_CMD_AT		(1 << 31)
#define SDMA_CMD_TXDL		(1 << 24)
#define SDMA_CMD_TXDH		(1 << 23)
#define SDMA_CMD_AR		(1 << 15)
#define SDMA_CMD_ERD		(1 << 7)

/* Bit definitions of the Port Config Reg */
#define PCR_DUPLEX_FULL		(1 << 15)
#define PCR_HS			(1 << 12)
#define PCR_EN			(1 << 7)
#define PCR_PM			(1 << 0)

/* Bit definitions of the Port Config Extend Reg */
#define PCXR_2BSM		(1 << 28)
#define PCXR_DSCP_EN		(1 << 21)
#define PCXR_RMII_EN		(1 << 20)
#define PCXR_AN_SPEED_DIS	(1 << 19)
#define PCXR_SPEED_100		(1 << 18)
#define PCXR_MFL_1518		(0 << 14)
#define PCXR_MFL_1536		(1 << 14)
#define PCXR_MFL_2048		(2 << 14)
#define PCXR_MFL_64K		(3 << 14)
#define PCXR_FLOWCTL_DIS	(1 << 12)
#define PCXR_FLP		(1 << 11)
#define PCXR_AN_FLOWCTL_DIS	(1 << 10)
#define PCXR_AN_DUPLEX_DIS	(1 << 9)
#define PCXR_PRIO_TX_OFF	3
#define PCXR_TX_HIGH_PRI	(7 << PCXR_PRIO_TX_OFF)

/* Bit definitions of the SDMA Config Reg */
#define SDCR_BSZ_OFF		12
#define SDCR_BSZ8		(3 << SDCR_BSZ_OFF)
#define SDCR_BSZ4		(2 << SDCR_BSZ_OFF)
#define SDCR_BSZ2		(1 << SDCR_BSZ_OFF)
#define SDCR_BSZ1		(0 << SDCR_BSZ_OFF)
#define SDCR_BLMR		(1 << 6)
#define SDCR_BLMT		(1 << 7)
#define SDCR_RIFB		(1 << 9)
#define SDCR_RC_OFF		2
#define SDCR_RC_MAX_RETRANS	(0xf << SDCR_RC_OFF)
/*
 * The bit definitions of the Interrupt Cause Reg
 * and the Interrupt Mask Reg are the same.
 */
#define ICR_RXBUF		(1 << 0)
#define ICR_TXBUF_H		(1 << 2)
#define ICR_TXBUF_L		(1 << 3)
#define ICR_TXEND_H		(1 << 6)
#define ICR_TXEND_L		(1 << 7)
#define ICR_RXERR		(1 << 8)
#define ICR_TXERR_H		(1 << 10)
#define ICR_TXERR_L		(1 << 11)
#define ICR_TX_UDR		(1 << 13)
#define ICR_MII_CH		(1 << 28)

#define ALL_INTS (ICR_TXBUF_H  | ICR_TXBUF_L  | ICR_TX_UDR |\
				ICR_TXERR_H  | ICR_TXERR_L |\
				ICR_TXEND_H  | ICR_TXEND_L |\
				ICR_RXBUF | ICR_RXERR  | ICR_MII_CH)

#define ETH_HW_IP_ALIGN		2	/* hw aligns IP header */

#define NUM_RX_DESCS		64
#define NUM_TX_DESCS		64

#define HASH_ADD		0
#define HASH_DELETE		1
#define HASH_ADDR_TABLE_SIZE	0x4000	/* 16K (1/2K address - PCR_HS == 1) */
#define HOP_NUMBER		12

/* Bit definitions for Port status */
#define PORT_SPEED_100		(1 << 0)
#define FULL_DUPLEX		(1 << 1)
#define FLOW_CONTROL_DISABLED	(1 << 2)
#define LINK_UP			(1 << 3)

/* Bit definitions for work to be done */
#define WORK_TX_DONE		(1 << 1)

/*
 * Misc definitions.
 */
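/*
 * Extra headroom reserved in rxq_refill() so that skb->data can be moved
 * onto a cache line boundary when NET_SKB_PAD itself is not a whole
 * number of cache lines (see pxa168_eth_recalc_skb_size()).
 */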
#define SKB_DMA_REALIGN		((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)

struct rx_desc {
	u32 cmd_sts;		/* Descriptor command status            */
	u16 byte_cnt;		/* Descriptor buffer byte count         */
	u16 buf_size;		/* Buffer size                          */
	u32 buf_ptr;		/* Descriptor buffer pointer            */
	u32 next_desc_ptr;	/* Next descriptor pointer              */
};

struct tx_desc {
	u32 cmd_sts;		/* Command/status field                 */
	u16 reserved;
	u16 byte_cnt;		/* buffer byte count                    */
	u32 buf_ptr;		/* pointer to buffer for this descriptor */
	u32 next_desc_ptr;	/* Pointer to next descriptor           */
};

struct pxa168_eth_private {
	struct platform_device *pdev;
	int port_num;		/* User Ethernet port number    */
	int phy_addr;
	int phy_speed;
	int phy_duplex;
	phy_interface_t phy_intf;

	int rx_resource_err;	/* Rx ring resource error flag */

	/* Next available and first returning Rx resource */
	int rx_curr_desc_q, rx_used_desc_q;

	/* Next available and first returning Tx resource */
	int tx_curr_desc_q, tx_used_desc_q;

	struct rx_desc *p_rx_desc_area;
	dma_addr_t rx_desc_dma;
	int rx_desc_area_size;
	struct sk_buff **rx_skb;

	struct tx_desc *p_tx_desc_area;
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;
	struct sk_buff **tx_skb;

	struct work_struct tx_timeout_task;

	struct net_device *dev;
	struct napi_struct napi;
	u8 work_todo;
	int skb_size;

	/* Size of Tx Ring per queue */
	int tx_ring_size;
	/* Number of tx descriptors in use */
	int tx_desc_count;
	/* Size of Rx Ring per queue */
	int rx_ring_size;
	/* Number of rx descriptors in use */
	int rx_desc_count;
	/*
	 * Used in case the RX ring is empty, which can occur when
	 * the system runs out of resources (skbs)
	 */
	struct timer_list timeout;
	struct mii_bus *smi_bus;

	/* clock */
	struct clk *clk;
	struct pxa168_eth_platform_data *pd;
	/*
	 * Ethernet controller base address.
	 */
	void __iomem *base;

	/* Pointer to the hardware address filter table */
	void *htpr;
	dma_addr_t htpr_dma;
};

struct addr_table_entry {
	__le32 lo;
	__le32 hi;
};

/* Bit fields of a Hash Table Entry */
enum hash_table_entry {
	HASH_ENTRY_VALID = 1,
	SKIP = 2,
	HASH_ENTRY_RECEIVE_DISCARD = 4,
	HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
};

static int pxa168_init_hw(struct pxa168_eth_private *pep);
static int pxa168_init_phy(struct net_device *dev);
static void eth_port_reset(struct net_device *dev);
static void eth_port_start(struct net_device *dev);
static int pxa168_eth_open(struct net_device *dev);
static int pxa168_eth_stop(struct net_device *dev);

static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
{
	return readl_relaxed(pep->base + offset);
}

static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
{
	writel_relaxed(data, pep->base + offset);
}

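/*
 * abort_dma - force the SDMA engine into the idle state.
 *
 * Write the abort-receive and abort-transmit bits, then poll until the
 * hardware clears them again, indicating the DMA channels are idle.
 * The whole sequence is retried a bounded number of times before
 * giving up and logging an error.
 */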
static void abort_dma(struct pxa168_eth_private *pep)
{
	int delay;
	int max_retries = 40;

	do {
		wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
		udelay(100);

		delay = 10;
		while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
		       && delay-- > 0) {
			udelay(10);
		}
	} while (max_retries-- > 0 && delay <= 0);

	if (max_retries <= 0)
		netdev_err(pep->dev, "%s : DMA Stuck\n", __func__);
}

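/*
 * rxq_refill - attach receive buffers to all unused RX descriptors.
 *
 * Each new skb is DMA-mapped and its descriptor handed back to the
 * hardware by setting BUF_OWNED_BY_DMA; the first dma_wmb() ensures the
 * buffer pointer and size are visible to the device before the
 * ownership bit is.
 */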
static void rxq_refill(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct sk_buff *skb;
	struct rx_desc *p_used_rx_desc;
	int used_rx_desc;

	while (pep->rx_desc_count < pep->rx_ring_size) {
		int size;

		skb = netdev_alloc_skb(dev, pep->skb_size);
		if (!skb)
			break;
		if (SKB_DMA_REALIGN)
			skb_reserve(skb, SKB_DMA_REALIGN);
		pep->rx_desc_count++;
		/* Get 'used' Rx descriptor */
		used_rx_desc = pep->rx_used_desc_q;
		p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
		size = skb_end_pointer(skb) - skb->data;
		p_used_rx_desc->buf_ptr = dma_map_single(&pep->pdev->dev,
							 skb->data,
							 size,
							 DMA_FROM_DEVICE);
		p_used_rx_desc->buf_size = size;
		pep->rx_skb[used_rx_desc] = skb;

		/* Return the descriptor to DMA ownership */
		dma_wmb();
		p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
		dma_wmb();

		/* Move the used descriptor pointer to the next descriptor */
		pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;

		/* Any Rx return cancels the Rx resource error status */
		pep->rx_resource_err = 0;

		skb_reserve(skb, ETH_HW_IP_ALIGN);
	}

	/*
	 * If the RX ring is completely out of skbs, set a timer to
	 * retry the allocation at a later time.
	 */
	if (pep->rx_desc_count == 0) {
		pep->timeout.expires = jiffies + (HZ / 10);
		add_timer(&pep->timeout);
	}
}

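/*
 * Timer callback armed by rxq_refill() when the ring could not be
 * refilled; scheduling NAPI retries the refill from pxa168_rx_poll()
 * via rxq_process().
 */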
static inline void rxq_refill_timer_wrapper(struct timer_list *t)
{
	struct pxa168_eth_private *pep = from_timer(pep, t, timeout);
	napi_schedule(&pep->napi);
}

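/* Reverse the order of the four bits within each nibble of a byte. */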
static inline u8 flip_8_bits(u8 x)
{
	return (((x) & 0x01) << 3) | (((x) & 0x02) << 1)
	    | (((x) & 0x04) >> 1) | (((x) & 0x08) >> 3)
	    | (((x) & 0x10) << 3) | (((x) & 0x20) << 1)
	    | (((x) & 0x40) >> 1) | (((x) & 0x80) >> 3);
}

static void nibble_swap_every_byte(unsigned char *mac_addr)
{
	int i;
	for (i = 0; i < ETH_ALEN; i++) {
		mac_addr[i] = ((mac_addr[i] & 0x0f) << 4) |
				((mac_addr[i] & 0xf0) >> 4);
	}
}

static void inverse_every_nibble(unsigned char *mac_addr)
{
	int i;
	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = flip_8_bits(mac_addr[i]);
}

/*
 * ----------------------------------------------------------------------------
 * Calculate the hash table index for a MAC address.
 * Inputs
 * mac_addr_orig    - MAC address.
 * Outputs
 * Returns the calculated entry: an 11-bit index (0..0x7ff) into the
 * 2048-entry 1/2K hash table.
 */
static u32 hash_function(unsigned char *mac_addr_orig)
{
	u32 hash_result;
	u32 addr0;
	u32 addr1;
	u32 addr2;
	u32 addr3;
	unsigned char mac_addr[ETH_ALEN];

	/* Make a copy of the MAC address since we are going to perform
	 * bit operations on it
	 */
	memcpy(mac_addr, mac_addr_orig, ETH_ALEN);

	nibble_swap_every_byte(mac_addr);
	inverse_every_nibble(mac_addr);

	addr0 = (mac_addr[5] >> 2) & 0x3f;
	addr1 = (mac_addr[5] & 0x03) | (((mac_addr[4] & 0x7f)) << 2);
	addr2 = ((mac_addr[4] & 0x80) >> 7) | mac_addr[3] << 1;
	addr3 = (mac_addr[2] & 0xff) | ((mac_addr[1] & 1) << 8);

	hash_result = (addr0 << 9) | (addr1 ^ addr2 ^ addr3);
	hash_result = hash_result & 0x07ff;
	return hash_result;
}

/*
 * ----------------------------------------------------------------------------
 * This function will add/delete an entry in the address table.
 * Inputs
 * pep - ethernet private data structure.
 * mac_addr - MAC address.
 * skip - if 1, skip this address. Used when deleting an entry which is
 *	  part of a chain in the hash table. We can't just delete the
 *	  entry since that would break the chain. We need to defragment
 *	  the tables from time to time.
 * rd   - 0 Discard packet upon match.
 *	- 1 Receive packet upon match.
 * Outputs
 * The address table entry is added/deleted.
 * 0 on success.
 * -ENOSPC if the table is full.
 */
static int add_del_hash_entry(struct pxa168_eth_private *pep,
			      unsigned char *mac_addr,
			      u32 rd, u32 skip, int del)
{
	struct addr_table_entry *entry, *start;
	u32 new_high;
	u32 new_low;
	u32 i;

	new_low = (((mac_addr[1] >> 4) & 0xf) << 15)
	    | (((mac_addr[1] >> 0) & 0xf) << 11)
	    | (((mac_addr[0] >> 4) & 0xf) << 7)
	    | (((mac_addr[0] >> 0) & 0xf) << 3)
	    | (((mac_addr[3] >> 4) & 0x1) << 31)
	    | (((mac_addr[3] >> 0) & 0xf) << 27)
	    | (((mac_addr[2] >> 4) & 0xf) << 23)
	    | (((mac_addr[2] >> 0) & 0xf) << 19)
	    | (skip << SKIP) | (rd << HASH_ENTRY_RECEIVE_DISCARD_BIT)
	    | HASH_ENTRY_VALID;

	new_high = (((mac_addr[5] >> 4) & 0xf) << 15)
	    | (((mac_addr[5] >> 0) & 0xf) << 11)
	    | (((mac_addr[4] >> 4) & 0xf) << 7)
	    | (((mac_addr[4] >> 0) & 0xf) << 3)
	    | (((mac_addr[3] >> 5) & 0x7) << 0);

	/*
	 * Pick the appropriate table, start scanning for free/reusable
	 * entries at the index obtained by hashing the specified MAC address
	 */
	start = pep->htpr;
	entry = start + hash_function(mac_addr);
	for (i = 0; i < HOP_NUMBER; i++) {
		if (!(le32_to_cpu(entry->lo) & HASH_ENTRY_VALID)) {
			break;
		} else {
			/* if same address put in same position */
			if (((le32_to_cpu(entry->lo) & 0xfffffff8) ==
				(new_low & 0xfffffff8)) &&
				(le32_to_cpu(entry->hi) == new_high)) {
				break;
			}
		}
		if (entry == start + 0x7ff)
			entry = start;
		else
			entry++;
	}

	if (((le32_to_cpu(entry->lo) & 0xfffffff8) != (new_low & 0xfffffff8)) &&
	    (le32_to_cpu(entry->hi) != new_high) && del)
		return 0;

	if (i == HOP_NUMBER) {
		if (!del) {
			netdev_info(pep->dev,
				    "%s: table section is full, need to "
				    "move to 16kB implementation?\n",
				    __FILE__);
			return -ENOSPC;
		} else
			return 0;
	}

	/*
	 * Update the selected entry
	 */
	if (del) {
		entry->hi = 0;
		entry->lo = 0;
	} else {
		entry->hi = cpu_to_le32(new_high);
		entry->lo = cpu_to_le32(new_low);
	}

	return 0;
}

/*
 * ----------------------------------------------------------------------------
 *  Create an address table entry from the MAC address info
 *  found in the specified net_device struct
 *
 *  Input : pointer to ethernet interface network device structure
 *  Output : N/A
 */
static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
					  unsigned char *oaddr,
					  unsigned char *addr)
{
	/* Delete old entry */
	if (oaddr)
		add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);
	/* Add new entry */
	add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
}

static int init_hash_table(struct pxa168_eth_private *pep)
{
	/*
	 * The hardware expects the CPU to build a hash table based on a
	 * predefined hash function and to populate it based on hardware
	 * addresses. The location of the hash table is identified by a
	 * 32-bit pointer stored in the HTPR internal register. Two sizes
	 * exist for the hash table: 8kB (256kB of DRAM required (4 x 64 kB
	 * banks)) and 1/2kB (16kB of DRAM required (4 x 4 kB banks)). We
	 * currently only support the 1/2kB variant.
	 */
	/* TODO: Add support for the 8kB hash table and an alternative hash
	 * function. The driver could dynamically switch to them if the
	 * 1/2kB hash table is full.
	 */
	if (!pep->htpr) {
		pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
					       HASH_ADDR_TABLE_SIZE,
					       &pep->htpr_dma, GFP_KERNEL);
		if (!pep->htpr)
			return -ENOMEM;
	} else {
		memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
	}
	wrl(pep, HTPR, pep->htpr_dma);
	return 0;
}

static void pxa168_eth_set_rx_mode(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u32 val;

	val = rdl(pep, PORT_CONFIG);
	if (dev->flags & IFF_PROMISC)
		val |= PCR_PM;
	else
		val &= ~PCR_PM;
	wrl(pep, PORT_CONFIG, val);

	/*
	 * Remove the old list of MAC addresses and re-add dev->dev_addr
	 * and the multicast addresses.
	 */
	memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
	update_hash_table_mac_address(pep, NULL, dev->dev_addr);

	netdev_for_each_mc_addr(ha, dev)
		update_hash_table_mac_address(pep, NULL, ha->addr);
}

static void pxa168_eth_get_mac_address(struct net_device *dev,
				       unsigned char *addr)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	unsigned int mac_h = rdl(pep, MAC_ADDR_HIGH);
	unsigned int mac_l = rdl(pep, MAC_ADDR_LOW);

	addr[0] = (mac_h >> 24) & 0xff;
	addr[1] = (mac_h >> 16) & 0xff;
	addr[2] = (mac_h >> 8) & 0xff;
	addr[3] = mac_h & 0xff;
	addr[4] = (mac_l >> 8) & 0xff;
	addr[5] = mac_l & 0xff;
}

static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;
	struct pxa168_eth_private *pep = netdev_priv(dev);
	unsigned char oldMac[ETH_ALEN];
	u32 mac_h, mac_l;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(oldMac, dev->dev_addr, ETH_ALEN);
	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);

	mac_h = dev->dev_addr[0] << 24;
	mac_h |= dev->dev_addr[1] << 16;
	mac_h |= dev->dev_addr[2] << 8;
	mac_h |= dev->dev_addr[3];
	mac_l = dev->dev_addr[4] << 8;
	mac_l |= dev->dev_addr[5];
	wrl(pep, MAC_ADDR_HIGH, mac_h);
	wrl(pep, MAC_ADDR_LOW, mac_l);

	netif_addr_lock_bh(dev);
	update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
	netif_addr_unlock_bh(dev);
	return 0;
}

static void eth_port_start(struct net_device *dev)
{
	unsigned int val = 0;
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int tx_curr_desc, rx_curr_desc;

	phy_start(dev->phydev);

	/* Assignment of Tx CTRP of given queue */
	tx_curr_desc = pep->tx_curr_desc_q;
	wrl(pep, ETH_C_TX_DESC_1,
	    (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));

	/* Assignment of Rx CRDP of given queue */
	rx_curr_desc = pep->rx_curr_desc_q;
	wrl(pep, ETH_C_RX_DESC_0,
	    (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));

	wrl(pep, ETH_F_RX_DESC_0,
	    (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));

	/* Clear all interrupts */
	wrl(pep, INT_CAUSE, 0);

	/* Enable all interrupts for receive, transmit and error. */
	wrl(pep, INT_MASK, ALL_INTS);

	val = rdl(pep, PORT_CONFIG);
	val |= PCR_EN;
	wrl(pep, PORT_CONFIG, val);

	/* Start RX DMA engine */
	val = rdl(pep, SDMA_CMD);
	val |= SDMA_CMD_ERD;
	wrl(pep, SDMA_CMD, val);
}

static void eth_port_reset(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	unsigned int val = 0;

	/* Stop all interrupts for receive, transmit and error. */
	wrl(pep, INT_MASK, 0);

	/* Clear all interrupts */
	wrl(pep, INT_CAUSE, 0);

	/* Stop RX DMA */
	val = rdl(pep, SDMA_CMD);
	val &= ~SDMA_CMD_ERD;	/* abort dma command */

	/* Abort any transmit and receive operations and put DMA
	 * in idle state.
	 */
	abort_dma(pep);

	/* Disable port */
	val = rdl(pep, PORT_CONFIG);
	val &= ~PCR_EN;
	wrl(pep, PORT_CONFIG, val);

	phy_stop(dev->phydev);
}

/*
 * txq_reclaim - Free the tx desc data for completed descriptors
 * If force is non-zero, frees uncompleted descriptors as well
 */
static int txq_reclaim(struct net_device *dev, int force)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct tx_desc *desc;
	u32 cmd_sts;
	struct sk_buff *skb;
	int tx_index;
	dma_addr_t addr;
	int count;
	int released = 0;

	netif_tx_lock(dev);

	pep->work_todo &= ~WORK_TX_DONE;
	while (pep->tx_desc_count > 0) {
		tx_index = pep->tx_used_desc_q;
		desc = &pep->p_tx_desc_area[tx_index];
		cmd_sts = desc->cmd_sts;
		if (!force && (cmd_sts & BUF_OWNED_BY_DMA)) {
			if (released > 0) {
				goto txq_reclaim_end;
			} else {
				released = -1;
				goto txq_reclaim_end;
			}
		}
		pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
		pep->tx_desc_count--;
		addr = desc->buf_ptr;
		count = desc->byte_cnt;
		skb = pep->tx_skb[tx_index];
		if (skb)
			pep->tx_skb[tx_index] = NULL;

		if (cmd_sts & TX_ERROR) {
			if (net_ratelimit())
				netdev_err(dev, "Error in TX\n");
			dev->stats.tx_errors++;
		}
		dma_unmap_single(&pep->pdev->dev, addr, count, DMA_TO_DEVICE);
		if (skb)
			dev_kfree_skb_irq(skb);
		released++;
	}
txq_reclaim_end:
	netif_tx_unlock(dev);
	return released;
}

static void pxa168_eth_tx_timeout(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	netdev_info(dev, "TX timeout  desc_count %d\n", pep->tx_desc_count);

	schedule_work(&pep->tx_timeout_task);
}

static void pxa168_eth_tx_timeout_task(struct work_struct *work)
{
	struct pxa168_eth_private *pep = container_of(work,
						 struct pxa168_eth_private,
						 tx_timeout_task);
	struct net_device *dev = pep->dev;
	pxa168_eth_stop(dev);
	pxa168_eth_open(dev);
}

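/*
 * rxq_process - receive path, called from NAPI poll.
 *
 * Walk the RX ring until the budget is exhausted or a descriptor still
 * owned by the DMA engine is found; hand good packets to the stack,
 * drop fragmented or errored ones, then refill the ring.
 */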
static int rxq_process(struct net_device *dev, int budget)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	unsigned int received_packets = 0;
	struct sk_buff *skb;

	while (budget-- > 0) {
		int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
		struct rx_desc *rx_desc;
		unsigned int cmd_sts;

		/* Do not process Rx ring in case of Rx ring resource error */
		if (pep->rx_resource_err)
			break;
		rx_curr_desc = pep->rx_curr_desc_q;
		rx_used_desc = pep->rx_used_desc_q;
		rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
		cmd_sts = rx_desc->cmd_sts;
		dma_rmb();
		if (cmd_sts & (BUF_OWNED_BY_DMA))
			break;
		skb = pep->rx_skb[rx_curr_desc];
		pep->rx_skb[rx_curr_desc] = NULL;

		rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
		pep->rx_curr_desc_q = rx_next_curr_desc;

		/* Rx descriptors exhausted. */
		/* Set the Rx ring resource error flag */
		if (rx_next_curr_desc == rx_used_desc)
			pep->rx_resource_err = 1;
		pep->rx_desc_count--;
		dma_unmap_single(&pep->pdev->dev, rx_desc->buf_ptr,
				 rx_desc->buf_size,
				 DMA_FROM_DEVICE);
		received_packets++;
		/*
		 * Update statistics.
		 * Note byte count includes 4 byte CRC count
		 */
		stats->rx_packets++;
		stats->rx_bytes += rx_desc->byte_cnt;
		/*
		 * If a packet was received without the first/last bits set,
		 * or with the error summary bit set, the packet needs to be
		 * dropped.
		 */
		if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
		     (RX_FIRST_DESC | RX_LAST_DESC))
		    || (cmd_sts & RX_ERROR)) {

			stats->rx_dropped++;
			if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
			    (RX_FIRST_DESC | RX_LAST_DESC)) {
				if (net_ratelimit())
					netdev_err(dev,
						   "Rx pkt on multiple desc\n");
			}
			if (cmd_sts & RX_ERROR)
				stats->rx_errors++;
			dev_kfree_skb_irq(skb);
		} else {
			/*
			 * The -4 is for the CRC in the trailer of the
			 * received packet
			 */
			skb_put(skb, rx_desc->byte_cnt - 4);
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
		}
	}
	/* Fill RX ring with skb's */
	rxq_refill(dev);
	return received_packets;
}

static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
				     struct net_device *dev)
{
	u32 icr;
	int ret = 0;

	icr = rdl(pep, INT_CAUSE);
	if (icr == 0)
		return IRQ_NONE;

	wrl(pep, INT_CAUSE, ~icr);
	if (icr & (ICR_TXBUF_H | ICR_TXBUF_L)) {
		pep->work_todo |= WORK_TX_DONE;
		ret = 1;
	}
	if (icr & ICR_RXBUF)
		ret = 1;
	return ret;
}

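/*
 * Interrupt handler: acknowledge the pending events, mask all
 * interrupts and defer the actual work to NAPI; pxa168_rx_poll()
 * re-enables the interrupts once the rings have been serviced.
 */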
static irqreturn_t pxa168_eth_int_handler(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if (unlikely(!pxa168_eth_collect_events(pep, dev)))
		return IRQ_NONE;
	/* Disable interrupts */
	wrl(pep, INT_MASK, 0);
	napi_schedule(&pep->napi);
	return IRQ_HANDLED;
}

static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
{
	int skb_size;

	/*
	 * Reserve 2+14 bytes for an ethernet header (the hardware
	 * automatically prepends 2 bytes of dummy data to each
	 * received packet), 16 bytes for up to four VLAN tags, and
	 * 4 bytes for the trailing FCS -- 36 bytes total.
	 */
	skb_size = pep->dev->mtu + 36;

	/*
	 * Make sure that the skb size is a multiple of 8 bytes, as
	 * the lower three bits of the receive descriptor's buffer
	 * size field are ignored by the hardware.
	 */
	pep->skb_size = (skb_size + 7) & ~7;

	/*
	 * If NET_SKB_PAD is smaller than a cache line,
	 * netdev_alloc_skb() will cause skb->data to be misaligned
	 * to a cache line boundary.  If this is the case, include
	 * some extra space to allow re-aligning the data area.
	 */
	pep->skb_size += SKB_DMA_REALIGN;
}

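/*
 * Program the extended port configuration, picking the smallest maximum
 * frame length setting that still fits the current receive buffer size.
 */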
static int set_port_config_ext(struct pxa168_eth_private *pep)
{
	int skb_size;

	pxa168_eth_recalc_skb_size(pep);
	if  (pep->skb_size <= 1518)
		skb_size = PCXR_MFL_1518;
	else if (pep->skb_size <= 1536)
		skb_size = PCXR_MFL_1536;
	else if (pep->skb_size <= 2048)
		skb_size = PCXR_MFL_2048;
	else
		skb_size = PCXR_MFL_64K;

	/* Extended Port Configuration */
	wrl(pep, PORT_CONFIG_EXT,
	    PCXR_AN_SPEED_DIS |		 /* Disable HW AN */
	    PCXR_AN_DUPLEX_DIS |
	    PCXR_AN_FLOWCTL_DIS |
	    PCXR_2BSM |			 /* Two byte prefix aligns IP hdr */
	    PCXR_DSCP_EN |		 /* Enable DSCP in IP */
	    skb_size | PCXR_FLP |	 /* do not force link pass */
	    PCXR_TX_HIGH_PRI);		 /* Transmit - high priority queue */

	return 0;
}

static void pxa168_eth_adjust_link(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct phy_device *phy = dev->phydev;
	u32 cfg, cfg_o = rdl(pep, PORT_CONFIG);
	u32 cfgext, cfgext_o = rdl(pep, PORT_CONFIG_EXT);

	cfg = cfg_o & ~PCR_DUPLEX_FULL;
	cfgext = cfgext_o & ~(PCXR_SPEED_100 | PCXR_FLOWCTL_DIS | PCXR_RMII_EN);

	if (phy->interface == PHY_INTERFACE_MODE_RMII)
		cfgext |= PCXR_RMII_EN;
	if (phy->speed == SPEED_100)
		cfgext |= PCXR_SPEED_100;
	if (phy->duplex)
		cfg |= PCR_DUPLEX_FULL;
	if (!phy->pause)
		cfgext |= PCXR_FLOWCTL_DIS;

	/* Bail out if nothing has changed */
	if (cfg == cfg_o && cfgext == cfgext_o)
		return;

	wrl(pep, PORT_CONFIG, cfg);
	wrl(pep, PORT_CONFIG_EXT, cfgext);

	phy_print_status(phy);
}

static int pxa168_init_phy(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct ethtool_link_ksettings cmd;
	struct phy_device *phy = NULL;
	int err;

	if (dev->phydev)
		return 0;

	phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	err = phy_connect_direct(dev, phy, pxa168_eth_adjust_link,
				 pep->phy_intf);
	if (err)
		return err;

	cmd.base.phy_address = pep->phy_addr;
	cmd.base.speed = pep->phy_speed;
	cmd.base.duplex = pep->phy_duplex;
	bitmap_copy(cmd.link_modes.advertising, PHY_BASIC_FEATURES,
		    __ETHTOOL_LINK_MODE_MASK_NBITS);
	cmd.base.autoneg = AUTONEG_ENABLE;

	if (cmd.base.speed != 0)
		cmd.base.autoneg = AUTONEG_DISABLE;

	return phy_ethtool_set_link_ksettings(dev, &cmd);
}

static int pxa168_init_hw(struct pxa168_eth_private *pep)
{
	int err = 0;

	/* Disable interrupts */
	wrl(pep, INT_MASK, 0);
	wrl(pep, INT_CAUSE, 0);
	/* Write to ICR to clear interrupts. */
	wrl(pep, INT_W_CLEAR, 0);
	/* Abort any transmit and receive operations and put DMA
	 * in idle state.
	 */
	abort_dma(pep);
	/* Initialize address hash table */
	err = init_hash_table(pep);
	if (err)
		return err;
	/* SDMA configuration */
	wrl(pep, SDMA_CONFIG, SDCR_BSZ8 |	/* Burst size = 32 bytes */
	    SDCR_RIFB |				/* Rx interrupt on frame */
	    SDCR_BLMT |				/* Little endian transmit */
	    SDCR_BLMR |				/* Little endian receive */
	    SDCR_RC_MAX_RETRANS);		/* Max retransmit count */
	/* Port Configuration */
	wrl(pep, PORT_CONFIG, PCR_HS);		/* Hash size is 1/2kb */
	set_port_config_ext(pep);

	return err;
}

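/*
 * rxq_init - allocate the RX descriptor ring.
 *
 * The descriptors live in coherent DMA memory and are chained into a
 * ring through their next_desc_ptr fields; a parallel kcalloc'ed array
 * tracks the skb attached to each slot.
 */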
static int rxq_init(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct rx_desc *p_rx_desc;
	int size = 0, i = 0;
	int rx_desc_num = pep->rx_ring_size;

	/* Allocate RX skb rings */
	pep->rx_skb = kcalloc(rx_desc_num, sizeof(*pep->rx_skb), GFP_KERNEL);
	if (!pep->rx_skb)
		return -ENOMEM;

	/* Allocate RX ring */
	pep->rx_desc_count = 0;
	size = pep->rx_ring_size * sizeof(struct rx_desc);
	pep->rx_desc_area_size = size;
	pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
						 &pep->rx_desc_dma,
						 GFP_KERNEL);
	if (!pep->p_rx_desc_area)
		goto out;

	/* initialize the next_desc_ptr links in the Rx descriptors ring */
	p_rx_desc = pep->p_rx_desc_area;
	for (i = 0; i < rx_desc_num; i++) {
		p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
		    ((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
	}
	/* Save Rx desc pointer to driver struct. */
	pep->rx_curr_desc_q = 0;
	pep->rx_used_desc_q = 0;
	pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
	return 0;
out:
	kfree(pep->rx_skb);
	return -ENOMEM;
}

static void rxq_deinit(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int curr;

	/* Free preallocated skb's on RX rings */
	for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
		if (pep->rx_skb[curr]) {
			dev_kfree_skb(pep->rx_skb[curr]);
			pep->rx_desc_count--;
		}
	}
	if (pep->rx_desc_count)
		netdev_err(dev, "Error in freeing Rx Ring. %d skb's still\n",
			   pep->rx_desc_count);
	/* Free RX ring */
	if (pep->p_rx_desc_area)
		dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
				  pep->p_rx_desc_area, pep->rx_desc_dma);
	kfree(pep->rx_skb);
}

static int txq_init(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct tx_desc *p_tx_desc;
	int size = 0, i = 0;
	int tx_desc_num = pep->tx_ring_size;

	pep->tx_skb = kcalloc(tx_desc_num, sizeof(*pep->tx_skb), GFP_KERNEL);
	if (!pep->tx_skb)
		return -ENOMEM;

	/* Allocate TX ring */
	pep->tx_desc_count = 0;
	size = pep->tx_ring_size * sizeof(struct tx_desc);
	pep->tx_desc_area_size = size;
	pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
						 &pep->tx_desc_dma,
						 GFP_KERNEL);
	if (!pep->p_tx_desc_area)
		goto out;
	/* Initialize the next_desc_ptr links in the Tx descriptors ring */
	p_tx_desc = pep->p_tx_desc_area;
	for (i = 0; i < tx_desc_num; i++) {
		p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
		    ((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
	}
	pep->tx_curr_desc_q = 0;
	pep->tx_used_desc_q = 0;
	pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
	return 0;
out:
	kfree(pep->tx_skb);
	return -ENOMEM;
}

static void txq_deinit(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);

	/* Free outstanding skb's on TX ring */
	txq_reclaim(dev, 1);
	BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
	/* Free TX ring */
	if (pep->p_tx_desc_area)
		dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
				  pep->p_tx_desc_area, pep->tx_desc_dma);
	kfree(pep->tx_skb);
}

static int pxa168_eth_open(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	int err;

	err = pxa168_init_phy(dev);
	if (err)
		return err;

	err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev);
	if (err) {
		dev_err(&dev->dev, "can't assign irq\n");
		return -EAGAIN;
	}
	pep->rx_resource_err = 0;
	err = rxq_init(dev);
	if (err != 0)
		goto out_free_irq;
	err = txq_init(dev);
	if (err != 0)
		goto out_free_rx_skb;
	pep->rx_used_desc_q = 0;
	pep->rx_curr_desc_q = 0;

	/* Fill RX ring with skb's */
	rxq_refill(dev);
	pep->rx_used_desc_q = 0;
	pep->rx_curr_desc_q = 0;
	netif_carrier_off(dev);
	napi_enable(&pep->napi);
	eth_port_start(dev);
	return 0;
out_free_rx_skb:
	rxq_deinit(dev);
out_free_irq:
	free_irq(dev->irq, dev);
	return err;
}

static int pxa168_eth_stop(struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	eth_port_reset(dev);

	/* Disable interrupts */
	wrl(pep, INT_MASK, 0);
	wrl(pep, INT_CAUSE, 0);
	/* Write to ICR to clear interrupts. */
	wrl(pep, INT_W_CLEAR, 0);
	napi_disable(&pep->napi);
	del_timer_sync(&pep->timeout);
	netif_carrier_off(dev);
	free_irq(dev->irq, dev);
	rxq_deinit(dev);
	txq_deinit(dev);

	return 0;
}

static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
{
	int retval;
	struct pxa168_eth_private *pep = netdev_priv(dev);

	dev->mtu = mtu;
	retval = set_port_config_ext(pep);

	if (!netif_running(dev))
		return 0;

	/*
	 * Stop and then re-open the interface. This will allocate RX
	 * skbs of the new MTU.
	 * There is a possible danger that the open will not succeed,
	 * due to memory being full.
	 */
	pxa168_eth_stop(dev);
	if (pxa168_eth_open(dev)) {
		dev_err(&dev->dev,
			"fatal error on re-opening device after MTU change\n");
	}

	return 0;
}

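/*
 * Claim the next free TX descriptor index and advance the current
 * pointer. The ring must never be allowed to fill up completely, hence
 * the BUG_ON() if the current pointer catches up with the reclaim
 * pointer.
 */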
static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
{
	int tx_desc_curr;

	tx_desc_curr = pep->tx_curr_desc_q;
	pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
	BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);
	pep->tx_desc_count++;

	return tx_desc_curr;
}

static int pxa168_rx_poll(struct napi_struct *napi, int budget)
{
	struct pxa168_eth_private *pep =
	    container_of(napi, struct pxa168_eth_private, napi);
	struct net_device *dev = pep->dev;
	int work_done = 0;

	/*
	 * We call txq_reclaim on every poll, since while NAPI is running
	 * interrupts are disabled and we would otherwise miss the TX_DONE
	 * interrupt, which is not updated in the interrupt status register.
	 */
	txq_reclaim(dev, 0);
	if (netif_queue_stopped(dev)
	    && pep->tx_ring_size - pep->tx_desc_count > 1) {
		netif_wake_queue(dev);
	}
	work_done = rxq_process(dev, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		wrl(pep, INT_MASK, ALL_INTS);
	}

	return work_done;
}

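/*
 * Transmit path: every packet occupies exactly one descriptor (no
 * scatter-gather), marked both FIRST and LAST, with CRC generation and
 * zero padding done by the hardware. The dma_wmb() orders the buffer
 * pointer and length against the ownership bit, and the following wmb()
 * orders the descriptor update against the doorbell write to SDMA_CMD.
 */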
static netdev_tx_t
pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pxa168_eth_private *pep = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct tx_desc *desc;
	int tx_index;
	int length;

	tx_index = eth_alloc_tx_desc_index(pep);
	desc = &pep->p_tx_desc_area[tx_index];
	length = skb->len;
	pep->tx_skb[tx_index] = skb;
	desc->byte_cnt = length;
	desc->buf_ptr = dma_map_single(&pep->pdev->dev, skb->data, length,
					DMA_TO_DEVICE);

	skb_tx_timestamp(skb);

	dma_wmb();
	desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
			TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
	wmb();
	wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);

	stats->tx_bytes += length;
	stats->tx_packets++;
	netif_trans_update(dev);
	if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
		/* We handled the current skb, but now we are out of space.*/
		netif_stop_queue(dev);
	}

	return NETDEV_TX_OK;
}

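/*
 * SMI (MDIO) access helpers. Judging from the accessors below, the SMI
 * register packs the PHY address into bits 20:16, the register number
 * into bits 25:21 and read/write data into bits 15:0, with SMI_OP_R /
 * SMI_OP_W selecting the operation, SMI_BUSY flagging a transaction in
 * flight and SMI_R_VALID flagging completed read data.
 */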
static int smi_wait_ready(struct pxa168_eth_private *pep)
{
	int i = 0;

	/* wait for the SMI register to become available */
	for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
		if (i == PHY_WAIT_ITERATIONS)
			return -ETIMEDOUT;
		msleep(10);
	}

	return 0;
}

static int pxa168_smi_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct pxa168_eth_private *pep = bus->priv;
	int i = 0;
	int val;

	if (smi_wait_ready(pep)) {
		netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}
	wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
	/* now wait for the data to be valid */
	for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
		if (i == PHY_WAIT_ITERATIONS) {
			netdev_warn(pep->dev,
				    "pxa168_eth: SMI bus read not valid\n");
			return -ENODEV;
		}
		msleep(10);
	}

	return val & 0xffff;
}

static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
			    u16 value)
{
	struct pxa168_eth_private *pep = bus->priv;

	if (smi_wait_ready(pep)) {
		netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
	    SMI_OP_W | (value & 0xffff));

	if (smi_wait_ready(pep)) {
		netdev_err(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
			       int cmd)
{
	if (dev->phydev)
		return phy_mii_ioctl(dev->phydev, ifr, cmd);

	return -EOPNOTSUPP;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void pxa168_eth_netpoll(struct net_device *dev)
{
	disable_irq(dev->irq);
	pxa168_eth_int_handler(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static void pxa168_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}

static const struct ethtool_ops pxa168_ethtool_ops = {
	.get_drvinfo	= pxa168_get_drvinfo,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static const struct net_device_ops pxa168_eth_netdev_ops = {
	.ndo_open		= pxa168_eth_open,
	.ndo_stop		= pxa168_eth_stop,
	.ndo_start_xmit		= pxa168_eth_start_xmit,
	.ndo_set_rx_mode	= pxa168_eth_set_rx_mode,
	.ndo_set_mac_address	= pxa168_eth_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= pxa168_eth_do_ioctl,
	.ndo_change_mtu		= pxa168_eth_change_mtu,
	.ndo_tx_timeout		= pxa168_eth_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = pxa168_eth_netpoll,
#endif
};

static int pxa168_eth_probe(struct platform_device *pdev)
{
	struct pxa168_eth_private *pep = NULL;
	struct net_device *dev = NULL;
	struct resource *res;
	struct clk *clk;
	struct device_node *np;
	const unsigned char *mac_addr = NULL;
	int err;

	printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n");

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "Fast Ethernet failed to get clock\n");
		return -ENODEV;
	}
	clk_prepare_enable(clk);

	dev = alloc_etherdev(sizeof(struct pxa168_eth_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_clk;
	}

	platform_set_drvdata(pdev, dev);
	pep = netdev_priv(dev);
	pep->dev = dev;
	pep->clk = clk;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pep->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pep->base)) {
		err = -ENOMEM;
		goto err_netdev;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	BUG_ON(!res);
	dev->irq = res->start;
	dev->netdev_ops = &pxa168_eth_netdev_ops;
	dev->watchdog_timeo = 2 * HZ;
	dev->base_addr = 0;
	dev->ethtool_ops = &pxa168_ethtool_ops;

	/* MTU range: 68 - 9500 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = 9500;

	INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);

	if (pdev->dev.of_node)
		mac_addr = of_get_mac_address(pdev->dev.of_node);

	if (!IS_ERR_OR_NULL(mac_addr)) {
		ether_addr_copy(dev->dev_addr, mac_addr);
	} else {
		/* try reading the mac address, if set by the bootloader */
		pxa168_eth_get_mac_address(dev, dev->dev_addr);
		if (!is_valid_ether_addr(dev->dev_addr)) {
			dev_info(&pdev->dev, "Using random mac address\n");
			eth_hw_addr_random(dev);
		}
	}

	pep->rx_ring_size = NUM_RX_DESCS;
	pep->tx_ring_size = NUM_TX_DESCS;

	pep->pd = dev_get_platdata(&pdev->dev);
	if (pep->pd) {
		if (pep->pd->rx_queue_size)
			pep->rx_ring_size = pep->pd->rx_queue_size;

		if (pep->pd->tx_queue_size)
			pep->tx_ring_size = pep->pd->tx_queue_size;

		pep->port_num = pep->pd->port_number;
		pep->phy_addr = pep->pd->phy_addr;
		pep->phy_speed = pep->pd->speed;
		pep->phy_duplex = pep->pd->duplex;
		pep->phy_intf = pep->pd->intf;

		if (pep->pd->init)
			pep->pd->init();
	} else if (pdev->dev.of_node) {
		of_property_read_u32(pdev->dev.of_node, "port-id",
				     &pep->port_num);

		np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
		if (!np) {
			dev_err(&pdev->dev, "missing phy-handle\n");
			err = -EINVAL;
			goto err_netdev;
		}
		of_property_read_u32(np, "reg", &pep->phy_addr);
		pep->phy_intf = of_get_phy_mode(pdev->dev.of_node);
		of_node_put(np);
	}

	/* Hardware supports only 3 ports */
	BUG_ON(pep->port_num > 2);
	netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);

	memset(&pep->timeout, 0, sizeof(struct timer_list));
	timer_setup(&pep->timeout, rxq_refill_timer_wrapper, 0);

	pep->smi_bus = mdiobus_alloc();
	if (!pep->smi_bus) {
		err = -ENOMEM;
		goto err_netdev;
	}
	pep->smi_bus->priv = pep;
	pep->smi_bus->name = "pxa168_eth smi";
	pep->smi_bus->read = pxa168_smi_read;
	pep->smi_bus->write = pxa168_smi_write;
	snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d",
		pdev->name, pdev->id);
	pep->smi_bus->parent = &pdev->dev;
	pep->smi_bus->phy_mask = 0xffffffff;
	err = mdiobus_register(pep->smi_bus);
	if (err)
		goto err_free_mdio;

	pep->pdev = pdev;
	SET_NETDEV_DEV(dev, &pdev->dev);
	pxa168_init_hw(pep);
	err = register_netdev(dev);
	if (err)
		goto err_mdiobus;
	return 0;

err_mdiobus:
	mdiobus_unregister(pep->smi_bus);
err_free_mdio:
	mdiobus_free(pep->smi_bus);
err_netdev:
	free_netdev(dev);
err_clk:
	clk_disable_unprepare(clk);
	return err;
}

static int pxa168_eth_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if (pep->htpr) {
		dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
				  pep->htpr, pep->htpr_dma);
		pep->htpr = NULL;
	}
	if (dev->phydev)
		phy_disconnect(dev->phydev);
	if (pep->clk) {
		clk_disable_unprepare(pep->clk);
	}

	mdiobus_unregister(pep->smi_bus);
	mdiobus_free(pep->smi_bus);
	unregister_netdev(dev);
	cancel_work_sync(&pep->tx_timeout_task);
	free_netdev(dev);
	return 0;
}

static void pxa168_eth_shutdown(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	eth_port_reset(dev);
}

#ifdef CONFIG_PM
static int pxa168_eth_resume(struct platform_device *pdev)
{
	return -ENOSYS;
}

static int pxa168_eth_suspend(struct platform_device *pdev, pm_message_t state)
{
	return -ENOSYS;
}

#else
#define pxa168_eth_resume NULL
#define pxa168_eth_suspend NULL
#endif

static const struct of_device_id pxa168_eth_of_match[] = {
	{ .compatible = "marvell,pxa168-eth" },
	{ },
};
MODULE_DEVICE_TABLE(of, pxa168_eth_of_match);

static struct platform_driver pxa168_eth_driver = {
	.probe = pxa168_eth_probe,
	.remove = pxa168_eth_remove,
	.shutdown = pxa168_eth_shutdown,
	.resume = pxa168_eth_resume,
	.suspend = pxa168_eth_suspend,
	.driver = {
		.name		= DRIVER_NAME,
		.of_match_table	= of_match_ptr(pxa168_eth_of_match),
	},
};

module_platform_driver(pxa168_eth_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
MODULE_ALIAS("platform:pxa168_eth");