1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 1999 - 2010 Intel Corporation.
4  * Copyright (C) 2010 - 2012 LAPIS SEMICONDUCTOR CO., LTD.
5  *
6  * This code was derived from the Intel e1000e Linux driver.
7  */
8 
9 #include "pch_gbe.h"
10 #include "pch_gbe_phy.h"
11 
12 #include <linux/gpio/consumer.h>
13 #include <linux/gpio/machine.h>
14 #include <linux/iopoll.h>
15 #include <linux/module.h>
16 #include <linux/net_tstamp.h>
17 #include <linux/ptp_classify.h>
18 #include <linux/ptp_pch.h>
19 #include <linux/gpio.h>
20 
21 #define PCH_GBE_MAR_ENTRIES		16
22 #define PCH_GBE_SHORT_PKT		64
23 #define DSC_INIT16			0xC000
24 #define PCH_GBE_DMA_ALIGN		0
25 #define PCH_GBE_DMA_PADDING		2
26 #define PCH_GBE_WATCHDOG_PERIOD		(5 * HZ)	/* watchdog time */
27 #define PCH_GBE_PCI_BAR			1
28 #define PCH_GBE_RESERVE_MEMORY		0x200000	/* 2MB */
29 
30 #define PCI_DEVICE_ID_INTEL_IOH1_GBE		0x8802
31 
32 #define PCI_DEVICE_ID_ROHM_ML7223_GBE		0x8013
33 #define PCI_DEVICE_ID_ROHM_ML7831_GBE		0x8802
34 
35 #define PCH_GBE_TX_WEIGHT         64
36 #define PCH_GBE_RX_WEIGHT         64
37 #define PCH_GBE_RX_BUFFER_WRITE   16
38 
39 /* Initialize the wake-on-LAN settings */
40 #define PCH_GBE_WL_INIT_SETTING    (PCH_GBE_WLC_MP)
41 
42 #define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
43 	PCH_GBE_CHIP_TYPE_INTERNAL | \
44 	PCH_GBE_RGMII_MODE_RGMII     \
45 	)
46 
/* Frame and receive buffer sizes */
48 #define PCH_GBE_MAX_RX_BUFFER_SIZE      0x2880
49 #define PCH_GBE_MAX_JUMBO_FRAME_SIZE    10318
50 #define PCH_GBE_FRAME_SIZE_2048         2048
51 #define PCH_GBE_FRAME_SIZE_4096         4096
52 #define PCH_GBE_FRAME_SIZE_8192         8192
53 
54 #define PCH_GBE_GET_DESC(R, i, type)    (&(((struct type *)((R).desc))[i]))
55 #define PCH_GBE_RX_DESC(R, i)           PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
56 #define PCH_GBE_TX_DESC(R, i)           PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
57 #define PCH_GBE_DESC_UNUSED(R) \
58 	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
59 	(R)->next_to_clean - (R)->next_to_use - 1)
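/*
 * Illustrative example of PCH_GBE_DESC_UNUSED: with count = 8,
 * next_to_clean = 2 and next_to_use = 6 it evaluates to 8 + 2 - 6 - 1 = 3.
 * One slot is always left unused so that next_to_use == next_to_clean can
 * only mean "ring empty".
 */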
60 
61 /* Pause packet value */
62 #define	PCH_GBE_PAUSE_PKT1_VALUE    0x00C28001
63 #define	PCH_GBE_PAUSE_PKT2_VALUE    0x00000100
64 #define	PCH_GBE_PAUSE_PKT4_VALUE    0x01000888
65 #define	PCH_GBE_PAUSE_PKT5_VALUE    0x0000FFFF
66 
67 
/* This defines the bits that are set in the Interrupt Mask
 * Set/Read Register.  Each bit is documented below:
 *   o RX_DMA_CMPLT = Receive DMA completed
 *   o RX_DSC_EMP   = Receive descriptor ring empty
 *   o RX_FIFO_ERR  = Receive FIFO overrun
 *   o WOL_DET      = Wake-on-LAN event detected
 *   o TX_CMPLT     = Transmit completed
 */
76 #define PCH_GBE_INT_ENABLE_MASK ( \
77 	PCH_GBE_INT_RX_DMA_CMPLT |    \
78 	PCH_GBE_INT_RX_DSC_EMP   |    \
79 	PCH_GBE_INT_RX_FIFO_ERR  |    \
80 	PCH_GBE_INT_WOL_DET      |    \
81 	PCH_GBE_INT_TX_CMPLT          \
82 	)
83 
84 #define PCH_GBE_INT_DISABLE_ALL		0
85 
86 /* Macros for ieee1588 */
87 /* 0x40 Time Synchronization Channel Control Register Bits */
88 #define MASTER_MODE   (1<<0)
89 #define SLAVE_MODE    (0)
90 #define V2_MODE       (1<<31)
91 #define CAP_MODE0     (0)
92 #define CAP_MODE2     (1<<17)
93 
94 /* 0x44 Time Synchronization Channel Event Register Bits */
95 #define TX_SNAPSHOT_LOCKED (1<<0)
96 #define RX_SNAPSHOT_LOCKED (1<<1)
97 
98 #define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
99 #define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
100 
101 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
102 static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
103 			       int data);
104 static void pch_gbe_set_multi(struct net_device *netdev);
105 
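/*
 * pch_ptp_match - check whether an skb is the PTP frame whose source UUID and
 * sequence ID were latched by the IEEE 1588 block, so a hardware timestamp is
 * only attached to the packet it belongs to.  The offsets used below assume
 * an untagged IPv4/UDP (L4) PTP event message.
 */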
106 static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
107 {
108 	u8 *data = skb->data;
109 	unsigned int offset;
110 	u16 hi, id;
111 	u32 lo;
112 
113 	if (ptp_classify_raw(skb) == PTP_CLASS_NONE)
114 		return 0;
115 
116 	offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
117 
118 	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
119 		return 0;
120 
121 	hi = get_unaligned_be16(data + offset + OFF_PTP_SOURCE_UUID + 0);
122 	lo = get_unaligned_be32(data + offset + OFF_PTP_SOURCE_UUID + 2);
123 	id = get_unaligned_be16(data + offset + OFF_PTP_SEQUENCE_ID);
124 
125 	return (uid_hi == hi && uid_lo == lo && seqid == id);
126 }
127 
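/*
 * pch_rx_timestamp - if the IEEE 1588 block has latched an Rx timestamp and
 * it matches this skb, attach it as the hardware timestamp.  The
 * RX_SNAPSHOT_LOCKED event is always cleared so the block can capture the
 * next frame.
 */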
128 static void
129 pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
130 {
131 	struct skb_shared_hwtstamps *shhwtstamps;
132 	struct pci_dev *pdev;
133 	u64 ns;
134 	u32 hi, lo, val;
135 
136 	if (!adapter->hwts_rx_en)
137 		return;
138 
	/* Get the IEEE 1588 device */
140 	pdev = adapter->ptp_pdev;
141 
142 	val = pch_ch_event_read(pdev);
143 
144 	if (!(val & RX_SNAPSHOT_LOCKED))
145 		return;
146 
147 	lo = pch_src_uuid_lo_read(pdev);
148 	hi = pch_src_uuid_hi_read(pdev);
149 
150 	if (!pch_ptp_match(skb, hi, lo, hi >> 16))
151 		goto out;
152 
153 	ns = pch_rx_snap_read(pdev);
154 
155 	shhwtstamps = skb_hwtstamps(skb);
156 	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
157 	shhwtstamps->hwtstamp = ns_to_ktime(ns);
158 out:
159 	pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED);
160 }
161 
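/*
 * pch_tx_timestamp - poll the IEEE 1588 block (up to roughly 100 us) for the
 * Tx timestamp of an skb that requested hardware timestamping, report it via
 * skb_tstamp_tx() and clear the TX_SNAPSHOT_LOCKED event.
 */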
162 static void
163 pch_tx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
164 {
165 	struct skb_shared_hwtstamps shhwtstamps;
166 	struct pci_dev *pdev;
167 	struct skb_shared_info *shtx;
168 	u64 ns;
169 	u32 cnt, val;
170 
171 	shtx = skb_shinfo(skb);
172 	if (likely(!(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en)))
173 		return;
174 
175 	shtx->tx_flags |= SKBTX_IN_PROGRESS;
176 
	/* Get the IEEE 1588 device */
178 	pdev = adapter->ptp_pdev;
179 
180 	/*
181 	 * This really stinks, but we have to poll for the Tx time stamp.
182 	 */
183 	for (cnt = 0; cnt < 100; cnt++) {
184 		val = pch_ch_event_read(pdev);
185 		if (val & TX_SNAPSHOT_LOCKED)
186 			break;
187 		udelay(1);
188 	}
189 	if (!(val & TX_SNAPSHOT_LOCKED)) {
190 		shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
191 		return;
192 	}
193 
194 	ns = pch_tx_snap_read(pdev);
195 
196 	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
197 	shhwtstamps.hwtstamp = ns_to_ktime(ns);
198 	skb_tstamp_tx(skb, &shhwtstamps);
199 
200 	pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED);
201 }
202 
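/*
 * hwtstamp_ioctl - SIOCSHWTSTAMP handler: program the IEEE 1588 block's
 * capture mode (and, for PTPv2 filters, the multicast station address it
 * matches on) according to the requested rx_filter, and record the Tx/Rx
 * timestamping enables.
 */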
203 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
204 {
205 	struct hwtstamp_config cfg;
206 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
207 	struct pci_dev *pdev;
208 	u8 station[20];
209 
210 	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
211 		return -EFAULT;
212 
	/* Get the IEEE 1588 device */
214 	pdev = adapter->ptp_pdev;
215 
216 	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
217 		return -ERANGE;
218 
219 	switch (cfg.rx_filter) {
220 	case HWTSTAMP_FILTER_NONE:
221 		adapter->hwts_rx_en = 0;
222 		break;
223 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
224 		adapter->hwts_rx_en = 0;
225 		pch_ch_control_write(pdev, SLAVE_MODE | CAP_MODE0);
226 		break;
227 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
228 		adapter->hwts_rx_en = 1;
229 		pch_ch_control_write(pdev, MASTER_MODE | CAP_MODE0);
230 		break;
231 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
232 		adapter->hwts_rx_en = 1;
233 		pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
234 		strcpy(station, PTP_L4_MULTICAST_SA);
235 		pch_set_station_address(station, pdev);
236 		break;
237 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
238 		adapter->hwts_rx_en = 1;
239 		pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
240 		strcpy(station, PTP_L2_MULTICAST_SA);
241 		pch_set_station_address(station, pdev);
242 		break;
243 	default:
244 		return -ERANGE;
245 	}
246 
247 	adapter->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;
248 
249 	/* Clear out any old time stamps. */
250 	pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED);
251 
252 	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
253 }
254 
255 static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
256 {
257 	iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
258 }
259 
260 /**
261  * pch_gbe_mac_read_mac_addr - Read MAC address
262  * @hw:	            Pointer to the HW structure
263  * Returns:
264  *	0:			Successful.
265  */
266 static s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
267 {
268 	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
269 	u32  adr1a, adr1b;
270 
271 	adr1a = ioread32(&hw->reg->mac_adr[0].high);
272 	adr1b = ioread32(&hw->reg->mac_adr[0].low);
273 
274 	hw->mac.addr[0] = (u8)(adr1a & 0xFF);
275 	hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
276 	hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
277 	hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
278 	hw->mac.addr[4] = (u8)(adr1b & 0xFF);
279 	hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);
280 
281 	netdev_dbg(adapter->netdev, "hw->mac.addr : %pM\n", hw->mac.addr);
282 	return 0;
283 }
284 
285 /**
286  * pch_gbe_wait_clr_bit - Wait to clear a bit
 * @reg:	Pointer to the register
288  * @bit:	Busy bit
289  */
290 static void pch_gbe_wait_clr_bit(void __iomem *reg, u32 bit)
291 {
292 	u32 tmp;
293 
294 	/* wait busy */
295 	if (readx_poll_timeout_atomic(ioread32, reg, tmp, !(tmp & bit), 0, 10))
296 		pr_err("Error: busy bit is not cleared\n");
297 }
298 
299 /**
300  * pch_gbe_mac_mar_set - Set MAC address register
301  * @hw:	    Pointer to the HW structure
302  * @addr:   Pointer to the MAC address
 * @index:  Index into the MAC address register array
304  */
static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 *addr, u32 index)
306 {
307 	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
308 	u32 mar_low, mar_high, adrmask;
309 
310 	netdev_dbg(adapter->netdev, "index : 0x%x\n", index);
311 
312 	/*
313 	 * HW expects these in little endian so we reverse the byte order
314 	 * from network order (big endian) to little endian
315 	 */
316 	mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
317 		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
318 	mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
	/* Disable the MAC address at this index while it is updated */
320 	adrmask = ioread32(&hw->reg->ADDR_MASK);
321 	iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
322 	/* wait busy */
323 	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
324 	/* Set the MAC address to the MAC address 1A/1B register */
325 	iowrite32(mar_high, &hw->reg->mac_adr[index].high);
326 	iowrite32(mar_low, &hw->reg->mac_adr[index].low);
	/* Re-enable the MAC address at this index */
328 	iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
329 	/* wait busy */
330 	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
331 }
332 
333 /**
334  * pch_gbe_mac_reset_hw - Reset hardware
335  * @hw:	Pointer to the HW structure
336  */
337 static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
338 {
	/* Read the MAC address and store it in the private data */
340 	pch_gbe_mac_read_mac_addr(hw);
341 	iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
342 	iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
343 	pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
344 	/* Setup the receive addresses */
345 	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
}
348 
349 static void pch_gbe_disable_mac_rx(struct pch_gbe_hw *hw)
350 {
351 	u32 rctl;
352 	/* Disables Receive MAC */
353 	rctl = ioread32(&hw->reg->MAC_RX_EN);
354 	iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
355 }
356 
357 static void pch_gbe_enable_mac_rx(struct pch_gbe_hw *hw)
358 {
359 	u32 rctl;
360 	/* Enables Receive MAC */
361 	rctl = ioread32(&hw->reg->MAC_RX_EN);
362 	iowrite32((rctl | PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
363 }
364 
365 /**
 * pch_gbe_mac_init_rx_addrs - Initialize receive addresses
367  * @hw:	Pointer to the HW structure
 * @mar_count: Number of receive address registers
369  */
370 static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
371 {
372 	u32 i;
373 
374 	/* Setup the receive address */
375 	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
376 
377 	/* Zero out the other receive addresses */
378 	for (i = 1; i < mar_count; i++) {
379 		iowrite32(0, &hw->reg->mac_adr[i].high);
380 		iowrite32(0, &hw->reg->mac_adr[i].low);
381 	}
382 	iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
383 	/* wait busy */
384 	pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
385 }
386 
387 /**
388  * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
389  * @hw:	            Pointer to the HW structure
390  * Returns:
391  *	0:			Successful.
392  *	Negative value:		Failed.
393  */
394 s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
395 {
396 	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
397 	struct pch_gbe_mac_info *mac = &hw->mac;
398 	u32 rx_fctrl;
399 
400 	netdev_dbg(adapter->netdev, "mac->fc = %u\n", mac->fc);
401 
402 	rx_fctrl = ioread32(&hw->reg->RX_FCTRL);
403 
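	/*
	 * PCH_GBE_FL_CTRL_EN controls the MAC's Rx flow-control handling.
	 * tx_fc_enable is acted on in pch_gbe_intr(): when the Rx descriptor
	 * ring runs empty a pause packet is transmitted via
	 * pch_gbe_mac_set_pause_packet().
	 */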
404 	switch (mac->fc) {
405 	case PCH_GBE_FC_NONE:
406 		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
407 		mac->tx_fc_enable = false;
408 		break;
409 	case PCH_GBE_FC_RX_PAUSE:
410 		rx_fctrl |= PCH_GBE_FL_CTRL_EN;
411 		mac->tx_fc_enable = false;
412 		break;
413 	case PCH_GBE_FC_TX_PAUSE:
414 		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
415 		mac->tx_fc_enable = true;
416 		break;
417 	case PCH_GBE_FC_FULL:
418 		rx_fctrl |= PCH_GBE_FL_CTRL_EN;
419 		mac->tx_fc_enable = true;
420 		break;
421 	default:
422 		netdev_err(adapter->netdev,
423 			   "Flow control param set incorrectly\n");
424 		return -EINVAL;
425 	}
426 	if (mac->link_duplex == DUPLEX_HALF)
427 		rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
428 	iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
429 	netdev_dbg(adapter->netdev,
430 		   "RX_FCTRL reg : 0x%08x  mac->tx_fc_enable : %d\n",
431 		   ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
432 	return 0;
433 }
434 
435 /**
436  * pch_gbe_mac_set_wol_event - Set wake-on-lan event
437  * @hw:     Pointer to the HW structure
438  * @wu_evt: Wake up event
439  */
440 static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
441 {
442 	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
443 	u32 addr_mask;
444 
445 	netdev_dbg(adapter->netdev, "wu_evt : 0x%08x  ADDR_MASK reg : 0x%08x\n",
446 		   wu_evt, ioread32(&hw->reg->ADDR_MASK));
447 
448 	if (wu_evt) {
449 		/* Set Wake-On-Lan address mask */
450 		addr_mask = ioread32(&hw->reg->ADDR_MASK);
451 		iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
452 		/* wait busy */
453 		pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
454 		iowrite32(0, &hw->reg->WOL_ST);
455 		iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
456 		iowrite32(0x02, &hw->reg->TCPIP_ACC);
457 		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
458 	} else {
459 		iowrite32(0, &hw->reg->WOL_CTRL);
460 		iowrite32(0, &hw->reg->WOL_ST);
461 	}
}
464 
465 /**
466  * pch_gbe_mac_ctrl_miim - Control MIIM interface
467  * @hw:   Pointer to the HW structure
468  * @addr: Address of PHY
 * @dir:  Operation (Write or Read)
470  * @reg:  Access register of PHY
471  * @data: Write data.
472  *
 * Returns: Read data.
474  */
475 u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
476 			u16 data)
477 {
478 	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
479 	unsigned long flags;
480 	u32 data_out;
481 
482 	spin_lock_irqsave(&hw->miim_lock, flags);
483 
484 	if (readx_poll_timeout_atomic(ioread32, &hw->reg->MIIM, data_out,
485 				      data_out & PCH_GBE_MIIM_OPER_READY, 20, 2000)) {
486 		netdev_err(adapter->netdev, "pch-gbe.miim won't go Ready\n");
487 		spin_unlock_irqrestore(&hw->miim_lock, flags);
488 		return 0;	/* No way to indicate timeout error */
489 	}
490 	iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
491 		  (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
492 		  dir | data), &hw->reg->MIIM);
493 	readx_poll_timeout_atomic(ioread32, &hw->reg->MIIM, data_out,
494 				  data_out & PCH_GBE_MIIM_OPER_READY, 20, 2000);
495 	spin_unlock_irqrestore(&hw->miim_lock, flags);
496 
497 	netdev_dbg(adapter->netdev, "PHY %s: reg=%d, data=0x%04X\n",
498 		   dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
499 		   dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
500 	return (u16) data_out;
501 }
502 
503 /**
504  * pch_gbe_mac_set_pause_packet - Set pause packet
505  * @hw:   Pointer to the HW structure
506  */
507 static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
508 {
509 	struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
510 	unsigned long tmp2, tmp3;
511 
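	/*
	 * PAUSE_PKT1-5 hold a canned IEEE 802.3 pause frame.  PKT1, PKT4 and
	 * PKT5 appear to be the fixed parts (pause multicast DA, MAC control
	 * ethertype/opcode and pause quanta); PKT2/PKT3 are assembled here so
	 * the station MAC address is used as the source address.
	 */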
512 	/* Set Pause packet */
513 	tmp2 = hw->mac.addr[1];
514 	tmp2 = (tmp2 << 8) | hw->mac.addr[0];
515 	tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);
516 
517 	tmp3 = hw->mac.addr[5];
518 	tmp3 = (tmp3 << 8) | hw->mac.addr[4];
519 	tmp3 = (tmp3 << 8) | hw->mac.addr[3];
520 	tmp3 = (tmp3 << 8) | hw->mac.addr[2];
521 
522 	iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
523 	iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
524 	iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
525 	iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
526 	iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);
527 
528 	/* Transmit Pause Packet */
529 	iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);
530 
531 	netdev_dbg(adapter->netdev,
532 		   "PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
533 		   ioread32(&hw->reg->PAUSE_PKT1),
534 		   ioread32(&hw->reg->PAUSE_PKT2),
535 		   ioread32(&hw->reg->PAUSE_PKT3),
536 		   ioread32(&hw->reg->PAUSE_PKT4),
537 		   ioread32(&hw->reg->PAUSE_PKT5));
}
541 
542 
543 /**
544  * pch_gbe_alloc_queues - Allocate memory for all rings
545  * @adapter:  Board private structure to initialize
546  * Returns:
547  *	0:	Successfully
548  *	Negative value:	Failed
549  */
550 static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
551 {
552 	adapter->tx_ring = devm_kzalloc(&adapter->pdev->dev,
553 					sizeof(*adapter->tx_ring), GFP_KERNEL);
554 	if (!adapter->tx_ring)
555 		return -ENOMEM;
556 
557 	adapter->rx_ring = devm_kzalloc(&adapter->pdev->dev,
558 					sizeof(*adapter->rx_ring), GFP_KERNEL);
559 	if (!adapter->rx_ring)
560 		return -ENOMEM;
561 	return 0;
562 }
563 
564 /**
 * pch_gbe_init_stats - Initialize statistics
566  * @adapter:  Board private structure to initialize
567  */
568 static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
569 {
570 	memset(&adapter->stats, 0, sizeof(adapter->stats));
}
573 
574 /**
575  * pch_gbe_init_phy - Initialize PHY
576  * @adapter:  Board private structure to initialize
577  * Returns:
578  *	0:	Successfully
579  *	Negative value:	Failed
580  */
581 static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
582 {
583 	struct net_device *netdev = adapter->netdev;
584 	u32 addr;
585 	u16 bmcr, stat;
586 
587 	/* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
588 	for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
589 		adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
590 		bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
591 		stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
592 		stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
593 		if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
594 			break;
595 	}
596 	adapter->hw.phy.addr = adapter->mii.phy_id;
597 	netdev_dbg(netdev, "phy_addr = %d\n", adapter->mii.phy_id);
598 	if (addr == PCH_GBE_PHY_REGS_LEN)
599 		return -EAGAIN;
	/* Select the PHY and isolate the rest */
601 	for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
602 		if (addr != adapter->mii.phy_id) {
603 			pch_gbe_mdio_write(netdev, addr, MII_BMCR,
604 					   BMCR_ISOLATE);
605 		} else {
606 			bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
607 			pch_gbe_mdio_write(netdev, addr, MII_BMCR,
608 					   bmcr & ~BMCR_ISOLATE);
609 		}
610 	}
611 
612 	/* MII setup */
613 	adapter->mii.phy_id_mask = 0x1F;
614 	adapter->mii.reg_num_mask = 0x1F;
615 	adapter->mii.dev = adapter->netdev;
616 	adapter->mii.mdio_read = pch_gbe_mdio_read;
617 	adapter->mii.mdio_write = pch_gbe_mdio_write;
618 	adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
619 	return 0;
620 }
621 
622 /**
623  * pch_gbe_mdio_read - The read function for mii
624  * @netdev: Network interface device structure
625  * @addr:   Phy ID
626  * @reg:    Access location
 * Returns:
 *	The value read from the PHY register
630  */
631 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
632 {
633 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
634 	struct pch_gbe_hw *hw = &adapter->hw;
635 
636 	return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
637 				     (u16) 0);
638 }
639 
640 /**
641  * pch_gbe_mdio_write - The write function for mii
642  * @netdev: Network interface device structure
 * @addr:   Phy ID
644  * @reg:    Access location
645  * @data:   Write data
646  */
647 static void pch_gbe_mdio_write(struct net_device *netdev,
648 			       int addr, int reg, int data)
649 {
650 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
651 	struct pch_gbe_hw *hw = &adapter->hw;
652 
653 	pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
654 }
655 
656 /**
657  * pch_gbe_reset_task - Reset processing at the time of transmission timeout
 * @work:  Pointer to the reset_task work (contained in the board private structure)
659  */
660 static void pch_gbe_reset_task(struct work_struct *work)
661 {
662 	struct pch_gbe_adapter *adapter;
663 	adapter = container_of(work, struct pch_gbe_adapter, reset_task);
664 
665 	rtnl_lock();
666 	pch_gbe_reinit_locked(adapter);
667 	rtnl_unlock();
668 }
669 
670 /**
 * pch_gbe_reinit_locked - Re-initialization
672  * @adapter:  Board private structure
673  */
674 void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
675 {
676 	pch_gbe_down(adapter);
677 	pch_gbe_up(adapter);
678 }
679 
680 /**
681  * pch_gbe_reset - Reset GbE
682  * @adapter:  Board private structure
683  */
684 void pch_gbe_reset(struct pch_gbe_adapter *adapter)
685 {
686 	struct net_device *netdev = adapter->netdev;
687 	struct pch_gbe_hw *hw = &adapter->hw;
688 	s32 ret_val;
689 
690 	pch_gbe_mac_reset_hw(hw);
691 	/* reprogram multicast address register after reset */
692 	pch_gbe_set_multi(netdev);
693 	/* Setup the receive address. */
694 	pch_gbe_mac_init_rx_addrs(hw, PCH_GBE_MAR_ENTRIES);
695 
696 	ret_val = pch_gbe_phy_get_id(hw);
697 	if (ret_val) {
698 		netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n");
699 		return;
700 	}
701 	pch_gbe_phy_init_setting(hw);
702 	/* Setup Mac interface option RGMII */
703 	pch_gbe_phy_set_rgmii(hw);
704 }
705 
706 /**
707  * pch_gbe_free_irq - Free an interrupt
708  * @adapter:  Board private structure
709  */
710 static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
711 {
712 	struct net_device *netdev = adapter->netdev;
713 
714 	free_irq(adapter->irq, netdev);
715 	pci_free_irq_vectors(adapter->pdev);
716 }
717 
718 /**
719  * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
720  * @adapter:  Board private structure
721  */
722 static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
723 {
724 	struct pch_gbe_hw *hw = &adapter->hw;
725 
726 	atomic_inc(&adapter->irq_sem);
727 	iowrite32(0, &hw->reg->INT_EN);
728 	ioread32(&hw->reg->INT_ST);
729 	synchronize_irq(adapter->irq);
730 
731 	netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n",
732 		   ioread32(&hw->reg->INT_EN));
733 }
734 
735 /**
736  * pch_gbe_irq_enable - Enable default interrupt generation settings
737  * @adapter:  Board private structure
738  */
739 static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
740 {
741 	struct pch_gbe_hw *hw = &adapter->hw;
742 
743 	if (likely(atomic_dec_and_test(&adapter->irq_sem)))
744 		iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
745 	ioread32(&hw->reg->INT_ST);
746 	netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n",
747 		   ioread32(&hw->reg->INT_EN));
748 }
749 
752 /**
753  * pch_gbe_setup_tctl - configure the Transmit control registers
754  * @adapter:  Board private structure
755  */
756 static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
757 {
758 	struct pch_gbe_hw *hw = &adapter->hw;
759 	u32 tx_mode, tcpip;
760 
761 	tx_mode = PCH_GBE_TM_LONG_PKT |
762 		PCH_GBE_TM_ST_AND_FD |
763 		PCH_GBE_TM_SHORT_PKT |
764 		PCH_GBE_TM_TH_TX_STRT_8 |
765 		PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;
766 
767 	iowrite32(tx_mode, &hw->reg->TX_MODE);
768 
769 	tcpip = ioread32(&hw->reg->TCPIP_ACC);
770 	tcpip |= PCH_GBE_TX_TCPIPACC_EN;
771 	iowrite32(tcpip, &hw->reg->TCPIP_ACC);
}
774 
775 /**
776  * pch_gbe_configure_tx - Configure Transmit Unit after Reset
777  * @adapter:  Board private structure
778  */
779 static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
780 {
781 	struct pch_gbe_hw *hw = &adapter->hw;
782 	u32 tdba, tdlen, dctrl;
783 
784 	netdev_dbg(adapter->netdev, "dma addr = 0x%08llx  size = 0x%08x\n",
785 		   (unsigned long long)adapter->tx_ring->dma,
786 		   adapter->tx_ring->size);
787 
788 	/* Setup the HW Tx Head and Tail descriptor pointers */
789 	tdba = adapter->tx_ring->dma;
790 	tdlen = adapter->tx_ring->size - 0x10;
791 	iowrite32(tdba, &hw->reg->TX_DSC_BASE);
792 	iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
793 	iowrite32(tdba, &hw->reg->TX_DSC_SW_P);
794 
795 	/* Enables Transmission DMA */
796 	dctrl = ioread32(&hw->reg->DMA_CTRL);
797 	dctrl |= PCH_GBE_TX_DMA_EN;
798 	iowrite32(dctrl, &hw->reg->DMA_CTRL);
799 }
800 
801 /**
802  * pch_gbe_setup_rctl - Configure the receive control registers
803  * @adapter:  Board private structure
804  */
805 static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
806 {
807 	struct pch_gbe_hw *hw = &adapter->hw;
808 	u32 rx_mode, tcpip;
809 
810 	rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
811 	PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;
812 
813 	iowrite32(rx_mode, &hw->reg->RX_MODE);
814 
815 	tcpip = ioread32(&hw->reg->TCPIP_ACC);
816 
817 	tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
818 	tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
819 	iowrite32(tcpip, &hw->reg->TCPIP_ACC);
}
822 
823 /**
824  * pch_gbe_configure_rx - Configure Receive Unit after Reset
825  * @adapter:  Board private structure
826  */
827 static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
828 {
829 	struct pch_gbe_hw *hw = &adapter->hw;
830 	u32 rdba, rdlen, rxdma;
831 
	netdev_dbg(adapter->netdev, "dma addr = 0x%08llx  size = 0x%08x\n",
833 		   (unsigned long long)adapter->rx_ring->dma,
834 		   adapter->rx_ring->size);
835 
836 	pch_gbe_mac_force_mac_fc(hw);
837 
838 	pch_gbe_disable_mac_rx(hw);
839 
840 	/* Disables Receive DMA */
841 	rxdma = ioread32(&hw->reg->DMA_CTRL);
842 	rxdma &= ~PCH_GBE_RX_DMA_EN;
843 	iowrite32(rxdma, &hw->reg->DMA_CTRL);
844 
845 	netdev_dbg(adapter->netdev,
846 		   "MAC_RX_EN reg = 0x%08x  DMA_CTRL reg = 0x%08x\n",
847 		   ioread32(&hw->reg->MAC_RX_EN),
848 		   ioread32(&hw->reg->DMA_CTRL));
849 
850 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
851 	 * the Base and Length of the Rx Descriptor Ring */
852 	rdba = adapter->rx_ring->dma;
853 	rdlen = adapter->rx_ring->size - 0x10;
854 	iowrite32(rdba, &hw->reg->RX_DSC_BASE);
855 	iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
856 	iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
857 }
858 
859 /**
860  * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
861  * @adapter:     Board private structure
862  * @buffer_info: Buffer information structure
863  */
864 static void pch_gbe_unmap_and_free_tx_resource(
865 	struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
866 {
867 	if (buffer_info->mapped) {
868 		dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
869 				 buffer_info->length, DMA_TO_DEVICE);
870 		buffer_info->mapped = false;
871 	}
872 	if (buffer_info->skb) {
873 		dev_kfree_skb_any(buffer_info->skb);
874 		buffer_info->skb = NULL;
875 	}
876 }
877 
878 /**
879  * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
880  * @adapter:      Board private structure
881  * @buffer_info:  Buffer information structure
882  */
883 static void pch_gbe_unmap_and_free_rx_resource(
884 					struct pch_gbe_adapter *adapter,
885 					struct pch_gbe_buffer *buffer_info)
886 {
887 	if (buffer_info->mapped) {
888 		dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
889 				 buffer_info->length, DMA_FROM_DEVICE);
890 		buffer_info->mapped = false;
891 	}
892 	if (buffer_info->skb) {
893 		dev_kfree_skb_any(buffer_info->skb);
894 		buffer_info->skb = NULL;
895 	}
896 }
897 
898 /**
899  * pch_gbe_clean_tx_ring - Free Tx Buffers
900  * @adapter:  Board private structure
901  * @tx_ring:  Ring to be cleaned
902  */
903 static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
904 				   struct pch_gbe_tx_ring *tx_ring)
905 {
906 	struct pch_gbe_hw *hw = &adapter->hw;
907 	struct pch_gbe_buffer *buffer_info;
908 	unsigned long size;
909 	unsigned int i;
910 
911 	/* Free all the Tx ring sk_buffs */
912 	for (i = 0; i < tx_ring->count; i++) {
913 		buffer_info = &tx_ring->buffer_info[i];
914 		pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
915 	}
916 	netdev_dbg(adapter->netdev,
917 		   "call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);
918 
919 	size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
920 	memset(tx_ring->buffer_info, 0, size);
921 
922 	/* Zero out the descriptor ring */
923 	memset(tx_ring->desc, 0, tx_ring->size);
924 	tx_ring->next_to_use = 0;
925 	tx_ring->next_to_clean = 0;
926 	iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
927 	iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
928 }
929 
930 /**
931  * pch_gbe_clean_rx_ring - Free Rx Buffers
932  * @adapter:  Board private structure
933  * @rx_ring:  Ring to free buffers from
934  */
935 static void
936 pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
937 		      struct pch_gbe_rx_ring *rx_ring)
938 {
939 	struct pch_gbe_hw *hw = &adapter->hw;
940 	struct pch_gbe_buffer *buffer_info;
941 	unsigned long size;
942 	unsigned int i;
943 
944 	/* Free all the Rx ring sk_buffs */
945 	for (i = 0; i < rx_ring->count; i++) {
946 		buffer_info = &rx_ring->buffer_info[i];
947 		pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
948 	}
949 	netdev_dbg(adapter->netdev,
950 		   "call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
951 	size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
952 	memset(rx_ring->buffer_info, 0, size);
953 
954 	/* Zero out the descriptor ring */
955 	memset(rx_ring->desc, 0, rx_ring->size);
956 	rx_ring->next_to_clean = 0;
957 	rx_ring->next_to_use = 0;
958 	iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
959 	iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
960 }
961 
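/*
 * pch_gbe_set_rgmii_ctrl - select the RGMII reference clock for the link
 * speed: 2.5 MHz for 10 Mbps, 25 MHz for 100 Mbps and 125 MHz for 1000 Mbps,
 * as encoded by the PCH_GBE_RGMII_RATE_* values below.
 */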
962 static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
963 				    u16 duplex)
964 {
965 	struct pch_gbe_hw *hw = &adapter->hw;
966 	unsigned long rgmii = 0;
967 
968 	/* Set the RGMII control. */
969 	switch (speed) {
970 	case SPEED_10:
971 		rgmii = (PCH_GBE_RGMII_RATE_2_5M |
972 			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
973 		break;
974 	case SPEED_100:
975 		rgmii = (PCH_GBE_RGMII_RATE_25M |
976 			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
977 		break;
978 	case SPEED_1000:
979 		rgmii = (PCH_GBE_RGMII_RATE_125M |
980 			 PCH_GBE_MAC_RGMII_CTRL_SETTING);
981 		break;
982 	}
983 	iowrite32(rgmii, &hw->reg->RGMII_CTRL);
984 }
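
/*
 * pch_gbe_set_mode - besides MII/GMII and duplex selection, this also shrinks
 * the qdisc tx_queue_len at 10/100 Mbps; the original value is restored from
 * adapter->tx_queue_len by the watchdog when the link comes back up.
 */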
985 static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
986 			      u16 duplex)
987 {
988 	struct net_device *netdev = adapter->netdev;
989 	struct pch_gbe_hw *hw = &adapter->hw;
990 	unsigned long mode = 0;
991 
992 	/* Set the communication mode */
993 	switch (speed) {
994 	case SPEED_10:
995 		mode = PCH_GBE_MODE_MII_ETHER;
996 		netdev->tx_queue_len = 10;
997 		break;
998 	case SPEED_100:
999 		mode = PCH_GBE_MODE_MII_ETHER;
1000 		netdev->tx_queue_len = 100;
1001 		break;
1002 	case SPEED_1000:
1003 		mode = PCH_GBE_MODE_GMII_ETHER;
1004 		break;
1005 	}
1006 	if (duplex == DUPLEX_FULL)
1007 		mode |= PCH_GBE_MODE_FULL_DUPLEX;
1008 	else
1009 		mode |= PCH_GBE_MODE_HALF_DUPLEX;
1010 	iowrite32(mode, &hw->reg->MODE);
1011 }
1012 
1013 /**
1014  * pch_gbe_watchdog - Watchdog process
 * @t:  Watchdog timer (embedded in the board private structure)
1016  */
1017 static void pch_gbe_watchdog(struct timer_list *t)
1018 {
1019 	struct pch_gbe_adapter *adapter = from_timer(adapter, t,
1020 						     watchdog_timer);
1021 	struct net_device *netdev = adapter->netdev;
1022 	struct pch_gbe_hw *hw = &adapter->hw;
1023 
1024 	netdev_dbg(netdev, "right now = %ld\n", jiffies);
1025 
1026 	pch_gbe_update_stats(adapter);
1027 	if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
1028 		struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
1029 		netdev->tx_queue_len = adapter->tx_queue_len;
1030 		/* mii library handles link maintenance tasks */
1031 		mii_ethtool_gset(&adapter->mii, &cmd);
1032 		hw->mac.link_speed = ethtool_cmd_speed(&cmd);
1033 		hw->mac.link_duplex = cmd.duplex;
1034 		/* Set the RGMII control. */
1035 		pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
1036 						hw->mac.link_duplex);
1037 		/* Set the communication mode */
1038 		pch_gbe_set_mode(adapter, hw->mac.link_speed,
1039 				 hw->mac.link_duplex);
1040 		netdev_dbg(netdev,
1041 			   "Link is Up %d Mbps %s-Duplex\n",
1042 			   hw->mac.link_speed,
1043 			   cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
1044 		netif_carrier_on(netdev);
1045 		netif_wake_queue(netdev);
1046 	} else if ((!mii_link_ok(&adapter->mii)) &&
1047 		   (netif_carrier_ok(netdev))) {
1048 		netdev_dbg(netdev, "NIC Link is Down\n");
1049 		hw->mac.link_speed = SPEED_10;
1050 		hw->mac.link_duplex = DUPLEX_HALF;
1051 		netif_carrier_off(netdev);
1052 		netif_stop_queue(netdev);
1053 	}
1054 	mod_timer(&adapter->watchdog_timer,
1055 		  round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
1056 }
1057 
1058 /**
1059  * pch_gbe_tx_queue - Carry out queuing of the transmission data
1060  * @adapter:  Board private structure
1061  * @tx_ring:  Tx descriptor ring structure
 * @skb:      Socket buffer structure
1063  */
1064 static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
1065 			      struct pch_gbe_tx_ring *tx_ring,
1066 			      struct sk_buff *skb)
1067 {
1068 	struct pch_gbe_hw *hw = &adapter->hw;
1069 	struct pch_gbe_tx_desc *tx_desc;
1070 	struct pch_gbe_buffer *buffer_info;
1071 	struct sk_buff *tmp_skb;
1072 	unsigned int frame_ctrl;
1073 	unsigned int ring_num;
1074 
1075 	/*-- Set frame control --*/
1076 	frame_ctrl = 0;
1077 	if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
1078 		frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
1079 	if (skb->ip_summed == CHECKSUM_NONE)
1080 		frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
1081 
	/*
	 * Perform checksum processing in software: the hardware checksum
	 * accelerator cannot handle frames shorter than 64 bytes.
	 */
1087 	if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
1088 		frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
1089 			      PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
1090 		if (skb->protocol == htons(ETH_P_IP)) {
1091 			struct iphdr *iph = ip_hdr(skb);
1092 			unsigned int offset;
1093 			offset = skb_transport_offset(skb);
1094 			if (iph->protocol == IPPROTO_TCP) {
1095 				skb->csum = 0;
1096 				tcp_hdr(skb)->check = 0;
1097 				skb->csum = skb_checksum(skb, offset,
1098 							 skb->len - offset, 0);
1099 				tcp_hdr(skb)->check =
1100 					csum_tcpudp_magic(iph->saddr,
1101 							  iph->daddr,
1102 							  skb->len - offset,
1103 							  IPPROTO_TCP,
1104 							  skb->csum);
1105 			} else if (iph->protocol == IPPROTO_UDP) {
1106 				skb->csum = 0;
1107 				udp_hdr(skb)->check = 0;
1108 				skb->csum =
1109 					skb_checksum(skb, offset,
1110 						     skb->len - offset, 0);
1111 				udp_hdr(skb)->check =
1112 					csum_tcpudp_magic(iph->saddr,
1113 							  iph->daddr,
1114 							  skb->len - offset,
1115 							  IPPROTO_UDP,
1116 							  skb->csum);
1117 			}
1118 		}
1119 	}
1120 
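	/*
	 * Each frame uses exactly one descriptor.  The data is copied into the
	 * bounce skb pre-allocated in pch_gbe_alloc_tx_buffers(), with two
	 * bytes of padding inserted after the Ethernet header
	 * (cf. PCH_GBE_DMA_PADDING), presumably so that the IP header seen by
	 * the hardware is 4-byte aligned.
	 */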
1121 	ring_num = tx_ring->next_to_use;
1122 	if (unlikely((ring_num + 1) == tx_ring->count))
1123 		tx_ring->next_to_use = 0;
1124 	else
1125 		tx_ring->next_to_use = ring_num + 1;
1126 
1128 	buffer_info = &tx_ring->buffer_info[ring_num];
1129 	tmp_skb = buffer_info->skb;
1130 
	/* [Header:14][payload] ---> [Header:14][padding:2][payload]    */
1132 	memcpy(tmp_skb->data, skb->data, ETH_HLEN);
1133 	tmp_skb->data[ETH_HLEN] = 0x00;
1134 	tmp_skb->data[ETH_HLEN + 1] = 0x00;
1135 	tmp_skb->len = skb->len;
1136 	memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
1137 	       (skb->len - ETH_HLEN));
1138 	/*-- Set Buffer information --*/
1139 	buffer_info->length = tmp_skb->len;
1140 	buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
1141 					  buffer_info->length,
1142 					  DMA_TO_DEVICE);
1143 	if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1144 		netdev_err(adapter->netdev, "TX DMA map failed\n");
1145 		buffer_info->dma = 0;
1146 		buffer_info->time_stamp = 0;
1147 		tx_ring->next_to_use = ring_num;
1148 		return;
1149 	}
1150 	buffer_info->mapped = true;
1151 	buffer_info->time_stamp = jiffies;
1152 
1153 	/*-- Set Tx descriptor --*/
1154 	tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
1155 	tx_desc->buffer_addr = (buffer_info->dma);
1156 	tx_desc->length = (tmp_skb->len);
1157 	tx_desc->tx_words_eob = ((tmp_skb->len + 3));
1158 	tx_desc->tx_frame_ctrl = (frame_ctrl);
1159 	tx_desc->gbec_status = (DSC_INIT16);
1160 
1161 	if (unlikely(++ring_num == tx_ring->count))
1162 		ring_num = 0;
1163 
1164 	/* Update software pointer of TX descriptor */
1165 	iowrite32(tx_ring->dma +
1166 		  (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
1167 		  &hw->reg->TX_DSC_SW_P);
1168 
1169 	pch_tx_timestamp(adapter, skb);
1170 
1171 	dev_kfree_skb_any(skb);
1172 }
1173 
1174 /**
1175  * pch_gbe_update_stats - Update the board statistics counters
1176  * @adapter:  Board private structure
1177  */
1178 void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
1179 {
1180 	struct net_device *netdev = adapter->netdev;
1181 	struct pci_dev *pdev = adapter->pdev;
1182 	struct pch_gbe_hw_stats *stats = &adapter->stats;
1183 	unsigned long flags;
1184 
1185 	/*
1186 	 * Prevent stats update while adapter is being reset, or if the pci
1187 	 * connection is down.
1188 	 */
1189 	if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
1190 		return;
1191 
1192 	spin_lock_irqsave(&adapter->stats_lock, flags);
1193 
1194 	/* Update device status "adapter->stats" */
1195 	stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
1196 	stats->tx_errors = stats->tx_length_errors +
1197 	    stats->tx_aborted_errors +
1198 	    stats->tx_carrier_errors + stats->tx_timeout_count;
1199 
1200 	/* Update network device status "adapter->net_stats" */
1201 	netdev->stats.rx_packets = stats->rx_packets;
1202 	netdev->stats.rx_bytes = stats->rx_bytes;
1203 	netdev->stats.rx_dropped = stats->rx_dropped;
1204 	netdev->stats.tx_packets = stats->tx_packets;
1205 	netdev->stats.tx_bytes = stats->tx_bytes;
1206 	netdev->stats.tx_dropped = stats->tx_dropped;
1207 	/* Fill out the OS statistics structure */
1208 	netdev->stats.multicast = stats->multicast;
1209 	netdev->stats.collisions = stats->collisions;
1210 	/* Rx Errors */
1211 	netdev->stats.rx_errors = stats->rx_errors;
1212 	netdev->stats.rx_crc_errors = stats->rx_crc_errors;
1213 	netdev->stats.rx_frame_errors = stats->rx_frame_errors;
1214 	/* Tx Errors */
1215 	netdev->stats.tx_errors = stats->tx_errors;
1216 	netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
1217 	netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;
1218 
1219 	spin_unlock_irqrestore(&adapter->stats_lock, flags);
1220 }
1221 
1222 static void pch_gbe_disable_dma_rx(struct pch_gbe_hw *hw)
1223 {
1224 	u32 rxdma;
1225 
1226 	/* Disable Receive DMA */
1227 	rxdma = ioread32(&hw->reg->DMA_CTRL);
1228 	rxdma &= ~PCH_GBE_RX_DMA_EN;
1229 	iowrite32(rxdma, &hw->reg->DMA_CTRL);
1230 }
1231 
1232 static void pch_gbe_enable_dma_rx(struct pch_gbe_hw *hw)
1233 {
1234 	u32 rxdma;
1235 
1236 	/* Enables Receive DMA */
1237 	rxdma = ioread32(&hw->reg->DMA_CTRL);
1238 	rxdma |= PCH_GBE_RX_DMA_EN;
1239 	iowrite32(rxdma, &hw->reg->DMA_CTRL);
1240 }
1241 
1242 /**
1243  * pch_gbe_intr - Interrupt Handler
1244  * @irq:   Interrupt number
1245  * @data:  Pointer to a network interface device structure
1246  * Returns:
1247  *	- IRQ_HANDLED:	Our interrupt
1248  *	- IRQ_NONE:	Not our interrupt
1249  */
1250 static irqreturn_t pch_gbe_intr(int irq, void *data)
1251 {
1252 	struct net_device *netdev = data;
1253 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1254 	struct pch_gbe_hw *hw = &adapter->hw;
1255 	u32 int_st;
1256 	u32 int_en;
1257 
1258 	/* Check request status */
1259 	int_st = ioread32(&hw->reg->INT_ST);
1260 	int_st = int_st & ioread32(&hw->reg->INT_EN);
	/* None of the enabled interrupt causes is set */
1262 	if (unlikely(!int_st))
1263 		return IRQ_NONE;	/* Not our interrupt. End processing. */
1264 	netdev_dbg(netdev, "%s occur int_st = 0x%08x\n", __func__, int_st);
1265 	if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
1266 		adapter->stats.intr_rx_frame_err_count++;
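	/*
	 * On an Rx FIFO overrun: stop Rx DMA and mask further FIFO-error
	 * interrupts so the condition is handled once; rx_stop_flag records
	 * the condition and also forces NAPI scheduling below so receive can
	 * be restarted later.
	 */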
1267 	if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
1268 		if (!adapter->rx_stop_flag) {
1269 			adapter->stats.intr_rx_fifo_err_count++;
1270 			netdev_dbg(netdev, "Rx fifo over run\n");
1271 			adapter->rx_stop_flag = true;
1272 			int_en = ioread32(&hw->reg->INT_EN);
1273 			iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
1274 				  &hw->reg->INT_EN);
1275 			pch_gbe_disable_dma_rx(&adapter->hw);
1276 			int_st |= ioread32(&hw->reg->INT_ST);
1277 			int_st = int_st & ioread32(&hw->reg->INT_EN);
1278 		}
1279 	if (int_st & PCH_GBE_INT_RX_DMA_ERR)
1280 		adapter->stats.intr_rx_dma_err_count++;
1281 	if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
1282 		adapter->stats.intr_tx_fifo_err_count++;
1283 	if (int_st & PCH_GBE_INT_TX_DMA_ERR)
1284 		adapter->stats.intr_tx_dma_err_count++;
1285 	if (int_st & PCH_GBE_INT_TCPIP_ERR)
1286 		adapter->stats.intr_tcpip_err_count++;
1287 	/* When Rx descriptor is empty  */
1288 	if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
1289 		adapter->stats.intr_rx_dsc_empty_count++;
1290 		netdev_dbg(netdev, "Rx descriptor is empty\n");
1291 		int_en = ioread32(&hw->reg->INT_EN);
1292 		iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
1293 		if (hw->mac.tx_fc_enable) {
1294 			/* Set Pause packet */
1295 			pch_gbe_mac_set_pause_packet(hw);
1296 		}
1297 	}
1298 
	/* Rx/Tx completion interrupt, or Rx was stopped on a FIFO error */
1300 	if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
1301 	    (adapter->rx_stop_flag)) {
1302 		if (likely(napi_schedule_prep(&adapter->napi))) {
1303 			/* Enable only Rx Descriptor empty */
1304 			atomic_inc(&adapter->irq_sem);
1305 			int_en = ioread32(&hw->reg->INT_EN);
1306 			int_en &=
1307 			    ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
1308 			iowrite32(int_en, &hw->reg->INT_EN);
1309 			/* Start polling for NAPI */
1310 			__napi_schedule(&adapter->napi);
1311 		}
1312 	}
1313 	netdev_dbg(netdev, "return = 0x%08x  INT_EN reg = 0x%08x\n",
1314 		   IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
1315 	return IRQ_HANDLED;
1316 }
1317 
1318 /**
1319  * pch_gbe_alloc_rx_buffers - Replace used receive buffers; legacy & extended
1320  * @adapter:       Board private structure
1321  * @rx_ring:       Rx descriptor ring
1322  * @cleaned_count: Cleaned count
1323  */
1324 static void
1325 pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
1326 			 struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1327 {
1328 	struct net_device *netdev = adapter->netdev;
1329 	struct pci_dev *pdev = adapter->pdev;
1330 	struct pch_gbe_hw *hw = &adapter->hw;
1331 	struct pch_gbe_rx_desc *rx_desc;
1332 	struct pch_gbe_buffer *buffer_info;
1333 	struct sk_buff *skb;
1334 	unsigned int i;
1335 	unsigned int bufsz;
1336 
1337 	bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
1338 	i = rx_ring->next_to_use;
1339 
1340 	while ((cleaned_count--)) {
1341 		buffer_info = &rx_ring->buffer_info[i];
1342 		skb = netdev_alloc_skb(netdev, bufsz);
1343 		if (unlikely(!skb)) {
1344 			/* Better luck next round */
1345 			adapter->stats.rx_alloc_buff_failed++;
1346 			break;
1347 		}
1348 		/* align */
1349 		skb_reserve(skb, NET_IP_ALIGN);
1350 		buffer_info->skb = skb;
1351 
1352 		buffer_info->dma = dma_map_single(&pdev->dev,
1353 						  buffer_info->rx_buffer,
1354 						  buffer_info->length,
1355 						  DMA_FROM_DEVICE);
1356 		if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1357 			dev_kfree_skb(skb);
1358 			buffer_info->skb = NULL;
1359 			buffer_info->dma = 0;
1360 			adapter->stats.rx_alloc_buff_failed++;
1361 			break; /* while !buffer_info->skb */
1362 		}
1363 		buffer_info->mapped = true;
1364 		rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
1365 		rx_desc->buffer_addr = (buffer_info->dma);
1366 		rx_desc->gbec_status = DSC_INIT16;
1367 
1368 		netdev_dbg(netdev,
1369 			   "i = %d  buffer_info->dma = 0x08%llx  buffer_info->length = 0x%x\n",
1370 			   i, (unsigned long long)buffer_info->dma,
1371 			   buffer_info->length);
1372 
1373 		if (unlikely(++i == rx_ring->count))
1374 			i = 0;
1375 	}
1376 	if (likely(rx_ring->next_to_use != i)) {
1377 		rx_ring->next_to_use = i;
1378 		if (unlikely(i-- == 0))
1379 			i = (rx_ring->count - 1);
1380 		iowrite32(rx_ring->dma +
1381 			  (int)sizeof(struct pch_gbe_rx_desc) * i,
1382 			  &hw->reg->RX_DSC_SW_P);
1383 	}
}
1386 
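/*
 * pch_gbe_alloc_rx_buffers_pool - carve per-descriptor receive buffers out of
 * one coherent DMA pool.  The skbs allocated in pch_gbe_alloc_rx_buffers()
 * are only used as the destination of the copy done in pch_gbe_clean_rx();
 * the hardware always DMAs into these pool buffers.
 */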
1387 static int
1388 pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
1389 			 struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1390 {
1391 	struct pci_dev *pdev = adapter->pdev;
1392 	struct pch_gbe_buffer *buffer_info;
1393 	unsigned int i;
1394 	unsigned int bufsz;
1395 	unsigned int size;
1396 
1397 	bufsz = adapter->rx_buffer_len;
1398 
1399 	size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
1400 	rx_ring->rx_buff_pool =
1401 		dma_alloc_coherent(&pdev->dev, size,
1402 				   &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
1403 	if (!rx_ring->rx_buff_pool)
1404 		return -ENOMEM;
1405 
1406 	rx_ring->rx_buff_pool_size = size;
1407 	for (i = 0; i < rx_ring->count; i++) {
1408 		buffer_info = &rx_ring->buffer_info[i];
1409 		buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
1410 		buffer_info->length = bufsz;
1411 	}
1412 	return 0;
1413 }
1414 
1415 /**
1416  * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
1417  * @adapter:   Board private structure
1418  * @tx_ring:   Tx descriptor ring
1419  */
1420 static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
1421 					struct pch_gbe_tx_ring *tx_ring)
1422 {
1423 	struct pch_gbe_buffer *buffer_info;
1424 	struct sk_buff *skb;
1425 	unsigned int i;
1426 	unsigned int bufsz;
1427 	struct pch_gbe_tx_desc *tx_desc;
1428 
1429 	bufsz =
1430 	    adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;
1431 
1432 	for (i = 0; i < tx_ring->count; i++) {
1433 		buffer_info = &tx_ring->buffer_info[i];
1434 		skb = netdev_alloc_skb(adapter->netdev, bufsz);
1435 		skb_reserve(skb, PCH_GBE_DMA_ALIGN);
1436 		buffer_info->skb = skb;
1437 		tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1438 		tx_desc->gbec_status = (DSC_INIT16);
1439 	}
}
1442 
1443 /**
1444  * pch_gbe_clean_tx - Reclaim resources after transmit completes
1445  * @adapter:   Board private structure
1446  * @tx_ring:   Tx descriptor ring
1447  * Returns:
1448  *	true:  Cleaned the descriptor
1449  *	false: Not cleaned the descriptor
1450  */
1451 static bool
1452 pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1453 		 struct pch_gbe_tx_ring *tx_ring)
1454 {
1455 	struct pch_gbe_tx_desc *tx_desc;
1456 	struct pch_gbe_buffer *buffer_info;
1457 	struct sk_buff *skb;
1458 	unsigned int i;
1459 	unsigned int cleaned_count = 0;
1460 	bool cleaned = false;
1461 	int unused, thresh;
1462 
1463 	netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
1464 		   tx_ring->next_to_clean);
1465 
1466 	i = tx_ring->next_to_clean;
1467 	tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1468 	netdev_dbg(adapter->netdev, "gbec_status:0x%04x  dma_status:0x%04x\n",
1469 		   tx_desc->gbec_status, tx_desc->dma_status);
1470 
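	/*
	 * If the descriptor at next_to_clean still looks untouched but the
	 * ring is filling up, scan ahead up to PCH_GBE_TX_WEIGHT entries for a
	 * completed descriptor that may have been skipped, so the queue is not
	 * stalled waiting on a stale entry.
	 */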
1471 	unused = PCH_GBE_DESC_UNUSED(tx_ring);
1472 	thresh = tx_ring->count - PCH_GBE_TX_WEIGHT;
	if ((tx_desc->gbec_status == DSC_INIT16) && (unused < thresh)) {
		/* current marked clean, tx queue filling up, do extra clean */
1475 		int j, k;
1476 		if (unused < 8) {  /* tx queue nearly full */
1477 			netdev_dbg(adapter->netdev,
1478 				   "clean_tx: transmit queue warning (%x,%x) unused=%d\n",
1479 				   tx_ring->next_to_clean, tx_ring->next_to_use,
1480 				   unused);
1481 		}
1482 
1483 		/* current marked clean, scan for more that need cleaning. */
1484 		k = i;
		for (j = 0; j < PCH_GBE_TX_WEIGHT; j++) {
			tx_desc = PCH_GBE_TX_DESC(*tx_ring, k);
			if (tx_desc->gbec_status != DSC_INIT16)
				break;	/* found */
			if (++k >= tx_ring->count)
				k = 0;	/* increment, wrap */
		}
1491 		if (j < PCH_GBE_TX_WEIGHT) {
1492 			netdev_dbg(adapter->netdev,
1493 				   "clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n",
1494 				   unused, j, i, k, tx_ring->next_to_use,
1495 				   tx_desc->gbec_status);
			i = k;	/* found one to clean, usually gbec_status == 2000 */
1497 		}
1498 	}
1499 
1500 	while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
1501 		netdev_dbg(adapter->netdev, "gbec_status:0x%04x\n",
1502 			   tx_desc->gbec_status);
1503 		buffer_info = &tx_ring->buffer_info[i];
1504 		skb = buffer_info->skb;
1505 		cleaned = true;
1506 
1507 		if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
1508 			adapter->stats.tx_aborted_errors++;
1509 			netdev_err(adapter->netdev, "Transfer Abort Error\n");
1510 		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
1511 			  ) {
1512 			adapter->stats.tx_carrier_errors++;
1513 			netdev_err(adapter->netdev,
1514 				   "Transfer Carrier Sense Error\n");
1515 		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
1516 			  ) {
1517 			adapter->stats.tx_aborted_errors++;
1518 			netdev_err(adapter->netdev,
1519 				   "Transfer Collision Abort Error\n");
1520 		} else if ((tx_desc->gbec_status &
1521 			    (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
1522 			     PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
1523 			adapter->stats.collisions++;
1524 			adapter->stats.tx_packets++;
1525 			adapter->stats.tx_bytes += skb->len;
1526 			netdev_dbg(adapter->netdev, "Transfer Collision\n");
1527 		} else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
1528 			  ) {
1529 			adapter->stats.tx_packets++;
1530 			adapter->stats.tx_bytes += skb->len;
1531 		}
1532 		if (buffer_info->mapped) {
1533 			netdev_dbg(adapter->netdev,
1534 				   "unmap buffer_info->dma : %d\n", i);
1535 			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1536 					 buffer_info->length, DMA_TO_DEVICE);
1537 			buffer_info->mapped = false;
1538 		}
1539 		if (buffer_info->skb) {
1540 			netdev_dbg(adapter->netdev,
1541 				   "trim buffer_info->skb : %d\n", i);
1542 			skb_trim(buffer_info->skb, 0);
1543 		}
1544 		tx_desc->gbec_status = DSC_INIT16;
1545 		if (unlikely(++i == tx_ring->count))
1546 			i = 0;
1547 		tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1548 
1549 		/* weight of a sort for tx, to avoid endless transmit cleanup */
1550 		if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
1551 			cleaned = false;
1552 			break;
1553 		}
1554 	}
1555 	netdev_dbg(adapter->netdev,
1556 		   "called pch_gbe_unmap_and_free_tx_resource() %d count\n",
1557 		   cleaned_count);
1558 	if (cleaned_count > 0)  { /*skip this if nothing cleaned*/
1559 		/* Recover from running out of Tx resources in xmit_frame */
1560 		netif_tx_lock(adapter->netdev);
1561 		if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev))))
1562 		{
1563 			netif_wake_queue(adapter->netdev);
1564 			adapter->stats.tx_restart_count++;
1565 			netdev_dbg(adapter->netdev, "Tx wake queue\n");
1566 		}
1567 
1568 		tx_ring->next_to_clean = i;
1569 
1570 		netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
1571 			   tx_ring->next_to_clean);
1572 		netif_tx_unlock(adapter->netdev);
1573 	}
1574 	return cleaned;
1575 }
1576 
1577 /**
1578  * pch_gbe_clean_rx - Send received data up the network stack; legacy
1579  * @adapter:     Board private structure
1580  * @rx_ring:     Rx descriptor ring
1581  * @work_done:   Completed count
1582  * @work_to_do:  Request count
1583  * Returns:
1584  *	true:  Cleaned the descriptor
1585  *	false: Not cleaned the descriptor
1586  */
1587 static bool
1588 pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1589 		 struct pch_gbe_rx_ring *rx_ring,
1590 		 int *work_done, int work_to_do)
1591 {
1592 	struct net_device *netdev = adapter->netdev;
1593 	struct pci_dev *pdev = adapter->pdev;
1594 	struct pch_gbe_buffer *buffer_info;
1595 	struct pch_gbe_rx_desc *rx_desc;
1596 	u32 length;
1597 	unsigned int i;
1598 	unsigned int cleaned_count = 0;
1599 	bool cleaned = false;
1600 	struct sk_buff *skb;
1601 	u8 dma_status;
1602 	u16 gbec_status;
1603 	u32 tcp_ip_status;
1604 
1605 	i = rx_ring->next_to_clean;
1606 
1607 	while (*work_done < work_to_do) {
1608 		/* Check Rx descriptor status */
1609 		rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
1610 		if (rx_desc->gbec_status == DSC_INIT16)
1611 			break;
1612 		cleaned = true;
1613 		cleaned_count++;
1614 
1615 		dma_status = rx_desc->dma_status;
1616 		gbec_status = rx_desc->gbec_status;
1617 		tcp_ip_status = rx_desc->tcp_ip_status;
1618 		rx_desc->gbec_status = DSC_INIT16;
1619 		buffer_info = &rx_ring->buffer_info[i];
1620 		skb = buffer_info->skb;
1621 		buffer_info->skb = NULL;
1622 
1623 		/* unmap dma */
1624 		dma_unmap_single(&pdev->dev, buffer_info->dma,
1625 				   buffer_info->length, DMA_FROM_DEVICE);
1626 		buffer_info->mapped = false;
1627 
1628 		netdev_dbg(netdev,
1629 			   "RxDecNo = 0x%04x  Status[DMA:0x%02x GBE:0x%04x TCP:0x%08x]  BufInf = 0x%p\n",
1630 			   i, dma_status, gbec_status, tcp_ip_status,
1631 			   buffer_info);
1632 		/* Error check */
1633 		if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
1634 			adapter->stats.rx_frame_errors++;
1635 			netdev_err(netdev, "Receive Not Octal Error\n");
1636 		} else if (unlikely(gbec_status &
1637 				PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
1638 			adapter->stats.rx_frame_errors++;
1639 			netdev_err(netdev, "Receive Nibble Error\n");
1640 		} else if (unlikely(gbec_status &
1641 				PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
1642 			adapter->stats.rx_crc_errors++;
1643 			netdev_err(netdev, "Receive CRC Error\n");
1644 		} else {
1645 			/* get receive length */
			/* rx_words_eob is the received length + 3 and includes
			 * the FCS; subtract both to get the packet length.
			 */
1647 			length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
1648 			if (rx_desc->rx_words_eob & 0x02)
1649 				length = length - 4;
1650 			/*
1651 			 * buffer_info->rx_buffer: [Header:14][payload]
1652 			 * skb->data: [Reserve:2][Header:14][payload]
1653 			 */
1654 			memcpy(skb->data, buffer_info->rx_buffer, length);
1655 
1656 			/* update status of driver */
1657 			adapter->stats.rx_bytes += length;
1658 			adapter->stats.rx_packets++;
1659 			if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
1660 				adapter->stats.multicast++;
			/* Write metadata of skb */
1662 			skb_put(skb, length);
1663 
1664 			pch_rx_timestamp(adapter, skb);
1665 
1666 			skb->protocol = eth_type_trans(skb, netdev);
1667 			if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
1668 				skb->ip_summed = CHECKSUM_UNNECESSARY;
1669 			else
1670 				skb->ip_summed = CHECKSUM_NONE;
1671 
1672 			napi_gro_receive(&adapter->napi, skb);
1673 			(*work_done)++;
1674 			netdev_dbg(netdev,
1675 				   "Receive skb->ip_summed: %d length: %d\n",
1676 				   skb->ip_summed, length);
1677 		}
1678 		/* return some buffers to hardware, one at a time is too slow */
1679 		if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
1680 			pch_gbe_alloc_rx_buffers(adapter, rx_ring,
1681 						 cleaned_count);
1682 			cleaned_count = 0;
1683 		}
1684 		if (++i == rx_ring->count)
1685 			i = 0;
1686 	}
1687 	rx_ring->next_to_clean = i;
1688 	if (cleaned_count)
1689 		pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
1690 	return cleaned;
1691 }
1692 
1693 /**
1694  * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
1695  * @adapter:  Board private structure
1696  * @tx_ring:  Tx descriptor ring (for a specific queue) to setup
1697  * Returns:
1698  *	0:		Successfully
1699  *	Negative value:	Failed
1700  */
1701 int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1702 				struct pch_gbe_tx_ring *tx_ring)
1703 {
1704 	struct pci_dev *pdev = adapter->pdev;
1705 	struct pch_gbe_tx_desc *tx_desc;
1706 	int size;
1707 	int desNo;
1708 
1709 	size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
1710 	tx_ring->buffer_info = vzalloc(size);
1711 	if (!tx_ring->buffer_info)
1712 		return -ENOMEM;
1713 
1714 	tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1715 
1716 	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1717 					   &tx_ring->dma, GFP_KERNEL);
1718 	if (!tx_ring->desc) {
1719 		vfree(tx_ring->buffer_info);
1720 		return -ENOMEM;
1721 	}
1722 
1723 	tx_ring->next_to_use = 0;
1724 	tx_ring->next_to_clean = 0;
1725 
1726 	for (desNo = 0; desNo < tx_ring->count; desNo++) {
1727 		tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
1728 		tx_desc->gbec_status = DSC_INIT16;
1729 	}
1730 	netdev_dbg(adapter->netdev,
1731 		   "tx_ring->desc = 0x%p  tx_ring->dma = 0x%08llx next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
1732 		   tx_ring->desc, (unsigned long long)tx_ring->dma,
1733 		   tx_ring->next_to_clean, tx_ring->next_to_use);
1734 	return 0;
1735 }
1736 
1737 /**
1738  * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
1739  * @adapter:  Board private structure
1740  * @rx_ring:  Rx descriptor ring (for a specific queue) to setup
1741  * Returns:
1742  *	0:		Successfully
1743  *	Negative value:	Failed
1744  */
1745 int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1746 				struct pch_gbe_rx_ring *rx_ring)
1747 {
1748 	struct pci_dev *pdev = adapter->pdev;
1749 	struct pch_gbe_rx_desc *rx_desc;
1750 	int size;
1751 	int desNo;
1752 
1753 	size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
1754 	rx_ring->buffer_info = vzalloc(size);
1755 	if (!rx_ring->buffer_info)
1756 		return -ENOMEM;
1757 
1758 	rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
1761 	if (!rx_ring->desc) {
1762 		vfree(rx_ring->buffer_info);
1763 		return -ENOMEM;
1764 	}
1765 	rx_ring->next_to_clean = 0;
1766 	rx_ring->next_to_use = 0;
1767 	for (desNo = 0; desNo < rx_ring->count; desNo++) {
1768 		rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
1769 		rx_desc->gbec_status = DSC_INIT16;
1770 	}
1771 	netdev_dbg(adapter->netdev,
1772 		   "rx_ring->desc = 0x%p  rx_ring->dma = 0x%08llx next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
1773 		   rx_ring->desc, (unsigned long long)rx_ring->dma,
1774 		   rx_ring->next_to_clean, rx_ring->next_to_use);
1775 	return 0;
1776 }
1777 
1778 /**
1779  * pch_gbe_free_tx_resources - Free Tx Resources
1780  * @adapter:  Board private structure
1781  * @tx_ring:  Tx descriptor ring for a specific queue
1782  */
1783 void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
1784 				struct pch_gbe_tx_ring *tx_ring)
1785 {
1786 	struct pci_dev *pdev = adapter->pdev;
1787 
1788 	pch_gbe_clean_tx_ring(adapter, tx_ring);
1789 	vfree(tx_ring->buffer_info);
1790 	tx_ring->buffer_info = NULL;
1791 	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1792 			  tx_ring->dma);
1793 	tx_ring->desc = NULL;
1794 }
1795 
1796 /**
1797  * pch_gbe_free_rx_resources - Free Rx Resources
1798  * @adapter:  Board private structure
1799  * @rx_ring:  Ring to clean the resources from
1800  */
1801 void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
1802 				struct pch_gbe_rx_ring *rx_ring)
1803 {
1804 	struct pci_dev *pdev = adapter->pdev;
1805 
1806 	pch_gbe_clean_rx_ring(adapter, rx_ring);
1807 	vfree(rx_ring->buffer_info);
1808 	rx_ring->buffer_info = NULL;
1809 	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
1810 			  rx_ring->dma);
1811 	rx_ring->desc = NULL;
1812 }
1813 
1814 /**
1815  * pch_gbe_request_irq - Allocate an interrupt line
1816  * @adapter:  Board private structure
1817  * Returns:
1818  *	0:		Successfully
1819  *	Negative value:	Failed
1820  */
1821 static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
1822 {
1823 	struct net_device *netdev = adapter->netdev;
1824 	int err;
1825 
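	/* Request a single vector, preferring MSI(-X) and falling back to a
	 * legacy (INTx) interrupt if message-signalled interrupts are not
	 * available.
	 */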
1826 	err = pci_alloc_irq_vectors(adapter->pdev, 1, 1, PCI_IRQ_ALL_TYPES);
1827 	if (err < 0)
1828 		return err;
1829 
1830 	adapter->irq = pci_irq_vector(adapter->pdev, 0);
1831 
1832 	err = request_irq(adapter->irq, &pch_gbe_intr, IRQF_SHARED,
1833 			  netdev->name, netdev);
1834 	if (err)
		netdev_err(netdev, "Unable to allocate interrupt, error: %d\n",
1836 			   err);
1837 	netdev_dbg(netdev, "have_msi : %d  return : 0x%04x\n",
1838 		   pci_dev_msi_enabled(adapter->pdev), err);
1839 	return err;
1840 }
1841 
1842 /**
 * pch_gbe_up - Bring the GbE network device up
1844  * @adapter:  Board private structure
1845  * Returns:
1846  *	0:		Successfully
1847  *	Negative value:	Failed
1848  */
1849 int pch_gbe_up(struct pch_gbe_adapter *adapter)
1850 {
1851 	struct net_device *netdev = adapter->netdev;
1852 	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
1853 	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
1854 	int err = -EINVAL;
1855 
1856 	/* Ensure we have a valid MAC */
1857 	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
1858 		netdev_err(netdev, "Error: Invalid MAC address\n");
1859 		goto out;
1860 	}
1861 
1862 	/* hardware has been reset, we need to reload some things */
1863 	pch_gbe_set_multi(netdev);
1864 
1865 	pch_gbe_setup_tctl(adapter);
1866 	pch_gbe_configure_tx(adapter);
1867 	pch_gbe_setup_rctl(adapter);
1868 	pch_gbe_configure_rx(adapter);
1869 
1870 	err = pch_gbe_request_irq(adapter);
1871 	if (err) {
1872 		netdev_err(netdev,
1873 			   "Error: can't bring device up - irq request failed\n");
1874 		goto out;
1875 	}
1876 	err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
1877 	if (err) {
1878 		netdev_err(netdev,
1879 			   "Error: can't bring device up - alloc rx buffers pool failed\n");
1880 		goto freeirq;
1881 	}
1882 	pch_gbe_alloc_tx_buffers(adapter, tx_ring);
1883 	pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
1884 	adapter->tx_queue_len = netdev->tx_queue_len;
1885 	pch_gbe_enable_dma_rx(&adapter->hw);
1886 	pch_gbe_enable_mac_rx(&adapter->hw);
1887 
1888 	mod_timer(&adapter->watchdog_timer, jiffies);
1889 
1890 	napi_enable(&adapter->napi);
1891 	pch_gbe_irq_enable(adapter);
1892 	netif_start_queue(adapter->netdev);
1893 
1894 	return 0;
1895 
1896 freeirq:
1897 	pch_gbe_free_irq(adapter);
1898 out:
1899 	return err;
1900 }
1901 
1902 /**
 * pch_gbe_down - Bring the GbE network device down
1904  * @adapter:  Board private structure
1905  */
1906 void pch_gbe_down(struct pch_gbe_adapter *adapter)
1907 {
1908 	struct net_device *netdev = adapter->netdev;
1909 	struct pci_dev *pdev = adapter->pdev;
1910 	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
1911 
1912 	/* signal that we're down so the interrupt handler does not
1913 	 * reschedule our watchdog timer */
1914 	napi_disable(&adapter->napi);
1915 	atomic_set(&adapter->irq_sem, 0);
1916 
1917 	pch_gbe_irq_disable(adapter);
1918 	pch_gbe_free_irq(adapter);
1919 
1920 	del_timer_sync(&adapter->watchdog_timer);
1921 
1922 	netdev->tx_queue_len = adapter->tx_queue_len;
1923 	netif_carrier_off(netdev);
1924 	netif_stop_queue(netdev);
1925 
1926 	if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
1927 		pch_gbe_reset(adapter);
1928 	pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
1929 	pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
1930 
1931 	dma_free_coherent(&adapter->pdev->dev, rx_ring->rx_buff_pool_size,
1932 			  rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
1933 	rx_ring->rx_buff_pool_logic = 0;
1934 	rx_ring->rx_buff_pool_size = 0;
1935 	rx_ring->rx_buff_pool = NULL;
1936 }
1937 
1938 /**
1939  * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
1940  * @adapter:  Board private structure to initialize
1941  * Returns:
1942  *	0:		Successfully
1943  *	Negative value:	Failed
1944  */
1945 static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
1946 {
1947 	struct pch_gbe_hw *hw = &adapter->hw;
1948 	struct net_device *netdev = adapter->netdev;
1949 
1950 	adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
1951 	hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1952 	hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1953 	hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US;
1954 
1955 	if (pch_gbe_alloc_queues(adapter)) {
1956 		netdev_err(netdev, "Unable to allocate memory for queues\n");
1957 		return -ENOMEM;
1958 	}
1959 	spin_lock_init(&adapter->hw.miim_lock);
1960 	spin_lock_init(&adapter->stats_lock);
1961 	spin_lock_init(&adapter->ethtool_lock);
1962 	atomic_set(&adapter->irq_sem, 0);
1963 	pch_gbe_irq_disable(adapter);
1964 
1965 	pch_gbe_init_stats(adapter);
1966 
1967 	netdev_dbg(netdev,
1968 		   "rx_buffer_len : %d  mac.min_frame_size : %d  mac.max_frame_size : %d\n",
1969 		   (u32) adapter->rx_buffer_len,
1970 		   hw->mac.min_frame_size, hw->mac.max_frame_size);
1971 	return 0;
1972 }
1973 
1974 /**
1975  * pch_gbe_open - Called when a network interface is made active
1976  * @netdev:	Network interface device structure
1977  * Returns:
1978  *	0:		Successfully
1979  *	Negative value:	Failed
1980  */
1981 static int pch_gbe_open(struct net_device *netdev)
1982 {
1983 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1984 	struct pch_gbe_hw *hw = &adapter->hw;
1985 	int err;
1986 
1987 	/* allocate transmit descriptors */
1988 	err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
1989 	if (err)
1990 		goto err_setup_tx;
1991 	/* allocate receive descriptors */
1992 	err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
1993 	if (err)
1994 		goto err_setup_rx;
1995 	pch_gbe_phy_power_up(hw);
1996 	err = pch_gbe_up(adapter);
1997 	if (err)
1998 		goto err_up;
1999 	netdev_dbg(netdev, "Success End\n");
2000 	return 0;
2001 
2002 err_up:
2003 	if (!adapter->wake_up_evt)
2004 		pch_gbe_phy_power_down(hw);
2005 	pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
2006 err_setup_rx:
2007 	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
2008 err_setup_tx:
2009 	pch_gbe_reset(adapter);
2010 	netdev_err(netdev, "Error End\n");
2011 	return err;
2012 }
2013 
2014 /**
2015  * pch_gbe_stop - Disables a network interface
2016  * @netdev:  Network interface device structure
2017  * Returns:
2018  *	0: Successfully
2019  */
2020 static int pch_gbe_stop(struct net_device *netdev)
2021 {
2022 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2023 	struct pch_gbe_hw *hw = &adapter->hw;
2024 
2025 	pch_gbe_down(adapter);
2026 	if (!adapter->wake_up_evt)
2027 		pch_gbe_phy_power_down(hw);
2028 	pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
2029 	pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
2030 	return 0;
2031 }
2032 
2033 /**
 * pch_gbe_xmit_frame - Start transmission of a packet
 * @skb:     Socket buffer structure
 * @netdev:  Network interface device structure
 * Returns:
 *	- NETDEV_TX_OK:   Frame queued for transmission
 *	- NETDEV_TX_BUSY: No free descriptors; the stack will retry
2040  */
2041 static netdev_tx_t pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2042 {
2043 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2044 	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
2045 
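	/* No free descriptors: stop the queue and ask the stack to retry
	 * the skb later.
	 */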
2046 	if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
2047 		netif_stop_queue(netdev);
2048 		netdev_dbg(netdev,
			   "Return : BUSY  next_to_use : 0x%08x  next_to_clean : 0x%08x\n",
2050 			   tx_ring->next_to_use, tx_ring->next_to_clean);
2051 		return NETDEV_TX_BUSY;
2052 	}
2053 
	/* CRC and ITAG are not supported */
2055 	pch_gbe_tx_queue(adapter, tx_ring, skb);
2056 	return NETDEV_TX_OK;
2057 }
2058 
2059 /**
2060  * pch_gbe_set_multi - Multicast and Promiscuous mode set
2061  * @netdev:   Network interface device structure
2062  */
2063 static void pch_gbe_set_multi(struct net_device *netdev)
2064 {
2065 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2066 	struct pch_gbe_hw *hw = &adapter->hw;
2067 	struct netdev_hw_addr *ha;
2068 	u32 rctl, adrmask;
2069 	int mc_count, i;
2070 
2071 	netdev_dbg(netdev, "netdev->flags : 0x%08x\n", netdev->flags);
2072 
2073 	/* By default enable address & multicast filtering */
2074 	rctl = ioread32(&hw->reg->RX_MODE);
2075 	rctl |= PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN;
2076 
2077 	/* Promiscuous mode disables all hardware address filtering */
2078 	if (netdev->flags & IFF_PROMISC)
2079 		rctl &= ~(PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
2080 
2081 	/* If we want to monitor more multicast addresses than the hardware can
2082 	 * support then disable hardware multicast filtering.
2083 	 */
2084 	mc_count = netdev_mc_count(netdev);
2085 	if ((netdev->flags & IFF_ALLMULTI) || mc_count >= PCH_GBE_MAR_ENTRIES)
2086 		rctl &= ~PCH_GBE_MLT_FIL_EN;
2087 
2088 	iowrite32(rctl, &hw->reg->RX_MODE);
2089 
2090 	/* If we're not using multicast filtering then there's no point
2091 	 * configuring the unused MAC address registers.
2092 	 */
2093 	if (!(rctl & PCH_GBE_MLT_FIL_EN))
2094 		return;
2095 
2096 	/* Load the first set of multicast addresses into MAC address registers
2097 	 * for use by hardware filtering.
2098 	 */
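	/* MAC address register 0 holds the station address (see
	 * pch_gbe_set_mac()), so multicast entries start at index 1.
	 */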
2099 	i = 1;
2100 	netdev_for_each_mc_addr(ha, netdev)
2101 		pch_gbe_mac_mar_set(hw, ha->addr, i++);
2102 
2103 	/* If there are spare MAC registers, mask & clear them */
2104 	for (; i < PCH_GBE_MAR_ENTRIES; i++) {
2105 		/* Clear MAC address mask */
2106 		adrmask = ioread32(&hw->reg->ADDR_MASK);
2107 		iowrite32(adrmask | BIT(i), &hw->reg->ADDR_MASK);
2108 		/* wait busy */
2109 		pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
2110 		/* Clear MAC address */
2111 		iowrite32(0, &hw->reg->mac_adr[i].high);
2112 		iowrite32(0, &hw->reg->mac_adr[i].low);
2113 	}
2114 
2115 	netdev_dbg(netdev,
		 "RX_MODE reg (bit31 ADD, bit30 MLT) : 0x%08x  mc_count : 0x%08x\n",
2117 		 ioread32(&hw->reg->RX_MODE), mc_count);
2118 }
2119 
2120 /**
2121  * pch_gbe_set_mac - Change the Ethernet Address of the NIC
2122  * @netdev: Network interface device structure
2123  * @addr:   Pointer to an address structure
2124  * Returns:
2125  *	0:		Successfully
2126  *	-EADDRNOTAVAIL:	Failed
2127  */
2128 static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
2129 {
2130 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2131 	struct sockaddr *skaddr = addr;
2132 	int ret_val;
2133 
2134 	if (!is_valid_ether_addr(skaddr->sa_data)) {
2135 		ret_val = -EADDRNOTAVAIL;
2136 	} else {
2137 		eth_hw_addr_set(netdev, skaddr->sa_data);
2138 		memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
2139 		pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2140 		ret_val = 0;
2141 	}
2142 	netdev_dbg(netdev, "ret_val : 0x%08x\n", ret_val);
2143 	netdev_dbg(netdev, "dev_addr : %pM\n", netdev->dev_addr);
2144 	netdev_dbg(netdev, "mac_addr : %pM\n", adapter->hw.mac.addr);
2145 	netdev_dbg(netdev, "MAC_ADR1AB reg : 0x%08x 0x%08x\n",
2146 		   ioread32(&adapter->hw.reg->mac_adr[0].high),
2147 		   ioread32(&adapter->hw.reg->mac_adr[0].low));
2148 	return ret_val;
2149 }
2150 
2151 /**
2152  * pch_gbe_change_mtu - Change the Maximum Transfer Unit
2153  * @netdev:   Network interface device structure
2154  * @new_mtu:  New value for maximum frame size
2155  * Returns:
2156  *	0:		Successfully
2157  *	-EINVAL:	Failed
2158  */
2159 static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
2160 {
2161 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2162 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2163 	unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
2164 	int err;
2165 
2166 	if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
2167 		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
2168 	else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
2169 		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
2170 	else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
2171 		adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
2172 	else
2173 		adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;
2174 
2175 	if (netif_running(netdev)) {
2176 		pch_gbe_down(adapter);
2177 		err = pch_gbe_up(adapter);
2178 		if (err) {
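			/* Could not bring the device back up with the new
			 * buffer size; restore the old size and retry so the
			 * interface is not left down.
			 */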
2179 			adapter->rx_buffer_len = old_rx_buffer_len;
2180 			pch_gbe_up(adapter);
2181 			return err;
2182 		} else {
2183 			netdev->mtu = new_mtu;
2184 			adapter->hw.mac.max_frame_size = max_frame;
2185 		}
2186 	} else {
2187 		pch_gbe_reset(adapter);
2188 		netdev->mtu = new_mtu;
2189 		adapter->hw.mac.max_frame_size = max_frame;
2190 	}
2191 
2192 	netdev_dbg(netdev,
2193 		   "max_frame : %d  rx_buffer_len : %d  mtu : %d  max_frame_size : %d\n",
2194 		   max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
2195 		   adapter->hw.mac.max_frame_size);
2196 	return 0;
2197 }
2198 
2199 /**
2200  * pch_gbe_set_features - Reset device after features changed
2201  * @netdev:   Network interface device structure
2202  * @features:  New features
2203  * Returns:
2204  *	0:		HW state updated successfully
2205  */
2206 static int pch_gbe_set_features(struct net_device *netdev,
2207 	netdev_features_t features)
2208 {
2209 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2210 	netdev_features_t changed = features ^ netdev->features;
2211 
2212 	if (!(changed & NETIF_F_RXCSUM))
2213 		return 0;
2214 
2215 	if (netif_running(netdev))
2216 		pch_gbe_reinit_locked(adapter);
2217 	else
2218 		pch_gbe_reset(adapter);
2219 
2220 	return 0;
2221 }
2222 
2223 /**
 * pch_gbe_ioctl - Handle ioctl requests (MII and hardware timestamping)
2225  * @netdev:   Network interface device structure
2226  * @ifr:      Pointer to ifr structure
2227  * @cmd:      Control command
2228  * Returns:
2229  *	0:	Successfully
2230  *	Negative value:	Failed
2231  */
2232 static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2233 {
2234 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2235 
2236 	netdev_dbg(netdev, "cmd : 0x%04x\n", cmd);
2237 
2238 	if (cmd == SIOCSHWTSTAMP)
2239 		return hwtstamp_ioctl(netdev, ifr, cmd);
2240 
2241 	return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
2242 }
2243 
2244 /**
2245  * pch_gbe_tx_timeout - Respond to a Tx Hang
2246  * @netdev:   Network interface device structure
2247  * @txqueue: index of hanging queue
2248  */
2249 static void pch_gbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
2250 {
2251 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2252 
2253 	/* Do the reset outside of interrupt context */
2254 	adapter->stats.tx_timeout_count++;
2255 	schedule_work(&adapter->reset_task);
2256 }
2257 
2258 /**
 * pch_gbe_napi_poll - NAPI receive and transmit polling callback
 * @napi:    Pointer to the polling device structure
 * @budget:  The maximum number of packets to process
 * Returns:
 *	The number of packets processed; the full budget is returned while
 *	polling should continue.
2265  */
2266 static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
2267 {
2268 	struct pch_gbe_adapter *adapter =
2269 	    container_of(napi, struct pch_gbe_adapter, napi);
2270 	int work_done = 0;
2271 	bool poll_end_flag = false;
2272 	bool cleaned = false;
2273 
2274 	netdev_dbg(adapter->netdev, "budget : %d\n", budget);
2275 
2276 	pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
2277 	cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
2278 
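	/* If Tx cleaning reported outstanding work, claim the full budget
	 * so NAPI polls again.
	 */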
2279 	if (cleaned)
2280 		work_done = budget;
2281 	/* If no Tx and not enough Rx work done,
2282 	 * exit the polling mode
2283 	 */
2284 	if (work_done < budget)
2285 		poll_end_flag = true;
2286 
2287 	if (poll_end_flag) {
2288 		napi_complete_done(napi, work_done);
2289 		pch_gbe_irq_enable(adapter);
2290 	}
2291 
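	/* Rx DMA may have been stopped by the interrupt handler (e.g. on an
	 * Rx FIFO error); re-enable it now that receive buffers have been
	 * replenished.
	 */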
2292 	if (adapter->rx_stop_flag) {
2293 		adapter->rx_stop_flag = false;
2294 		pch_gbe_enable_dma_rx(&adapter->hw);
2295 	}
2296 
2297 	netdev_dbg(adapter->netdev,
2298 		   "poll_end_flag : %d  work_done : %d  budget : %d\n",
2299 		   poll_end_flag, work_done, budget);
2300 
2301 	return work_done;
2302 }
2303 
2304 #ifdef CONFIG_NET_POLL_CONTROLLER
2305 /**
2306  * pch_gbe_netpoll - Used by things like netconsole to send skbs
2307  * @netdev:  Network interface device structure
2308  */
2309 static void pch_gbe_netpoll(struct net_device *netdev)
2310 {
2311 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2312 
2313 	disable_irq(adapter->irq);
2314 	pch_gbe_intr(adapter->irq, netdev);
2315 	enable_irq(adapter->irq);
2316 }
2317 #endif
2318 
2319 static const struct net_device_ops pch_gbe_netdev_ops = {
2320 	.ndo_open = pch_gbe_open,
2321 	.ndo_stop = pch_gbe_stop,
2322 	.ndo_start_xmit = pch_gbe_xmit_frame,
2323 	.ndo_set_mac_address = pch_gbe_set_mac,
2324 	.ndo_tx_timeout = pch_gbe_tx_timeout,
2325 	.ndo_change_mtu = pch_gbe_change_mtu,
2326 	.ndo_set_features = pch_gbe_set_features,
2327 	.ndo_eth_ioctl = pch_gbe_ioctl,
2328 	.ndo_set_rx_mode = pch_gbe_set_multi,
2329 #ifdef CONFIG_NET_POLL_CONTROLLER
2330 	.ndo_poll_controller = pch_gbe_netpoll,
2331 #endif
2332 };
2333 
2334 static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
2335 						pci_channel_state_t state)
2336 {
2337 	struct net_device *netdev = pci_get_drvdata(pdev);
2338 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2339 
2340 	netif_device_detach(netdev);
2341 	if (netif_running(netdev))
2342 		pch_gbe_down(adapter);
2343 	pci_disable_device(pdev);
	/* Request a slot reset. */
2345 	return PCI_ERS_RESULT_NEED_RESET;
2346 }
2347 
2348 static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
2349 {
2350 	struct net_device *netdev = pci_get_drvdata(pdev);
2351 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2352 	struct pch_gbe_hw *hw = &adapter->hw;
2353 
2354 	if (pci_enable_device(pdev)) {
2355 		netdev_err(netdev, "Cannot re-enable PCI device after reset\n");
2356 		return PCI_ERS_RESULT_DISCONNECT;
2357 	}
2358 	pci_set_master(pdev);
2359 	pci_enable_wake(pdev, PCI_D0, 0);
2360 	pch_gbe_phy_power_up(hw);
2361 	pch_gbe_reset(adapter);
2362 	/* Clear wake up status */
2363 	pch_gbe_mac_set_wol_event(hw, 0);
2364 
2365 	return PCI_ERS_RESULT_RECOVERED;
2366 }
2367 
2368 static void pch_gbe_io_resume(struct pci_dev *pdev)
2369 {
2370 	struct net_device *netdev = pci_get_drvdata(pdev);
2371 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2372 
2373 	if (netif_running(netdev)) {
2374 		if (pch_gbe_up(adapter)) {
2375 			netdev_dbg(netdev,
2376 				   "can't bring device back up after reset\n");
2377 			return;
2378 		}
2379 	}
2380 	netif_device_attach(netdev);
2381 }
2382 
2383 static int __pch_gbe_suspend(struct pci_dev *pdev)
2384 {
2385 	struct net_device *netdev = pci_get_drvdata(pdev);
2386 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2387 	struct pch_gbe_hw *hw = &adapter->hw;
2388 	u32 wufc = adapter->wake_up_evt;
2389 
2390 	netif_device_detach(netdev);
2391 	if (netif_running(netdev))
2392 		pch_gbe_down(adapter);
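	/* With wake-up events configured, keep the receiver set up and arm
	 * wake-on-LAN; otherwise power the PHY down before disabling the
	 * device.
	 */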
2393 	if (wufc) {
2394 		pch_gbe_set_multi(netdev);
2395 		pch_gbe_setup_rctl(adapter);
2396 		pch_gbe_configure_rx(adapter);
2397 		pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
2398 					hw->mac.link_duplex);
2399 		pch_gbe_set_mode(adapter, hw->mac.link_speed,
2400 					hw->mac.link_duplex);
2401 		pch_gbe_mac_set_wol_event(hw, wufc);
2402 		pci_disable_device(pdev);
2403 	} else {
2404 		pch_gbe_phy_power_down(hw);
2405 		pch_gbe_mac_set_wol_event(hw, wufc);
2406 		pci_disable_device(pdev);
2407 	}
2408 	return 0;
2409 }
2410 
2411 #ifdef CONFIG_PM
2412 static int pch_gbe_suspend(struct device *device)
2413 {
2414 	struct pci_dev *pdev = to_pci_dev(device);
2415 
2416 	return __pch_gbe_suspend(pdev);
2417 }
2418 
2419 static int pch_gbe_resume(struct device *device)
2420 {
2421 	struct pci_dev *pdev = to_pci_dev(device);
2422 	struct net_device *netdev = pci_get_drvdata(pdev);
2423 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2424 	struct pch_gbe_hw *hw = &adapter->hw;
	int err;
2426 
2427 	err = pci_enable_device(pdev);
2428 	if (err) {
2429 		netdev_err(netdev, "Cannot enable PCI device from suspend\n");
2430 		return err;
2431 	}
2432 	pci_set_master(pdev);
2433 	pch_gbe_phy_power_up(hw);
2434 	pch_gbe_reset(adapter);
2435 	/* Clear wake on lan control and status */
2436 	pch_gbe_mac_set_wol_event(hw, 0);
2437 
2438 	if (netif_running(netdev))
2439 		pch_gbe_up(adapter);
2440 	netif_device_attach(netdev);
2441 
2442 	return 0;
2443 }
2444 #endif /* CONFIG_PM */
2445 
2446 static void pch_gbe_shutdown(struct pci_dev *pdev)
2447 {
2448 	__pch_gbe_suspend(pdev);
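	/* When powering off, arm PCI wake and drop to D3hot so wake-on-LAN
	 * can still function.
	 */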
2449 	if (system_state == SYSTEM_POWER_OFF) {
2450 		pci_wake_from_d3(pdev, true);
2451 		pci_set_power_state(pdev, PCI_D3hot);
2452 	}
2453 }
2454 
2455 static void pch_gbe_remove(struct pci_dev *pdev)
2456 {
2457 	struct net_device *netdev = pci_get_drvdata(pdev);
2458 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2459 
2460 	cancel_work_sync(&adapter->reset_task);
2461 	unregister_netdev(netdev);
2462 
2463 	pch_gbe_phy_hw_reset(&adapter->hw);
2464 
2465 	free_netdev(netdev);
2466 }
2467 
2468 static int pch_gbe_probe(struct pci_dev *pdev,
2469 			  const struct pci_device_id *pci_id)
2470 {
2471 	struct net_device *netdev;
2472 	struct pch_gbe_adapter *adapter;
2473 	int ret;
2474 
2475 	ret = pcim_enable_device(pdev);
2476 	if (ret)
2477 		return ret;
2478 
2479 	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
2480 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2481 		if (ret) {
2482 			dev_err(&pdev->dev, "ERR: No usable DMA configuration, aborting\n");
2483 			return ret;
2484 		}
2485 	}
2486 
2487 	ret = pcim_iomap_regions(pdev, 1 << PCH_GBE_PCI_BAR, pci_name(pdev));
2488 	if (ret) {
2489 		dev_err(&pdev->dev,
2490 			"ERR: Can't reserve PCI I/O and memory resources\n");
2491 		return ret;
2492 	}
2493 	pci_set_master(pdev);
2494 
2495 	netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
2496 	if (!netdev)
2497 		return -ENOMEM;
2498 	SET_NETDEV_DEV(netdev, &pdev->dev);
2499 
2500 	pci_set_drvdata(pdev, netdev);
2501 	adapter = netdev_priv(netdev);
2502 	adapter->netdev = netdev;
2503 	adapter->pdev = pdev;
2504 	adapter->hw.back = adapter;
2505 	adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR];
2506 
2507 	adapter->pdata = (struct pch_gbe_privdata *)pci_id->driver_data;
2508 	if (adapter->pdata && adapter->pdata->platform_init) {
2509 		ret = adapter->pdata->platform_init(pdev);
2510 		if (ret)
2511 			goto err_free_netdev;
2512 	}
2513 
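	/* The PTP (IEEE 1588) companion device sits at device 12, function 4
	 * on the same PCI bus as the GbE controller.
	 */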
2514 	adapter->ptp_pdev =
2515 		pci_get_domain_bus_and_slot(pci_domain_nr(adapter->pdev->bus),
2516 					    adapter->pdev->bus->number,
2517 					    PCI_DEVFN(12, 4));
2518 
2519 	netdev->netdev_ops = &pch_gbe_netdev_ops;
2520 	netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
2521 	netif_napi_add(netdev, &adapter->napi,
2522 		       pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
2523 	netdev->hw_features = NETIF_F_RXCSUM |
2524 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2525 	netdev->features = netdev->hw_features;
2526 	pch_gbe_set_ethtool_ops(netdev);
2527 
2528 	/* MTU range: 46 - 10300 */
2529 	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
2530 	netdev->max_mtu = PCH_GBE_MAX_JUMBO_FRAME_SIZE -
2531 			  (ETH_HLEN + ETH_FCS_LEN);
2532 
2533 	pch_gbe_mac_load_mac_addr(&adapter->hw);
2534 	pch_gbe_mac_reset_hw(&adapter->hw);
2535 
2536 	/* setup the private structure */
2537 	ret = pch_gbe_sw_init(adapter);
2538 	if (ret)
2539 		goto err_free_netdev;
2540 
2541 	/* Initialize PHY */
2542 	ret = pch_gbe_init_phy(adapter);
2543 	if (ret) {
2544 		dev_err(&pdev->dev, "PHY initialize error\n");
2545 		goto err_free_adapter;
2546 	}
2547 
	/* Read the MAC address and store it in the private data */
2549 	ret = pch_gbe_mac_read_mac_addr(&adapter->hw);
2550 	if (ret) {
2551 		dev_err(&pdev->dev, "MAC address Read Error\n");
2552 		goto err_free_adapter;
2553 	}
2554 
2555 	eth_hw_addr_set(netdev, adapter->hw.mac.addr);
2556 	if (!is_valid_ether_addr(netdev->dev_addr)) {
2557 		/*
2558 		 * If the MAC is invalid (or just missing), display a warning
2559 		 * but do not abort setting up the device. pch_gbe_up will
2560 		 * prevent the interface from being brought up until a valid MAC
2561 		 * is set.
2562 		 */
		dev_err(&pdev->dev,
			"Invalid MAC address, interface disabled.\n");
2565 	}
2566 	timer_setup(&adapter->watchdog_timer, pch_gbe_watchdog, 0);
2567 
2568 	INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
2569 
2570 	pch_gbe_check_options(adapter);
2571 
2572 	/* initialize the wol settings based on the eeprom settings */
2573 	adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
2574 	dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);
2575 
2576 	/* reset the hardware with the new settings */
2577 	pch_gbe_reset(adapter);
2578 
2579 	ret = register_netdev(netdev);
2580 	if (ret)
2581 		goto err_free_adapter;
2582 	/* tell the stack to leave us alone until pch_gbe_open() is called */
2583 	netif_carrier_off(netdev);
2584 	netif_stop_queue(netdev);
2585 
2586 	dev_dbg(&pdev->dev, "PCH Network Connection\n");
2587 
2588 	/* Disable hibernation on certain platforms */
2589 	if (adapter->pdata && adapter->pdata->phy_disable_hibernate)
2590 		pch_gbe_phy_disable_hibernate(&adapter->hw);
2591 
2592 	device_set_wakeup_enable(&pdev->dev, 1);
2593 	return 0;
2594 
2595 err_free_adapter:
2596 	pch_gbe_phy_hw_reset(&adapter->hw);
2597 err_free_netdev:
2598 	free_netdev(netdev);
2599 	return ret;
2600 }
2601 
2602 static void pch_gbe_gpio_remove_table(void *table)
2603 {
2604 	gpiod_remove_lookup_table(table);
2605 }
2606 
2607 static int pch_gbe_gpio_add_table(struct device *dev, void *table)
2608 {
2609 	gpiod_add_lookup_table(table);
2610 	return devm_add_action_or_reset(dev, pch_gbe_gpio_remove_table, table);
2611 }
2612 
2613 static struct gpiod_lookup_table pch_gbe_minnow_gpio_table = {
2614 	.dev_id		= "0000:02:00.1",
2615 	.table		= {
2616 		GPIO_LOOKUP("sch_gpio.33158", 13, NULL, GPIO_ACTIVE_LOW),
2617 		{}
2618 	},
2619 };
2620 
2621 /* The AR803X PHY on the MinnowBoard requires a physical pin to be toggled to
2622  * ensure it is awake for probe and init. Request the line and reset the PHY.
2623  */
2624 static int pch_gbe_minnow_platform_init(struct pci_dev *pdev)
2625 {
2626 	struct gpio_desc *gpiod;
2627 	int ret;
2628 
2629 	ret = pch_gbe_gpio_add_table(&pdev->dev, &pch_gbe_minnow_gpio_table);
2630 	if (ret)
2631 		return ret;
2632 
2633 	gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_OUT_HIGH);
2634 	if (IS_ERR(gpiod))
2635 		return dev_err_probe(&pdev->dev, PTR_ERR(gpiod),
2636 				     "Can't request PHY reset GPIO line\n");
2637 
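	/* Pulse the (active-low) reset line: assert it for at least 1.25 ms,
	 * then release it and give the PHY time to come out of reset.
	 */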
2638 	gpiod_set_value(gpiod, 1);
2639 	usleep_range(1250, 1500);
2640 	gpiod_set_value(gpiod, 0);
2641 	usleep_range(1250, 1500);
2642 
2643 	return ret;
2644 }
2645 
2646 static struct pch_gbe_privdata pch_gbe_minnow_privdata = {
2647 	.phy_tx_clk_delay = true,
2648 	.phy_disable_hibernate = true,
2649 	.platform_init = pch_gbe_minnow_platform_init,
2650 };
2651 
2652 static const struct pci_device_id pch_gbe_pcidev_id[] = {
2653 	{.vendor = PCI_VENDOR_ID_INTEL,
2654 	 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
2655 	 .subvendor = PCI_VENDOR_ID_CIRCUITCO,
2656 	 .subdevice = PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD,
2657 	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2658 	 .class_mask = (0xFFFF00),
2659 	 .driver_data = (kernel_ulong_t)&pch_gbe_minnow_privdata
2660 	 },
2661 	{.vendor = PCI_VENDOR_ID_INTEL,
2662 	 .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
2663 	 .subvendor = PCI_ANY_ID,
2664 	 .subdevice = PCI_ANY_ID,
2665 	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2666 	 .class_mask = (0xFFFF00)
2667 	 },
2668 	{.vendor = PCI_VENDOR_ID_ROHM,
2669 	 .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
2670 	 .subvendor = PCI_ANY_ID,
2671 	 .subdevice = PCI_ANY_ID,
2672 	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2673 	 .class_mask = (0xFFFF00)
2674 	 },
2675 	{.vendor = PCI_VENDOR_ID_ROHM,
2676 	 .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
2677 	 .subvendor = PCI_ANY_ID,
2678 	 .subdevice = PCI_ANY_ID,
2679 	 .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2680 	 .class_mask = (0xFFFF00)
2681 	 },
2682 	/* required last entry */
2683 	{0}
2684 };
2685 
2686 #ifdef CONFIG_PM
2687 static const struct dev_pm_ops pch_gbe_pm_ops = {
2688 	.suspend = pch_gbe_suspend,
2689 	.resume = pch_gbe_resume,
2690 	.freeze = pch_gbe_suspend,
2691 	.thaw = pch_gbe_resume,
2692 	.poweroff = pch_gbe_suspend,
2693 	.restore = pch_gbe_resume,
2694 };
2695 #endif
2696 
2697 static const struct pci_error_handlers pch_gbe_err_handler = {
2698 	.error_detected = pch_gbe_io_error_detected,
2699 	.slot_reset = pch_gbe_io_slot_reset,
2700 	.resume = pch_gbe_io_resume
2701 };
2702 
2703 static struct pci_driver pch_gbe_driver = {
2704 	.name = KBUILD_MODNAME,
2705 	.id_table = pch_gbe_pcidev_id,
2706 	.probe = pch_gbe_probe,
2707 	.remove = pch_gbe_remove,
2708 #ifdef CONFIG_PM
2709 	.driver.pm = &pch_gbe_pm_ops,
2710 #endif
2711 	.shutdown = pch_gbe_shutdown,
2712 	.err_handler = &pch_gbe_err_handler
2713 };
2714 module_pci_driver(pch_gbe_driver);
2715 
MODULE_DESCRIPTION("EG20T PCH Gigabit Ethernet Driver");
2717 MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
2718 MODULE_LICENSE("GPL");
2719 MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
2720 
2721 /* pch_gbe_main.c */
2722