1 /*
2  * Copyright(c) 2007 Atheros Corporation. All rights reserved.
3  *
4  * Derived from Intel e1000 driver
5  * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms of the GNU General Public License as published by the Free
9  * Software Foundation; either version 2 of the License, or (at your option)
10  * any later version.
11  *
12  * This program is distributed in the hope that it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License along with
18  * this program; if not, write to the Free Software Foundation, Inc., 59
19  * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
20  */
21 
22 #include "atl1e.h"
23 
24 #define DRV_VERSION "1.0.0.7-NAPI"
25 
26 char atl1e_driver_name[] = "ATL1E";
27 char atl1e_driver_version[] = DRV_VERSION;
28 #define PCI_DEVICE_ID_ATTANSIC_L1E      0x1026
29 /*
30  * atl1e_pci_tbl - PCI Device ID Table
31  *
32  * Wildcard entries (PCI_ANY_ID) should come last
33  * Last entry must be all 0s
34  *
35  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
36  *   Class, Class Mask, private data (not used) }
37  */
38 static DEFINE_PCI_DEVICE_TABLE(atl1e_pci_tbl) = {
39 	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)},
40 	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)},
41 	/* required last entry */
42 	{ 0 }
43 };
44 MODULE_DEVICE_TABLE(pci, atl1e_pci_tbl);
45 
46 MODULE_AUTHOR("Atheros Corporation, <xiong.huang@atheros.com>, Jie Yang <jie.yang@atheros.com>");
47 MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver");
48 MODULE_LICENSE("GPL");
49 MODULE_VERSION(DRV_VERSION);
50 
51 static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter);
52 
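/*
 * Per-queue register lookup tables: each RX queue owns two host memory
 * pages that the hardware fills alternately (ping-pong), tracked by the
 * rx_using field of the RX page descriptor.
 */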
53 static const u16
54 atl1e_rx_page_vld_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] =
55 {
56 	{REG_HOST_RXF0_PAGE0_VLD, REG_HOST_RXF0_PAGE1_VLD},
57 	{REG_HOST_RXF1_PAGE0_VLD, REG_HOST_RXF1_PAGE1_VLD},
58 	{REG_HOST_RXF2_PAGE0_VLD, REG_HOST_RXF2_PAGE1_VLD},
59 	{REG_HOST_RXF3_PAGE0_VLD, REG_HOST_RXF3_PAGE1_VLD}
60 };
61 
62 static const u16 atl1e_rx_page_hi_addr_regs[AT_MAX_RECEIVE_QUEUE] =
63 {
64 	REG_RXF0_BASE_ADDR_HI,
65 	REG_RXF1_BASE_ADDR_HI,
66 	REG_RXF2_BASE_ADDR_HI,
67 	REG_RXF3_BASE_ADDR_HI
68 };
69 
70 static const u16
71 atl1e_rx_page_lo_addr_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] =
72 {
73 	{REG_HOST_RXF0_PAGE0_LO, REG_HOST_RXF0_PAGE1_LO},
74 	{REG_HOST_RXF1_PAGE0_LO, REG_HOST_RXF1_PAGE1_LO},
75 	{REG_HOST_RXF2_PAGE0_LO, REG_HOST_RXF2_PAGE1_LO},
76 	{REG_HOST_RXF3_PAGE0_LO, REG_HOST_RXF3_PAGE1_LO}
77 };
78 
79 static const u16
80 atl1e_rx_page_write_offset_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] =
81 {
82 	{REG_HOST_RXF0_MB0_LO,  REG_HOST_RXF0_MB1_LO},
83 	{REG_HOST_RXF1_MB0_LO,  REG_HOST_RXF1_MB1_LO},
84 	{REG_HOST_RXF2_MB0_LO,  REG_HOST_RXF2_MB1_LO},
85 	{REG_HOST_RXF3_MB0_LO,  REG_HOST_RXF3_MB1_LO}
86 };
87 
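/* DMA read request burst length in bytes, indexed by hw->dmar_block
 * (see atl1e_configure_tx below)
 */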
88 static const u16 atl1e_pay_load_size[] = {
89 	128, 256, 512, 1024, 2048, 4096,
90 };
91 
92 /**
93  * atl1e_irq_enable - Enable default interrupt generation settings
94  * @adapter: board private structure
95  */
96 static inline void atl1e_irq_enable(struct atl1e_adapter *adapter)
97 {
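	/*
	 * irq_sem counts nested atl1e_irq_disable() calls; interrupts are
	 * only re-enabled once every disable has been balanced.
	 */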
98 	if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
99 		AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
100 		AT_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK);
101 		AT_WRITE_FLUSH(&adapter->hw);
102 	}
103 }
104 
105 /**
106  * atl1e_irq_disable - Mask off interrupt generation on the NIC
107  * @adapter: board private structure
108  */
109 static inline void atl1e_irq_disable(struct atl1e_adapter *adapter)
110 {
111 	atomic_inc(&adapter->irq_sem);
112 	AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
113 	AT_WRITE_FLUSH(&adapter->hw);
114 	synchronize_irq(adapter->pdev->irq);
115 }
116 
117 /**
118  * atl1e_irq_reset - reset interrupt configuration on the NIC
119  * @adapter: board private structure
120  */
121 static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
122 {
123 	atomic_set(&adapter->irq_sem, 0);
124 	AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
125 	AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
126 	AT_WRITE_FLUSH(&adapter->hw);
127 }
128 
129 /**
130  * atl1e_phy_config - Timer Call-back
131  * @data: pointer to the adapter cast into an unsigned long
132  */
133 static void atl1e_phy_config(unsigned long data)
134 {
135 	struct atl1e_adapter *adapter = (struct atl1e_adapter *) data;
136 	struct atl1e_hw *hw = &adapter->hw;
137 	unsigned long flags;
138 
139 	spin_lock_irqsave(&adapter->mdio_lock, flags);
140 	atl1e_restart_autoneg(hw);
141 	spin_unlock_irqrestore(&adapter->mdio_lock, flags);
142 }
143 
144 void atl1e_reinit_locked(struct atl1e_adapter *adapter)
145 {
146 
147 	WARN_ON(in_interrupt());
148 	while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
149 		msleep(1);
150 	atl1e_down(adapter);
151 	atl1e_up(adapter);
152 	clear_bit(__AT_RESETTING, &adapter->flags);
153 }
154 
155 static void atl1e_reset_task(struct work_struct *work)
156 {
157 	struct atl1e_adapter *adapter;
158 	adapter = container_of(work, struct atl1e_adapter, reset_task);
159 
160 	atl1e_reinit_locked(adapter);
161 }
162 
163 static int atl1e_check_link(struct atl1e_adapter *adapter)
164 {
165 	struct atl1e_hw *hw = &adapter->hw;
166 	struct net_device *netdev = adapter->netdev;
167 	int err = 0;
168 	u16 speed, duplex, phy_data;
169 
170 	/* MII_BMSR must read twice */
171 	atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
172 	atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
173 	if ((phy_data & BMSR_LSTATUS) == 0) {
174 		/* link down */
175 		if (netif_carrier_ok(netdev)) { /* old link state: Up */
176 			u32 value;
177 			/* disable rx */
178 			value = AT_READ_REG(hw, REG_MAC_CTRL);
179 			value &= ~MAC_CTRL_RX_EN;
180 			AT_WRITE_REG(hw, REG_MAC_CTRL, value);
181 			adapter->link_speed = SPEED_0;
182 			netif_carrier_off(netdev);
183 			netif_stop_queue(netdev);
184 		}
185 	} else {
186 		/* Link Up */
187 		err = atl1e_get_speed_and_duplex(hw, &speed, &duplex);
188 		if (unlikely(err))
189 			return err;
190 
191 		/* link result is our setting */
192 		if (adapter->link_speed != speed ||
193 		    adapter->link_duplex != duplex) {
194 			adapter->link_speed  = speed;
195 			adapter->link_duplex = duplex;
196 			atl1e_setup_mac_ctrl(adapter);
197 			netdev_info(netdev,
198 				    "NIC Link is Up <%d Mbps %s Duplex>\n",
199 				    adapter->link_speed,
200 				    adapter->link_duplex == FULL_DUPLEX ?
201 				    "Full" : "Half");
202 		}
203 
204 		if (!netif_carrier_ok(netdev)) {
205 			/* Link down -> Up */
206 			netif_carrier_on(netdev);
207 			netif_wake_queue(netdev);
208 		}
209 	}
210 	return 0;
211 }
212 
213 /**
214  * atl1e_link_chg_task - deal with a link change event out of interrupt context
215  * @work: work struct embedded in the adapter structure
216  */
217 static void atl1e_link_chg_task(struct work_struct *work)
218 {
219 	struct atl1e_adapter *adapter;
220 	unsigned long flags;
221 
222 	adapter = container_of(work, struct atl1e_adapter, link_chg_task);
223 	spin_lock_irqsave(&adapter->mdio_lock, flags);
224 	atl1e_check_link(adapter);
225 	spin_unlock_irqrestore(&adapter->mdio_lock, flags);
226 }
227 
228 static void atl1e_link_chg_event(struct atl1e_adapter *adapter)
229 {
230 	struct net_device *netdev = adapter->netdev;
231 	u16 phy_data = 0;
232 	u16 link_up = 0;
233 
234 	spin_lock(&adapter->mdio_lock);
235 	atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
236 	atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
237 	spin_unlock(&adapter->mdio_lock);
238 	link_up = phy_data & BMSR_LSTATUS;
239 	/* notify upper layer link down ASAP */
240 	if (!link_up) {
241 		if (netif_carrier_ok(netdev)) {
242 			/* old link state: Up */
243 			netdev_info(netdev, "NIC Link is Down\n");
244 			adapter->link_speed = SPEED_0;
245 			netif_stop_queue(netdev);
246 		}
247 	}
248 	schedule_work(&adapter->link_chg_task);
249 }
250 
251 static void atl1e_del_timer(struct atl1e_adapter *adapter)
252 {
253 	del_timer_sync(&adapter->phy_config_timer);
254 }
255 
256 static void atl1e_cancel_work(struct atl1e_adapter *adapter)
257 {
258 	cancel_work_sync(&adapter->reset_task);
259 	cancel_work_sync(&adapter->link_chg_task);
260 }
261 
262 /**
263  * atl1e_tx_timeout - Respond to a Tx Hang
264  * @netdev: network interface device structure
265  */
266 static void atl1e_tx_timeout(struct net_device *netdev)
267 {
268 	struct atl1e_adapter *adapter = netdev_priv(netdev);
269 
270 	/* Do the reset outside of interrupt context */
271 	schedule_work(&adapter->reset_task);
272 }
273 
274 /**
275  * atl1e_set_multi - Multicast and Promiscuous mode set
276  * @netdev: network interface device structure
277  *
278  * The set_multi entry point is called whenever the multicast address
279  * list or the network interface flags are updated.  This routine is
280  * responsible for configuring the hardware for proper multicast,
281  * promiscuous mode, and all-multi behavior.
282  */
283 static void atl1e_set_multi(struct net_device *netdev)
284 {
285 	struct atl1e_adapter *adapter = netdev_priv(netdev);
286 	struct atl1e_hw *hw = &adapter->hw;
287 	struct netdev_hw_addr *ha;
288 	u32 mac_ctrl_data = 0;
289 	u32 hash_value;
290 
291 	/* Check for Promiscuous and All Multicast modes */
292 	mac_ctrl_data = AT_READ_REG(hw, REG_MAC_CTRL);
293 
294 	if (netdev->flags & IFF_PROMISC) {
295 		mac_ctrl_data |= MAC_CTRL_PROMIS_EN;
296 	} else if (netdev->flags & IFF_ALLMULTI) {
297 		mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
298 		mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN;
299 	} else {
300 		mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
301 	}
302 
303 	AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
304 
305 	/* clear the old settings from the multicast hash table */
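	/* the hash table spans two 32-bit registers, so clear both halves */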
306 	AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
307 	AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
308 
309 	/* compute mc addresses' hash values, and put them into the hash table */
310 	netdev_for_each_mc_addr(ha, netdev) {
311 		hash_value = atl1e_hash_mc_addr(hw, ha->addr);
312 		atl1e_hash_set(hw, hash_value);
313 	}
314 }
315 
316 static void __atl1e_rx_mode(netdev_features_t features, u32 *mac_ctrl_data)
317 {
318 
319 	if (features & NETIF_F_RXALL) {
320 		/* enable RX of ALL frames */
321 		*mac_ctrl_data |= MAC_CTRL_DBG;
322 	} else {
323 		/* disable RX of ALL frames */
324 		*mac_ctrl_data &= ~MAC_CTRL_DBG;
325 	}
326 }
327 
328 static void atl1e_rx_mode(struct net_device *netdev,
329 	netdev_features_t features)
330 {
331 	struct atl1e_adapter *adapter = netdev_priv(netdev);
332 	u32 mac_ctrl_data = 0;
333 
334 	netdev_dbg(adapter->netdev, "%s\n", __func__);
335 
336 	atl1e_irq_disable(adapter);
337 	mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL);
338 	__atl1e_rx_mode(features, &mac_ctrl_data);
339 	AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
340 	atl1e_irq_enable(adapter);
341 }
342 
343 
344 static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
345 {
346 	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
347 		/* enable VLAN tag insert/strip */
348 		*mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
349 	} else {
350 		/* disable VLAN tag insert/strip */
351 		*mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN;
352 	}
353 }
354 
355 static void atl1e_vlan_mode(struct net_device *netdev,
356 	netdev_features_t features)
357 {
358 	struct atl1e_adapter *adapter = netdev_priv(netdev);
359 	u32 mac_ctrl_data = 0;
360 
361 	netdev_dbg(adapter->netdev, "%s\n", __func__);
362 
363 	atl1e_irq_disable(adapter);
364 	mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL);
365 	__atl1e_vlan_mode(features, &mac_ctrl_data);
366 	AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
367 	atl1e_irq_enable(adapter);
368 }
369 
370 static void atl1e_restore_vlan(struct atl1e_adapter *adapter)
371 {
372 	netdev_dbg(adapter->netdev, "%s\n", __func__);
373 	atl1e_vlan_mode(adapter->netdev, adapter->netdev->features);
374 }
375 
376 /**
377  * atl1e_set_mac_addr - Change the Ethernet Address of the NIC
378  * @netdev: network interface device structure
379  * @p: pointer to an address structure
380  *
381  * Returns 0 on success, negative on failure
382  */
383 static int atl1e_set_mac_addr(struct net_device *netdev, void *p)
384 {
385 	struct atl1e_adapter *adapter = netdev_priv(netdev);
386 	struct sockaddr *addr = p;
387 
388 	if (!is_valid_ether_addr(addr->sa_data))
389 		return -EADDRNOTAVAIL;
390 
391 	if (netif_running(netdev))
392 		return -EBUSY;
393 
394 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
395 	memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
396 
397 	atl1e_hw_set_mac_addr(&adapter->hw);
398 
399 	return 0;
400 }
401 
402 static netdev_features_t atl1e_fix_features(struct net_device *netdev,
403 	netdev_features_t features)
404 {
405 	/*
406 	 * Since there is no support for separate rx/tx vlan accel
407 	 * enable/disable make sure tx flag is always in same state as rx.
408 	 */
409 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
410 		features |= NETIF_F_HW_VLAN_CTAG_TX;
411 	else
412 		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
413 
414 	return features;
415 }
416 
417 static int atl1e_set_features(struct net_device *netdev,
418 	netdev_features_t features)
419 {
420 	netdev_features_t changed = netdev->features ^ features;
421 
422 	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
423 		atl1e_vlan_mode(netdev, features);
424 
425 	if (changed & NETIF_F_RXALL)
426 		atl1e_rx_mode(netdev, features);
427 
428 
429 	return 0;
430 }
431 
432 /**
433  * atl1e_change_mtu - Change the Maximum Transmission Unit
434  * @netdev: network interface device structure
435  * @new_mtu: new value for maximum frame size
436  *
437  * Returns 0 on success, negative on failure
438  */
439 static int atl1e_change_mtu(struct net_device *netdev, int new_mtu)
440 {
441 	struct atl1e_adapter *adapter = netdev_priv(netdev);
442 	int old_mtu   = netdev->mtu;
443 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
444 
445 	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
446 			(max_frame > MAX_JUMBO_FRAME_SIZE)) {
447 		netdev_warn(adapter->netdev, "invalid MTU setting\n");
448 		return -EINVAL;
449 	}
450 	/* set MTU */
451 	if (old_mtu != new_mtu && netif_running(netdev)) {
452 		while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
453 			msleep(1);
454 		netdev->mtu = new_mtu;
455 		adapter->hw.max_frame_size = new_mtu;
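		/* rx_jumbo_th is kept in 8-byte (QWORD) units */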
456 		adapter->hw.rx_jumbo_th = (max_frame + 7) >> 3;
457 		atl1e_down(adapter);
458 		atl1e_up(adapter);
459 		clear_bit(__AT_RESETTING, &adapter->flags);
460 	}
461 	return 0;
462 }
463 
464 /*
465  *  caller should hold mdio_lock
466  */
467 static int atl1e_mdio_read(struct net_device *netdev, int phy_id, int reg_num)
468 {
469 	struct atl1e_adapter *adapter = netdev_priv(netdev);
470 	u16 result;
471 
472 	atl1e_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result);
473 	return result;
474 }
475 
476 static void atl1e_mdio_write(struct net_device *netdev, int phy_id,
477 			     int reg_num, int val)
478 {
479 	struct atl1e_adapter *adapter = netdev_priv(netdev);
480 
481 	atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val);
482 }
483 
484 static int atl1e_mii_ioctl(struct net_device *netdev,
485 			   struct ifreq *ifr, int cmd)
486 {
487 	struct atl1e_adapter *adapter = netdev_priv(netdev);
488 	struct mii_ioctl_data *data = if_mii(ifr);
489 	unsigned long flags;
490 	int retval = 0;
491 
492 	if (!netif_running(netdev))
493 		return -EINVAL;
494 
495 	spin_lock_irqsave(&adapter->mdio_lock, flags);
496 	switch (cmd) {
497 	case SIOCGMIIPHY:
498 		data->phy_id = 0;
499 		break;
500 
501 	case SIOCGMIIREG:
502 		if (atl1e_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
503 				    &data->val_out)) {
504 			retval = -EIO;
505 			goto out;
506 		}
507 		break;
508 
509 	case SIOCSMIIREG:
510 		if (data->reg_num & ~(0x1F)) {
511 			retval = -EFAULT;
512 			goto out;
513 		}
514 
515 		netdev_dbg(adapter->netdev, "<atl1e_mii_ioctl> write %x %x\n",
516 			   data->reg_num, data->val_in);
517 		if (atl1e_write_phy_reg(&adapter->hw,
518 				     data->reg_num, data->val_in)) {
519 			retval = -EIO;
520 			goto out;
521 		}
522 		break;
523 
524 	default:
525 		retval = -EOPNOTSUPP;
526 		break;
527 	}
528 out:
529 	spin_unlock_irqrestore(&adapter->mdio_lock, flags);
530 	return retval;
531 
532 }
533 
534 static int atl1e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
535 {
536 	switch (cmd) {
537 	case SIOCGMIIPHY:
538 	case SIOCGMIIREG:
539 	case SIOCSMIIREG:
540 		return atl1e_mii_ioctl(netdev, ifr, cmd);
541 	default:
542 		return -EOPNOTSUPP;
543 	}
544 }
545 
546 static void atl1e_setup_pcicmd(struct pci_dev *pdev)
547 {
548 	u16 cmd;
549 
550 	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
551 	cmd &= ~(PCI_COMMAND_INTX_DISABLE | PCI_COMMAND_IO);
552 	cmd |=  (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
553 	pci_write_config_word(pdev, PCI_COMMAND, cmd);
554 
555 	/*
556 	 * some motherboard BIOS (PXE/EFI) drivers may set PME
557 	 * while they transfer control to the OS (Windows/Linux),
558 	 * so we should clear this bit before the NIC can work normally
559 	 */
560 	pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0);
561 	msleep(1);
562 }
563 
564 /**
565  * atl1e_alloc_queues - Allocate memory for all rings
566  * @adapter: board private structure to initialize
567  *
568  */
569 static int atl1e_alloc_queues(struct atl1e_adapter *adapter)
570 {
571 	return 0;
572 }
573 
574 /**
575  * atl1e_sw_init - Initialize general software structures (struct atl1e_adapter)
576  * @adapter: board private structure to initialize
577  *
578  * atl1e_sw_init initializes the Adapter private data structure.
579  * Fields are initialized based on PCI device information and
580  * OS network device settings (MTU size).
581  */
582 static int atl1e_sw_init(struct atl1e_adapter *adapter)
583 {
584 	struct atl1e_hw *hw   = &adapter->hw;
585 	struct pci_dev	*pdev = adapter->pdev;
586 	u32 phy_status_data = 0;
587 
588 	adapter->wol = 0;
589 	adapter->link_speed = SPEED_0;   /* hardware init */
590 	adapter->link_duplex = FULL_DUPLEX;
591 	adapter->num_rx_queues = 1;
592 
593 	/* PCI config space info */
594 	hw->vendor_id = pdev->vendor;
595 	hw->device_id = pdev->device;
596 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
597 	hw->subsystem_id = pdev->subsystem_device;
598 	hw->revision_id  = pdev->revision;
599 
600 	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
601 
602 	phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
603 	/* nic type */
604 	if (hw->revision_id >= 0xF0) {
605 		hw->nic_type = athr_l2e_revB;
606 	} else {
607 		if (phy_status_data & PHY_STATUS_100M)
608 			hw->nic_type = athr_l1e;
609 		else
610 			hw->nic_type = athr_l2e_revA;
611 	}
612 
613 	phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS);
614 
615 	if (phy_status_data & PHY_STATUS_EMI_CA)
616 		hw->emi_ca = true;
617 	else
618 		hw->emi_ca = false;
619 
620 	hw->phy_configured = false;
621 	hw->preamble_len = 7;
622 	hw->max_frame_size = adapter->netdev->mtu;
623 	hw->rx_jumbo_th = (hw->max_frame_size + ETH_HLEN +
624 				VLAN_HLEN + ETH_FCS_LEN + 7) >> 3;
625 
626 	hw->rrs_type = atl1e_rrs_disable;
627 	hw->indirect_tab = 0;
628 	hw->base_cpu = 0;
629 
630 	/* the following default values still need confirmation */
631 
632 	hw->ict = 50000;                 /* 100ms */
633 	hw->smb_timer = 200000;          /* 200ms  */
634 	hw->tpd_burst = 5;
635 	hw->rrd_thresh = 1;
636 	hw->tpd_thresh = adapter->tx_ring.count / 2;
637 	hw->rx_count_down = 4;  /* 2us resolution */
638 	hw->tx_count_down = hw->imt * 4 / 3;
639 	hw->dmar_block = atl1e_dma_req_1024;
640 	hw->dmaw_block = atl1e_dma_req_1024;
641 	hw->dmar_dly_cnt = 15;
642 	hw->dmaw_dly_cnt = 4;
643 
644 	if (atl1e_alloc_queues(adapter)) {
645 		netdev_err(adapter->netdev, "Unable to allocate memory for queues\n");
646 		return -ENOMEM;
647 	}
648 
649 	atomic_set(&adapter->irq_sem, 1);
650 	spin_lock_init(&adapter->mdio_lock);
651 	spin_lock_init(&adapter->tx_lock);
652 
653 	set_bit(__AT_DOWN, &adapter->flags);
654 
655 	return 0;
656 }
657 
658 /**
659  * atl1e_clean_tx_ring - Free Tx skbs and DMA mappings
660  * @adapter: board private structure
661  */
662 static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
663 {
664 	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
665 	struct atl1e_tx_buffer *tx_buffer = NULL;
666 	struct pci_dev *pdev = adapter->pdev;
667 	u16 index, ring_count;
668 
669 	if (tx_ring->desc == NULL || tx_ring->tx_buffer == NULL)
670 		return;
671 
672 	ring_count = tx_ring->count;
673 	/* first unmap dma */
674 	for (index = 0; index < ring_count; index++) {
675 		tx_buffer = &tx_ring->tx_buffer[index];
676 		if (tx_buffer->dma) {
677 			if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE)
678 				pci_unmap_single(pdev, tx_buffer->dma,
679 					tx_buffer->length, PCI_DMA_TODEVICE);
680 			else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE)
681 				pci_unmap_page(pdev, tx_buffer->dma,
682 					tx_buffer->length, PCI_DMA_TODEVICE);
683 			tx_buffer->dma = 0;
684 		}
685 	}
686 	/* then free the skbs */
687 	for (index = 0; index < ring_count; index++) {
688 		tx_buffer = &tx_ring->tx_buffer[index];
689 		if (tx_buffer->skb) {
690 			dev_kfree_skb_any(tx_buffer->skb);
691 			tx_buffer->skb = NULL;
692 		}
693 	}
694 	/* Zero out Tx-buffers */
695 	memset(tx_ring->desc, 0, sizeof(struct atl1e_tpd_desc) *
696 				ring_count);
697 	memset(tx_ring->tx_buffer, 0, sizeof(struct atl1e_tx_buffer) *
698 				ring_count);
699 }
700 
701 /**
702  * atl1e_clean_rx_ring - Zero out the Rx page buffers
703  * @adapter: board private structure
704  */
705 static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter)
706 {
707 	struct atl1e_rx_ring *rx_ring =
708 		&adapter->rx_ring;
709 	struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc;
710 	u16 i, j;
711 
712 
713 	if (adapter->ring_vir_addr == NULL)
714 		return;
715 	/* Zero out the descriptor ring */
716 	for (i = 0; i < adapter->num_rx_queues; i++) {
717 		for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
718 			if (rx_page_desc[i].rx_page[j].addr != NULL) {
719 				memset(rx_page_desc[i].rx_page[j].addr, 0,
720 						rx_ring->real_page_size);
721 			}
722 		}
723 	}
724 }
725 
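/*
 * The single DMA ring buffer holds, in order: the TPD ring (8-byte aligned),
 * the RXF pages (32-byte aligned), and the TX CMB word plus one write-offset
 * word per RX page (4-byte aligned); the constants below cover the worst-case
 * alignment padding.
 */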
726 static void atl1e_cal_ring_size(struct atl1e_adapter *adapter, u32 *ring_size)
727 {
728 	*ring_size = ((u32)(adapter->tx_ring.count *
729 		     sizeof(struct atl1e_tpd_desc) + 7
730 			/* tx ring, qword align */
731 		     + adapter->rx_ring.real_page_size * AT_PAGE_NUM_PER_QUEUE *
732 			adapter->num_rx_queues + 31
733 			/* rx ring,  32 bytes align */
734 		     + (1 + AT_PAGE_NUM_PER_QUEUE * adapter->num_rx_queues) *
735 			sizeof(u32) + 3));
736 			/* tx, rx cmd, dword align   */
737 }
738 
739 static void atl1e_init_ring_resources(struct atl1e_adapter *adapter)
740 {
741 	struct atl1e_rx_ring *rx_ring = NULL;
742 
743 	rx_ring = &adapter->rx_ring;
744 
745 	rx_ring->real_page_size = adapter->rx_ring.page_size
746 				 + adapter->hw.max_frame_size
747 				 + ETH_HLEN + VLAN_HLEN
748 				 + ETH_FCS_LEN;
749 	rx_ring->real_page_size = roundup(rx_ring->real_page_size, 32);
750 	atl1e_cal_ring_size(adapter, &adapter->ring_size);
751 
752 	adapter->ring_vir_addr = NULL;
753 	adapter->rx_ring.desc = NULL;
754 	rwlock_init(&adapter->tx_ring.tx_lock);
755 }
756 
757 /*
758  * Initialize the ring read / write pointers:
759  */
760 static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter)
761 {
762 	struct atl1e_tx_ring *tx_ring = NULL;
763 	struct atl1e_rx_ring *rx_ring = NULL;
764 	struct atl1e_rx_page_desc *rx_page_desc = NULL;
765 	int i, j;
766 
767 	tx_ring = &adapter->tx_ring;
768 	rx_ring = &adapter->rx_ring;
769 	rx_page_desc = rx_ring->rx_page_desc;
770 
771 	tx_ring->next_to_use = 0;
772 	atomic_set(&tx_ring->next_to_clean, 0);
773 
774 	for (i = 0; i < adapter->num_rx_queues; i++) {
775 		rx_page_desc[i].rx_using  = 0;
776 		rx_page_desc[i].rx_nxseq = 0;
777 		for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
778 			*rx_page_desc[i].rx_page[j].write_offset_addr = 0;
779 			rx_page_desc[i].rx_page[j].read_offset = 0;
780 		}
781 	}
782 }
783 
784 /**
785  * atl1e_free_ring_resources - Free Tx / Rx descriptor resources
786  * @adapter: board private structure
787  *
788  * Free all transmit and receive software resources
789  */
790 static void atl1e_free_ring_resources(struct atl1e_adapter *adapter)
791 {
792 	struct pci_dev *pdev = adapter->pdev;
793 
794 	atl1e_clean_tx_ring(adapter);
795 	atl1e_clean_rx_ring(adapter);
796 
797 	if (adapter->ring_vir_addr) {
798 		pci_free_consistent(pdev, adapter->ring_size,
799 				adapter->ring_vir_addr, adapter->ring_dma);
800 		adapter->ring_vir_addr = NULL;
801 	}
802 
803 	if (adapter->tx_ring.tx_buffer) {
804 		kfree(adapter->tx_ring.tx_buffer);
805 		adapter->tx_ring.tx_buffer = NULL;
806 	}
807 }
808 
809 /**
810  * atl1e_setup_ring_resources - allocate Tx / Rx descriptor resources
811  * @adapter: board private structure
812  *
813  * Return 0 on success, negative on failure
814  */
815 static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
816 {
817 	struct pci_dev *pdev = adapter->pdev;
818 	struct atl1e_tx_ring *tx_ring;
819 	struct atl1e_rx_ring *rx_ring;
820 	struct atl1e_rx_page_desc  *rx_page_desc;
821 	int size, i, j;
822 	u32 offset = 0;
823 	int err = 0;
824 
825 	if (adapter->ring_vir_addr != NULL)
826 		return 0; /* already allocated */
827 
828 	tx_ring = &adapter->tx_ring;
829 	rx_ring = &adapter->rx_ring;
830 
831 	/* real ring DMA buffer */
832 
833 	size = adapter->ring_size;
834 	adapter->ring_vir_addr = pci_alloc_consistent(pdev,
835 			adapter->ring_size, &adapter->ring_dma);
836 
837 	if (adapter->ring_vir_addr == NULL) {
838 		netdev_err(adapter->netdev,
839 			   "pci_alloc_consistent failed, size = %d\n", size);
840 		return -ENOMEM;
841 	}
842 
843 	memset(adapter->ring_vir_addr, 0, adapter->ring_size);
844 
845 	rx_page_desc = rx_ring->rx_page_desc;
846 
847 	/* Init TPD Ring */
848 	tx_ring->dma = roundup(adapter->ring_dma, 8);
849 	offset = tx_ring->dma - adapter->ring_dma;
850 	tx_ring->desc = adapter->ring_vir_addr + offset;
851 	size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count);
852 	tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL);
853 	if (tx_ring->tx_buffer == NULL) {
854 		err = -ENOMEM;
855 		goto failed;
856 	}
857 
858 	/* Init RXF-Pages */
859 	offset += (sizeof(struct atl1e_tpd_desc) * tx_ring->count);
860 	offset = roundup(offset, 32);
861 
862 	for (i = 0; i < adapter->num_rx_queues; i++) {
863 		for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
864 			rx_page_desc[i].rx_page[j].dma =
865 				adapter->ring_dma + offset;
866 			rx_page_desc[i].rx_page[j].addr =
867 				adapter->ring_vir_addr + offset;
868 			offset += rx_ring->real_page_size;
869 		}
870 	}
871 
872 	/* Init CMB dma address */
873 	tx_ring->cmb_dma = adapter->ring_dma + offset;
874 	tx_ring->cmb = adapter->ring_vir_addr + offset;
875 	offset += sizeof(u32);
876 
877 	for (i = 0; i < adapter->num_rx_queues; i++) {
878 		for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
879 			rx_page_desc[i].rx_page[j].write_offset_dma =
880 				adapter->ring_dma + offset;
881 			rx_page_desc[i].rx_page[j].write_offset_addr =
882 				adapter->ring_vir_addr + offset;
883 			offset += sizeof(u32);
884 		}
885 	}
886 
887 	if (unlikely(offset > adapter->ring_size)) {
888 		netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n",
889 			   offset, adapter->ring_size);
890 		err = -1;
891 		goto failed;
892 	}
893 
894 	return 0;
895 failed:
896 	if (adapter->ring_vir_addr != NULL) {
897 		pci_free_consistent(pdev, adapter->ring_size,
898 				adapter->ring_vir_addr, adapter->ring_dma);
899 		adapter->ring_vir_addr = NULL;
900 	}
901 	return err;
902 }
903 
904 static inline void atl1e_configure_des_ring(struct atl1e_adapter *adapter)
905 {
906 
907 	struct atl1e_hw *hw = &adapter->hw;
908 	struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
909 	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
910 	struct atl1e_rx_page_desc *rx_page_desc = NULL;
911 	int i, j;
912 
913 	AT_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI,
914 			(u32)((adapter->ring_dma & AT_DMA_HI_ADDR_MASK) >> 32));
915 	AT_WRITE_REG(hw, REG_TPD_BASE_ADDR_LO,
916 			(u32)((tx_ring->dma) & AT_DMA_LO_ADDR_MASK));
917 	AT_WRITE_REG(hw, REG_TPD_RING_SIZE, (u16)(tx_ring->count));
918 	AT_WRITE_REG(hw, REG_HOST_TX_CMB_LO,
919 			(u32)((tx_ring->cmb_dma) & AT_DMA_LO_ADDR_MASK));
920 
921 	rx_page_desc = rx_ring->rx_page_desc;
922 	/* RXF Page Physical address / Page Length */
923 	for (i = 0; i < AT_MAX_RECEIVE_QUEUE; i++) {
924 		AT_WRITE_REG(hw, atl1e_rx_page_hi_addr_regs[i],
925 				 (u32)((adapter->ring_dma &
926 				 AT_DMA_HI_ADDR_MASK) >> 32));
927 		for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) {
928 			u32 page_phy_addr;
929 			u32 offset_phy_addr;
930 
931 			page_phy_addr = rx_page_desc[i].rx_page[j].dma;
932 			offset_phy_addr =
933 				   rx_page_desc[i].rx_page[j].write_offset_dma;
934 
935 			AT_WRITE_REG(hw, atl1e_rx_page_lo_addr_regs[i][j],
936 					page_phy_addr & AT_DMA_LO_ADDR_MASK);
937 			AT_WRITE_REG(hw, atl1e_rx_page_write_offset_regs[i][j],
938 					offset_phy_addr & AT_DMA_LO_ADDR_MASK);
939 			AT_WRITE_REGB(hw, atl1e_rx_page_vld_regs[i][j], 1);
940 		}
941 	}
942 	/* Page Length */
943 	AT_WRITE_REG(hw, REG_HOST_RXFPAGE_SIZE, rx_ring->page_size);
944 	/* Load all of base address above */
945 	AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
946 }
947 
948 static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
949 {
950 	struct atl1e_hw *hw = &adapter->hw;
951 	u32 dev_ctrl_data = 0;
952 	u32 max_pay_load = 0;
953 	u32 jumbo_thresh = 0;
954 	u32 extra_size = 0;     /* room for Ethernet/VLAN headers and FCS */
955 
956 	/* configure TXQ param */
957 	if (hw->nic_type != athr_l2e_revB) {
958 		extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
959 		if (hw->max_frame_size <= 1500) {
960 			jumbo_thresh = hw->max_frame_size + extra_size;
961 		} else if (hw->max_frame_size < 6*1024) {
962 			jumbo_thresh =
963 				(hw->max_frame_size + extra_size) * 2 / 3;
964 		} else {
965 			jumbo_thresh = (hw->max_frame_size + extra_size) / 2;
966 		}
967 		AT_WRITE_REG(hw, REG_TX_EARLY_TH, (jumbo_thresh + 7) >> 3);
968 	}
969 
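	/*
	 * Clamp the configured DMA burst sizes to the max payload size and
	 * max read request size advertised in the PCIe device control
	 * register.
	 */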
970 	dev_ctrl_data = AT_READ_REG(hw, REG_DEVICE_CTRL);
971 
972 	max_pay_load  = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) &
973 			DEVICE_CTRL_MAX_PAYLOAD_MASK;
974 
975 	hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
976 
977 	max_pay_load  = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) &
978 			DEVICE_CTRL_MAX_RREQ_SZ_MASK;
979 	hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
980 
981 	if (hw->nic_type != athr_l2e_revB)
982 		AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2,
983 			      atl1e_pay_load_size[hw->dmar_block]);
984 	/* enable TXQ */
985 	AT_WRITE_REGW(hw, REG_TXQ_CTRL,
986 			(((u16)hw->tpd_burst & TXQ_CTRL_NUM_TPD_BURST_MASK)
987 			 << TXQ_CTRL_NUM_TPD_BURST_SHIFT)
988 			| TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN);
989 }
990 
991 static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
992 {
993 	struct atl1e_hw *hw = &adapter->hw;
994 	u32 rxf_len  = 0;
995 	u32 rxf_low  = 0;
996 	u32 rxf_high = 0;
997 	u32 rxf_thresh_data = 0;
998 	u32 rxq_ctrl_data = 0;
999 
1000 	if (hw->nic_type != athr_l2e_revB) {
1001 		AT_WRITE_REGW(hw, REG_RXQ_JMBOSZ_RRDTIM,
1002 			      (u16)((hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) <<
1003 			      RXQ_JMBOSZ_TH_SHIFT |
1004 			      (1 & RXQ_JMBO_LKAH_MASK) <<
1005 			      RXQ_JMBO_LKAH_SHIFT));
1006 
1007 		rxf_len  = AT_READ_REG(hw, REG_SRAM_RXF_LEN);
1008 		rxf_high = rxf_len * 4 / 5;
1009 		rxf_low  = rxf_len / 5;
1010 		rxf_thresh_data = ((rxf_high  & RXQ_RXF_PAUSE_TH_HI_MASK)
1011 				  << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
1012 				  ((rxf_low & RXQ_RXF_PAUSE_TH_LO_MASK)
1013 				  << RXQ_RXF_PAUSE_TH_LO_SHIFT);
1014 
1015 		AT_WRITE_REG(hw, REG_RXQ_RXF_PAUSE_THRESH, rxf_thresh_data);
1016 	}
1017 
1018 	/* RRS */
1019 	AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab);
1020 	AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu);
1021 
1022 	if (hw->rrs_type & atl1e_rrs_ipv4)
1023 		rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4;
1024 
1025 	if (hw->rrs_type & atl1e_rrs_ipv4_tcp)
1026 		rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4_TCP;
1027 
1028 	if (hw->rrs_type & atl1e_rrs_ipv6)
1029 		rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6;
1030 
1031 	if (hw->rrs_type & atl1e_rrs_ipv6_tcp)
1032 		rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6_TCP;
1033 
1034 	if (hw->rrs_type != atl1e_rrs_disable)
1035 		rxq_ctrl_data |=
1036 			(RXQ_CTRL_HASH_ENABLE | RXQ_CTRL_RSS_MODE_MQUESINT);
1037 
1038 	rxq_ctrl_data |= RXQ_CTRL_IPV6_XSUM_VERIFY_EN | RXQ_CTRL_PBA_ALIGN_32 |
1039 			 RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN;
1040 
1041 	AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
1042 }
1043 
1044 static inline void atl1e_configure_dma(struct atl1e_adapter *adapter)
1045 {
1046 	struct atl1e_hw *hw = &adapter->hw;
1047 	u32 dma_ctrl_data = 0;
1048 
1049 	dma_ctrl_data = DMA_CTRL_RXCMB_EN;
1050 	dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
1051 		<< DMA_CTRL_DMAR_BURST_LEN_SHIFT;
1052 	dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
1053 		<< DMA_CTRL_DMAW_BURST_LEN_SHIFT;
1054 	dma_ctrl_data |= DMA_CTRL_DMAR_REQ_PRI | DMA_CTRL_DMAR_OUT_ORDER;
1055 	dma_ctrl_data |= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK)
1056 		<< DMA_CTRL_DMAR_DLY_CNT_SHIFT;
1057 	dma_ctrl_data |= (((u32)hw->dmaw_dly_cnt) & DMA_CTRL_DMAW_DLY_CNT_MASK)
1058 		<< DMA_CTRL_DMAW_DLY_CNT_SHIFT;
1059 
1060 	AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
1061 }
1062 
1063 static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
1064 {
1065 	u32 value;
1066 	struct atl1e_hw *hw = &adapter->hw;
1067 	struct net_device *netdev = adapter->netdev;
1068 
1069 	/* Config MAC CTRL Register */
1070 	value = MAC_CTRL_TX_EN |
1071 		MAC_CTRL_RX_EN ;
1072 
1073 	if (FULL_DUPLEX == adapter->link_duplex)
1074 		value |= MAC_CTRL_DUPLX;
1075 
1076 	value |= ((u32)((SPEED_1000 == adapter->link_speed) ?
1077 			  MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
1078 			  MAC_CTRL_SPEED_SHIFT);
1079 	value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
1080 
1081 	value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
1082 	value |= (((u32)adapter->hw.preamble_len &
1083 		  MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
1084 
1085 	__atl1e_vlan_mode(netdev->features, &value);
1086 
1087 	value |= MAC_CTRL_BC_EN;
1088 	if (netdev->flags & IFF_PROMISC)
1089 		value |= MAC_CTRL_PROMIS_EN;
1090 	if (netdev->flags & IFF_ALLMULTI)
1091 		value |= MAC_CTRL_MC_ALL_EN;
1092 	if (netdev->features & NETIF_F_RXALL)
1093 		value |= MAC_CTRL_DBG;
1094 	AT_WRITE_REG(hw, REG_MAC_CTRL, value);
1095 }
1096 
1097 /**
1098  * atl1e_configure - Configure Transmit & Receive Unit after Reset
1099  * @adapter: board private structure
1100  *
1101  * Configure the Tx/Rx unit of the MAC after a reset.
1102  */
1103 static int atl1e_configure(struct atl1e_adapter *adapter)
1104 {
1105 	struct atl1e_hw *hw = &adapter->hw;
1106 
1107 	u32 intr_status_data = 0;
1108 
1109 	/* clear interrupt status */
1110 	AT_WRITE_REG(hw, REG_ISR, ~0);
1111 
1112 	/* 1. set MAC Address */
1113 	atl1e_hw_set_mac_addr(hw);
1114 
1115 	/* 2. Init the Multicast HASH table (done by atl1e_set_multi) */
1116 
1117 	/* 3. Clear any WOL status */
1118 	AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
1119 
1120 	/* 4. Descriptor Ring BaseMem/Length/Read ptr/Write ptr
1121 	 *    TPD Ring/SMB/RXF0 Page CMBs, they use the same
1122 	 *    High 32bits memory */
1123 	atl1e_configure_des_ring(adapter);
1124 
1125 	/* 5. set Interrupt Moderator Timer */
1126 	AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, hw->imt);
1127 	AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER2_INIT, hw->imt);
1128 	AT_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_LED_MODE |
1129 			MASTER_CTRL_ITIMER_EN | MASTER_CTRL_ITIMER2_EN);
1130 
1131 	/* 6. rx/tx threshold to trig interrupt */
1132 	AT_WRITE_REGW(hw, REG_TRIG_RRD_THRESH, hw->rrd_thresh);
1133 	AT_WRITE_REGW(hw, REG_TRIG_TPD_THRESH, hw->tpd_thresh);
1134 	AT_WRITE_REGW(hw, REG_TRIG_RXTIMER, hw->rx_count_down);
1135 	AT_WRITE_REGW(hw, REG_TRIG_TXTIMER, hw->tx_count_down);
1136 
1137 	/* 7. set Interrupt Clear Timer */
1138 	AT_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, hw->ict);
1139 
1140 	/* 8. set MTU */
1141 	AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN +
1142 			VLAN_HLEN + ETH_FCS_LEN);
1143 
1144 	/* 9. config TXQ early tx threshold */
1145 	atl1e_configure_tx(adapter);
1146 
1147 	/* 10. config RXQ */
1148 	atl1e_configure_rx(adapter);
1149 
1150 	/* 11. config  DMA Engine */
1151 	atl1e_configure_dma(adapter);
1152 
1153 	/* 12. smb timer to trig interrupt */
1154 	AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, hw->smb_timer);
1155 
1156 	intr_status_data = AT_READ_REG(hw, REG_ISR);
1157 	if (unlikely((intr_status_data & ISR_PHY_LINKDOWN) != 0)) {
1158 		netdev_err(adapter->netdev,
1159 			   "atl1e_configure failed, PCIE phy link down\n");
1160 		return -1;
1161 	}
1162 
1163 	AT_WRITE_REG(hw, REG_ISR, 0x7fffffff);
1164 	return 0;
1165 }
1166 
1167 /**
1168  * atl1e_get_stats - Get System Network Statistics
1169  * @netdev: network interface device structure
1170  *
1171  * Returns the address of the device statistics structure.
1172  * The statistics are actually updated from the timer callback.
1173  */
1174 static struct net_device_stats *atl1e_get_stats(struct net_device *netdev)
1175 {
1176 	struct atl1e_adapter *adapter = netdev_priv(netdev);
1177 	struct atl1e_hw_stats  *hw_stats = &adapter->hw_stats;
1178 	struct net_device_stats *net_stats = &netdev->stats;
1179 
1180 	net_stats->rx_bytes   = hw_stats->rx_byte_cnt;
1181 	net_stats->tx_bytes   = hw_stats->tx_byte_cnt;
1182 	net_stats->multicast  = hw_stats->rx_mcast;
1183 	net_stats->collisions = hw_stats->tx_1_col +
1184 				hw_stats->tx_2_col +
1185 				hw_stats->tx_late_col +
1186 				hw_stats->tx_abort_col;
1187 
1188 	net_stats->rx_errors  = hw_stats->rx_frag +
1189 				hw_stats->rx_fcs_err +
1190 				hw_stats->rx_len_err +
1191 				hw_stats->rx_sz_ov +
1192 				hw_stats->rx_rrd_ov +
1193 				hw_stats->rx_align_err +
1194 				hw_stats->rx_rxf_ov;
1195 
1196 	net_stats->rx_fifo_errors   = hw_stats->rx_rxf_ov;
1197 	net_stats->rx_length_errors = hw_stats->rx_len_err;
1198 	net_stats->rx_crc_errors    = hw_stats->rx_fcs_err;
1199 	net_stats->rx_frame_errors  = hw_stats->rx_align_err;
1200 	net_stats->rx_dropped       = hw_stats->rx_rrd_ov;
1201 
1202 	net_stats->tx_errors = hw_stats->tx_late_col +
1203 			       hw_stats->tx_abort_col +
1204 			       hw_stats->tx_underrun +
1205 			       hw_stats->tx_trunc;
1206 
1207 	net_stats->tx_fifo_errors    = hw_stats->tx_underrun;
1208 	net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
1209 	net_stats->tx_window_errors  = hw_stats->tx_late_col;
1210 
1211 	net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;
1212 	net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
1213 
1214 	return net_stats;
1215 }
1216 
1217 static void atl1e_update_hw_stats(struct atl1e_adapter *adapter)
1218 {
1219 	u16 hw_reg_addr = 0;
1220 	unsigned long *stats_item = NULL;
1221 
1222 	/* update rx status */
1223 	hw_reg_addr = REG_MAC_RX_STATUS_BIN;
1224 	stats_item  = &adapter->hw_stats.rx_ok;
1225 	while (hw_reg_addr <= REG_MAC_RX_STATUS_END) {
1226 		*stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr);
1227 		stats_item++;
1228 		hw_reg_addr += 4;
1229 	}
1230 	/* update tx status */
1231 	hw_reg_addr = REG_MAC_TX_STATUS_BIN;
1232 	stats_item  = &adapter->hw_stats.tx_ok;
1233 	while (hw_reg_addr <= REG_MAC_TX_STATUS_END) {
1234 		*stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr);
1235 		stats_item++;
1236 		hw_reg_addr += 4;
1237 	}
1238 }
1239 
1240 static inline void atl1e_clear_phy_int(struct atl1e_adapter *adapter)
1241 {
1242 	u16 phy_data;
1243 
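	/* reading the PHY interrupt status register acks the GPHY interrupt */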
1244 	spin_lock(&adapter->mdio_lock);
1245 	atl1e_read_phy_reg(&adapter->hw, MII_INT_STATUS, &phy_data);
1246 	spin_unlock(&adapter->mdio_lock);
1247 }
1248 
1249 static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
1250 {
1251 	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
1252 	struct atl1e_tx_buffer *tx_buffer = NULL;
1253 	u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX);
1254 	u16 next_to_clean = atomic_read(&tx_ring->next_to_clean);
1255 
1256 	while (next_to_clean != hw_next_to_clean) {
1257 		tx_buffer = &tx_ring->tx_buffer[next_to_clean];
1258 		if (tx_buffer->dma) {
1259 			if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE)
1260 				pci_unmap_single(adapter->pdev, tx_buffer->dma,
1261 					tx_buffer->length, PCI_DMA_TODEVICE);
1262 			else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE)
1263 				pci_unmap_page(adapter->pdev, tx_buffer->dma,
1264 					tx_buffer->length, PCI_DMA_TODEVICE);
1265 			tx_buffer->dma = 0;
1266 		}
1267 
1268 		if (tx_buffer->skb) {
1269 			dev_kfree_skb_irq(tx_buffer->skb);
1270 			tx_buffer->skb = NULL;
1271 		}
1272 
1273 		if (++next_to_clean == tx_ring->count)
1274 			next_to_clean = 0;
1275 	}
1276 
1277 	atomic_set(&tx_ring->next_to_clean, next_to_clean);
1278 
1279 	if (netif_queue_stopped(adapter->netdev) &&
1280 			netif_carrier_ok(adapter->netdev)) {
1281 		netif_wake_queue(adapter->netdev);
1282 	}
1283 
1284 	return true;
1285 }
1286 
1287 /**
1288  * atl1e_intr - Interrupt Handler
1289  * @irq: interrupt number
1290  * @data: pointer to a network interface device structure
1291  */
1292 static irqreturn_t atl1e_intr(int irq, void *data)
1293 {
1294 	struct net_device *netdev  = data;
1295 	struct atl1e_adapter *adapter = netdev_priv(netdev);
1296 	struct atl1e_hw *hw = &adapter->hw;
1297 	int max_ints = AT_MAX_INT_WORK;
1298 	int handled = IRQ_NONE;
1299 	u32 status;
1300 
1301 	do {
1302 		status = AT_READ_REG(hw, REG_ISR);
1303 		if ((status & IMR_NORMAL_MASK) == 0 ||
1304 				(status & ISR_DIS_INT) != 0) {
1305 			if (max_ints != AT_MAX_INT_WORK)
1306 				handled = IRQ_HANDLED;
1307 			break;
1308 		}
1309 		/* link event */
1310 		if (status & ISR_GPHY)
1311 			atl1e_clear_phy_int(adapter);
1312 		/* Ack ISR */
1313 		AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
1314 
1315 		handled = IRQ_HANDLED;
1316 		/* check if PCIE PHY Link down */
1317 		if (status & ISR_PHY_LINKDOWN) {
1318 			netdev_err(adapter->netdev,
1319 				   "pcie phy linkdown %x\n", status);
1320 			if (netif_running(adapter->netdev)) {
1321 				/* reset MAC */
1322 				atl1e_irq_reset(adapter);
1323 				schedule_work(&adapter->reset_task);
1324 				break;
1325 			}
1326 		}
1327 
1328 		/* check if DMA read/write error */
1329 		if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
1330 			netdev_err(adapter->netdev,
1331 				   "PCIE DMA RW error (status = 0x%x)\n",
1332 				   status);
1333 			atl1e_irq_reset(adapter);
1334 			schedule_work(&adapter->reset_task);
1335 			break;
1336 		}
1337 
1338 		if (status & ISR_SMB)
1339 			atl1e_update_hw_stats(adapter);
1340 
1341 		/* link event */
1342 		if (status & (ISR_GPHY | ISR_MANUAL)) {
1343 			netdev->stats.tx_carrier_errors++;
1344 			atl1e_link_chg_event(adapter);
1345 			break;
1346 		}
1347 
1348 		/* transmit event */
1349 		if (status & ISR_TX_EVENT)
1350 			atl1e_clean_tx_irq(adapter);
1351 
1352 		if (status & ISR_RX_EVENT) {
1353 			/*
1354 			 * disable rx interrupts, without
1355 			 * the synchronize_irq bit
1356 			 */
1357 			AT_WRITE_REG(hw, REG_IMR,
1358 				     IMR_NORMAL_MASK & ~ISR_RX_EVENT);
1359 			AT_WRITE_FLUSH(hw);
1360 			if (likely(napi_schedule_prep(
1361 				   &adapter->napi)))
1362 				__napi_schedule(&adapter->napi);
1363 		}
1364 	} while (--max_ints > 0);
1365 	/* re-enable Interrupt*/
1366 	AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
1367 
1368 	return handled;
1369 }
1370 
1371 static inline void atl1e_rx_checksum(struct atl1e_adapter *adapter,
1372 		  struct sk_buff *skb, struct atl1e_recv_ret_status *prrs)
1373 {
1374 	u8 *packet = (u8 *)(prrs + 1);
1375 	struct iphdr *iph;
1376 	u16 head_len = ETH_HLEN;
1377 	u16 pkt_flags;
1378 	u16 err_flags;
1379 
1380 	skb_checksum_none_assert(skb);
1381 	pkt_flags = prrs->pkt_flag;
1382 	err_flags = prrs->err_flag;
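	/*
	 * Only report CHECKSUM_UNNECESSARY for TCP/UDP over IPv4/IPv6 when
	 * the hardware flags no IP/L4 checksum error; fragmented IPv4 frames
	 * and everything else fall back to software verification.
	 */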
1383 	if (((pkt_flags & RRS_IS_IPV4) || (pkt_flags & RRS_IS_IPV6)) &&
1384 		((pkt_flags & RRS_IS_TCP) || (pkt_flags & RRS_IS_UDP))) {
1385 		if (pkt_flags & RRS_IS_IPV4) {
1386 			if (pkt_flags & RRS_IS_802_3)
1387 				head_len += 8;
1388 			iph = (struct iphdr *) (packet + head_len);
1389 			if (iph->frag_off != 0 && !(pkt_flags & RRS_IS_IP_DF))
1390 				goto hw_xsum;
1391 		}
1392 		if (!(err_flags & (RRS_ERR_IP_CSUM | RRS_ERR_L4_CSUM))) {
1393 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1394 			return;
1395 		}
1396 	}
1397 
1398 hw_xsum:
1399 	return;
1400 }
1401 
1402 static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter,
1403 					       u8 que)
1404 {
1405 	struct atl1e_rx_page_desc *rx_page_desc =
1406 		(struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc;
1407 	u8 rx_using = rx_page_desc[que].rx_using;
1408 
1409 	return &(rx_page_desc[que].rx_page[rx_using]);
1410 }
1411 
1412 static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
1413 		   int *work_done, int work_to_do)
1414 {
1415 	struct net_device *netdev  = adapter->netdev;
1416 	struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
1417 	struct atl1e_rx_page_desc *rx_page_desc =
1418 		(struct atl1e_rx_page_desc *) rx_ring->rx_page_desc;
1419 	struct sk_buff *skb = NULL;
1420 	struct atl1e_rx_page *rx_page = atl1e_get_rx_page(adapter, que);
1421 	u32 packet_size, write_offset;
1422 	struct atl1e_recv_ret_status *prrs;
1423 
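	/*
	 * The hardware appends received frames to the current RX page and
	 * advances the page's write offset; consume everything between our
	 * read offset and that write offset.
	 */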
1424 	write_offset = *(rx_page->write_offset_addr);
1425 	if (likely(rx_page->read_offset < write_offset)) {
1426 		do {
1427 			if (*work_done >= work_to_do)
1428 				break;
1429 			(*work_done)++;
1430 			/* get new packet's  rrs */
1431 			prrs = (struct atl1e_recv_ret_status *) (rx_page->addr +
1432 						 rx_page->read_offset);
1433 			/* check sequence number */
1434 			if (prrs->seq_num != rx_page_desc[que].rx_nxseq) {
1435 				netdev_err(netdev,
1436 					   "rx sequence number error (rx=%d) (expect=%d)\n",
1437 					   prrs->seq_num,
1438 					   rx_page_desc[que].rx_nxseq);
1439 				rx_page_desc[que].rx_nxseq++;
1440 				/* just for debug use */
1441 				AT_WRITE_REG(&adapter->hw, REG_DEBUG_DATA0,
1442 					     (((u32)prrs->seq_num) << 16) |
1443 					     rx_page_desc[que].rx_nxseq);
1444 				goto fatal_err;
1445 			}
1446 			rx_page_desc[que].rx_nxseq++;
1447 
1448 			/* error packet */
1449 			if ((prrs->pkt_flag & RRS_IS_ERR_FRAME) &&
1450 			    !(netdev->features & NETIF_F_RXALL)) {
1451 				if (prrs->err_flag & (RRS_ERR_BAD_CRC |
1452 					RRS_ERR_DRIBBLE | RRS_ERR_CODE |
1453 					RRS_ERR_TRUNC)) {
1454 				/* hardware error, discard this packet*/
1455 					netdev_err(netdev,
1456 						   "rx packet desc error %x\n",
1457 						   *((u32 *)prrs + 1));
1458 					goto skip_pkt;
1459 				}
1460 			}
1461 
1462 			packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
1463 					RRS_PKT_SIZE_MASK);
1464 			if (likely(!(netdev->features & NETIF_F_RXFCS)))
1465 				packet_size -= 4; /* CRC */
1466 
1467 			skb = netdev_alloc_skb_ip_align(netdev, packet_size);
1468 			if (skb == NULL)
1469 				goto skip_pkt;
1470 
1471 			memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
1472 			skb_put(skb, packet_size);
1473 			skb->protocol = eth_type_trans(skb, netdev);
1474 			atl1e_rx_checksum(adapter, skb, prrs);
1475 
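			/*
			 * The RRS carries the VLAN TCI in a hardware-specific
			 * bit layout; rearrange it into the standard
			 * PCP/DEI/VID order before passing it to the stack.
			 */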
1476 			if (prrs->pkt_flag & RRS_IS_VLAN_TAG) {
1477 				u16 vlan_tag = (prrs->vtag >> 4) |
1478 					       ((prrs->vtag & 7) << 13) |
1479 					       ((prrs->vtag & 8) << 9);
1480 				netdev_dbg(netdev,
1481 					   "RXD VLAN TAG<RRD>=0x%04x\n",
1482 					   prrs->vtag);
1483 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1484 			}
1485 			netif_receive_skb(skb);
1486 
1487 skip_pkt:
1488 	/* skip current packet whether it's ok or not. */
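			/*
			 * Advance past the RRS header and payload, rounded up
			 * to the next 32-byte boundary.
			 */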
1489 			rx_page->read_offset +=
1490 				(((u32)((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
1491 				RRS_PKT_SIZE_MASK) +
1492 				sizeof(struct atl1e_recv_ret_status) + 31) &
1493 						0xFFFFFFE0);
1494 
1495 			if (rx_page->read_offset >= rx_ring->page_size) {
1496 				/* mark this page clean */
1497 				u16 reg_addr;
1498 				u8  rx_using;
1499 
1500 				rx_page->read_offset =
1501 					*(rx_page->write_offset_addr) = 0;
1502 				rx_using = rx_page_desc[que].rx_using;
1503 				reg_addr =
1504 					atl1e_rx_page_vld_regs[que][rx_using];
1505 				AT_WRITE_REGB(&adapter->hw, reg_addr, 1);
1506 				rx_page_desc[que].rx_using ^= 1;
1507 				rx_page = atl1e_get_rx_page(adapter, que);
1508 			}
1509 			write_offset = *(rx_page->write_offset_addr);
1510 		} while (rx_page->read_offset < write_offset);
1511 	}
1512 
1513 	return;
1514 
1515 fatal_err:
1516 	if (!test_bit(__AT_DOWN, &adapter->flags))
1517 		schedule_work(&adapter->reset_task);
1518 }
1519 
1520 /**
1521  * atl1e_clean - NAPI Rx polling callback
1522  */
1523 static int atl1e_clean(struct napi_struct *napi, int budget)
1524 {
1525 	struct atl1e_adapter *adapter =
1526 			container_of(napi, struct atl1e_adapter, napi);
1527 	u32 imr_data;
1528 	int work_done = 0;
1529 
1530 	/* Keep link state information with original netdev */
1531 	if (!netif_carrier_ok(adapter->netdev))
1532 		goto quit_polling;
1533 
1534 	atl1e_clean_rx_irq(adapter, 0, &work_done, budget);
1535 
1536 	/* If no Tx and not enough Rx work done, exit the polling mode */
1537 	if (work_done < budget) {
1538 quit_polling:
1539 		napi_complete(napi);
1540 		imr_data = AT_READ_REG(&adapter->hw, REG_IMR);
1541 		AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT);
1542 		/* test debug */
1543 		if (test_bit(__AT_DOWN, &adapter->flags)) {
1544 			atomic_dec(&adapter->irq_sem);
1545 			netdev_err(adapter->netdev,
1546 				   "atl1e_clean is called when AT_DOWN\n");
1547 		}
1548 		/* reenable RX intr */
1549 		/*atl1e_irq_enable(adapter); */
1550 
1551 	}
1552 	return work_done;
1553 }
1554 
1555 #ifdef CONFIG_NET_POLL_CONTROLLER
1556 
1557 /*
1558  * Polling 'interrupt' - used by things like netconsole to send skbs
1559  * without having to re-enable interrupts. It's not called while
1560  * the interrupt routine is executing.
1561  */
1562 static void atl1e_netpoll(struct net_device *netdev)
1563 {
1564 	struct atl1e_adapter *adapter = netdev_priv(netdev);
1565 
1566 	disable_irq(adapter->pdev->irq);
1567 	atl1e_intr(adapter->pdev->irq, netdev);
1568 	enable_irq(adapter->pdev->irq);
1569 }
1570 #endif
1571 
1572 static inline u16 atl1e_tpd_avail(struct atl1e_adapter *adapter)
1573 {
1574 	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
1575 	u16 next_to_use = 0;
1576 	u16 next_to_clean = 0;
1577 
1578 	next_to_clean = atomic_read(&tx_ring->next_to_clean);
1579 	next_to_use   = tx_ring->next_to_use;
1580 
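	/*
	 * Number of free descriptors in the circular ring; one slot is always
	 * kept empty so a full ring can be distinguished from an empty one.
	 */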
1581 	return (u16)(next_to_clean > next_to_use) ?
1582 		(next_to_clean - next_to_use - 1) :
1583 		(tx_ring->count + next_to_clean - next_to_use - 1);
1584 }
1585 
1586 /*
1587  * get the next usable tpd
1588  * Note: the caller should call atl1e_tpd_avail() to make sure
1589  * there are enough tpds available
1590  */
1591 static struct atl1e_tpd_desc *atl1e_get_tpd(struct atl1e_adapter *adapter)
1592 {
1593 	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
1594 	u16 next_to_use = 0;
1595 
1596 	next_to_use = tx_ring->next_to_use;
1597 	if (++tx_ring->next_to_use == tx_ring->count)
1598 		tx_ring->next_to_use = 0;
1599 
1600 	memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc));
1601 	return &tx_ring->desc[next_to_use];
1602 }
1603 
1604 static struct atl1e_tx_buffer *
1605 atl1e_get_tx_buffer(struct atl1e_adapter *adapter, struct atl1e_tpd_desc *tpd)
1606 {
1607 	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
1608 
1609 	return &tx_ring->tx_buffer[tpd - tx_ring->desc];
1610 }
1611 
1612 /* Calculate the number of transmit packet descriptors (TPDs) needed */
1613 static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
1614 {
1615 	int i = 0;
1616 	u16 tpd_req = 1;
1617 	u16 fg_size = 0;
1618 	u16 proto_hdr_len = 0;
1619 
1620 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1621 		fg_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1622 		tpd_req += ((fg_size + MAX_TX_BUF_LEN - 1) >> MAX_TX_BUF_SHIFT);
1623 	}
1624 
1625 	if (skb_is_gso(skb)) {
1626 		if (skb->protocol == htons(ETH_P_IP) ||
1627 		   (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) {
1628 			proto_hdr_len = skb_transport_offset(skb) +
1629 					tcp_hdrlen(skb);
1630 			if (proto_hdr_len < skb_headlen(skb)) {
1631 				tpd_req += ((skb_headlen(skb) - proto_hdr_len +
1632 					   MAX_TX_BUF_LEN - 1) >>
1633 					   MAX_TX_BUF_SHIFT);
1634 			}
1635 		}
1636 
1637 	}
1638 	return tpd_req;
1639 }
1640 
1641 static int atl1e_tso_csum(struct atl1e_adapter *adapter,
1642 		       struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
1643 {
1644 	u8 hdr_len;
1645 	u32 real_len;
1646 	unsigned short offload_type;
1647 	int err;
1648 
1649 	if (skb_is_gso(skb)) {
1650 		if (skb_header_cloned(skb)) {
1651 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1652 			if (unlikely(err))
1653 				return -1;
1654 		}
1655 		offload_type = skb_shinfo(skb)->gso_type;
1656 
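		/*
		 * Prepare the headers for hardware TSO: clear the IP total
		 * length, seed the TCP checksum with the pseudo-header
		 * checksum, and record the header lengths and MSS in the TPD.
		 */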
1657 		if (offload_type & SKB_GSO_TCPV4) {
1658 			real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
1659 					+ ntohs(ip_hdr(skb)->tot_len));
1660 
1661 			if (real_len < skb->len)
1662 				pskb_trim(skb, real_len);
1663 
1664 			hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
1665 			if (unlikely(skb->len == hdr_len)) {
1666 				/* only checksum is needed */
1667 				netdev_warn(adapter->netdev,
1668 					    "IPv4 TSO with zero data?\n");
1669 				goto check_sum;
1670 			} else {
1671 				ip_hdr(skb)->check = 0;
1672 				ip_hdr(skb)->tot_len = 0;
1673 				tcp_hdr(skb)->check = ~csum_tcpudp_magic(
1674 							ip_hdr(skb)->saddr,
1675 							ip_hdr(skb)->daddr,
1676 							0, IPPROTO_TCP, 0);
1677 				tpd->word3 |= (ip_hdr(skb)->ihl &
1678 					TDP_V4_IPHL_MASK) <<
1679 					TPD_V4_IPHL_SHIFT;
1680 				tpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
1681 					TPD_TCPHDRLEN_MASK) <<
1682 					TPD_TCPHDRLEN_SHIFT;
1683 				tpd->word3 |= ((skb_shinfo(skb)->gso_size) &
1684 					TPD_MSS_MASK) << TPD_MSS_SHIFT;
1685 				tpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
1686 			}
1687 			return 0;
1688 		}
1689 	}
1690 
1691 check_sum:
1692 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1693 		u8 css, cso;
1694 
1695 		cso = skb_checksum_start_offset(skb);
1696 		if (unlikely(cso & 0x1)) {
1697 			netdev_err(adapter->netdev,
1698 				   "payload offset should not be an odd number\n");
1699 			return -1;
1700 		} else {
1701 			css = cso + skb->csum_offset;
1702 			tpd->word3 |= (cso & TPD_PLOADOFFSET_MASK) <<
1703 					TPD_PLOADOFFSET_SHIFT;
1704 			tpd->word3 |= (css & TPD_CCSUMOFFSET_MASK) <<
1705 					TPD_CCSUMOFFSET_SHIFT;
1706 			tpd->word3 |= 1 << TPD_CC_SEGMENT_EN_SHIFT;
1707 		}
1708 	}
1709 
1710 	return 0;
1711 }
1712 
1713 static int atl1e_tx_map(struct atl1e_adapter *adapter,
1714 			struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
1715 {
1716 	struct atl1e_tpd_desc *use_tpd = NULL;
1717 	struct atl1e_tx_buffer *tx_buffer = NULL;
1718 	u16 buf_len = skb_headlen(skb);
1719 	u16 map_len = 0;
1720 	u16 mapped_len = 0;
1721 	u16 hdr_len = 0;
1722 	u16 nr_frags;
1723 	u16 f;
1724 	int segment;
1725 	int ring_start = adapter->tx_ring.next_to_use;
1726 	int ring_end;
1727 
1728 	nr_frags = skb_shinfo(skb)->nr_frags;
1729 	segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
1730 	if (segment) {
1731 		/* TSO */
1732 		map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1733 		use_tpd = tpd;
1734 
1735 		tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
1736 		tx_buffer->length = map_len;
1737 		tx_buffer->dma = pci_map_single(adapter->pdev,
1738 					skb->data, hdr_len, PCI_DMA_TODEVICE);
1739 		if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma))
1740 			return -ENOSPC;
1741 
1742 		ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
1743 		mapped_len += map_len;
1744 		use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
1745 		use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
1746 			((cpu_to_le32(tx_buffer->length) &
1747 			TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT);
1748 	}
1749 
1750 	while (mapped_len < buf_len) {
1751 		/* mapped_len == 0 means we should use the first tpd,
1752 		   which is given by the caller */
1753 		if (mapped_len == 0) {
1754 			use_tpd = tpd;
1755 		} else {
1756 			use_tpd = atl1e_get_tpd(adapter);
1757 			memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc));
1758 		}
1759 		tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
1760 		tx_buffer->skb = NULL;
1761 
1762 		tx_buffer->length = map_len =
1763 			((buf_len - mapped_len) >= MAX_TX_BUF_LEN) ?
1764 			MAX_TX_BUF_LEN : (buf_len - mapped_len);
1765 		tx_buffer->dma =
1766 			pci_map_single(adapter->pdev, skb->data + mapped_len,
1767 					map_len, PCI_DMA_TODEVICE);
1768 
1769 		if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
1770 			/* We need to unwind the mappings we've done */
1771 			ring_end = adapter->tx_ring.next_to_use;
1772 			adapter->tx_ring.next_to_use = ring_start;
1773 			while (adapter->tx_ring.next_to_use != ring_end) {
1774 				tpd = atl1e_get_tpd(adapter);
1775 				tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
1776 				pci_unmap_single(adapter->pdev, tx_buffer->dma,
1777 						 tx_buffer->length, PCI_DMA_TODEVICE);
1778 			}
1779 			/* Reset the tx rings next pointer */
1780 			adapter->tx_ring.next_to_use = ring_start;
1781 			return -ENOSPC;
1782 		}
1783 
1784 		ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
1785 		mapped_len  += map_len;
1786 		use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
1787 		use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
1788 			((cpu_to_le32(tx_buffer->length) &
1789 			TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT);
1790 	}
1791 
1792 	for (f = 0; f < nr_frags; f++) {
1793 		const struct skb_frag_struct *frag;
1794 		u16 i;
1795 		u16 seg_num;
1796 
1797 		frag = &skb_shinfo(skb)->frags[f];
1798 		buf_len = skb_frag_size(frag);
1799 
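		/*
		 * A fragment larger than MAX_TX_BUF_LEN is split across
		 * several descriptors; seg_num is the number of pieces
		 * needed for this fragment.
		 */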
1800 		seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
1801 		for (i = 0; i < seg_num; i++) {
1802 			use_tpd = atl1e_get_tpd(adapter);
1803 			memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc));
1804 
1805 			tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
1806 			BUG_ON(tx_buffer->skb);
1807 
1808 			tx_buffer->skb = NULL;
1809 			tx_buffer->length =
1810 				(buf_len > MAX_TX_BUF_LEN) ?
1811 				MAX_TX_BUF_LEN : buf_len;
1812 			buf_len -= tx_buffer->length;
1813 
1814 			tx_buffer->dma = skb_frag_dma_map(&adapter->pdev->dev,
1815 							  frag,
1816 							  (i * MAX_TX_BUF_LEN),
1817 							  tx_buffer->length,
1818 							  DMA_TO_DEVICE);
1819 
1820 			if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
1821 				/* We need to unwind the mappings we've done */
1822 				ring_end = adapter->tx_ring.next_to_use;
1823 				adapter->tx_ring.next_to_use = ring_start;
1824 				while (adapter->tx_ring.next_to_use != ring_end) {
1825 					tpd = atl1e_get_tpd(adapter);
1826 					tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
1827 					dma_unmap_page(&adapter->pdev->dev, tx_buffer->dma,
1828 						       tx_buffer->length, DMA_TO_DEVICE);
1829 				}
1830 
1831 				/* Reset the ring next to use pointer */
1832 				adapter->tx_ring.next_to_use = ring_start;
1833 				return -ENOSPC;
1834 			}
1835 
1836 			ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE);
1837 			use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
1838 			use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
1839 					((cpu_to_le32(tx_buffer->length) &
1840 					TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT);
1841 		}
1842 	}
1843 
1844 	if ((tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK)
1845 		/* the first tpd carries the TCP/IP header for this packet */
1846 		tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT;
1847 	/* The last tpd marks the end of the packet */
1849 	use_tpd->word3 |= 1 << TPD_EOP_SHIFT;
1850 	/* The last buffer info contains the skb address,
1851 	   so the skb will be freed after unmap */
1852 	tx_buffer->skb = skb;
1853 	return 0;
1854 }
1855 
1856 static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count,
1857 			   struct atl1e_tpd_desc *tpd)
1858 {
1859 	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
1860 	/* Force memory writes to complete before letting h/w
1861 	 * know there are new descriptors to fetch.  (Only
1862 	 * applicable for weak-ordered memory model archs,
1863 	 * such as IA-64). */
1864 	wmb();
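	/*
	 * Writing next_to_use to the TPD producer index mailbox hands the
	 * newly built descriptors to the hardware DMA engine.
	 */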
1865 	AT_WRITE_REG(&adapter->hw, REG_MB_TPD_PROD_IDX, tx_ring->next_to_use);
1866 }
1867 
1868 static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
1869 					  struct net_device *netdev)
1870 {
1871 	struct atl1e_adapter *adapter = netdev_priv(netdev);
1872 	unsigned long flags;
1873 	u16 tpd_req = 1;
1874 	struct atl1e_tpd_desc *tpd;
1875 
1876 	if (test_bit(__AT_DOWN, &adapter->flags)) {
1877 		dev_kfree_skb_any(skb);
1878 		return NETDEV_TX_OK;
1879 	}
1880 
1881 	if (unlikely(skb->len <= 0)) {
1882 		dev_kfree_skb_any(skb);
1883 		return NETDEV_TX_OK;
1884 	}
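	/* work out how many tpds this skb will need before taking the tx lock */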
1885 	tpd_req = atl1e_cal_tdp_req(skb);
1886 	if (!spin_trylock_irqsave(&adapter->tx_lock, flags))
1887 		return NETDEV_TX_LOCKED;
1888 
1889 	if (atl1e_tpd_avail(adapter) < tpd_req) {
1890 		/* not enough descriptors, just stop the queue */
1891 		netif_stop_queue(netdev);
1892 		spin_unlock_irqrestore(&adapter->tx_lock, flags);
1893 		return NETDEV_TX_BUSY;
1894 	}
1895 
1896 	tpd = atl1e_get_tpd(adapter);
1897 
1898 	if (vlan_tx_tag_present(skb)) {
1899 		u16 vlan_tag = vlan_tx_tag_get(skb);
1900 		u16 atl1e_vlan_tag;
1901 
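		/*
		 * Setting TPD_INS_VL_TAG tells the hardware to insert the
		 * VLAN tag on transmit; AT_VLAN_TAG_TO_TPD_TAG rearranges
		 * the VLAN TCI into the layout the TPD expects.
		 */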
1902 		tpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
1903 		AT_VLAN_TAG_TO_TPD_TAG(vlan_tag, atl1e_vlan_tag);
1904 		tpd->word2 |= (atl1e_vlan_tag & TPD_VLANTAG_MASK) <<
1905 				TPD_VLAN_SHIFT;
1906 	}
1907 
1908 	if (skb->protocol == htons(ETH_P_8021Q))
1909 		tpd->word3 |= 1 << TPD_VL_TAGGED_SHIFT;
1910 
1911 	if (skb_network_offset(skb) != ETH_HLEN)
1912 		tpd->word3 |= 1 << TPD_ETHTYPE_SHIFT; /* 802.3 frame */
1913 
1914 	/* do TSO and checksum offload setup */
1915 	if (atl1e_tso_csum(adapter, skb, tpd) != 0) {
1916 		spin_unlock_irqrestore(&adapter->tx_lock, flags);
1917 		dev_kfree_skb_any(skb);
1918 		return NETDEV_TX_OK;
1919 	}
1920 
1921 	if (atl1e_tx_map(adapter, skb, tpd)) {
1922 		dev_kfree_skb_any(skb);
1923 		goto out;
1924 	}
1925 
1926 	atl1e_tx_queue(adapter, tpd_req, tpd);
1927 
1928 	netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
1929 out:
1930 	spin_unlock_irqrestore(&adapter->tx_lock, flags);
1931 	return NETDEV_TX_OK;
1932 }
1933 
1934 static void atl1e_free_irq(struct atl1e_adapter *adapter)
1935 {
1936 	struct net_device *netdev = adapter->netdev;
1937 
1938 	free_irq(adapter->pdev->irq, netdev);
1939 }
1940 
1941 static int atl1e_request_irq(struct atl1e_adapter *adapter)
1942 {
1943 	struct pci_dev    *pdev   = adapter->pdev;
1944 	struct net_device *netdev = adapter->netdev;
1945 	int err = 0;
1946 
1947 	err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name,
1948 			  netdev);
1949 	if (err) {
1950 		netdev_dbg(adapter->netdev,
1951 			   "Unable to allocate interrupt, Error: %d\n", err);
1952 		return err;
1953 	}
1954 	netdev_dbg(netdev, "atl1e_request_irq OK\n");
1955 	return err;
1956 }
1957 
1958 int atl1e_up(struct atl1e_adapter *adapter)
1959 {
1960 	struct net_device *netdev = adapter->netdev;
1961 	int err = 0;
1962 	u32 val;
1963 
1964 	/* hardware has been reset, we need to reload some things */
1965 	err = atl1e_init_hw(&adapter->hw);
1966 	if (err) {
1967 		err = -EIO;
1968 		return err;
1969 	}
1970 	atl1e_init_ring_ptrs(adapter);
1971 	atl1e_set_multi(netdev);
1972 	atl1e_restore_vlan(adapter);
1973 
1974 	if (atl1e_configure(adapter)) {
1975 		err = -EIO;
1976 		goto err_up;
1977 	}
1978 
1979 	clear_bit(__AT_DOWN, &adapter->flags);
1980 	napi_enable(&adapter->napi);
1981 	atl1e_irq_enable(adapter);
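	/* trigger a manual interrupt so any pending events are handled at once */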
1982 	val = AT_READ_REG(&adapter->hw, REG_MASTER_CTRL);
1983 	AT_WRITE_REG(&adapter->hw, REG_MASTER_CTRL,
1984 		      val | MASTER_CTRL_MANUAL_INT);
1985 
1986 err_up:
1987 	return err;
1988 }
1989 
1990 void atl1e_down(struct atl1e_adapter *adapter)
1991 {
1992 	struct net_device *netdev = adapter->netdev;
1993 
1994 	/* signal that we're down so the interrupt handler does not
1995 	 * reschedule our watchdog timer */
1996 	set_bit(__AT_DOWN, &adapter->flags);
1997 
1998 	netif_stop_queue(netdev);
1999 
2000 	/* reset MAC to disable all RX/TX */
2001 	atl1e_reset_hw(&adapter->hw);
2002 	msleep(1);
2003 
2004 	napi_disable(&adapter->napi);
2005 	atl1e_del_timer(adapter);
2006 	atl1e_irq_disable(adapter);
2007 
2008 	netif_carrier_off(netdev);
2009 	adapter->link_speed = SPEED_0;
2010 	adapter->link_duplex = -1;
2011 	atl1e_clean_tx_ring(adapter);
2012 	atl1e_clean_rx_ring(adapter);
2013 }
2014 
2015 /**
2016  * atl1e_open - Called when a network interface is made active
2017  * @netdev: network interface device structure
2018  *
2019  * Returns 0 on success, negative value on failure
2020  *
2021  * The open entry point is called when a network interface is made
2022  * active by the system (IFF_UP).  At this point all resources needed
2023  * for transmit and receive operations are allocated, the interrupt
2024  * handler is registered with the OS, the watchdog timer is started,
2025  * and the stack is notified that the interface is ready.
2026  */
2027 static int atl1e_open(struct net_device *netdev)
2028 {
2029 	struct atl1e_adapter *adapter = netdev_priv(netdev);
2030 	int err;
2031 
2032 	/* disallow open during test */
2033 	if (test_bit(__AT_TESTING, &adapter->flags))
2034 		return -EBUSY;
2035 
2036 	/* allocate rx/tx dma buffer & descriptors */
2037 	atl1e_init_ring_resources(adapter);
2038 	err = atl1e_setup_ring_resources(adapter);
2039 	if (unlikely(err))
2040 		return err;
2041 
2042 	err = atl1e_request_irq(adapter);
2043 	if (unlikely(err))
2044 		goto err_req_irq;
2045 
2046 	err = atl1e_up(adapter);
2047 	if (unlikely(err))
2048 		goto err_up;
2049 
2050 	return 0;
2051 
2052 err_up:
2053 	atl1e_free_irq(adapter);
2054 err_req_irq:
2055 	atl1e_free_ring_resources(adapter);
2056 	atl1e_reset_hw(&adapter->hw);
2057 
2058 	return err;
2059 }
2060 
2061 /**
2062  * atl1e_close - Disables a network interface
2063  * @netdev: network interface device structure
2064  *
2065  * Returns 0, this is not allowed to fail
2066  *
2067  * The close entry point is called when an interface is de-activated
2068  * by the OS.  The hardware is still under the drivers control, but
2069  * needs to be disabled.  A global MAC reset is issued to stop the
2070  * hardware, and all transmit and receive resources are freed.
2071  */
2072 static int atl1e_close(struct net_device *netdev)
2073 {
2074 	struct atl1e_adapter *adapter = netdev_priv(netdev);
2075 
2076 	WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
2077 	atl1e_down(adapter);
2078 	atl1e_free_irq(adapter);
2079 	atl1e_free_ring_resources(adapter);
2080 
2081 	return 0;
2082 }
2083 
2084 static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
2085 {
2086 	struct net_device *netdev = pci_get_drvdata(pdev);
2087 	struct atl1e_adapter *adapter = netdev_priv(netdev);
2088 	struct atl1e_hw *hw = &adapter->hw;
2089 	u32 ctrl = 0;
2090 	u32 mac_ctrl_data = 0;
2091 	u32 wol_ctrl_data = 0;
2092 	u16 mii_advertise_data = 0;
2093 	u16 mii_bmsr_data = 0;
2094 	u16 mii_intr_status_data = 0;
2095 	u32 wufc = adapter->wol;
2096 	u32 i;
2097 #ifdef CONFIG_PM
2098 	int retval = 0;
2099 #endif
2100 
2101 	if (netif_running(netdev)) {
2102 		WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
2103 		atl1e_down(adapter);
2104 	}
2105 	netif_device_detach(netdev);
2106 
2107 #ifdef CONFIG_PM
2108 	retval = pci_save_state(pdev);
2109 	if (retval)
2110 		return retval;
2111 #endif
2112 
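	/*
	 * With wake-on-LAN enabled: drop the PHY advertisement to 10M half
	 * duplex to save power, keep the MAC receiver running, and arm the
	 * requested wake sources (magic packet and/or link change) before
	 * entering the low power state.
	 */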
2113 	if (wufc) {
2114 		/* get link status (read twice: link-down events are latched in BMSR) */
2115 		atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);
2116 		atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);
2117 
2118 		mii_advertise_data = ADVERTISE_10HALF;
2119 
2120 		if ((atl1e_write_phy_reg(hw, MII_CTRL1000, 0) != 0) ||
2121 		    (atl1e_write_phy_reg(hw,
2122 			   MII_ADVERTISE, mii_advertise_data) != 0) ||
2123 		    (atl1e_phy_commit(hw)) != 0) {
2124 			netdev_dbg(adapter->netdev, "set phy register failed\n");
2125 			goto wol_dis;
2126 		}
2127 
2128 		hw->phy_configured = false; /* re-init PHY when resume */
2129 
2130 		/* turn on magic packet wol */
2131 		if (wufc & AT_WUFC_MAG)
2132 			wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
2133 
2134 		if (wufc & AT_WUFC_LNKC) {
2135 		/* if the original link was up, just wait for it to come back */
2136 			if (mii_bmsr_data & BMSR_LSTATUS) {
2137 				for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
2138 					msleep(100);
2139 					atl1e_read_phy_reg(hw, MII_BMSR,
2140 							&mii_bmsr_data);
2141 					if (mii_bmsr_data & BMSR_LSTATUS)
2142 						break;
2143 				}
2144 
2145 				if ((mii_bmsr_data & BMSR_LSTATUS) == 0)
2146 					netdev_dbg(adapter->netdev,
2147 						   "Link may change during suspend\n");
2148 			}
2149 			wol_ctrl_data |=  WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
2150 			/* only a link-up event can wake the system */
2151 			if (atl1e_write_phy_reg(hw, MII_INT_CTRL, 0x400) != 0) {
2152 				netdev_dbg(adapter->netdev,
2153 					   "write phy register failed\n");
2154 				goto wol_dis;
2155 			}
2156 		}
2157 		/* clear phy interrupt */
2158 		atl1e_read_phy_reg(hw, MII_INT_STATUS, &mii_intr_status_data);
2159 		/* Config MAC Ctrl register */
2160 		mac_ctrl_data = MAC_CTRL_RX_EN;
2161 		/* set to 10/100M half duplex */
2162 		mac_ctrl_data |= MAC_CTRL_SPEED_10_100 << MAC_CTRL_SPEED_SHIFT;
2163 		mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
2164 				 MAC_CTRL_PRMLEN_MASK) <<
2165 				 MAC_CTRL_PRMLEN_SHIFT);
2166 
2167 		__atl1e_vlan_mode(netdev->features, &mac_ctrl_data);
2168 
2169 		/* a magic packet may arrive as a broadcast, multicast or unicast frame */
2170 		if (wufc & AT_WUFC_MAG)
2171 			mac_ctrl_data |= MAC_CTRL_BC_EN;
2172 
2173 		netdev_dbg(adapter->netdev, "suspend MAC=0x%x\n",
2174 			   mac_ctrl_data);
2175 
2176 		AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
2177 		AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
2178 		/* PCIe workaround: force receiver detect */
2179 		ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC);
2180 		ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
2181 		AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
2182 		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
2183 		goto suspend_exit;
2184 	}
2185 wol_dis:
2186 
2187 	/* WOL disabled */
2188 	AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
2189 
2190 	/* PCIe workaround: force receiver detect */
2191 	ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC);
2192 	ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
2193 	AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
2194 
2195 	atl1e_force_ps(hw);
2196 	hw->phy_configured = false; /* re-init PHY when resume */
2197 
2198 	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
2199 
2200 suspend_exit:
2201 
2202 	if (netif_running(netdev))
2203 		atl1e_free_irq(adapter);
2204 
2205 	pci_disable_device(pdev);
2206 
2207 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
2208 
2209 	return 0;
2210 }
2211 
2212 #ifdef CONFIG_PM
2213 static int atl1e_resume(struct pci_dev *pdev)
2214 {
2215 	struct net_device *netdev = pci_get_drvdata(pdev);
2216 	struct atl1e_adapter *adapter = netdev_priv(netdev);
2217 	int err;
2218 
2219 	pci_set_power_state(pdev, PCI_D0);
2220 	pci_restore_state(pdev);
2221 
2222 	err = pci_enable_device(pdev);
2223 	if (err) {
2224 		netdev_err(adapter->netdev,
2225 			   "Cannot enable PCI device from suspend\n");
2226 		return err;
2227 	}
2228 
2229 	pci_set_master(pdev);
2230 
2231 	AT_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */
2232 
2233 	pci_enable_wake(pdev, PCI_D3hot, 0);
2234 	pci_enable_wake(pdev, PCI_D3cold, 0);
2235 
2236 	AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
2237 
2238 	if (netif_running(netdev)) {
2239 		err = atl1e_request_irq(adapter);
2240 		if (err)
2241 			return err;
2242 	}
2243 
2244 	atl1e_reset_hw(&adapter->hw);
2245 
2246 	if (netif_running(netdev))
2247 		atl1e_up(adapter);
2248 
2249 	netif_device_attach(netdev);
2250 
2251 	return 0;
2252 }
2253 #endif
2254 
2255 static void atl1e_shutdown(struct pci_dev *pdev)
2256 {
2257 	atl1e_suspend(pdev, PMSG_SUSPEND);
2258 }
2259 
2260 static const struct net_device_ops atl1e_netdev_ops = {
2261 	.ndo_open		= atl1e_open,
2262 	.ndo_stop		= atl1e_close,
2263 	.ndo_start_xmit		= atl1e_xmit_frame,
2264 	.ndo_get_stats		= atl1e_get_stats,
2265 	.ndo_set_rx_mode	= atl1e_set_multi,
2266 	.ndo_validate_addr	= eth_validate_addr,
2267 	.ndo_set_mac_address	= atl1e_set_mac_addr,
2268 	.ndo_fix_features	= atl1e_fix_features,
2269 	.ndo_set_features	= atl1e_set_features,
2270 	.ndo_change_mtu		= atl1e_change_mtu,
2271 	.ndo_do_ioctl		= atl1e_ioctl,
2272 	.ndo_tx_timeout		= atl1e_tx_timeout,
2273 #ifdef CONFIG_NET_POLL_CONTROLLER
2274 	.ndo_poll_controller	= atl1e_netpoll,
2275 #endif
2277 };
2278 
2279 static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2280 {
2281 	SET_NETDEV_DEV(netdev, &pdev->dev);
2282 	pci_set_drvdata(pdev, netdev);
2283 
2284 	netdev->netdev_ops = &atl1e_netdev_ops;
2285 
2286 	netdev->watchdog_timeo = AT_TX_WATCHDOG;
2287 	atl1e_set_ethtool_ops(netdev);
2288 
2289 	netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO |
2290 			      NETIF_F_HW_VLAN_CTAG_RX;
2291 	netdev->features = netdev->hw_features | NETIF_F_LLTX |
2292 			   NETIF_F_HW_VLAN_CTAG_TX;
2293 	/* not enabled by default */
2294 	netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS;
2295 	return 0;
2296 }
2297 
2298 /**
2299  * atl1e_probe - Device Initialization Routine
2300  * @pdev: PCI device information struct
2301  * @ent: entry in atl1e_pci_tbl
2302  *
2303  * Returns 0 on success, negative on failure
2304  *
2305  * atl1e_probe initializes an adapter identified by a pci_dev structure.
2306  * The OS initialization, configuring of the adapter private structure,
2307  * and a hardware reset occur.
2308  */
2309 static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2310 {
2311 	struct net_device *netdev;
2312 	struct atl1e_adapter *adapter = NULL;
2313 	static int cards_found;
2314 
2315 	int err = 0;
2316 
2317 	err = pci_enable_device(pdev);
2318 	if (err) {
2319 		dev_err(&pdev->dev, "cannot enable PCI device\n");
2320 		return err;
2321 	}
2322 
2323 	/*
2324 	 * The atl1e chip can DMA to 64-bit addresses, but it uses a single
2325 	 * shared register for the high 32 bits, so only a single, aligned,
2326 	 * 4 GB physical address range can be used at a time.
2327 	 *
2328 	 * Supporting 64-bit DMA on this hardware is more trouble than it's
2329 	 * worth.  It is far easier to limit to 32-bit DMA than update
2330 	 * various kernel subsystems to support the mechanics required by a
2331 	 * fixed-high-32-bit system.
2332 	 */
2333 	if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
2334 	    (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
2335 		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
2336 		goto err_dma;
2337 	}
2338 
2339 	err = pci_request_regions(pdev, atl1e_driver_name);
2340 	if (err) {
2341 		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
2342 		goto err_pci_reg;
2343 	}
2344 
2345 	pci_set_master(pdev);
2346 
2347 	netdev = alloc_etherdev(sizeof(struct atl1e_adapter));
2348 	if (netdev == NULL) {
2349 		err = -ENOMEM;
2350 		goto err_alloc_etherdev;
2351 	}
2352 
2353 	err = atl1e_init_netdev(netdev, pdev);
2354 	if (err) {
2355 		netdev_err(netdev, "init netdevice failed\n");
2356 		goto err_init_netdev;
2357 	}
2358 	adapter = netdev_priv(netdev);
2359 	adapter->bd_number = cards_found;
2360 	adapter->netdev = netdev;
2361 	adapter->pdev = pdev;
2362 	adapter->hw.adapter = adapter;
2363 	adapter->hw.hw_addr = pci_iomap(pdev, BAR_0, 0);
2364 	if (!adapter->hw.hw_addr) {
2365 		err = -EIO;
2366 		netdev_err(netdev, "cannot map device registers\n");
2367 		goto err_ioremap;
2368 	}
2369 
2370 	/* init mii data */
2371 	adapter->mii.dev = netdev;
2372 	adapter->mii.mdio_read  = atl1e_mdio_read;
2373 	adapter->mii.mdio_write = atl1e_mdio_write;
2374 	adapter->mii.phy_id_mask = 0x1f;
2375 	adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK;
2376 
2377 	netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
2378 
2379 	init_timer(&adapter->phy_config_timer);
2380 	adapter->phy_config_timer.function = atl1e_phy_config;
2381 	adapter->phy_config_timer.data = (unsigned long) adapter;
2382 
2383 	/* get user settings */
2384 	atl1e_check_options(adapter);
2385 	/*
2386 	 * Make sure the PCI command register has memory-space access
2387 	 * and bus mastering enabled; some BIOSes leave these bits
2388 	 * cleared, so they are set again before the device registers
2389 	 * are touched.
2390 	 */
2391 	atl1e_setup_pcicmd(pdev);
2392 	/* setup the private structure */
2393 	err = atl1e_sw_init(adapter);
2394 	if (err) {
2395 		netdev_err(netdev, "net device private data init failed\n");
2396 		goto err_sw_init;
2397 	}
2398 
2399 	/* Init GPHY as early as possible due to power-saving issues */
2400 	atl1e_phy_init(&adapter->hw);
2401 	/* reset the controller to
2402 	 * put the device in a known good starting state */
2403 	err = atl1e_reset_hw(&adapter->hw);
2404 	if (err) {
2405 		err = -EIO;
2406 		goto err_reset;
2407 	}
2408 
2409 	if (atl1e_read_mac_addr(&adapter->hw) != 0) {
2410 		err = -EIO;
2411 		netdev_err(netdev, "get mac address failed\n");
2412 		goto err_eeprom;
2413 	}
2414 
2415 	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
2416 	netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr);
2417 
2418 	INIT_WORK(&adapter->reset_task, atl1e_reset_task);
2419 	INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
2420 	netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE);
2421 	err = register_netdev(netdev);
2422 	if (err) {
2423 		netdev_err(netdev, "register netdevice failed\n");
2424 		goto err_register;
2425 	}
2426 
2427 	/* assume we have no link for now */
2428 	netif_stop_queue(netdev);
2429 	netif_carrier_off(netdev);
2430 
2431 	cards_found++;
2432 
2433 	return 0;
2434 
2435 err_reset:
2436 err_register:
2437 err_sw_init:
2438 err_eeprom:
2439 	iounmap(adapter->hw.hw_addr);
2440 err_init_netdev:
2441 err_ioremap:
2442 	free_netdev(netdev);
2443 err_alloc_etherdev:
2444 	pci_release_regions(pdev);
2445 err_pci_reg:
2446 err_dma:
2447 	pci_disable_device(pdev);
2448 	return err;
2449 }
2450 
2451 /**
2452  * atl1e_remove - Device Removal Routine
2453  * @pdev: PCI device information struct
2454  *
2455  * atl1e_remove is called by the PCI subsystem to alert the driver
2456  * that it should release a PCI device.  This could be caused by a
2457  * Hot-Plug event, or because the driver is going to be removed from
2458  * memory.
2459  */
2460 static void atl1e_remove(struct pci_dev *pdev)
2461 {
2462 	struct net_device *netdev = pci_get_drvdata(pdev);
2463 	struct atl1e_adapter *adapter = netdev_priv(netdev);
2464 
2465 	/*
2466 	 * flush_scheduled_work() may reschedule our watchdog task, so
2467 	 * explicitly prevent the watchdog task from being rescheduled
2468 	 */
2469 	set_bit(__AT_DOWN, &adapter->flags);
2470 
2471 	atl1e_del_timer(adapter);
2472 	atl1e_cancel_work(adapter);
2473 
2474 	unregister_netdev(netdev);
2475 	atl1e_free_ring_resources(adapter);
2476 	atl1e_force_ps(&adapter->hw);
2477 	iounmap(adapter->hw.hw_addr);
2478 	pci_release_regions(pdev);
2479 	free_netdev(netdev);
2480 	pci_disable_device(pdev);
2481 }
2482 
2483 /**
2484  * atl1e_io_error_detected - called when PCI error is detected
2485  * @pdev: Pointer to PCI device
2486  * @state: The current pci connection state
2487  *
2488  * This function is called after a PCI bus error affecting
2489  * this device has been detected.
2490  */
2491 static pci_ers_result_t
2492 atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
2493 {
2494 	struct net_device *netdev = pci_get_drvdata(pdev);
2495 	struct atl1e_adapter *adapter = netdev_priv(netdev);
2496 
2497 	netif_device_detach(netdev);
2498 
2499 	if (state == pci_channel_io_perm_failure)
2500 		return PCI_ERS_RESULT_DISCONNECT;
2501 
2502 	if (netif_running(netdev))
2503 		atl1e_down(adapter);
2504 
2505 	pci_disable_device(pdev);
2506 
2507 	/* Request a slot reset. */
2508 	return PCI_ERS_RESULT_NEED_RESET;
2509 }
2510 
2511 /**
2512  * atl1e_io_slot_reset - called after the pci bus has been reset.
2513  * @pdev: Pointer to PCI device
2514  *
2515  * Restart the card from scratch, as if from a cold boot. Implementation
2516  * resembles the first half of the atl1e_resume routine.
2517  */
2518 static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev)
2519 {
2520 	struct net_device *netdev = pci_get_drvdata(pdev);
2521 	struct atl1e_adapter *adapter = netdev_priv(netdev);
2522 
2523 	if (pci_enable_device(pdev)) {
2524 		netdev_err(adapter->netdev,
2525 			   "Cannot re-enable PCI device after reset\n");
2526 		return PCI_ERS_RESULT_DISCONNECT;
2527 	}
2528 	pci_set_master(pdev);
2529 
2530 	pci_enable_wake(pdev, PCI_D3hot, 0);
2531 	pci_enable_wake(pdev, PCI_D3cold, 0);
2532 
2533 	atl1e_reset_hw(&adapter->hw);
2534 
2535 	return PCI_ERS_RESULT_RECOVERED;
2536 }
2537 
2538 /**
2539  * atl1e_io_resume - called when traffic can start flowing again.
2540  * @pdev: Pointer to PCI device
2541  *
2542  * This callback is called when the error recovery driver tells us that
2543  * it's OK to resume normal operation. Implementation resembles the
2544  * second half of the atl1e_resume routine.
2545  */
2546 static void atl1e_io_resume(struct pci_dev *pdev)
2547 {
2548 	struct net_device *netdev = pci_get_drvdata(pdev);
2549 	struct atl1e_adapter *adapter = netdev_priv(netdev);
2550 
2551 	if (netif_running(netdev)) {
2552 		if (atl1e_up(adapter)) {
2553 			netdev_err(adapter->netdev,
2554 				   "can't bring device back up after reset\n");
2555 			return;
2556 		}
2557 	}
2558 
2559 	netif_device_attach(netdev);
2560 }
2561 
2562 static const struct pci_error_handlers atl1e_err_handler = {
2563 	.error_detected = atl1e_io_error_detected,
2564 	.slot_reset = atl1e_io_slot_reset,
2565 	.resume = atl1e_io_resume,
2566 };
2567 
2568 static struct pci_driver atl1e_driver = {
2569 	.name     = atl1e_driver_name,
2570 	.id_table = atl1e_pci_tbl,
2571 	.probe    = atl1e_probe,
2572 	.remove   = atl1e_remove,
2573 	/* Power Management Hooks */
2574 #ifdef CONFIG_PM
2575 	.suspend  = atl1e_suspend,
2576 	.resume   = atl1e_resume,
2577 #endif
2578 	.shutdown = atl1e_shutdown,
2579 	.err_handler = &atl1e_err_handler
2580 };
2581 
2582 module_pci_driver(atl1e_driver);
2583