/*
 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
 *
 *  This file is free software: you may copy, redistribute and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation, either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  This file is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/ip6_checksum.h>
#include <linux/crc32.h>
#include "alx.h"
#include "hw.h"
#include "reg.h"

const char alx_drv_name[] = "alx";

static void alx_free_txbuf(struct alx_tx_queue *txq, int entry)
{
	struct alx_buffer *txb = &txq->bufs[entry];

	if (dma_unmap_len(txb, size)) {
		dma_unmap_single(txq->dev,
				 dma_unmap_addr(txb, dma),
				 dma_unmap_len(txb, size),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(txb, size, 0);
	}

	if (txb->skb) {
		dev_kfree_skb_any(txb->skb);
		txb->skb = NULL;
	}
}

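/* Fill empty RFD slots with freshly allocated, DMA-mapped skbs and advance
 * the producer index. Returns the number of buffers added to the ring.
 */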
static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
{
	struct alx_rx_queue *rxq = alx->qnapi[0]->rxq;
	struct sk_buff *skb;
	struct alx_buffer *cur_buf;
	dma_addr_t dma;
	u16 cur, next, count = 0;

	next = cur = rxq->write_idx;
	if (++next == alx->rx_ringsz)
		next = 0;
	cur_buf = &rxq->bufs[cur];

	while (!cur_buf->skb && next != rxq->read_idx) {
		struct alx_rfd *rfd = &rxq->rfd[cur];

		/*
		 * When the RX DMA address is set to something like
		 * 0x....fc0, it is very likely to trigger a DMA
		 * RFD overflow issue.
		 *
		 * Work around it by allocating the rx skb with 64
		 * bytes of extra space and offsetting the address
		 * whenever 0x....fc0 is detected.
		 */
		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
		if (!skb)
			break;

		if (((unsigned long)skb->data & 0xfff) == 0xfc0)
			skb_reserve(skb, 64);

		dma = dma_map_single(&alx->hw.pdev->dev,
				     skb->data, alx->rxbuf_size,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(&alx->hw.pdev->dev, dma)) {
			dev_kfree_skb(skb);
			break;
		}

		/* Unfortunately, RX descriptor buffers must be 4-byte
		 * aligned, so we can't use IP alignment.
		 */
		if (WARN_ON(dma & 3)) {
			dev_kfree_skb(skb);
			break;
		}

		cur_buf->skb = skb;
		dma_unmap_len_set(cur_buf, size, alx->rxbuf_size);
		dma_unmap_addr_set(cur_buf, dma, dma);
		rfd->addr = cpu_to_le64(dma);

		cur = next;
		if (++next == alx->rx_ringsz)
			next = 0;
		cur_buf = &rxq->bufs[cur];
		count++;
	}

	if (count) {
		/* flush all updates before updating hardware */
		wmb();
		rxq->write_idx = cur;
		alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
	}

	return count;
}

static struct alx_tx_queue *alx_tx_queue_mapping(struct alx_priv *alx,
						 struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= alx->num_txq)
		r_idx = r_idx % alx->num_txq;

	return alx->qnapi[r_idx]->txq;
}

static struct netdev_queue *alx_get_tx_queue(const struct alx_tx_queue *txq)
{
	return netdev_get_tx_queue(txq->netdev, txq->queue_idx);
}

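/* Number of free tx descriptors; one slot is always left unused so that a
 * completely full ring can be distinguished from an empty one.
 */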
static inline int alx_tpd_avail(struct alx_tx_queue *txq)
{
	if (txq->write_idx >= txq->read_idx)
		return txq->count + txq->read_idx - txq->write_idx - 1;
	return txq->read_idx - txq->write_idx - 1;
}

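/* Reap descriptors the hardware has consumed, up to the consumer index it
 * reports and at most ALX_DEFAULT_TX_WORK packets, then wake the queue if it
 * was stopped and enough space has been freed. Returns true once the
 * software read index has caught up with the hardware.
 */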
static bool alx_clean_tx_irq(struct alx_tx_queue *txq)
{
	struct alx_priv *alx;
	struct netdev_queue *tx_queue;
	u16 hw_read_idx, sw_read_idx;
	unsigned int total_bytes = 0, total_packets = 0;
	int budget = ALX_DEFAULT_TX_WORK;

	alx = netdev_priv(txq->netdev);
	tx_queue = alx_get_tx_queue(txq);

	sw_read_idx = txq->read_idx;
	hw_read_idx = alx_read_mem16(&alx->hw, txq->c_reg);

	if (sw_read_idx != hw_read_idx) {
		while (sw_read_idx != hw_read_idx && budget > 0) {
			struct sk_buff *skb;

			skb = txq->bufs[sw_read_idx].skb;
			if (skb) {
				total_bytes += skb->len;
				total_packets++;
				budget--;
			}

			alx_free_txbuf(txq, sw_read_idx);

			if (++sw_read_idx == txq->count)
				sw_read_idx = 0;
		}
		txq->read_idx = sw_read_idx;

		netdev_tx_completed_queue(tx_queue, total_packets, total_bytes);
	}

	if (netif_tx_queue_stopped(tx_queue) && netif_carrier_ok(alx->dev) &&
	    alx_tpd_avail(txq) > txq->count / 4)
		netif_tx_wake_queue(tx_queue);

	return sw_read_idx == hw_read_idx;
}

static void alx_schedule_link_check(struct alx_priv *alx)
{
	schedule_work(&alx->link_check_wk);
}

static void alx_schedule_reset(struct alx_priv *alx)
{
	schedule_work(&alx->reset_wk);
}

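/* Process up to @budget received packets: unmap each completed buffer, do
 * the RX checksum offload bookkeeping, hand the skb to GRO and refill the
 * ring as buffers are consumed. An inconsistent RRD schedules a full reset.
 * Returns the number of packets processed.
 */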
static int alx_clean_rx_irq(struct alx_rx_queue *rxq, int budget)
{
	struct alx_priv *alx;
	struct alx_rrd *rrd;
	struct alx_buffer *rxb;
	struct sk_buff *skb;
	u16 length, rfd_cleaned = 0;
	int work = 0;

	alx = netdev_priv(rxq->netdev);

	while (work < budget) {
		rrd = &rxq->rrd[rxq->rrd_read_idx];
		if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
			break;
		rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT);

		if (ALX_GET_FIELD(le32_to_cpu(rrd->word0),
				  RRD_SI) != rxq->read_idx ||
		    ALX_GET_FIELD(le32_to_cpu(rrd->word0),
				  RRD_NOR) != 1) {
			alx_schedule_reset(alx);
			return work;
		}

		rxb = &rxq->bufs[rxq->read_idx];
		dma_unmap_single(rxq->dev,
				 dma_unmap_addr(rxb, dma),
				 dma_unmap_len(rxb, size),
				 DMA_FROM_DEVICE);
		dma_unmap_len_set(rxb, size, 0);
		skb = rxb->skb;
		rxb->skb = NULL;

		if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) ||
		    rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) {
			rrd->word3 = 0;
			dev_kfree_skb_any(skb);
			goto next_pkt;
		}

		length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
				       RRD_PKTLEN) - ETH_FCS_LEN;
		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, rxq->netdev);

		skb_checksum_none_assert(skb);
		if (alx->dev->features & NETIF_F_RXCSUM &&
		    !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) |
				    cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) {
			switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2),
					      RRD_PID)) {
			case RRD_PID_IPV6UDP:
			case RRD_PID_IPV4UDP:
			case RRD_PID_IPV4TCP:
			case RRD_PID_IPV6TCP:
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				break;
			}
		}

		napi_gro_receive(&rxq->np->napi, skb);
		work++;

next_pkt:
		if (++rxq->read_idx == rxq->count)
			rxq->read_idx = 0;
		if (++rxq->rrd_read_idx == rxq->count)
			rxq->rrd_read_idx = 0;

		if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
			rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC);
	}

	if (rfd_cleaned)
		alx_refill_rx_ring(alx, GFP_ATOMIC);

	return work;
}

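/* NAPI poll: clean the tx and/or rx ring of this vector; once tx is fully
 * reaped and rx stayed under budget, complete NAPI and re-enable the
 * corresponding interrupt.
 */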
static int alx_poll(struct napi_struct *napi, int budget)
{
	struct alx_napi *np = container_of(napi, struct alx_napi, napi);
	struct alx_priv *alx = np->alx;
	struct alx_hw *hw = &alx->hw;
	unsigned long flags;
	bool tx_complete = true;
	int work = 0;

	if (np->txq)
		tx_complete = alx_clean_tx_irq(np->txq);
	if (np->rxq)
		work = alx_clean_rx_irq(np->rxq, budget);

	if (!tx_complete || work == budget)
		return budget;

	napi_complete(&np->napi);

	/* enable interrupt */
	if (alx->flags & ALX_FLAG_USING_MSIX) {
		alx_mask_msix(hw, np->vec_idx, false);
	} else {
		spin_lock_irqsave(&alx->irq_lock, flags);
		alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
		spin_unlock_irqrestore(&alx->irq_lock, flags);
	}

	alx_post_write(hw);

	return work;
}

static bool alx_intr_handle_misc(struct alx_priv *alx, u32 intr)
{
	struct alx_hw *hw = &alx->hw;

	if (intr & ALX_ISR_FATAL) {
		netif_warn(alx, hw, alx->dev,
			   "fatal interrupt 0x%x, resetting\n", intr);
		alx_schedule_reset(alx);
		return true;
	}

	if (intr & ALX_ISR_ALERT)
		netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr);

	if (intr & ALX_ISR_PHY) {
		/* Suppress the PHY interrupt: its source is internal
		 * to the PHY, and the interrupt status can only be
		 * cleared after the PHY's internal status has been
		 * cleared (done in the link-check worker).
		 */
		alx->int_mask &= ~ALX_ISR_PHY;
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
		alx_schedule_link_check(alx);
	}

	return false;
}

static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
{
	struct alx_hw *hw = &alx->hw;

	spin_lock(&alx->irq_lock);

	/* ACK interrupt */
	alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
	intr &= alx->int_mask;

	if (alx_intr_handle_misc(alx, intr))
		goto out;

	if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
		napi_schedule(&alx->qnapi[0]->napi);
		/* mask rx/tx interrupts, re-enable them when NAPI completes */
		alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	}

	alx_write_mem32(hw, ALX_ISR, 0);

 out:
	spin_unlock(&alx->irq_lock);
	return IRQ_HANDLED;
}

static irqreturn_t alx_intr_msix_ring(int irq, void *data)
{
	struct alx_napi *np = data;
	struct alx_hw *hw = &np->alx->hw;

	/* mask interrupt to ACK chip */
	alx_mask_msix(hw, np->vec_idx, true);
	/* clear interrupt status */
	alx_write_mem32(hw, ALX_ISR, np->vec_mask);

	napi_schedule(&np->napi);

	return IRQ_HANDLED;
}

static irqreturn_t alx_intr_msix_misc(int irq, void *data)
{
	struct alx_priv *alx = data;
	struct alx_hw *hw = &alx->hw;
	u32 intr;

	/* mask interrupt to ACK chip */
	alx_mask_msix(hw, 0, true);

	/* read interrupt status */
	intr = alx_read_mem32(hw, ALX_ISR);
	intr &= (alx->int_mask & ~ALX_ISR_ALL_QUEUES);

	if (alx_intr_handle_misc(alx, intr))
		return IRQ_HANDLED;

	/* clear interrupt status */
	alx_write_mem32(hw, ALX_ISR, intr);

	/* enable interrupt again */
	alx_mask_msix(hw, 0, false);

	return IRQ_HANDLED;
}

static irqreturn_t alx_intr_msi(int irq, void *data)
{
	struct alx_priv *alx = data;

	return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR));
}

static irqreturn_t alx_intr_legacy(int irq, void *data)
{
	struct alx_priv *alx = data;
	struct alx_hw *hw = &alx->hw;
	u32 intr;

	intr = alx_read_mem32(hw, ALX_ISR);

	if (intr & ALX_ISR_DIS || !(intr & alx->int_mask))
		return IRQ_NONE;

	return alx_intr_handle(alx, intr);
}

static const u16 txring_header_reg[] = {ALX_TPD_PRI0_ADDR_LO,
					ALX_TPD_PRI1_ADDR_LO,
					ALX_TPD_PRI2_ADDR_LO,
					ALX_TPD_PRI3_ADDR_LO};

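/* Reset all software ring indices and program the descriptor base addresses
 * and ring sizes into the chip, then tell it to load the new pointers.
 */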
static void alx_init_ring_ptrs(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	u32 addr_hi = ((u64)alx->descmem.dma) >> 32;
	struct alx_napi *np;
	int i;

	for (i = 0; i < alx->num_napi; i++) {
		np = alx->qnapi[i];
		if (np->txq) {
			np->txq->read_idx = 0;
			np->txq->write_idx = 0;
			alx_write_mem32(hw,
					txring_header_reg[np->txq->queue_idx],
					np->txq->tpd_dma);
		}

		if (np->rxq) {
			np->rxq->read_idx = 0;
			np->rxq->write_idx = 0;
			np->rxq->rrd_read_idx = 0;
			alx_write_mem32(hw, ALX_RRD_ADDR_LO, np->rxq->rrd_dma);
			alx_write_mem32(hw, ALX_RFD_ADDR_LO, np->rxq->rfd_dma);
		}
	}

	alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
	alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);

	alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
	alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
	alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
	alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);

	/* load these pointers into the chip */
	alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
}

static void alx_free_txring_buf(struct alx_tx_queue *txq)
{
	int i;

	if (!txq->bufs)
		return;

	for (i = 0; i < txq->count; i++)
		alx_free_txbuf(txq, i);

	memset(txq->bufs, 0, txq->count * sizeof(struct alx_buffer));
	memset(txq->tpd, 0, txq->count * sizeof(struct alx_txd));
	txq->write_idx = 0;
	txq->read_idx = 0;

	netdev_tx_reset_queue(alx_get_tx_queue(txq));
}

static void alx_free_rxring_buf(struct alx_rx_queue *rxq)
{
	struct alx_buffer *cur_buf;
	u16 i;

	if (!rxq->bufs)
		return;

	for (i = 0; i < rxq->count; i++) {
		cur_buf = rxq->bufs + i;
		if (cur_buf->skb) {
			dma_unmap_single(rxq->dev,
					 dma_unmap_addr(cur_buf, dma),
					 dma_unmap_len(cur_buf, size),
					 DMA_FROM_DEVICE);
			dev_kfree_skb(cur_buf->skb);
			cur_buf->skb = NULL;
			dma_unmap_len_set(cur_buf, size, 0);
			dma_unmap_addr_set(cur_buf, dma, 0);
		}
	}

	rxq->write_idx = 0;
	rxq->read_idx = 0;
	rxq->rrd_read_idx = 0;
}

static void alx_free_buffers(struct alx_priv *alx)
{
	int i;

	for (i = 0; i < alx->num_txq; i++)
		if (alx->qnapi[i] && alx->qnapi[i]->txq)
			alx_free_txring_buf(alx->qnapi[i]->txq);

	if (alx->qnapi[0] && alx->qnapi[0]->rxq)
		alx_free_rxring_buf(alx->qnapi[0]->rxq);
}

static int alx_reinit_rings(struct alx_priv *alx)
{
	alx_free_buffers(alx);

	alx_init_ring_ptrs(alx);

	if (!alx_refill_rx_ring(alx, GFP_KERNEL))
		return -ENOMEM;

	return 0;
}

static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash)
{
	u32 crc32, bit, reg;

	crc32 = ether_crc(ETH_ALEN, addr);
	reg = (crc32 >> 31) & 0x1;
	bit = (crc32 >> 26) & 0x1F;

	mc_hash[reg] |= BIT(bit);
}

static void __alx_set_rx_mode(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	struct netdev_hw_addr *ha;
	u32 mc_hash[2] = {};

	if (!(netdev->flags & IFF_ALLMULTI)) {
		netdev_for_each_mc_addr(ha, netdev)
			alx_add_mc_addr(hw, ha->addr, mc_hash);

		alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]);
		alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]);
	}

	hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN);
	if (netdev->flags & IFF_PROMISC)
		hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN;
	if (netdev->flags & IFF_ALLMULTI)
		hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN;

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}

static void alx_set_rx_mode(struct net_device *netdev)
{
	__alx_set_rx_mode(netdev);
}

static int alx_set_mac_address(struct net_device *netdev, void *data)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	struct sockaddr *addr = data;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (netdev->addr_assign_type & NET_ADDR_RANDOM)
		netdev->addr_assign_type ^= NET_ADDR_RANDOM;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
	alx_set_macaddr(hw, hw->mac_addr);

	return 0;
}

static int alx_alloc_tx_ring(struct alx_priv *alx, struct alx_tx_queue *txq,
			     int offset)
{
	txq->bufs = kcalloc(txq->count, sizeof(struct alx_buffer), GFP_KERNEL);
	if (!txq->bufs)
		return -ENOMEM;

	txq->tpd = alx->descmem.virt + offset;
	txq->tpd_dma = alx->descmem.dma + offset;
	offset += sizeof(struct alx_txd) * txq->count;

	return offset;
}

static int alx_alloc_rx_ring(struct alx_priv *alx, struct alx_rx_queue *rxq,
			     int offset)
{
	rxq->bufs = kcalloc(rxq->count, sizeof(struct alx_buffer), GFP_KERNEL);
	if (!rxq->bufs)
		return -ENOMEM;

	rxq->rrd = alx->descmem.virt + offset;
	rxq->rrd_dma = alx->descmem.dma + offset;
	offset += sizeof(struct alx_rrd) * rxq->count;

	rxq->rfd = alx->descmem.virt + offset;
	rxq->rfd_dma = alx->descmem.dma + offset;
	offset += sizeof(struct alx_rfd) * rxq->count;

	return offset;
}

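/* Allocate the shared descriptor memory and the per-queue buffer arrays,
 * carving each tx/rx ring out of the single coherent block.
 */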
static int alx_alloc_rings(struct alx_priv *alx)
{
	int i, offset = 0;

	/* physical tx/rx ring descriptors
	 *
	 * Allocate them as a single chunk because they must not cross a
	 * 4G boundary (hardware has a single register for high 32 bits
	 * of addresses only)
	 */
	alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz *
			    alx->num_txq +
			    sizeof(struct alx_rrd) * alx->rx_ringsz +
			    sizeof(struct alx_rfd) * alx->rx_ringsz;
	alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
						alx->descmem.size,
						&alx->descmem.dma,
						GFP_KERNEL);
	if (!alx->descmem.virt)
		return -ENOMEM;

	/* alignment requirements */
	BUILD_BUG_ON(sizeof(struct alx_txd) % 8);
	BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);

	for (i = 0; i < alx->num_txq; i++) {
		offset = alx_alloc_tx_ring(alx, alx->qnapi[i]->txq, offset);
		if (offset < 0) {
			netdev_err(alx->dev, "Allocation of tx buffer failed!\n");
			return -ENOMEM;
		}
	}

	offset = alx_alloc_rx_ring(alx, alx->qnapi[0]->rxq, offset);
	if (offset < 0) {
		netdev_err(alx->dev, "Allocation of rx buffer failed!\n");
		return -ENOMEM;
	}

	alx_reinit_rings(alx);

	return 0;
}

static void alx_free_rings(struct alx_priv *alx)
{
	int i;

	alx_free_buffers(alx);

	for (i = 0; i < alx->num_txq; i++)
		if (alx->qnapi[i] && alx->qnapi[i]->txq)
			kfree(alx->qnapi[i]->txq->bufs);

	if (alx->qnapi[0] && alx->qnapi[0]->rxq)
		kfree(alx->qnapi[0]->rxq->bufs);

	if (alx->descmem.virt)
		dma_free_coherent(&alx->hw.pdev->dev,
				  alx->descmem.size,
				  alx->descmem.virt,
				  alx->descmem.dma);
}

static void alx_free_napis(struct alx_priv *alx)
{
	struct alx_napi *np;
	int i;

	for (i = 0; i < alx->num_napi; i++) {
		np = alx->qnapi[i];
		if (!np)
			continue;

		netif_napi_del(&np->napi);
		kfree(np->txq);
		kfree(np->rxq);
		kfree(np);
		alx->qnapi[i] = NULL;
	}
}

static const u16 tx_pidx_reg[] = {ALX_TPD_PRI0_PIDX, ALX_TPD_PRI1_PIDX,
				  ALX_TPD_PRI2_PIDX, ALX_TPD_PRI3_PIDX};
static const u16 tx_cidx_reg[] = {ALX_TPD_PRI0_CIDX, ALX_TPD_PRI1_CIDX,
				  ALX_TPD_PRI2_CIDX, ALX_TPD_PRI3_CIDX};
static const u32 tx_vect_mask[] = {ALX_ISR_TX_Q0, ALX_ISR_TX_Q1,
				   ALX_ISR_TX_Q2, ALX_ISR_TX_Q3};
static const u32 rx_vect_mask[] = {ALX_ISR_RX_Q0, ALX_ISR_RX_Q1,
				   ALX_ISR_RX_Q2, ALX_ISR_RX_Q3,
				   ALX_ISR_RX_Q4, ALX_ISR_RX_Q5,
				   ALX_ISR_RX_Q6, ALX_ISR_RX_Q7};

static int alx_alloc_napis(struct alx_priv *alx)
{
	struct alx_napi *np;
	struct alx_rx_queue *rxq;
	struct alx_tx_queue *txq;
	int i;

	alx->int_mask &= ~ALX_ISR_ALL_QUEUES;

	/* allocate alx_napi structures */
	for (i = 0; i < alx->num_napi; i++) {
		np = kzalloc(sizeof(struct alx_napi), GFP_KERNEL);
		if (!np)
			goto err_out;

		np->alx = alx;
		netif_napi_add(alx->dev, &np->napi, alx_poll, 64);
		alx->qnapi[i] = np;
	}

	/* allocate tx queues */
	for (i = 0; i < alx->num_txq; i++) {
		np = alx->qnapi[i];
		txq = kzalloc(sizeof(*txq), GFP_KERNEL);
		if (!txq)
			goto err_out;

		np->txq = txq;
		txq->p_reg = tx_pidx_reg[i];
		txq->c_reg = tx_cidx_reg[i];
		txq->queue_idx = i;
		txq->count = alx->tx_ringsz;
		txq->netdev = alx->dev;
		txq->dev = &alx->hw.pdev->dev;
		np->vec_mask |= tx_vect_mask[i];
		alx->int_mask |= tx_vect_mask[i];
	}

	/* allocate rx queues */
	np = alx->qnapi[0];
	rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
	if (!rxq)
		goto err_out;

	np->rxq = rxq;
	rxq->np = alx->qnapi[0];
	rxq->queue_idx = 0;
	rxq->count = alx->rx_ringsz;
	rxq->netdev = alx->dev;
	rxq->dev = &alx->hw.pdev->dev;
	np->vec_mask |= rx_vect_mask[0];
	alx->int_mask |= rx_vect_mask[0];

	return 0;

err_out:
	netdev_err(alx->dev, "error allocating internal structures\n");
	alx_free_napis(alx);
	return -ENOMEM;
}

static const int txq_vec_mapping_shift[] = {
	0, ALX_MSI_MAP_TBL1_TXQ0_SHIFT,
	0, ALX_MSI_MAP_TBL1_TXQ1_SHIFT,
	1, ALX_MSI_MAP_TBL2_TXQ2_SHIFT,
	1, ALX_MSI_MAP_TBL2_TXQ3_SHIFT,
};

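/* Program the MSI mapping tables: with MSI-X, vector 0 handles the misc
 * interrupts and each queue gets its own vector; otherwise everything stays
 * mapped to vector 0.
 */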
static void alx_config_vector_mapping(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	u32 tbl[2] = {0, 0};
	int i, vector, idx, shift;

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		/* tx mappings */
		for (i = 0, vector = 1; i < alx->num_txq; i++, vector++) {
			idx = txq_vec_mapping_shift[i * 2];
			shift = txq_vec_mapping_shift[i * 2 + 1];
			tbl[idx] |= vector << shift;
		}

		/* rx mapping */
		tbl[0] |= 1 << ALX_MSI_MAP_TBL1_RXQ0_SHIFT;
	}

	alx_write_mem32(hw, ALX_MSI_MAP_TBL1, tbl[0]);
	alx_write_mem32(hw, ALX_MSI_MAP_TBL2, tbl[1]);
	alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
}

static bool alx_enable_msix(struct alx_priv *alx)
{
	int i, err, num_vec, num_txq, num_rxq;

	num_txq = min_t(int, num_online_cpus(), ALX_MAX_TX_QUEUES);
	num_rxq = 1;
	num_vec = max_t(int, num_txq, num_rxq) + 1;

	alx->msix_entries = kcalloc(num_vec, sizeof(struct msix_entry),
				    GFP_KERNEL);
	if (!alx->msix_entries) {
		netdev_warn(alx->dev, "Allocation of msix entries failed!\n");
		return false;
	}

	for (i = 0; i < num_vec; i++)
		alx->msix_entries[i].entry = i;

	err = pci_enable_msix(alx->hw.pdev, alx->msix_entries, num_vec);
	if (err) {
		kfree(alx->msix_entries);
		netdev_warn(alx->dev, "Enabling MSI-X interrupts failed!\n");
		return false;
	}

	alx->num_vec = num_vec;
	alx->num_napi = num_vec - 1;
	alx->num_txq = num_txq;
	alx->num_rxq = num_rxq;

	return true;
}

static int alx_request_msix(struct alx_priv *alx)
{
	struct net_device *netdev = alx->dev;
	int i, err, vector = 0, free_vector = 0;

	err = request_irq(alx->msix_entries[0].vector, alx_intr_msix_misc,
			  0, netdev->name, alx);
	if (err)
		goto out_err;

	for (i = 0; i < alx->num_napi; i++) {
		struct alx_napi *np = alx->qnapi[i];

		vector++;

		if (np->txq && np->rxq)
			sprintf(np->irq_lbl, "%s-TxRx-%u", netdev->name,
				np->txq->queue_idx);
		else if (np->txq)
			sprintf(np->irq_lbl, "%s-tx-%u", netdev->name,
				np->txq->queue_idx);
		else if (np->rxq)
			sprintf(np->irq_lbl, "%s-rx-%u", netdev->name,
				np->rxq->queue_idx);
		else
			sprintf(np->irq_lbl, "%s-unused", netdev->name);

		np->vec_idx = vector;
		err = request_irq(alx->msix_entries[vector].vector,
				  alx_intr_msix_ring, 0, np->irq_lbl, np);
		if (err)
			goto out_free;
	}
	return 0;

out_free:
	free_irq(alx->msix_entries[free_vector++].vector, alx);

	vector--;
	for (i = 0; i < vector; i++)
		free_irq(alx->msix_entries[free_vector++].vector,
			 alx->qnapi[i]);

out_err:
	return err;
}

static void alx_init_intr(struct alx_priv *alx, bool msix)
{
	if (msix) {
		if (alx_enable_msix(alx))
			alx->flags |= ALX_FLAG_USING_MSIX;
	}

	if (!(alx->flags & ALX_FLAG_USING_MSIX)) {
		alx->num_vec = 1;
		alx->num_napi = 1;
		alx->num_txq = 1;
		alx->num_rxq = 1;

		if (!pci_enable_msi(alx->hw.pdev))
			alx->flags |= ALX_FLAG_USING_MSI;
	}
}

static void alx_disable_advanced_intr(struct alx_priv *alx)
{
	if (alx->flags & ALX_FLAG_USING_MSIX) {
		kfree(alx->msix_entries);
		pci_disable_msix(alx->hw.pdev);
		alx->flags &= ~ALX_FLAG_USING_MSIX;
	}

	if (alx->flags & ALX_FLAG_USING_MSI) {
		pci_disable_msi(alx->hw.pdev);
		alx->flags &= ~ALX_FLAG_USING_MSI;
	}
}

static void alx_irq_enable(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int i;

	/* level-1 interrupt switch */
	alx_write_mem32(hw, ALX_ISR, 0);
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	alx_post_write(hw);

	if (alx->flags & ALX_FLAG_USING_MSIX)
		/* enable all msix irqs */
		for (i = 0; i < alx->num_vec; i++)
			alx_mask_msix(hw, i, false);
}

static void alx_irq_disable(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int i;

	alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
	alx_write_mem32(hw, ALX_IMR, 0);
	alx_post_write(hw);

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		for (i = 0; i < alx->num_vec; i++) {
			alx_mask_msix(hw, i, true);
			synchronize_irq(alx->msix_entries[i].vector);
		}
	} else {
		synchronize_irq(alx->hw.pdev->irq);
	}
}

static int alx_realloc_resources(struct alx_priv *alx)
{
	int err;

	alx_free_rings(alx);
	alx_free_napis(alx);
	alx_disable_advanced_intr(alx);

	err = alx_alloc_napis(alx);
	if (err)
		return err;

	err = alx_alloc_rings(alx);
	if (err)
		return err;

	return 0;
}

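/* Install the interrupt handler(s): try MSI-X first (reallocating the napi
 * and ring resources if that request fails), then MSI, and finally a shared
 * legacy IRQ. On success the vector mapping tables are programmed.
 */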
static int alx_request_irq(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	struct alx_hw *hw = &alx->hw;
	int err;
	u32 msi_ctrl;

	msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, msi_ctrl);
		err = alx_request_msix(alx);
		if (!err)
			goto out;

		/* msix request failed, realloc resources */
		err = alx_realloc_resources(alx);
		if (err)
			goto out;
	}

	if (alx->flags & ALX_FLAG_USING_MSI) {
		alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
				msi_ctrl | ALX_MSI_MASK_SEL_LINE);
		err = request_irq(pdev->irq, alx_intr_msi, 0,
				  alx->dev->name, alx);
		if (!err)
			goto out;
		/* fall back to legacy interrupt */
		alx->flags &= ~ALX_FLAG_USING_MSI;
		pci_disable_msi(alx->hw.pdev);
	}

	alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
	err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED,
			  alx->dev->name, alx);
out:
	if (!err)
		alx_config_vector_mapping(alx);
	else
		netdev_err(alx->dev, "IRQ registration failed!\n");
	return err;
}

static void alx_free_irq(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	int i, vector = 0;

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		free_irq(alx->msix_entries[vector++].vector, alx);
		for (i = 0; i < alx->num_napi; i++)
			free_irq(alx->msix_entries[vector++].vector,
				 alx->qnapi[i]);
	} else {
		free_irq(pdev->irq, alx);
	}

	alx_disable_advanced_intr(alx);
}

static int alx_identify_hw(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	int rev = alx_hw_revision(hw);

	if (rev > ALX_REV_C0)
		return -EINVAL;

	hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2;

	return 0;
}

static int alx_init_sw(struct alx_priv *alx)
{
	struct pci_dev *pdev = alx->hw.pdev;
	struct alx_hw *hw = &alx->hw;
	int err;

	err = alx_identify_hw(alx);
	if (err) {
		dev_err(&pdev->dev, "unrecognized chip, aborting\n");
		return err;
	}

	alx->hw.lnk_patch =
		pdev->device == ALX_DEV_ID_AR8161 &&
		pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC &&
		pdev->subsystem_device == 0x0091 &&
		pdev->revision == 0;

	hw->smb_timer = 400;
	hw->mtu = alx->dev->mtu;
	alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
	/* MTU range: 34 - 9256 */
	alx->dev->min_mtu = 34;
	alx->dev->max_mtu = ALX_MAX_FRAME_LEN(ALX_MAX_FRAME_SIZE);
	alx->tx_ringsz = 256;
	alx->rx_ringsz = 512;
	hw->imt = 200;
	alx->int_mask = ALX_ISR_MISC;
	hw->dma_chnl = hw->max_dma_chnl;
	hw->ith_tpd = alx->tx_ringsz / 3;
	hw->link_speed = SPEED_UNKNOWN;
	hw->duplex = DUPLEX_UNKNOWN;
	hw->adv_cfg = ADVERTISED_Autoneg |
		      ADVERTISED_10baseT_Half |
		      ADVERTISED_10baseT_Full |
		      ADVERTISED_100baseT_Full |
		      ADVERTISED_100baseT_Half |
		      ADVERTISED_1000baseT_Full;
	hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX;

	hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN |
		      ALX_MAC_CTRL_MHASH_ALG_HI5B |
		      ALX_MAC_CTRL_BRD_EN |
		      ALX_MAC_CTRL_PCRCE |
		      ALX_MAC_CTRL_CRCE |
		      ALX_MAC_CTRL_RXFC_EN |
		      ALX_MAC_CTRL_TXFC_EN |
		      7 << ALX_MAC_CTRL_PRMBLEN_SHIFT;

	return err;
}

static netdev_features_t alx_fix_features(struct net_device *netdev,
					  netdev_features_t features)
{
	if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE)
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6);

	return features;
}

static void alx_netif_stop(struct alx_priv *alx)
{
	int i;

	netif_trans_update(alx->dev);
	if (netif_carrier_ok(alx->dev)) {
		netif_carrier_off(alx->dev);
		netif_tx_disable(alx->dev);
		for (i = 0; i < alx->num_napi; i++)
			napi_disable(&alx->qnapi[i]->napi);
	}
}

static void alx_halt(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_netif_stop(alx);
	hw->link_speed = SPEED_UNKNOWN;
	hw->duplex = DUPLEX_UNKNOWN;

	alx_reset_mac(hw);

	/* disable l0s/l1 */
	alx_enable_aspm(hw, false, false);
	alx_irq_disable(alx);
	alx_free_buffers(alx);
}

static void alx_configure(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;

	alx_configure_basic(hw);
	alx_disable_rss(hw);
	__alx_set_rx_mode(alx->dev);

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}

static void alx_activate(struct alx_priv *alx)
{
	/* hardware settings were lost, restore them */
	alx_reinit_rings(alx);
	alx_configure(alx);

	/* clear old interrupts */
	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);

	alx_irq_enable(alx);

	alx_schedule_link_check(alx);
}

static void alx_reinit(struct alx_priv *alx)
{
	ASSERT_RTNL();

	alx_halt(alx);
	alx_activate(alx);
}

static int alx_change_mtu(struct net_device *netdev, int mtu)
{
	struct alx_priv *alx = netdev_priv(netdev);
	int max_frame = ALX_MAX_FRAME_LEN(mtu);

	netdev->mtu = mtu;
	alx->hw.mtu = mtu;
	alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
	netdev_update_features(netdev);
	if (netif_running(netdev))
		alx_reinit(alx);
	return 0;
}

static void alx_netif_start(struct alx_priv *alx)
{
	int i;

	netif_tx_wake_all_queues(alx->dev);
	for (i = 0; i < alx->num_napi; i++)
		napi_enable(&alx->qnapi[i]->napi);
	netif_carrier_on(alx->dev);
}

static int __alx_open(struct alx_priv *alx, bool resume)
{
	int err;

	alx_init_intr(alx, true);

	if (!resume)
		netif_carrier_off(alx->dev);

	err = alx_alloc_napis(alx);
	if (err)
		goto out_disable_adv_intr;

	err = alx_alloc_rings(alx);
	if (err)
		goto out_free_rings;

	alx_configure(alx);

	err = alx_request_irq(alx);
	if (err)
		goto out_free_rings;

	netif_set_real_num_tx_queues(alx->dev, alx->num_txq);
	netif_set_real_num_rx_queues(alx->dev, alx->num_rxq);

	/* clear old interrupts */
	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);

	alx_irq_enable(alx);

	if (!resume)
		netif_tx_start_all_queues(alx->dev);

	alx_schedule_link_check(alx);
	return 0;

out_free_rings:
	alx_free_rings(alx);
	alx_free_napis(alx);
out_disable_adv_intr:
	alx_disable_advanced_intr(alx);
	return err;
}

static void __alx_stop(struct alx_priv *alx)
{
	alx_halt(alx);
	alx_free_irq(alx);
	alx_free_rings(alx);
	alx_free_napis(alx);
}

static const char *alx_speed_desc(struct alx_hw *hw)
{
	switch (alx_speed_to_ethadv(hw->link_speed, hw->duplex)) {
	case ADVERTISED_1000baseT_Full:
		return "1 Gbps Full";
	case ADVERTISED_100baseT_Full:
		return "100 Mbps Full";
	case ADVERTISED_100baseT_Half:
		return "100 Mbps Half";
	case ADVERTISED_10baseT_Full:
		return "10 Mbps Full";
	case ADVERTISED_10baseT_Half:
		return "10 Mbps Half";
	default:
		return "Unknown speed";
	}
}

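/* Handle a PHY link-state change: re-read the link status and either start
 * the MAC and netif queues on link-up, or stop them and restore the MAC
 * configuration (lost across the MAC reset) on link-down. Any failure
 * schedules a full reset.
 */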
static void alx_check_link(struct alx_priv *alx)
{
	struct alx_hw *hw = &alx->hw;
	unsigned long flags;
	int old_speed;
	u8 old_duplex;
	int err;

	/* clear PHY internal interrupt status, otherwise the main
	 * interrupt status will be asserted forever
	 */
	alx_clear_phy_intr(hw);

	old_speed = hw->link_speed;
	old_duplex = hw->duplex;
	err = alx_read_phy_link(hw);
	if (err < 0)
		goto reset;

	spin_lock_irqsave(&alx->irq_lock, flags);
	alx->int_mask |= ALX_ISR_PHY;
	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
	spin_unlock_irqrestore(&alx->irq_lock, flags);

	if (old_speed == hw->link_speed)
		return;

	if (hw->link_speed != SPEED_UNKNOWN) {
		netif_info(alx, link, alx->dev,
			   "NIC Up: %s\n", alx_speed_desc(hw));
		alx_post_phy_link(hw);
		alx_enable_aspm(hw, true, true);
		alx_start_mac(hw);

		if (old_speed == SPEED_UNKNOWN)
			alx_netif_start(alx);
	} else {
		/* link is now down */
		alx_netif_stop(alx);
		netif_info(alx, link, alx->dev, "Link Down\n");
		err = alx_reset_mac(hw);
		if (err)
			goto reset;
		alx_irq_disable(alx);

		/* MAC reset causes all HW settings to be lost, restore all */
		err = alx_reinit_rings(alx);
		if (err)
			goto reset;
		alx_configure(alx);
		alx_enable_aspm(hw, false, true);
		alx_post_phy_link(hw);
		alx_irq_enable(alx);
	}

	return;

reset:
	alx_schedule_reset(alx);
}

static int alx_open(struct net_device *netdev)
{
	return __alx_open(netdev_priv(netdev), false);
}

static int alx_stop(struct net_device *netdev)
{
	__alx_stop(netdev_priv(netdev));
	return 0;
}

static void alx_link_check(struct work_struct *work)
{
	struct alx_priv *alx;

	alx = container_of(work, struct alx_priv, link_check_wk);

	rtnl_lock();
	alx_check_link(alx);
	rtnl_unlock();
}

static void alx_reset(struct work_struct *work)
{
	struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);

	rtnl_lock();
	alx_reinit(alx);
	rtnl_unlock();
}

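/* Number of tx descriptors needed for an skb: one per fragment plus the
 * head, and one more for the LSOv2 length-only descriptor.
 */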
static int alx_tpd_req(struct sk_buff *skb)
{
	int num;

	num = skb_shinfo(skb)->nr_frags + 1;
	/* we need one extra descriptor for LSOv2 */
	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		num++;

	return num;
}

static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
{
	u8 cso, css;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	cso = skb_checksum_start_offset(skb);
	if (cso & 1)
		return -EINVAL;

	css = cso + skb->csum_offset;
	first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT);
	first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT);
	first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);

	return 0;
}

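/* Set up TSO: prime the pseudo-header checksum and fill the LSO fields of
 * the first descriptor. Returns 1 if TSO is in use, 0 if not, or a negative
 * error.
 */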
static int alx_tso(struct sk_buff *skb, struct alx_txd *first)
{
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 0, IPPROTO_TCP, 0);
		first->word1 |= 1 << TPD_IPV4_SHIFT;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
		/* LSOv2: the first TPD only provides the packet length */
		first->adrl.l.pkt_len = skb->len;
		first->word1 |= 1 << TPD_LSO_V2_SHIFT;
	}

	first->word1 |= 1 << TPD_LSO_EN_SHIFT;
	first->word1 |= (skb_transport_offset(skb) &
			 TPD_L4HDROFFSET_MASK) << TPD_L4HDROFFSET_SHIFT;
	first->word1 |= (skb_shinfo(skb)->gso_size &
			 TPD_MSS_MASK) << TPD_MSS_SHIFT;
	return 1;
}

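/* DMA-map the skb head and all fragments into consecutive tx descriptors,
 * marking the last one EOP and storing the skb for later cleanup. On a
 * mapping failure every descriptor written so far is unwound.
 */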
static int alx_map_tx_skb(struct alx_tx_queue *txq, struct sk_buff *skb)
{
	struct alx_txd *tpd, *first_tpd;
	dma_addr_t dma;
	int maplen, f, first_idx = txq->write_idx;

	first_tpd = &txq->tpd[txq->write_idx];
	tpd = first_tpd;

	if (tpd->word1 & (1 << TPD_LSO_V2_SHIFT)) {
		if (++txq->write_idx == txq->count)
			txq->write_idx = 0;

		tpd = &txq->tpd[txq->write_idx];
		tpd->len = first_tpd->len;
		tpd->vlan_tag = first_tpd->vlan_tag;
		tpd->word1 = first_tpd->word1;
	}

	maplen = skb_headlen(skb);
	dma = dma_map_single(txq->dev, skb->data, maplen,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(txq->dev, dma))
		goto err_dma;

	dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
	dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

	tpd->adrl.addr = cpu_to_le64(dma);
	tpd->len = cpu_to_le16(maplen);

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];

		if (++txq->write_idx == txq->count)
			txq->write_idx = 0;
		tpd = &txq->tpd[txq->write_idx];

		tpd->word1 = first_tpd->word1;

		maplen = skb_frag_size(frag);
		dma = skb_frag_dma_map(txq->dev, frag, 0,
				       maplen, DMA_TO_DEVICE);
		if (dma_mapping_error(txq->dev, dma))
			goto err_dma;
		dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
		dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);

		tpd->adrl.addr = cpu_to_le64(dma);
		tpd->len = cpu_to_le16(maplen);
	}

	/* last TPD, set EOP flag and store skb */
	tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
	txq->bufs[txq->write_idx].skb = skb;

	if (++txq->write_idx == txq->count)
		txq->write_idx = 0;

	return 0;

err_dma:
	f = first_idx;
	while (f != txq->write_idx) {
		alx_free_txbuf(txq, f);
		if (++f == txq->count)
			f = 0;
	}
	return -ENOMEM;
}

static netdev_tx_t alx_start_xmit_ring(struct sk_buff *skb,
				       struct alx_tx_queue *txq)
{
	struct alx_priv *alx;
	struct alx_txd *first;
	int tso;

	alx = netdev_priv(txq->netdev);

	if (alx_tpd_avail(txq) < alx_tpd_req(skb)) {
		netif_tx_stop_queue(alx_get_tx_queue(txq));
		goto drop;
	}

	first = &txq->tpd[txq->write_idx];
	memset(first, 0, sizeof(*first));

	tso = alx_tso(skb, first);
	if (tso < 0)
		goto drop;
	else if (!tso && alx_tx_csum(skb, first))
		goto drop;

	if (alx_map_tx_skb(txq, skb) < 0)
		goto drop;

	netdev_tx_sent_queue(alx_get_tx_queue(txq), skb->len);

	/* flush updates before updating hardware */
	wmb();
	alx_write_mem16(&alx->hw, txq->p_reg, txq->write_idx);

	if (alx_tpd_avail(txq) < txq->count / 8)
		netif_tx_stop_queue(alx_get_tx_queue(txq));

	return NETDEV_TX_OK;

drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);

	return alx_start_xmit_ring(skb, alx_tx_queue_mapping(alx, skb));
}

static void alx_tx_timeout(struct net_device *dev)
{
	struct alx_priv *alx = netdev_priv(dev);

	alx_schedule_reset(alx);
}

static int alx_mdio_read(struct net_device *netdev,
			 int prtad, int devad, u16 addr)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	u16 val;
	int err;

	if (prtad != hw->mdio.prtad)
		return -EINVAL;

	if (devad == MDIO_DEVAD_NONE)
		err = alx_read_phy_reg(hw, addr, &val);
	else
		err = alx_read_phy_ext(hw, devad, addr, &val);

	if (err)
		return err;
	return val;
}

static int alx_mdio_write(struct net_device *netdev,
			  int prtad, int devad, u16 addr, u16 val)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;

	if (prtad != hw->mdio.prtad)
		return -EINVAL;

	if (devad == MDIO_DEVAD_NONE)
		return alx_write_phy_reg(hw, addr, val);

	return alx_write_phy_ext(hw, devad, addr, val);
}

static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct alx_priv *alx = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EAGAIN;

	return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void alx_poll_controller(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);
	int i;

	if (alx->flags & ALX_FLAG_USING_MSIX) {
		alx_intr_msix_misc(0, alx);
		for (i = 0; i < alx->num_txq; i++)
			alx_intr_msix_ring(0, alx->qnapi[i]);
	} else if (alx->flags & ALX_FLAG_USING_MSI)
		alx_intr_msi(0, alx);
	else
		alx_intr_legacy(0, alx);
}
#endif

static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev,
					struct rtnl_link_stats64 *net_stats)
{
	struct alx_priv *alx = netdev_priv(dev);
	struct alx_hw_stats *hw_stats = &alx->hw.stats;

	spin_lock(&alx->stats_lock);

	alx_update_hw_stats(&alx->hw);

	net_stats->tx_bytes   = hw_stats->tx_byte_cnt;
	net_stats->rx_bytes   = hw_stats->rx_byte_cnt;
	net_stats->multicast  = hw_stats->rx_mcast;
	net_stats->collisions = hw_stats->tx_single_col +
				hw_stats->tx_multi_col +
				hw_stats->tx_late_col +
				hw_stats->tx_abort_col;

	net_stats->rx_errors  = hw_stats->rx_frag +
				hw_stats->rx_fcs_err +
				hw_stats->rx_len_err +
				hw_stats->rx_ov_sz +
				hw_stats->rx_ov_rrd +
				hw_stats->rx_align_err +
				hw_stats->rx_ov_rxf;

	net_stats->rx_fifo_errors   = hw_stats->rx_ov_rxf;
	net_stats->rx_length_errors = hw_stats->rx_len_err;
	net_stats->rx_crc_errors    = hw_stats->rx_fcs_err;
	net_stats->rx_frame_errors  = hw_stats->rx_align_err;
	net_stats->rx_dropped       = hw_stats->rx_ov_rrd;

	net_stats->tx_errors = hw_stats->tx_late_col +
			       hw_stats->tx_abort_col +
			       hw_stats->tx_underrun +
			       hw_stats->tx_trunc;

	net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
	net_stats->tx_fifo_errors    = hw_stats->tx_underrun;
	net_stats->tx_window_errors  = hw_stats->tx_late_col;

	net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
	net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;

	spin_unlock(&alx->stats_lock);

	return net_stats;
}

static const struct net_device_ops alx_netdev_ops = {
	.ndo_open               = alx_open,
	.ndo_stop               = alx_stop,
	.ndo_start_xmit         = alx_start_xmit,
	.ndo_get_stats64        = alx_get_stats64,
	.ndo_set_rx_mode        = alx_set_rx_mode,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_mac_address    = alx_set_mac_address,
	.ndo_change_mtu         = alx_change_mtu,
	.ndo_do_ioctl           = alx_ioctl,
	.ndo_tx_timeout         = alx_tx_timeout,
	.ndo_fix_features       = alx_fix_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = alx_poll_controller,
#endif
};

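/* One-time device setup: map BAR 0, initialize software state, bring the
 * PHY/MAC into a known-good state and register the net device.
 */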
static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct alx_priv *alx;
	struct alx_hw *hw;
	bool phy_configured;
	int err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* The alx chip can DMA to 64-bit addresses, but it uses a single
	 * shared register for the high 32 bits, so only a single, aligned,
	 * 4 GB physical address range can be used for descriptors.
	 */
	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA config, aborting\n");
			goto out_pci_disable;
		}
	}

	err = pci_request_mem_regions(pdev, alx_drv_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_mem_regions failed\n");
		goto out_pci_disable;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	if (!pdev->pm_cap) {
		dev_err(&pdev->dev,
			"Can't find power management capability, aborting\n");
		err = -EIO;
		goto out_pci_release;
	}

	netdev = alloc_etherdev_mqs(sizeof(*alx),
				    ALX_MAX_TX_QUEUES, 1);
	if (!netdev) {
		err = -ENOMEM;
		goto out_pci_release;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);
	alx = netdev_priv(netdev);
	spin_lock_init(&alx->hw.mdio_lock);
	spin_lock_init(&alx->irq_lock);
	spin_lock_init(&alx->stats_lock);
	alx->dev = netdev;
	alx->hw.pdev = pdev;
	alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
			  NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL;
	hw = &alx->hw;
	pci_set_drvdata(pdev, alx);

	hw->hw_addr = pci_ioremap_bar(pdev, 0);
	if (!hw->hw_addr) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -EIO;
		goto out_free_netdev;
	}

	netdev->netdev_ops = &alx_netdev_ops;
	netdev->ethtool_ops = &alx_ethtool_ops;
	netdev->irq = pdev->irq;
	netdev->watchdog_timeo = ALX_WATCHDOG_TIME;

	if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
		pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;

	err = alx_init_sw(alx);
	if (err) {
		dev_err(&pdev->dev, "net device private data init failed\n");
		goto out_unmap;
	}

	alx_reset_pcie(hw);

	phy_configured = alx_phy_configured(hw);

	if (!phy_configured)
		alx_reset_phy(hw);

	err = alx_reset_mac(hw);
	if (err) {
		dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err);
		goto out_unmap;
	}

	/* setup link to put it in a known good starting state */
	if (!phy_configured) {
		err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
		if (err) {
			dev_err(&pdev->dev,
				"failed to configure PHY speed/duplex (err=%d)\n",
				err);
			goto out_unmap;
		}
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_HW_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6;

	if (alx_get_perm_macaddr(hw, hw->perm_addr)) {
		dev_warn(&pdev->dev,
			 "Invalid permanent address programmed, using random one\n");
		eth_hw_addr_random(netdev);
		memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len);
	}

	memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
	memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
	memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);

	hw->mdio.prtad = 0;
	hw->mdio.mmds = 0;
	hw->mdio.dev = netdev;
	hw->mdio.mode_support = MDIO_SUPPORTS_C45 |
				MDIO_SUPPORTS_C22 |
				MDIO_EMULATE_C22;
	hw->mdio.mdio_read = alx_mdio_read;
	hw->mdio.mdio_write = alx_mdio_write;

	if (!alx_get_phy_info(hw)) {
		dev_err(&pdev->dev, "failed to identify PHY\n");
		err = -EIO;
		goto out_unmap;
	}

	INIT_WORK(&alx->link_check_wk, alx_link_check);
	INIT_WORK(&alx->reset_wk, alx_reset);
	netif_carrier_off(netdev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "register netdevice failed\n");
		goto out_unmap;
	}

	netdev_info(netdev,
		    "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n",
		    netdev->dev_addr);

	return 0;

out_unmap:
	iounmap(hw->hw_addr);
out_free_netdev:
	free_netdev(netdev);
out_pci_release:
	pci_release_mem_regions(pdev);
out_pci_disable:
	pci_disable_device(pdev);
	return err;
}

static void alx_remove(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;

	cancel_work_sync(&alx->link_check_wk);
	cancel_work_sync(&alx->reset_wk);

	/* restore permanent mac address */
	alx_set_macaddr(hw, hw->perm_addr);

	unregister_netdev(alx->dev);
	iounmap(hw->hw_addr);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);

	free_netdev(alx->dev);
}

#ifdef CONFIG_PM_SLEEP
static int alx_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct alx_priv *alx = pci_get_drvdata(pdev);

	if (!netif_running(alx->dev))
		return 0;
	netif_device_detach(alx->dev);
	__alx_stop(alx);
	return 0;
}

static int alx_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;

	alx_reset_phy(hw);

	if (!netif_running(alx->dev))
		return 0;
	netif_device_attach(alx->dev);
	return __alx_open(alx, true);
}

static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
#define ALX_PM_OPS      (&alx_pm_ops)
#else
#define ALX_PM_OPS      NULL
#endif

static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct net_device *netdev = alx->dev;
	pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET;

	dev_info(&pdev->dev, "pci error detected\n");

	rtnl_lock();

	if (netif_running(netdev)) {
		netif_device_detach(netdev);
		alx_halt(alx);
	}

	if (state == pci_channel_io_perm_failure)
		rc = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return rc;
}

static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct alx_hw *hw = &alx->hw;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	dev_info(&pdev->dev, "pci error slot reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n");
		goto out;
	}

	pci_set_master(pdev);

	alx_reset_pcie(hw);
	if (!alx_reset_mac(hw))
		rc = PCI_ERS_RESULT_RECOVERED;
out:
	pci_cleanup_aer_uncorrect_error_status(pdev);

	rtnl_unlock();

	return rc;
}

static void alx_pci_error_resume(struct pci_dev *pdev)
{
	struct alx_priv *alx = pci_get_drvdata(pdev);
	struct net_device *netdev = alx->dev;

	dev_info(&pdev->dev, "pci error resume\n");

	rtnl_lock();

	if (netif_running(netdev)) {
		alx_activate(alx);
		netif_device_attach(netdev);
	}

	rtnl_unlock();
}

static const struct pci_error_handlers alx_err_handlers = {
	.error_detected = alx_pci_error_detected,
	.slot_reset     = alx_pci_error_slot_reset,
	.resume         = alx_pci_error_resume,
};

static const struct pci_device_id alx_pci_tbl[] = {
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2500),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) },
	{}
};
MODULE_DEVICE_TABLE(pci, alx_pci_tbl);

static struct pci_driver alx_driver = {
	.name        = alx_drv_name,
	.id_table    = alx_pci_tbl,
	.probe       = alx_probe,
	.remove      = alx_remove,
	.err_handler = &alx_err_handlers,
	.driver.pm   = ALX_PM_OPS,
};

module_pci_driver(alx_driver);
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>");
MODULE_DESCRIPTION(
	"Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver");
MODULE_LICENSE("GPL");