1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2018-2019  Realtek Corporation
3  */
4 
5 #include <linux/module.h>
6 #include <linux/pci.h>
7 #include "main.h"
8 #include "pci.h"
9 #include "reg.h"
10 #include "tx.h"
11 #include "rx.h"
12 #include "fw.h"
13 #include "ps.h"
14 #include "debug.h"
15 
16 static bool rtw_disable_msi;
17 static bool rtw_pci_disable_aspm;
18 module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
19 module_param_named(disable_aspm, rtw_pci_disable_aspm, bool, 0644);
20 MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");
21 MODULE_PARM_DESC(disable_aspm, "Set Y to disable PCI ASPM support");
22 
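/* Per-queue TX buffer descriptor index registers, indexed by RTW_TX_QUEUE_*,
 * used to look up the hardware read/write pointers of each TX ring.
 */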
23 static u32 rtw_pci_tx_queue_idx_addr[] = {
24 	[RTW_TX_QUEUE_BK]	= RTK_PCI_TXBD_IDX_BKQ,
25 	[RTW_TX_QUEUE_BE]	= RTK_PCI_TXBD_IDX_BEQ,
26 	[RTW_TX_QUEUE_VI]	= RTK_PCI_TXBD_IDX_VIQ,
27 	[RTW_TX_QUEUE_VO]	= RTK_PCI_TXBD_IDX_VOQ,
28 	[RTW_TX_QUEUE_MGMT]	= RTK_PCI_TXBD_IDX_MGMTQ,
29 	[RTW_TX_QUEUE_HI0]	= RTK_PCI_TXBD_IDX_HI0Q,
30 	[RTW_TX_QUEUE_H2C]	= RTK_PCI_TXBD_IDX_H2CQ,
31 };
32 
33 static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
34 {
35 	switch (queue) {
36 	case RTW_TX_QUEUE_BCN:
37 		return TX_DESC_QSEL_BEACON;
38 	case RTW_TX_QUEUE_H2C:
39 		return TX_DESC_QSEL_H2C;
40 	case RTW_TX_QUEUE_MGMT:
41 		return TX_DESC_QSEL_MGMT;
42 	case RTW_TX_QUEUE_HI0:
43 		return TX_DESC_QSEL_HIGH;
44 	default:
45 		return skb->priority;
46 	}
}
48 
49 static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
50 {
51 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
52 
53 	return readb(rtwpci->mmap + addr);
54 }
55 
56 static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
57 {
58 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
59 
60 	return readw(rtwpci->mmap + addr);
61 }
62 
63 static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
64 {
65 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
66 
67 	return readl(rtwpci->mmap + addr);
68 }
69 
70 static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
71 {
72 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
73 
74 	writeb(val, rtwpci->mmap + addr);
75 }
76 
77 static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
78 {
79 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
80 
81 	writew(val, rtwpci->mmap + addr);
82 }
83 
84 static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
85 {
86 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
87 
88 	writel(val, rtwpci->mmap + addr);
89 }
90 
91 static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
92 {
93 	int offset = tx_ring->r.desc_size * idx;
94 
95 	return tx_ring->r.head + offset;
96 }
97 
98 static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
99 				      struct rtw_pci_tx_ring *tx_ring)
100 {
101 	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
102 	struct rtw_pci_tx_data *tx_data;
103 	struct sk_buff *skb, *tmp;
104 	dma_addr_t dma;
105 
	/* free every skb remaining in the tx list */
107 	skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
108 		__skb_unlink(skb, &tx_ring->queue);
109 		tx_data = rtw_pci_get_tx_data(skb);
110 		dma = tx_data->dma;
111 
112 		dma_unmap_single(&pdev->dev, dma, skb->len, DMA_TO_DEVICE);
113 		dev_kfree_skb_any(skb);
114 	}
115 }
116 
117 static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
118 				 struct rtw_pci_tx_ring *tx_ring)
119 {
120 	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
121 	u8 *head = tx_ring->r.head;
122 	u32 len = tx_ring->r.len;
123 	int ring_sz = len * tx_ring->r.desc_size;
124 
125 	rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
126 
127 	/* free the ring itself */
128 	dma_free_coherent(&pdev->dev, ring_sz, head, tx_ring->r.dma);
129 	tx_ring->r.head = NULL;
130 }
131 
132 static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
133 				      struct rtw_pci_rx_ring *rx_ring)
134 {
135 	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
136 	struct sk_buff *skb;
137 	int buf_sz = RTK_PCI_RX_BUF_SIZE;
138 	dma_addr_t dma;
139 	int i;
140 
141 	for (i = 0; i < rx_ring->r.len; i++) {
142 		skb = rx_ring->buf[i];
143 		if (!skb)
144 			continue;
145 
146 		dma = *((dma_addr_t *)skb->cb);
147 		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
148 		dev_kfree_skb(skb);
149 		rx_ring->buf[i] = NULL;
150 	}
151 }
152 
153 static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
154 				 struct rtw_pci_rx_ring *rx_ring)
155 {
156 	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
157 	u8 *head = rx_ring->r.head;
158 	int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;
159 
160 	rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);
161 
162 	dma_free_coherent(&pdev->dev, ring_sz, head, rx_ring->r.dma);
163 }
164 
165 static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
166 {
167 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
168 	struct rtw_pci_tx_ring *tx_ring;
169 	struct rtw_pci_rx_ring *rx_ring;
170 	int i;
171 
172 	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
173 		tx_ring = &rtwpci->tx_rings[i];
174 		rtw_pci_free_tx_ring(rtwdev, tx_ring);
175 	}
176 
177 	for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
178 		rx_ring = &rtwpci->rx_rings[i];
179 		rtw_pci_free_rx_ring(rtwdev, rx_ring);
180 	}
181 }
182 
183 static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
184 				struct rtw_pci_tx_ring *tx_ring,
185 				u8 desc_size, u32 len)
186 {
187 	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
188 	int ring_sz = desc_size * len;
189 	dma_addr_t dma;
190 	u8 *head;
191 
192 	if (len > TRX_BD_IDX_MASK) {
193 		rtw_err(rtwdev, "len %d exceeds maximum TX entries\n", len);
194 		return -EINVAL;
195 	}
196 
197 	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
198 	if (!head) {
199 		rtw_err(rtwdev, "failed to allocate tx ring\n");
200 		return -ENOMEM;
201 	}
202 
203 	skb_queue_head_init(&tx_ring->queue);
204 	tx_ring->r.head = head;
205 	tx_ring->r.dma = dma;
206 	tx_ring->r.len = len;
207 	tx_ring->r.desc_size = desc_size;
208 	tx_ring->r.wp = 0;
209 	tx_ring->r.rp = 0;
210 
211 	return 0;
212 }
213 
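/* Map the skb data buffer for DMA and program the RX buffer descriptor at
 * position 'idx' with the buffer size and DMA address, so the device can
 * fill it with a received frame.
 */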
214 static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
215 				 struct rtw_pci_rx_ring *rx_ring,
216 				 u32 idx, u32 desc_sz)
217 {
218 	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
219 	struct rtw_pci_rx_buffer_desc *buf_desc;
220 	int buf_sz = RTK_PCI_RX_BUF_SIZE;
221 	dma_addr_t dma;
222 
223 	if (!skb)
224 		return -EINVAL;
225 
226 	dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
227 	if (dma_mapping_error(&pdev->dev, dma))
228 		return -EBUSY;
229 
230 	*((dma_addr_t *)skb->cb) = dma;
231 	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
232 						     idx * desc_sz);
233 	memset(buf_desc, 0, sizeof(*buf_desc));
234 	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
235 	buf_desc->dma = cpu_to_le32(dma);
236 
237 	return 0;
238 }
239 
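/* Hand an already-mapped RX buffer back to the device: sync it for device
 * access and rewrite its buffer descriptor with the buffer size and DMA
 * address.
 */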
240 static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
241 					struct rtw_pci_rx_ring *rx_ring,
242 					u32 idx, u32 desc_sz)
243 {
244 	struct device *dev = rtwdev->dev;
245 	struct rtw_pci_rx_buffer_desc *buf_desc;
246 	int buf_sz = RTK_PCI_RX_BUF_SIZE;
247 
248 	dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);
249 
250 	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
251 						     idx * desc_sz);
252 	memset(buf_desc, 0, sizeof(*buf_desc));
253 	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
254 	buf_desc->dma = cpu_to_le32(dma);
255 }
256 
257 static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
258 				struct rtw_pci_rx_ring *rx_ring,
259 				u8 desc_size, u32 len)
260 {
261 	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
262 	struct sk_buff *skb = NULL;
263 	dma_addr_t dma;
264 	u8 *head;
265 	int ring_sz = desc_size * len;
266 	int buf_sz = RTK_PCI_RX_BUF_SIZE;
267 	int i, allocated;
268 	int ret = 0;
269 
270 	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
271 	if (!head) {
272 		rtw_err(rtwdev, "failed to allocate rx ring\n");
273 		return -ENOMEM;
274 	}
275 	rx_ring->r.head = head;
276 
277 	for (i = 0; i < len; i++) {
278 		skb = dev_alloc_skb(buf_sz);
279 		if (!skb) {
280 			allocated = i;
281 			ret = -ENOMEM;
282 			goto err_out;
283 		}
284 
285 		memset(skb->data, 0, buf_sz);
286 		rx_ring->buf[i] = skb;
287 		ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
288 		if (ret) {
289 			allocated = i;
290 			dev_kfree_skb_any(skb);
291 			goto err_out;
292 		}
293 	}
294 
295 	rx_ring->r.dma = dma;
296 	rx_ring->r.len = len;
297 	rx_ring->r.desc_size = desc_size;
298 	rx_ring->r.wp = 0;
299 	rx_ring->r.rp = 0;
300 
301 	return 0;
302 
303 err_out:
304 	for (i = 0; i < allocated; i++) {
305 		skb = rx_ring->buf[i];
306 		if (!skb)
307 			continue;
308 		dma = *((dma_addr_t *)skb->cb);
309 		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
310 		dev_kfree_skb_any(skb);
311 		rx_ring->buf[i] = NULL;
312 	}
313 	dma_free_coherent(&pdev->dev, ring_sz, head, dma);
314 
315 	rtw_err(rtwdev, "failed to init rx buffer\n");
316 
317 	return ret;
318 }
319 
320 static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
321 {
322 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
323 	struct rtw_pci_tx_ring *tx_ring;
324 	struct rtw_pci_rx_ring *rx_ring;
325 	struct rtw_chip_info *chip = rtwdev->chip;
326 	int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
327 	int tx_desc_size, rx_desc_size;
328 	u32 len;
329 	int ret;
330 
331 	tx_desc_size = chip->tx_buf_desc_sz;
332 
333 	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
334 		tx_ring = &rtwpci->tx_rings[i];
335 		len = max_num_of_tx_queue(i);
336 		ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
337 		if (ret)
338 			goto out;
339 	}
340 
341 	rx_desc_size = chip->rx_buf_desc_sz;
342 
343 	for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
344 		rx_ring = &rtwpci->rx_rings[j];
345 		ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
346 					   RTK_MAX_RX_DESC_NUM);
347 		if (ret)
348 			goto out;
349 	}
350 
351 	return 0;
352 
353 out:
354 	tx_alloced = i;
355 	for (i = 0; i < tx_alloced; i++) {
356 		tx_ring = &rtwpci->tx_rings[i];
357 		rtw_pci_free_tx_ring(rtwdev, tx_ring);
358 	}
359 
360 	rx_alloced = j;
361 	for (j = 0; j < rx_alloced; j++) {
362 		rx_ring = &rtwpci->rx_rings[j];
363 		rtw_pci_free_rx_ring(rtwdev, rx_ring);
364 	}
365 
366 	return ret;
367 }
368 
369 static void rtw_pci_deinit(struct rtw_dev *rtwdev)
370 {
371 	rtw_pci_free_trx_ring(rtwdev);
372 }
373 
374 static int rtw_pci_init(struct rtw_dev *rtwdev)
375 {
376 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
377 	int ret = 0;
378 
379 	rtwpci->irq_mask[0] = IMR_HIGHDOK |
380 			      IMR_MGNTDOK |
381 			      IMR_BKDOK |
382 			      IMR_BEDOK |
383 			      IMR_VIDOK |
384 			      IMR_VODOK |
385 			      IMR_ROK |
386 			      IMR_BCNDMAINT_E |
387 			      IMR_C2HCMD |
388 			      0;
389 	rtwpci->irq_mask[1] = IMR_TXFOVW |
390 			      0;
391 	rtwpci->irq_mask[3] = IMR_H2CDOK |
392 			      0;
393 	spin_lock_init(&rtwpci->irq_lock);
394 	spin_lock_init(&rtwpci->hwirq_lock);
395 	ret = rtw_pci_init_trx_ring(rtwdev);
396 
397 	return ret;
398 }
399 
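/* Program the DMA base address and ring length of each TX/RX ring into the
 * corresponding TXBD/RXBD registers, reset the software read/write pointers,
 * and clear the hardware read/write indexes.
 */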
400 static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
401 {
402 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
403 	u32 len;
404 	u8 tmp;
405 	dma_addr_t dma;
406 
407 	tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
408 	rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);
409 
410 	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
411 	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);
412 
413 	if (!rtw_chip_wcpu_11n(rtwdev)) {
414 		len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
415 		dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
416 		rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
417 		rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
418 		rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
419 		rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
420 	}
421 
422 	len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
423 	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
424 	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
425 	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
426 	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len & TRX_BD_IDX_MASK);
427 	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);
428 
429 	len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
430 	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
431 	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
432 	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
433 	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len & TRX_BD_IDX_MASK);
434 	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);
435 
436 	len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
437 	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
438 	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
439 	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
440 	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len & TRX_BD_IDX_MASK);
441 	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);
442 
443 	len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
444 	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
445 	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
446 	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
447 	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len & TRX_BD_IDX_MASK);
448 	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);
449 
450 	len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
451 	dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
452 	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
453 	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
454 	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len & TRX_BD_IDX_MASK);
455 	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);
456 
457 	len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
458 	dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
459 	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
460 	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
461 	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len & TRX_BD_IDX_MASK);
462 	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);
463 
464 	len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
465 	dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
466 	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
467 	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
468 	rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & TRX_BD_IDX_MASK);
469 	rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);
470 
471 	/* reset read/write point */
472 	rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);
473 
474 	/* reset H2C Queue index in a single write */
475 	if (rtw_chip_wcpu_11ac(rtwdev))
476 		rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
477 				BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
478 }
479 
480 static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
481 {
482 	rtw_pci_reset_buf_desc(rtwdev);
483 }
484 
485 static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
486 				     struct rtw_pci *rtwpci, bool exclude_rx)
487 {
488 	unsigned long flags;
489 	u32 imr0_unmask = exclude_rx ? IMR_ROK : 0;
490 
491 	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
492 
493 	rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0] & ~imr0_unmask);
494 	rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
495 	if (rtw_chip_wcpu_11ac(rtwdev))
496 		rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
497 
498 	rtwpci->irq_enabled = true;
499 
500 	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
501 }
502 
503 static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
504 				      struct rtw_pci *rtwpci)
505 {
506 	unsigned long flags;
507 
508 	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
509 
510 	if (!rtwpci->irq_enabled)
511 		goto out;
512 
513 	rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
514 	rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
515 	if (rtw_chip_wcpu_11ac(rtwdev))
516 		rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
517 
518 	rtwpci->irq_enabled = false;
519 
520 out:
521 	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
522 }
523 
524 static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
525 {
526 	/* reset dma and rx tag */
527 	rtw_write32_set(rtwdev, RTK_PCI_CTRL,
528 			BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
529 	rtwpci->rx_tag = 0;
530 }
531 
532 static int rtw_pci_setup(struct rtw_dev *rtwdev)
533 {
534 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
535 
536 	rtw_pci_reset_trx_ring(rtwdev);
537 	rtw_pci_dma_reset(rtwdev, rtwpci);
538 
539 	return 0;
540 }
541 
542 static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
543 {
544 	struct rtw_pci_tx_ring *tx_ring;
545 	u8 queue;
546 
547 	rtw_pci_reset_trx_ring(rtwdev);
548 	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
549 		tx_ring = &rtwpci->tx_rings[queue];
550 		rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
551 	}
552 }
553 
554 static void rtw_pci_napi_start(struct rtw_dev *rtwdev)
555 {
556 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
557 
558 	if (test_and_set_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
559 		return;
560 
561 	napi_enable(&rtwpci->napi);
562 }
563 
564 static void rtw_pci_napi_stop(struct rtw_dev *rtwdev)
565 {
566 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
567 
568 	if (!test_and_clear_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
569 		return;
570 
571 	napi_synchronize(&rtwpci->napi);
572 	napi_disable(&rtwpci->napi);
573 }
574 
575 static int rtw_pci_start(struct rtw_dev *rtwdev)
576 {
577 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
578 
579 	rtw_pci_napi_start(rtwdev);
580 
581 	spin_lock_bh(&rtwpci->irq_lock);
582 	rtwpci->running = true;
583 	rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
584 	spin_unlock_bh(&rtwpci->irq_lock);
585 
586 	return 0;
587 }
588 
589 static void rtw_pci_stop(struct rtw_dev *rtwdev)
590 {
591 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
592 	struct pci_dev *pdev = rtwpci->pdev;
593 
594 	spin_lock_bh(&rtwpci->irq_lock);
595 	rtwpci->running = false;
596 	rtw_pci_disable_interrupt(rtwdev, rtwpci);
597 	spin_unlock_bh(&rtwpci->irq_lock);
598 
599 	synchronize_irq(pdev->irq);
600 	rtw_pci_napi_stop(rtwdev);
601 
602 	spin_lock_bh(&rtwpci->irq_lock);
603 	rtw_pci_dma_release(rtwdev, rtwpci);
604 	spin_unlock_bh(&rtwpci->irq_lock);
605 }
606 
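/* Enter deep power save only when no TX DMA is in flight on the data rings;
 * if the firmware supports TX wake, this check can be skipped.
 */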
607 static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
608 {
609 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
610 	struct rtw_pci_tx_ring *tx_ring;
611 	bool tx_empty = true;
612 	u8 queue;
613 
614 	if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
615 		goto enter_deep_ps;
616 
617 	lockdep_assert_held(&rtwpci->irq_lock);
618 
	/* TX DMA is not allowed while in deep PS state */
620 	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
		/* BCN queue holds the rsvd page and has no DMA-done interrupt;
		 * the H2C queue is managed by firmware
		 */
624 		if (queue == RTW_TX_QUEUE_BCN ||
625 		    queue == RTW_TX_QUEUE_H2C)
626 			continue;
627 
628 		tx_ring = &rtwpci->tx_rings[queue];
629 
630 		/* check if there is any skb DMAing */
631 		if (skb_queue_len(&tx_ring->queue)) {
632 			tx_empty = false;
633 			break;
634 		}
635 	}
636 
637 	if (!tx_empty) {
638 		rtw_dbg(rtwdev, RTW_DBG_PS,
639 			"TX path not empty, cannot enter deep power save state\n");
640 		return;
641 	}
642 enter_deep_ps:
643 	set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
644 	rtw_power_mode_change(rtwdev, true);
645 }
646 
647 static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
648 {
649 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
650 
651 	lockdep_assert_held(&rtwpci->irq_lock);
652 
653 	if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
654 		rtw_power_mode_change(rtwdev, false);
655 }
656 
657 static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
658 {
659 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
660 
661 	spin_lock_bh(&rtwpci->irq_lock);
662 
663 	if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
664 		rtw_pci_deep_ps_enter(rtwdev);
665 
666 	if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
667 		rtw_pci_deep_ps_leave(rtwdev);
668 
669 	spin_unlock_bh(&rtwpci->irq_lock);
670 }
671 
672 static u8 ac_to_hwq[] = {
673 	[IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
674 	[IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
675 	[IEEE80211_AC_BE] = RTW_TX_QUEUE_BE,
676 	[IEEE80211_AC_BK] = RTW_TX_QUEUE_BK,
677 };
678 
679 static_assert(ARRAY_SIZE(ac_to_hwq) == IEEE80211_NUM_ACS);
680 
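/* Map an skb to a hardware TX queue: beacons go to the BCN queue, management
 * and control frames to the MGMT queue, and data frames follow their
 * mac80211 AC via ac_to_hwq.
 */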
681 static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
682 {
683 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
684 	__le16 fc = hdr->frame_control;
685 	u8 q_mapping = skb_get_queue_mapping(skb);
686 	u8 queue;
687 
688 	if (unlikely(ieee80211_is_beacon(fc)))
689 		queue = RTW_TX_QUEUE_BCN;
690 	else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
691 		queue = RTW_TX_QUEUE_MGMT;
692 	else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq)))
693 		queue = ac_to_hwq[IEEE80211_AC_BE];
694 	else
695 		queue = ac_to_hwq[q_mapping];
696 
697 	return queue;
698 }
699 
700 static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
701 				      struct rtw_pci_tx_ring *ring)
702 {
703 	struct sk_buff *prev = skb_dequeue(&ring->queue);
704 	struct rtw_pci_tx_data *tx_data;
705 	dma_addr_t dma;
706 
707 	if (!prev)
708 		return;
709 
710 	tx_data = rtw_pci_get_tx_data(prev);
711 	dma = tx_data->dma;
712 	dma_unmap_single(&rtwpci->pdev->dev, dma, prev->len, DMA_TO_DEVICE);
713 	dev_kfree_skb_any(prev);
714 }
715 
716 static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
717 			      struct rtw_pci_rx_ring *rx_ring,
718 			      u32 idx)
719 {
720 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
721 	struct rtw_chip_info *chip = rtwdev->chip;
722 	struct rtw_pci_rx_buffer_desc *buf_desc;
723 	u32 desc_sz = chip->rx_buf_desc_sz;
724 	u16 total_pkt_size;
725 
726 	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
727 						     idx * desc_sz);
728 	total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);
729 
730 	/* rx tag mismatch, throw a warning */
731 	if (total_pkt_size != rtwpci->rx_tag)
732 		rtw_warn(rtwdev, "pci bus timeout, check dma status\n");
733 
734 	rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
735 }
736 
737 static u32 __pci_get_hw_tx_ring_rp(struct rtw_dev *rtwdev, u8 pci_q)
738 {
739 	u32 bd_idx_addr = rtw_pci_tx_queue_idx_addr[pci_q];
740 	u32 bd_idx = rtw_read16(rtwdev, bd_idx_addr + 2);
741 
742 	return FIELD_GET(TRX_BD_IDX_MASK, bd_idx);
743 }
744 
745 static void __pci_flush_queue(struct rtw_dev *rtwdev, u8 pci_q, bool drop)
746 {
747 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
748 	struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[pci_q];
749 	u32 cur_rp;
750 	u8 i;
751 
	/* Because the time taken by the I/O in __pci_get_hw_tx_ring_rp is
	 * somewhat variable, it's hard to define a reasonable fixed total
	 * timeout for the read_poll_timeout* helpers. Instead, bound the
	 * number of polling attempts and use a simple for loop with udelay.
	 */
757 	for (i = 0; i < 30; i++) {
758 		cur_rp = __pci_get_hw_tx_ring_rp(rtwdev, pci_q);
759 		if (cur_rp == ring->r.wp)
760 			return;
761 
762 		udelay(1);
763 	}
764 
765 	if (!drop)
766 		rtw_warn(rtwdev, "timed out to flush pci tx ring[%d]\n", pci_q);
767 }
768 
769 static void __rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 pci_queues,
770 				   bool drop)
771 {
772 	u8 q;
773 
774 	for (q = 0; q < RTK_MAX_TX_QUEUE_NUM; q++) {
		/* It may not be necessary to flush the BCN and H2C tx queues. */
776 		if (q == RTW_TX_QUEUE_BCN || q == RTW_TX_QUEUE_H2C)
777 			continue;
778 
779 		if (pci_queues & BIT(q))
780 			__pci_flush_queue(rtwdev, q, drop);
781 	}
782 }
783 
784 static void rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
785 {
786 	u32 pci_queues = 0;
787 	u8 i;
788 
	/* If all of the hardware queues are requested to be flushed,
	 * flush all of the pci queues.
	 */
792 	if (queues == BIT(rtwdev->hw->queues) - 1) {
793 		pci_queues = BIT(RTK_MAX_TX_QUEUE_NUM) - 1;
794 	} else {
795 		for (i = 0; i < rtwdev->hw->queues; i++)
796 			if (queues & BIT(i))
797 				pci_queues |= BIT(ac_to_hwq[i]);
798 	}
799 
800 	__rtw_pci_flush_queues(rtwdev, pci_queues, drop);
801 }
802 
803 static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue)
804 {
805 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
806 	struct rtw_pci_tx_ring *ring;
807 	u32 bd_idx;
808 
809 	ring = &rtwpci->tx_rings[queue];
810 	bd_idx = rtw_pci_tx_queue_idx_addr[queue];
811 
812 	spin_lock_bh(&rtwpci->irq_lock);
813 	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
814 		rtw_pci_deep_ps_leave(rtwdev);
815 	rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK);
816 	spin_unlock_bh(&rtwpci->irq_lock);
817 }
818 
819 static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev)
820 {
821 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
822 	u8 queue;
823 
824 	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++)
825 		if (test_and_clear_bit(queue, rtwpci->tx_queued))
826 			rtw_pci_tx_kick_off_queue(rtwdev, queue);
827 }
828 
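/* Fill the TX packet descriptor in front of the skb, map the whole buffer
 * for DMA, and set up a two-entry buffer descriptor: entry 0 covers the
 * packet descriptor, entry 1 the payload. For non-beacon queues the write
 * pointer is advanced here, and the ring is kicked off later.
 */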
829 static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
830 				 struct rtw_tx_pkt_info *pkt_info,
831 				 struct sk_buff *skb, u8 queue)
832 {
833 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
834 	struct rtw_chip_info *chip = rtwdev->chip;
835 	struct rtw_pci_tx_ring *ring;
836 	struct rtw_pci_tx_data *tx_data;
837 	dma_addr_t dma;
838 	u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
839 	u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
840 	u32 size;
841 	u32 psb_len;
842 	u8 *pkt_desc;
843 	struct rtw_pci_tx_buffer_desc *buf_desc;
844 
845 	ring = &rtwpci->tx_rings[queue];
846 
847 	size = skb->len;
848 
849 	if (queue == RTW_TX_QUEUE_BCN)
850 		rtw_pci_release_rsvd_page(rtwpci, ring);
851 	else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
852 		return -ENOSPC;
853 
854 	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
855 	memset(pkt_desc, 0, tx_pkt_desc_sz);
856 	pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
857 	rtw_tx_fill_tx_desc(pkt_info, skb);
858 	dma = dma_map_single(&rtwpci->pdev->dev, skb->data, skb->len,
859 			     DMA_TO_DEVICE);
860 	if (dma_mapping_error(&rtwpci->pdev->dev, dma))
861 		return -EBUSY;
862 
	/* the DMA mapping is committed after this point; there is no way back */
864 	buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
865 	memset(buf_desc, 0, tx_buf_desc_sz);
866 	psb_len = (skb->len - 1) / 128 + 1;
867 	if (queue == RTW_TX_QUEUE_BCN)
868 		psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;
869 
870 	buf_desc[0].psb_len = cpu_to_le16(psb_len);
871 	buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
872 	buf_desc[0].dma = cpu_to_le32(dma);
873 	buf_desc[1].buf_size = cpu_to_le16(size);
874 	buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);
875 
876 	tx_data = rtw_pci_get_tx_data(skb);
877 	tx_data->dma = dma;
878 	tx_data->sn = pkt_info->sn;
879 
880 	spin_lock_bh(&rtwpci->irq_lock);
881 
882 	skb_queue_tail(&ring->queue, skb);
883 
884 	if (queue == RTW_TX_QUEUE_BCN)
885 		goto out_unlock;
886 
887 	/* update write-index, and kick it off later */
888 	set_bit(queue, rtwpci->tx_queued);
889 	if (++ring->r.wp >= ring->r.len)
890 		ring->r.wp = 0;
891 
892 out_unlock:
893 	spin_unlock_bh(&rtwpci->irq_lock);
894 
895 	return 0;
896 }
897 
898 static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
899 					u32 size)
900 {
901 	struct sk_buff *skb;
902 	struct rtw_tx_pkt_info pkt_info = {0};
903 	u8 reg_bcn_work;
904 	int ret;
905 
906 	skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size);
907 	if (!skb)
908 		return -ENOMEM;
909 
910 	ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
911 	if (ret) {
912 		rtw_err(rtwdev, "failed to write rsvd page data\n");
913 		return ret;
914 	}
915 
916 	/* reserved pages go through beacon queue */
917 	reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
918 	reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
919 	rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
920 
921 	return 0;
922 }
923 
924 static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
925 {
926 	struct sk_buff *skb;
927 	struct rtw_tx_pkt_info pkt_info = {0};
928 	int ret;
929 
930 	skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size);
931 	if (!skb)
932 		return -ENOMEM;
933 
934 	ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
935 	if (ret) {
936 		rtw_err(rtwdev, "failed to write h2c data\n");
937 		return ret;
938 	}
939 
940 	rtw_pci_tx_kick_off_queue(rtwdev, RTW_TX_QUEUE_H2C);
941 
942 	return 0;
943 }
944 
945 static int rtw_pci_tx_write(struct rtw_dev *rtwdev,
946 			    struct rtw_tx_pkt_info *pkt_info,
947 			    struct sk_buff *skb)
948 {
949 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
950 	struct rtw_pci_tx_ring *ring;
951 	u8 queue = rtw_hw_queue_mapping(skb);
952 	int ret;
953 
954 	ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue);
955 	if (ret)
956 		return ret;
957 
958 	ring = &rtwpci->tx_rings[queue];
959 	spin_lock_bh(&rtwpci->irq_lock);
960 	if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
961 		ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
962 		ring->queue_stopped = true;
963 	}
964 	spin_unlock_bh(&rtwpci->irq_lock);
965 
966 	return 0;
967 }
968 
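/* TX completion handling: read the hardware read pointer of this queue, then
 * unmap and complete every skb the hardware has consumed, waking the
 * mac80211 queue again once enough descriptors are free.
 */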
969 static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
970 			   u8 hw_queue)
971 {
972 	struct ieee80211_hw *hw = rtwdev->hw;
973 	struct ieee80211_tx_info *info;
974 	struct rtw_pci_tx_ring *ring;
975 	struct rtw_pci_tx_data *tx_data;
976 	struct sk_buff *skb;
977 	u32 count;
978 	u32 bd_idx_addr;
979 	u32 bd_idx, cur_rp, rp_idx;
980 	u16 q_map;
981 
982 	ring = &rtwpci->tx_rings[hw_queue];
983 
984 	bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
985 	bd_idx = rtw_read32(rtwdev, bd_idx_addr);
986 	cur_rp = bd_idx >> 16;
987 	cur_rp &= TRX_BD_IDX_MASK;
988 	rp_idx = ring->r.rp;
989 	if (cur_rp >= ring->r.rp)
990 		count = cur_rp - ring->r.rp;
991 	else
992 		count = ring->r.len - (ring->r.rp - cur_rp);
993 
994 	while (count--) {
995 		skb = skb_dequeue(&ring->queue);
996 		if (!skb) {
997 			rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
998 				count, hw_queue, bd_idx, ring->r.rp, cur_rp);
999 			break;
1000 		}
1001 		tx_data = rtw_pci_get_tx_data(skb);
1002 		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
1003 				 DMA_TO_DEVICE);
1004 
1005 		/* just free command packets from host to card */
1006 		if (hw_queue == RTW_TX_QUEUE_H2C) {
1007 			dev_kfree_skb_irq(skb);
1008 			continue;
1009 		}
1010 
1011 		if (ring->queue_stopped &&
1012 		    avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) {
1013 			q_map = skb_get_queue_mapping(skb);
1014 			ieee80211_wake_queue(hw, q_map);
1015 			ring->queue_stopped = false;
1016 		}
1017 
1018 		if (++rp_idx >= ring->r.len)
1019 			rp_idx = 0;
1020 
1021 		skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
1022 
1023 		info = IEEE80211_SKB_CB(skb);
1024 
1025 		/* enqueue to wait for tx report */
1026 		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
1027 			rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
1028 			continue;
1029 		}
1030 
		/* report ACK for all others so they won't be marked as dropped */
1032 		if (info->flags & IEEE80211_TX_CTL_NO_ACK)
1033 			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
1034 		else
1035 			info->flags |= IEEE80211_TX_STAT_ACK;
1036 
1037 		ieee80211_tx_info_clear_status(info);
1038 		ieee80211_tx_status_irqsafe(hw, skb);
1039 	}
1040 
1041 	ring->r.rp = cur_rp;
1042 }
1043 
1044 static void rtw_pci_rx_isr(struct rtw_dev *rtwdev)
1045 {
1046 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1047 	struct napi_struct *napi = &rtwpci->napi;
1048 
1049 	napi_schedule(napi);
1050 }
1051 
1052 static int rtw_pci_get_hw_rx_ring_nr(struct rtw_dev *rtwdev,
1053 				     struct rtw_pci *rtwpci)
1054 {
1055 	struct rtw_pci_rx_ring *ring;
1056 	int count = 0;
1057 	u32 tmp, cur_wp;
1058 
1059 	ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
1060 	tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
1061 	cur_wp = u32_get_bits(tmp, TRX_BD_HW_IDX_MASK);
1062 	if (cur_wp >= ring->r.wp)
1063 		count = cur_wp - ring->r.wp;
1064 	else
1065 		count = ring->r.len - (ring->r.wp - cur_wp);
1066 
1067 	return count;
1068 }
1069 
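/* RX NAPI handler for the MPDU queue: for each filled buffer, copy the frame
 * (including its rx descriptor) into a freshly allocated skb, hand C2H
 * packets to the firmware layer and data frames to mac80211, then return the
 * original DMA buffer to the device.
 */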
1070 static u32 rtw_pci_rx_napi(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
1071 			   u8 hw_queue, u32 limit)
1072 {
1073 	struct rtw_chip_info *chip = rtwdev->chip;
1074 	struct napi_struct *napi = &rtwpci->napi;
1075 	struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
1076 	struct rtw_rx_pkt_stat pkt_stat;
1077 	struct ieee80211_rx_status rx_status;
1078 	struct sk_buff *skb, *new;
1079 	u32 cur_rp = ring->r.rp;
1080 	u32 count, rx_done = 0;
1081 	u32 pkt_offset;
1082 	u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
1083 	u32 buf_desc_sz = chip->rx_buf_desc_sz;
1084 	u32 new_len;
1085 	u8 *rx_desc;
1086 	dma_addr_t dma;
1087 
1088 	count = rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci);
1089 	count = min(count, limit);
1090 
1091 	while (count--) {
1092 		rtw_pci_dma_check(rtwdev, ring, cur_rp);
1093 		skb = ring->buf[cur_rp];
1094 		dma = *((dma_addr_t *)skb->cb);
1095 		dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
1096 					DMA_FROM_DEVICE);
1097 		rx_desc = skb->data;
1098 		chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);
1099 
1100 		/* offset from rx_desc to payload */
1101 		pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
1102 			     pkt_stat.shift;
1103 
1104 		/* allocate a new skb for this frame,
1105 		 * discard the frame if none available
1106 		 */
1107 		new_len = pkt_stat.pkt_len + pkt_offset;
1108 		new = dev_alloc_skb(new_len);
1109 		if (WARN_ONCE(!new, "rx routine starvation\n"))
1110 			goto next_rp;
1111 
		/* copy the DMA'd data, including the rx_desc, into the new skb */
1113 		skb_put_data(new, skb->data, new_len);
1114 
1115 		if (pkt_stat.is_c2h) {
1116 			rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
1117 		} else {
1118 			/* remove rx_desc */
1119 			skb_pull(new, pkt_offset);
1120 
1121 			rtw_rx_stats(rtwdev, pkt_stat.vif, new);
1122 			memcpy(new->cb, &rx_status, sizeof(rx_status));
1123 			ieee80211_rx_napi(rtwdev->hw, NULL, new, napi);
1124 			rx_done++;
1125 		}
1126 
1127 next_rp:
1128 		/* new skb delivered to mac80211, re-enable original skb DMA */
1129 		rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
1130 					    buf_desc_sz);
1131 
		/* host reads the next element in the ring */
1133 		if (++cur_rp >= ring->r.len)
1134 			cur_rp = 0;
1135 	}
1136 
1137 	ring->r.rp = cur_rp;
	/* 'rp', the last position we have read, is treated as the previous
	 * position of 'wp', which is used to calculate 'count' next time.
	 */
1141 	ring->r.wp = cur_rp;
1142 	rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
1143 
1144 	return rx_done;
1145 }
1146 
1147 static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
1148 				   struct rtw_pci *rtwpci, u32 *irq_status)
1149 {
1150 	unsigned long flags;
1151 
1152 	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
1153 
1154 	irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
1155 	irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
1156 	if (rtw_chip_wcpu_11ac(rtwdev))
1157 		irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
1158 	else
1159 		irq_status[3] = 0;
1160 	irq_status[0] &= rtwpci->irq_mask[0];
1161 	irq_status[1] &= rtwpci->irq_mask[1];
1162 	irq_status[3] &= rtwpci->irq_mask[3];
1163 	rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
1164 	rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
1165 	if (rtw_chip_wcpu_11ac(rtwdev))
1166 		rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
1167 
1168 	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
1169 }
1170 
1171 static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
1172 {
1173 	struct rtw_dev *rtwdev = dev;
1174 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1175 
	/* Disable RTW PCI interrupts to avoid further interrupts before the
	 * thread function has finished.
	 *
	 * Disabling HIMR here also avoids a new HISR flag being raised before
	 * the HISRs have been write-1-cleared for MSI. If not all of the HISRs
	 * are cleared, the edge-triggered interrupt will not be generated when
	 * a new HISR flag is set.
	 */
1184 	rtw_pci_disable_interrupt(rtwdev, rtwpci);
1185 
1186 	return IRQ_WAKE_THREAD;
1187 }
1188 
1189 static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
1190 {
1191 	struct rtw_dev *rtwdev = dev;
1192 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1193 	u32 irq_status[4];
1194 	bool rx = false;
1195 
1196 	spin_lock_bh(&rtwpci->irq_lock);
1197 	rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);
1198 
1199 	if (irq_status[0] & IMR_MGNTDOK)
1200 		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
1201 	if (irq_status[0] & IMR_HIGHDOK)
1202 		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
1203 	if (irq_status[0] & IMR_BEDOK)
1204 		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
1205 	if (irq_status[0] & IMR_BKDOK)
1206 		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
1207 	if (irq_status[0] & IMR_VODOK)
1208 		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
1209 	if (irq_status[0] & IMR_VIDOK)
1210 		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
1211 	if (irq_status[3] & IMR_H2CDOK)
1212 		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
1213 	if (irq_status[0] & IMR_ROK) {
1214 		rtw_pci_rx_isr(rtwdev);
1215 		rx = true;
1216 	}
1217 	if (unlikely(irq_status[0] & IMR_C2HCMD))
1218 		rtw_fw_c2h_cmd_isr(rtwdev);
1219 
1220 	/* all of the jobs for this interrupt have been done */
1221 	if (rtwpci->running)
1222 		rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
1223 	spin_unlock_bh(&rtwpci->irq_lock);
1224 
1225 	return IRQ_HANDLED;
1226 }
1227 
1228 static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
1229 			      struct pci_dev *pdev)
1230 {
1231 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1232 	unsigned long len;
1233 	u8 bar_id = 2;
1234 	int ret;
1235 
1236 	ret = pci_request_regions(pdev, KBUILD_MODNAME);
1237 	if (ret) {
1238 		rtw_err(rtwdev, "failed to request pci regions\n");
1239 		return ret;
1240 	}
1241 
1242 	len = pci_resource_len(pdev, bar_id);
1243 	rtwpci->mmap = pci_iomap(pdev, bar_id, len);
1244 	if (!rtwpci->mmap) {
1245 		pci_release_regions(pdev);
1246 		rtw_err(rtwdev, "failed to map pci memory\n");
1247 		return -ENOMEM;
1248 	}
1249 
1250 	return 0;
1251 }
1252 
1253 static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
1254 				 struct pci_dev *pdev)
1255 {
1256 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1257 
1258 	if (rtwpci->mmap) {
1259 		pci_iounmap(pdev, rtwpci->mmap);
1260 		pci_release_regions(pdev);
1261 	}
1262 }
1263 
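/* Indirect DBI write: program the data byte and target address, set the
 * write flag, then poll the flag until the hardware completes the access.
 */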
1264 static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
1265 {
1266 	u16 write_addr;
1267 	u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK);
1268 	u8 flag;
1269 	u8 cnt;
1270 
1271 	write_addr = addr & BITS_DBI_ADDR_MASK;
1272 	write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN);
1273 	rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
1274 	rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
1275 	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16);
1276 
1277 	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
1278 		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
1279 		if (flag == 0)
1280 			return;
1281 
1282 		udelay(10);
1283 	}
1284 
1285 	WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr);
1286 }
1287 
1288 static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value)
1289 {
1290 	u16 read_addr = addr & BITS_DBI_ADDR_MASK;
1291 	u8 flag;
1292 	u8 cnt;
1293 
1294 	rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr);
1295 	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16);
1296 
1297 	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
1298 		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
1299 		if (flag == 0) {
1300 			read_addr = REG_DBI_RDATA_V1 + (addr & 3);
1301 			*value = rtw_read8(rtwdev, read_addr);
1302 			return 0;
1303 		}
1304 
1305 		udelay(10);
1306 	}
1307 
1308 	WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
1309 	return -EIO;
1310 }
1311 
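/* Indirect MDIO write for the PCIe PHY parameters: program the data, address
 * and page (selected by the address and the gen1/gen2 flag), set the write
 * flag, then poll it until the access completes.
 */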
1312 static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
1313 {
1314 	u8 page;
1315 	u8 wflag;
1316 	u8 cnt;
1317 
1318 	rtw_write16(rtwdev, REG_MDIO_V1, data);
1319 
1320 	page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1;
1321 	page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2;
1322 	rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK);
1323 	rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
1324 	rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);
1325 
1326 	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
1327 		wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
1328 					BIT_MDIO_WFLAG_V1);
1329 		if (wflag == 0)
1330 			return;
1331 
1332 		udelay(10);
1333 	}
1334 
1335 	WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr);
1336 }
1337 
1338 static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
1339 {
1340 	u8 value;
1341 	int ret;
1342 
1343 	if (rtw_pci_disable_aspm)
1344 		return;
1345 
1346 	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
1347 	if (ret) {
1348 		rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
1349 		return;
1350 	}
1351 
1352 	if (enable)
1353 		value |= BIT_CLKREQ_SW_EN;
1354 	else
1355 		value &= ~BIT_CLKREQ_SW_EN;
1356 
1357 	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
1358 }
1359 
1360 static void rtw_pci_clkreq_pad_low(struct rtw_dev *rtwdev, bool enable)
1361 {
1362 	u8 value;
1363 	int ret;
1364 
1365 	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
1366 	if (ret) {
1367 		rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
1368 		return;
1369 	}
1370 
1371 	if (enable)
1372 		value &= ~BIT_CLKREQ_N_PAD;
1373 	else
1374 		value |= BIT_CLKREQ_N_PAD;
1375 
1376 	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
1377 }
1378 
1379 static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
1380 {
1381 	u8 value;
1382 	int ret;
1383 
1384 	if (rtw_pci_disable_aspm)
1385 		return;
1386 
1387 	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
1388 	if (ret) {
1389 		rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
1390 		return;
1391 	}
1392 
1393 	if (enable)
1394 		value |= BIT_L1_SW_EN;
1395 	else
1396 		value &= ~BIT_L1_SW_EN;
1397 
1398 	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
1399 }
1400 
1401 static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
1402 {
1403 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1404 
	/* Like CLKREQ, ASPM is also implemented by two HW modules, and can
	 * only be enabled when the host supports it.
	 *
	 * The ASPM mechanism should only be enabled when the driver/firmware
	 * enters power save mode, i.e. when there is no heavy traffic, because
	 * we have seen interoperability issues where the link enters L1 on the
	 * fly even while the driver is pushing high throughput. This is
	 * probably because ASPM behavior varies slightly between SoCs.
	 */
1415 	if (!(rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1))
1416 		return;
1417 
1418 	if ((enter && atomic_dec_if_positive(&rtwpci->link_usage) == 0) ||
1419 	    (!enter && atomic_inc_return(&rtwpci->link_usage) == 1))
1420 		rtw_pci_aspm_set(rtwdev, enter);
1421 }
1422 
1423 static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
1424 {
1425 	struct rtw_chip_info *chip = rtwdev->chip;
1426 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1427 	struct pci_dev *pdev = rtwpci->pdev;
1428 	u16 link_ctrl;
1429 	int ret;
1430 
	/* RTL8822CE has REFCLK auto calibration enabled, so it does not need
	 * extra clock delay to cover the REFCLK timing gap.
	 */
1434 	if (chip->id == RTW_CHIP_TYPE_8822C)
1435 		rtw_dbi_write8(rtwdev, RTK_PCIE_CLKDLY_CTRL, 0);
1436 
	/* Although the link control register can be set through the standard
	 * PCIE configuration space, by Realtek's design the driver should
	 * first check whether the host supports CLKREQ/ASPM before enabling
	 * the HW module.
	 *
	 * These functions are implemented by two associated HW modules: one
	 * accesses the PCIE configuration space to follow the host settings,
	 * and the other actually performs the CLKREQ/ASPM mechanisms. The
	 * latter is disabled by default, because the host may not support it,
	 * and wrong settings (e.g. CLKREQ# not bi-directional) could lead to
	 * losing the device if the HW misbehaves on the link.
	 *
	 * Hence the driver should first check that the PCIE configuration
	 * space is synced and enabled, and only then turn on the module that
	 * actually implements the mechanism.
	 */
1453 	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
1454 	if (ret) {
1455 		rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
1456 		return;
1457 	}
1458 
1459 	if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
1460 		rtw_pci_clkreq_set(rtwdev, true);
1461 
1462 	rtwpci->link_ctrl = link_ctrl;
1463 }
1464 
1465 static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
1466 {
1467 	struct rtw_chip_info *chip = rtwdev->chip;
1468 
1469 	switch (chip->id) {
1470 	case RTW_CHIP_TYPE_8822C:
1471 		if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D)
1472 			rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG,
1473 					 BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1);
1474 		break;
1475 	default:
1476 		break;
1477 	}
1478 }
1479 
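/* Apply the interface PHY parameters from the chip's intf_table that match
 * the current cut version, using MDIO writes for PHY registers and DBI
 * writes otherwise, then configure the PCIe link (CLKREQ/ASPM).
 */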
1480 static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
1481 {
1482 	struct rtw_chip_info *chip = rtwdev->chip;
1483 	const struct rtw_intf_phy_para *para;
1484 	u16 cut;
1485 	u16 value;
1486 	u16 offset;
1487 	int i;
1488 
1489 	cut = BIT(0) << rtwdev->hal.cut_version;
1490 
1491 	for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
1492 		para = &chip->intf_table->gen1_para[i];
1493 		if (!(para->cut_mask & cut))
1494 			continue;
1495 		if (para->offset == 0xffff)
1496 			break;
1497 		offset = para->offset;
1498 		value = para->value;
1499 		if (para->ip_sel == RTW_IP_SEL_PHY)
1500 			rtw_mdio_write(rtwdev, offset, value, true);
1501 		else
1502 			rtw_dbi_write8(rtwdev, offset, value);
1503 	}
1504 
1505 	for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
1506 		para = &chip->intf_table->gen2_para[i];
1507 		if (!(para->cut_mask & cut))
1508 			continue;
1509 		if (para->offset == 0xffff)
1510 			break;
1511 		offset = para->offset;
1512 		value = para->value;
1513 		if (para->ip_sel == RTW_IP_SEL_PHY)
1514 			rtw_mdio_write(rtwdev, offset, value, false);
1515 		else
1516 			rtw_dbi_write8(rtwdev, offset, value);
1517 	}
1518 
1519 	rtw_pci_link_cfg(rtwdev);
1520 }
1521 
1522 static int __maybe_unused rtw_pci_suspend(struct device *dev)
1523 {
1524 	struct ieee80211_hw *hw = dev_get_drvdata(dev);
1525 	struct rtw_dev *rtwdev = hw->priv;
1526 	struct rtw_chip_info *chip = rtwdev->chip;
1527 	struct rtw_efuse *efuse = &rtwdev->efuse;
1528 
1529 	if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
1530 		rtw_pci_clkreq_pad_low(rtwdev, true);
1531 	return 0;
1532 }
1533 
1534 static int __maybe_unused rtw_pci_resume(struct device *dev)
1535 {
1536 	struct ieee80211_hw *hw = dev_get_drvdata(dev);
1537 	struct rtw_dev *rtwdev = hw->priv;
1538 	struct rtw_chip_info *chip = rtwdev->chip;
1539 	struct rtw_efuse *efuse = &rtwdev->efuse;
1540 
1541 	if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
1542 		rtw_pci_clkreq_pad_low(rtwdev, false);
1543 	return 0;
1544 }
1545 
1546 SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
1547 EXPORT_SYMBOL(rtw_pm_ops);
1548 
1549 static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1550 {
1551 	int ret;
1552 
1553 	ret = pci_enable_device(pdev);
1554 	if (ret) {
1555 		rtw_err(rtwdev, "failed to enable pci device\n");
1556 		return ret;
1557 	}
1558 
1559 	pci_set_master(pdev);
1560 	pci_set_drvdata(pdev, rtwdev->hw);
1561 	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
1562 
1563 	return 0;
1564 }
1565 
1566 static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1567 {
1568 	pci_clear_master(pdev);
1569 	pci_disable_device(pdev);
1570 }
1571 
1572 static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1573 {
1574 	struct rtw_pci *rtwpci;
1575 	int ret;
1576 
1577 	rtwpci = (struct rtw_pci *)rtwdev->priv;
1578 	rtwpci->pdev = pdev;
1579 
	/* after this the driver can access the hw registers */
1581 	ret = rtw_pci_io_mapping(rtwdev, pdev);
1582 	if (ret) {
1583 		rtw_err(rtwdev, "failed to request pci io region\n");
1584 		goto err_out;
1585 	}
1586 
1587 	ret = rtw_pci_init(rtwdev);
1588 	if (ret) {
1589 		rtw_err(rtwdev, "failed to allocate pci resources\n");
1590 		goto err_io_unmap;
1591 	}
1592 
1593 	return 0;
1594 
1595 err_io_unmap:
1596 	rtw_pci_io_unmapping(rtwdev, pdev);
1597 
1598 err_out:
1599 	return ret;
1600 }
1601 
1602 static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1603 {
1604 	rtw_pci_deinit(rtwdev);
1605 	rtw_pci_io_unmapping(rtwdev, pdev);
1606 }
1607 
1608 static struct rtw_hci_ops rtw_pci_ops = {
1609 	.tx_write = rtw_pci_tx_write,
1610 	.tx_kick_off = rtw_pci_tx_kick_off,
1611 	.flush_queues = rtw_pci_flush_queues,
1612 	.setup = rtw_pci_setup,
1613 	.start = rtw_pci_start,
1614 	.stop = rtw_pci_stop,
1615 	.deep_ps = rtw_pci_deep_ps,
1616 	.link_ps = rtw_pci_link_ps,
1617 	.interface_cfg = rtw_pci_interface_cfg,
1618 
1619 	.read8 = rtw_pci_read8,
1620 	.read16 = rtw_pci_read16,
1621 	.read32 = rtw_pci_read32,
1622 	.write8 = rtw_pci_write8,
1623 	.write16 = rtw_pci_write16,
1624 	.write32 = rtw_pci_write32,
1625 	.write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
1626 	.write_data_h2c = rtw_pci_write_data_h2c,
1627 };
1628 
1629 static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1630 {
1631 	unsigned int flags = PCI_IRQ_LEGACY;
1632 	int ret;
1633 
1634 	if (!rtw_disable_msi)
1635 		flags |= PCI_IRQ_MSI;
1636 
1637 	ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
1638 	if (ret < 0) {
1639 		rtw_err(rtwdev, "failed to alloc PCI irq vectors\n");
1640 		return ret;
1641 	}
1642 
1643 	ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
1644 					rtw_pci_interrupt_handler,
1645 					rtw_pci_interrupt_threadfn,
1646 					IRQF_SHARED, KBUILD_MODNAME, rtwdev);
1647 	if (ret) {
1648 		rtw_err(rtwdev, "failed to request irq %d\n", ret);
1649 		pci_free_irq_vectors(pdev);
1650 	}
1651 
1652 	return ret;
1653 }
1654 
1655 static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1656 {
1657 	devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
1658 	pci_free_irq_vectors(pdev);
1659 }
1660 
1661 static int rtw_pci_napi_poll(struct napi_struct *napi, int budget)
1662 {
1663 	struct rtw_pci *rtwpci = container_of(napi, struct rtw_pci, napi);
1664 	struct rtw_dev *rtwdev = container_of((void *)rtwpci, struct rtw_dev,
1665 					      priv);
1666 	int work_done = 0;
1667 
1668 	if (rtwpci->rx_no_aspm)
1669 		rtw_pci_link_ps(rtwdev, false);
1670 
1671 	while (work_done < budget) {
1672 		u32 work_done_once;
1673 
1674 		work_done_once = rtw_pci_rx_napi(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU,
1675 						 budget - work_done);
1676 		if (work_done_once == 0)
1677 			break;
1678 		work_done += work_done_once;
1679 	}
1680 	if (work_done < budget) {
1681 		napi_complete_done(napi, work_done);
1682 		spin_lock_bh(&rtwpci->irq_lock);
1683 		if (rtwpci->running)
1684 			rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
1685 		spin_unlock_bh(&rtwpci->irq_lock);
		/* If the ISR fires during polling, before napi_complete(), and
		 * no further data is received, data left on the DMA ring will
		 * not be processed immediately. Check whether the DMA ring is
		 * empty and perform napi_schedule accordingly.
		 */
1691 		if (rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci))
1692 			napi_schedule(napi);
1693 	}
1694 	if (rtwpci->rx_no_aspm)
1695 		rtw_pci_link_ps(rtwdev, true);
1696 
1697 	return work_done;
1698 }
1699 
1700 static void rtw_pci_napi_init(struct rtw_dev *rtwdev)
1701 {
1702 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1703 
1704 	init_dummy_netdev(&rtwpci->netdev);
1705 	netif_napi_add(&rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll,
1706 		       RTW_NAPI_WEIGHT_NUM);
1707 }
1708 
1709 static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
1710 {
1711 	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1712 
1713 	rtw_pci_napi_stop(rtwdev);
1714 	netif_napi_del(&rtwpci->napi);
1715 }
1716 
1717 int rtw_pci_probe(struct pci_dev *pdev,
1718 		  const struct pci_device_id *id)
1719 {
1720 	struct pci_dev *bridge = pci_upstream_bridge(pdev);
1721 	struct ieee80211_hw *hw;
1722 	struct rtw_dev *rtwdev;
1723 	struct rtw_pci *rtwpci;
1724 	int drv_data_size;
1725 	int ret;
1726 
1727 	drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
1728 	hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
1729 	if (!hw) {
1730 		dev_err(&pdev->dev, "failed to allocate hw\n");
1731 		return -ENOMEM;
1732 	}
1733 
1734 	rtwdev = hw->priv;
1735 	rtwdev->hw = hw;
1736 	rtwdev->dev = &pdev->dev;
1737 	rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
1738 	rtwdev->hci.ops = &rtw_pci_ops;
1739 	rtwdev->hci.type = RTW_HCI_TYPE_PCIE;
1740 
1741 	rtwpci = (struct rtw_pci *)rtwdev->priv;
1742 	atomic_set(&rtwpci->link_usage, 1);
1743 
1744 	ret = rtw_core_init(rtwdev);
1745 	if (ret)
1746 		goto err_release_hw;
1747 
1748 	rtw_dbg(rtwdev, RTW_DBG_PCI,
1749 		"rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
1750 		pdev->vendor, pdev->device, pdev->revision);
1751 
1752 	ret = rtw_pci_claim(rtwdev, pdev);
1753 	if (ret) {
1754 		rtw_err(rtwdev, "failed to claim pci device\n");
1755 		goto err_deinit_core;
1756 	}
1757 
1758 	ret = rtw_pci_setup_resource(rtwdev, pdev);
1759 	if (ret) {
1760 		rtw_err(rtwdev, "failed to setup pci resources\n");
1761 		goto err_pci_declaim;
1762 	}
1763 
1764 	rtw_pci_napi_init(rtwdev);
1765 
1766 	ret = rtw_chip_info_setup(rtwdev);
1767 	if (ret) {
1768 		rtw_err(rtwdev, "failed to setup chip information\n");
1769 		goto err_destroy_pci;
1770 	}
1771 
1772 	/* Disable PCIe ASPM L1 while doing NAPI poll for 8821CE */
1773 	if (pdev->device == 0xc821 && bridge->vendor == PCI_VENDOR_ID_INTEL)
1774 		rtwpci->rx_no_aspm = true;
1775 
1776 	rtw_pci_phy_cfg(rtwdev);
1777 
1778 	ret = rtw_register_hw(rtwdev, hw);
1779 	if (ret) {
1780 		rtw_err(rtwdev, "failed to register hw\n");
1781 		goto err_destroy_pci;
1782 	}
1783 
1784 	ret = rtw_pci_request_irq(rtwdev, pdev);
1785 	if (ret) {
1786 		ieee80211_unregister_hw(hw);
1787 		goto err_destroy_pci;
1788 	}
1789 
1790 	return 0;
1791 
1792 err_destroy_pci:
1793 	rtw_pci_napi_deinit(rtwdev);
1794 	rtw_pci_destroy(rtwdev, pdev);
1795 
1796 err_pci_declaim:
1797 	rtw_pci_declaim(rtwdev, pdev);
1798 
1799 err_deinit_core:
1800 	rtw_core_deinit(rtwdev);
1801 
1802 err_release_hw:
1803 	ieee80211_free_hw(hw);
1804 
1805 	return ret;
1806 }
1807 EXPORT_SYMBOL(rtw_pci_probe);
1808 
1809 void rtw_pci_remove(struct pci_dev *pdev)
1810 {
1811 	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1812 	struct rtw_dev *rtwdev;
1813 	struct rtw_pci *rtwpci;
1814 
1815 	if (!hw)
1816 		return;
1817 
1818 	rtwdev = hw->priv;
1819 	rtwpci = (struct rtw_pci *)rtwdev->priv;
1820 
1821 	rtw_unregister_hw(rtwdev, hw);
1822 	rtw_pci_disable_interrupt(rtwdev, rtwpci);
1823 	rtw_pci_napi_deinit(rtwdev);
1824 	rtw_pci_destroy(rtwdev, pdev);
1825 	rtw_pci_declaim(rtwdev, pdev);
1826 	rtw_pci_free_irq(rtwdev, pdev);
1827 	rtw_core_deinit(rtwdev);
1828 	ieee80211_free_hw(hw);
1829 }
1830 EXPORT_SYMBOL(rtw_pci_remove);
1831 
1832 void rtw_pci_shutdown(struct pci_dev *pdev)
1833 {
1834 	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1835 	struct rtw_dev *rtwdev;
1836 	struct rtw_chip_info *chip;
1837 
1838 	if (!hw)
1839 		return;
1840 
1841 	rtwdev = hw->priv;
1842 	chip = rtwdev->chip;
1843 
1844 	if (chip->ops->shutdown)
1845 		chip->ops->shutdown(rtwdev);
1846 
1847 	pci_set_power_state(pdev, PCI_D3hot);
1848 }
1849 EXPORT_SYMBOL(rtw_pci_shutdown);
1850 
1851 MODULE_AUTHOR("Realtek Corporation");
1852 MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
1853 MODULE_LICENSE("Dual BSD/GPL");
1854