xref: /openbmc/linux/drivers/net/wireless/realtek/rtw88/pci.c (revision c900529f3d9161bfde5cca0754f83b4d3c3e0220)
1  // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2  /* Copyright(c) 2018-2019  Realtek Corporation
3   */
4  
5  #include <linux/module.h>
6  #include <linux/pci.h>
7  #include "main.h"
8  #include "pci.h"
9  #include "reg.h"
10  #include "tx.h"
11  #include "rx.h"
12  #include "fw.h"
13  #include "ps.h"
14  #include "debug.h"
15  
16  static bool rtw_disable_msi;
17  static bool rtw_pci_disable_aspm;
18  module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
19  module_param_named(disable_aspm, rtw_pci_disable_aspm, bool, 0644);
20  MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");
21  MODULE_PARM_DESC(disable_aspm, "Set Y to disable PCI ASPM support");
22  
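/* Hypothetical usage example (assuming the PCI glue is built as rtw88_pci.ko):
 *   modprobe rtw88_pci disable_msi=Y disable_aspm=Y
 * With the 0644 permissions above, the current values can also be read back
 * from /sys/module/rtw88_pci/parameters/.
 */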
23  static u32 rtw_pci_tx_queue_idx_addr[] = {
24  	[RTW_TX_QUEUE_BK]	= RTK_PCI_TXBD_IDX_BKQ,
25  	[RTW_TX_QUEUE_BE]	= RTK_PCI_TXBD_IDX_BEQ,
26  	[RTW_TX_QUEUE_VI]	= RTK_PCI_TXBD_IDX_VIQ,
27  	[RTW_TX_QUEUE_VO]	= RTK_PCI_TXBD_IDX_VOQ,
28  	[RTW_TX_QUEUE_MGMT]	= RTK_PCI_TXBD_IDX_MGMTQ,
29  	[RTW_TX_QUEUE_HI0]	= RTK_PCI_TXBD_IDX_HI0Q,
30  	[RTW_TX_QUEUE_H2C]	= RTK_PCI_TXBD_IDX_H2CQ,
31  };
32  
33  static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb,
34  			      enum rtw_tx_queue_type queue)
35  {
36  	switch (queue) {
37  	case RTW_TX_QUEUE_BCN:
38  		return TX_DESC_QSEL_BEACON;
39  	case RTW_TX_QUEUE_H2C:
40  		return TX_DESC_QSEL_H2C;
41  	case RTW_TX_QUEUE_MGMT:
42  		return TX_DESC_QSEL_MGMT;
43  	case RTW_TX_QUEUE_HI0:
44  		return TX_DESC_QSEL_HIGH;
45  	default:
46  		return skb->priority;
47  	}
48  };
49  
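/* MMIO accessors: rtwpci->mmap is the BAR mapping set up later in
 * rtw_pci_io_mapping(); all register reads/writes below go through it.
 */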
50  static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
51  {
52  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
53  
54  	return readb(rtwpci->mmap + addr);
55  }
56  
57  static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
58  {
59  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
60  
61  	return readw(rtwpci->mmap + addr);
62  }
63  
64  static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
65  {
66  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
67  
68  	return readl(rtwpci->mmap + addr);
69  }
70  
71  static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
72  {
73  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
74  
75  	writeb(val, rtwpci->mmap + addr);
76  }
77  
78  static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
79  {
80  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
81  
82  	writew(val, rtwpci->mmap + addr);
83  }
84  
85  static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
86  {
87  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
88  
89  	writel(val, rtwpci->mmap + addr);
90  }
91  
92  static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
93  				      struct rtw_pci_tx_ring *tx_ring)
94  {
95  	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
96  	struct rtw_pci_tx_data *tx_data;
97  	struct sk_buff *skb, *tmp;
98  	dma_addr_t dma;
99  
100  	/* free every skb remaining in the tx list */
101  	skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
102  		__skb_unlink(skb, &tx_ring->queue);
103  		tx_data = rtw_pci_get_tx_data(skb);
104  		dma = tx_data->dma;
105  
106  		dma_unmap_single(&pdev->dev, dma, skb->len, DMA_TO_DEVICE);
107  		dev_kfree_skb_any(skb);
108  	}
109  }
110  
111  static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
112  				 struct rtw_pci_tx_ring *tx_ring)
113  {
114  	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
115  	u8 *head = tx_ring->r.head;
116  	u32 len = tx_ring->r.len;
117  	int ring_sz = len * tx_ring->r.desc_size;
118  
119  	rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
120  
121  	/* free the ring itself */
122  	dma_free_coherent(&pdev->dev, ring_sz, head, tx_ring->r.dma);
123  	tx_ring->r.head = NULL;
124  }
125  
126  static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
127  				      struct rtw_pci_rx_ring *rx_ring)
128  {
129  	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
130  	struct sk_buff *skb;
131  	int buf_sz = RTK_PCI_RX_BUF_SIZE;
132  	dma_addr_t dma;
133  	int i;
134  
135  	for (i = 0; i < rx_ring->r.len; i++) {
136  		skb = rx_ring->buf[i];
137  		if (!skb)
138  			continue;
139  
140  		dma = *((dma_addr_t *)skb->cb);
141  		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
142  		dev_kfree_skb(skb);
143  		rx_ring->buf[i] = NULL;
144  	}
145  }
146  
147  static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
148  				 struct rtw_pci_rx_ring *rx_ring)
149  {
150  	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
151  	u8 *head = rx_ring->r.head;
152  	int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;
153  
154  	rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);
155  
156  	dma_free_coherent(&pdev->dev, ring_sz, head, rx_ring->r.dma);
157  }
158  
159  static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
160  {
161  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
162  	struct rtw_pci_tx_ring *tx_ring;
163  	struct rtw_pci_rx_ring *rx_ring;
164  	int i;
165  
166  	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
167  		tx_ring = &rtwpci->tx_rings[i];
168  		rtw_pci_free_tx_ring(rtwdev, tx_ring);
169  	}
170  
171  	for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
172  		rx_ring = &rtwpci->rx_rings[i];
173  		rtw_pci_free_rx_ring(rtwdev, rx_ring);
174  	}
175  }
176  
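/* Allocate one TX ring: a coherent DMA area of desc_size * len bytes for the
 * buffer descriptors, plus an skb queue that shadows the hardware ring.
 */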
177  static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
178  				struct rtw_pci_tx_ring *tx_ring,
179  				u8 desc_size, u32 len)
180  {
181  	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
182  	int ring_sz = desc_size * len;
183  	dma_addr_t dma;
184  	u8 *head;
185  
186  	if (len > TRX_BD_IDX_MASK) {
187  		rtw_err(rtwdev, "len %d exceeds maximum TX entries\n", len);
188  		return -EINVAL;
189  	}
190  
191  	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
192  	if (!head) {
193  		rtw_err(rtwdev, "failed to allocate tx ring\n");
194  		return -ENOMEM;
195  	}
196  
197  	skb_queue_head_init(&tx_ring->queue);
198  	tx_ring->r.head = head;
199  	tx_ring->r.dma = dma;
200  	tx_ring->r.len = len;
201  	tx_ring->r.desc_size = desc_size;
202  	tx_ring->r.wp = 0;
203  	tx_ring->r.rp = 0;
204  
205  	return 0;
206  }
207  
208  static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
209  				 struct rtw_pci_rx_ring *rx_ring,
210  				 u32 idx, u32 desc_sz)
211  {
212  	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
213  	struct rtw_pci_rx_buffer_desc *buf_desc;
214  	int buf_sz = RTK_PCI_RX_BUF_SIZE;
215  	dma_addr_t dma;
216  
217  	if (!skb)
218  		return -EINVAL;
219  
220  	dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
221  	if (dma_mapping_error(&pdev->dev, dma))
222  		return -EBUSY;
223  
224  	*((dma_addr_t *)skb->cb) = dma;
225  	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
226  						     idx * desc_sz);
227  	memset(buf_desc, 0, sizeof(*buf_desc));
228  	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
229  	buf_desc->dma = cpu_to_le32(dma);
230  
231  	return 0;
232  }
233  
234  static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
235  					struct rtw_pci_rx_ring *rx_ring,
236  					u32 idx, u32 desc_sz)
237  {
238  	struct device *dev = rtwdev->dev;
239  	struct rtw_pci_rx_buffer_desc *buf_desc;
240  	int buf_sz = RTK_PCI_RX_BUF_SIZE;
241  
242  	dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);
243  
244  	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
245  						     idx * desc_sz);
246  	memset(buf_desc, 0, sizeof(*buf_desc));
247  	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
248  	buf_desc->dma = cpu_to_le32(dma);
249  }
250  
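/* Allocate the RX ring: a coherent descriptor area plus 'len' preallocated
 * skbs, each DMA-mapped and wired into its buffer descriptor.
 */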
251  static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
252  				struct rtw_pci_rx_ring *rx_ring,
253  				u8 desc_size, u32 len)
254  {
255  	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
256  	struct sk_buff *skb = NULL;
257  	dma_addr_t dma;
258  	u8 *head;
259  	int ring_sz = desc_size * len;
260  	int buf_sz = RTK_PCI_RX_BUF_SIZE;
261  	int i, allocated;
262  	int ret = 0;
263  
264  	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
265  	if (!head) {
266  		rtw_err(rtwdev, "failed to allocate rx ring\n");
267  		return -ENOMEM;
268  	}
269  	rx_ring->r.head = head;
270  
271  	for (i = 0; i < len; i++) {
272  		skb = dev_alloc_skb(buf_sz);
273  		if (!skb) {
274  			allocated = i;
275  			ret = -ENOMEM;
276  			goto err_out;
277  		}
278  
279  		memset(skb->data, 0, buf_sz);
280  		rx_ring->buf[i] = skb;
281  		ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
282  		if (ret) {
283  			allocated = i;
284  			dev_kfree_skb_any(skb);
285  			goto err_out;
286  		}
287  	}
288  
289  	rx_ring->r.dma = dma;
290  	rx_ring->r.len = len;
291  	rx_ring->r.desc_size = desc_size;
292  	rx_ring->r.wp = 0;
293  	rx_ring->r.rp = 0;
294  
295  	return 0;
296  
297  err_out:
298  	for (i = 0; i < allocated; i++) {
299  		skb = rx_ring->buf[i];
300  		if (!skb)
301  			continue;
302  		dma = *((dma_addr_t *)skb->cb);
303  		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
304  		dev_kfree_skb_any(skb);
305  		rx_ring->buf[i] = NULL;
306  	}
307  	dma_free_coherent(&pdev->dev, ring_sz, head, dma);
308  
309  	rtw_err(rtwdev, "failed to init rx buffer\n");
310  
311  	return ret;
312  }
313  
314  static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
315  {
316  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
317  	struct rtw_pci_tx_ring *tx_ring;
318  	struct rtw_pci_rx_ring *rx_ring;
319  	const struct rtw_chip_info *chip = rtwdev->chip;
320  	int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
321  	int tx_desc_size, rx_desc_size;
322  	u32 len;
323  	int ret;
324  
325  	tx_desc_size = chip->tx_buf_desc_sz;
326  
327  	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
328  		tx_ring = &rtwpci->tx_rings[i];
329  		len = max_num_of_tx_queue(i);
330  		ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
331  		if (ret)
332  			goto out;
333  	}
334  
335  	rx_desc_size = chip->rx_buf_desc_sz;
336  
337  	for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
338  		rx_ring = &rtwpci->rx_rings[j];
339  		ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
340  					   RTK_MAX_RX_DESC_NUM);
341  		if (ret)
342  			goto out;
343  	}
344  
345  	return 0;
346  
347  out:
348  	tx_alloced = i;
349  	for (i = 0; i < tx_alloced; i++) {
350  		tx_ring = &rtwpci->tx_rings[i];
351  		rtw_pci_free_tx_ring(rtwdev, tx_ring);
352  	}
353  
354  	rx_alloced = j;
355  	for (j = 0; j < rx_alloced; j++) {
356  		rx_ring = &rtwpci->rx_rings[j];
357  		rtw_pci_free_rx_ring(rtwdev, rx_ring);
358  	}
359  
360  	return ret;
361  }
362  
363  static void rtw_pci_deinit(struct rtw_dev *rtwdev)
364  {
365  	rtw_pci_free_trx_ring(rtwdev);
366  }
367  
368  static int rtw_pci_init(struct rtw_dev *rtwdev)
369  {
370  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
371  	int ret = 0;
372  
373  	rtwpci->irq_mask[0] = IMR_HIGHDOK |
374  			      IMR_MGNTDOK |
375  			      IMR_BKDOK |
376  			      IMR_BEDOK |
377  			      IMR_VIDOK |
378  			      IMR_VODOK |
379  			      IMR_ROK |
380  			      IMR_BCNDMAINT_E |
381  			      IMR_C2HCMD |
382  			      0;
383  	rtwpci->irq_mask[1] = IMR_TXFOVW |
384  			      0;
385  	rtwpci->irq_mask[3] = IMR_H2CDOK |
386  			      0;
387  	spin_lock_init(&rtwpci->irq_lock);
388  	spin_lock_init(&rtwpci->hwirq_lock);
389  	ret = rtw_pci_init_trx_ring(rtwdev);
390  
391  	return ret;
392  }
393  
394  static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
395  {
396  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
397  	u32 len;
398  	u8 tmp;
399  	dma_addr_t dma;
400  
401  	tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
402  	rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);
403  
404  	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
405  	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);
406  
407  	if (!rtw_chip_wcpu_11n(rtwdev)) {
408  		len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
409  		dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
410  		rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
411  		rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
412  		rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
413  		rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
414  	}
415  
416  	len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
417  	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
418  	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
419  	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
420  	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len & TRX_BD_IDX_MASK);
421  	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);
422  
423  	len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
424  	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
425  	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
426  	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
427  	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len & TRX_BD_IDX_MASK);
428  	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);
429  
430  	len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
431  	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
432  	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
433  	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
434  	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len & TRX_BD_IDX_MASK);
435  	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);
436  
437  	len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
438  	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
439  	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
440  	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
441  	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len & TRX_BD_IDX_MASK);
442  	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);
443  
444  	len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
445  	dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
446  	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
447  	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
448  	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len & TRX_BD_IDX_MASK);
449  	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);
450  
451  	len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
452  	dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
453  	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
454  	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
455  	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len & TRX_BD_IDX_MASK);
456  	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);
457  
458  	len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
459  	dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
460  	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
461  	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
462  	rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & TRX_BD_IDX_MASK);
463  	rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);
464  
465  	/* reset read/write point */
466  	rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);
467  
468  	/* reset H2C Queue index in a single write */
469  	if (rtw_chip_wcpu_11ac(rtwdev))
470  		rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
471  				BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
472  }
473  
474  static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
475  {
476  	rtw_pci_reset_buf_desc(rtwdev);
477  }
478  
479  static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
480  				     struct rtw_pci *rtwpci, bool exclude_rx)
481  {
482  	unsigned long flags;
483  	u32 imr0_unmask = exclude_rx ? IMR_ROK : 0;
484  
485  	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
486  
487  	rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0] & ~imr0_unmask);
488  	rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
489  	if (rtw_chip_wcpu_11ac(rtwdev))
490  		rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
491  
492  	rtwpci->irq_enabled = true;
493  
494  	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
495  }
496  
497  static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
498  				      struct rtw_pci *rtwpci)
499  {
500  	unsigned long flags;
501  
502  	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
503  
504  	if (!rtwpci->irq_enabled)
505  		goto out;
506  
507  	rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
508  	rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
509  	if (rtw_chip_wcpu_11ac(rtwdev))
510  		rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
511  
512  	rtwpci->irq_enabled = false;
513  
514  out:
515  	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
516  }
517  
518  static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
519  {
520  	/* reset dma and rx tag */
521  	rtw_write32_set(rtwdev, RTK_PCI_CTRL,
522  			BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
523  	rtwpci->rx_tag = 0;
524  }
525  
526  static int rtw_pci_setup(struct rtw_dev *rtwdev)
527  {
528  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
529  
530  	rtw_pci_reset_trx_ring(rtwdev);
531  	rtw_pci_dma_reset(rtwdev, rtwpci);
532  
533  	return 0;
534  }
535  
536  static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
537  {
538  	struct rtw_pci_tx_ring *tx_ring;
539  	enum rtw_tx_queue_type queue;
540  
541  	rtw_pci_reset_trx_ring(rtwdev);
542  	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
543  		tx_ring = &rtwpci->tx_rings[queue];
544  		rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
545  	}
546  }
547  
548  static void rtw_pci_napi_start(struct rtw_dev *rtwdev)
549  {
550  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
551  
552  	if (test_and_set_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
553  		return;
554  
555  	napi_enable(&rtwpci->napi);
556  }
557  
558  static void rtw_pci_napi_stop(struct rtw_dev *rtwdev)
559  {
560  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
561  
562  	if (!test_and_clear_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
563  		return;
564  
565  	napi_synchronize(&rtwpci->napi);
566  	napi_disable(&rtwpci->napi);
567  }
568  
569  static int rtw_pci_start(struct rtw_dev *rtwdev)
570  {
571  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
572  
573  	rtw_pci_napi_start(rtwdev);
574  
575  	spin_lock_bh(&rtwpci->irq_lock);
576  	rtwpci->running = true;
577  	rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
578  	spin_unlock_bh(&rtwpci->irq_lock);
579  
580  	return 0;
581  }
582  
583  static void rtw_pci_stop(struct rtw_dev *rtwdev)
584  {
585  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
586  	struct pci_dev *pdev = rtwpci->pdev;
587  
588  	spin_lock_bh(&rtwpci->irq_lock);
589  	rtwpci->running = false;
590  	rtw_pci_disable_interrupt(rtwdev, rtwpci);
591  	spin_unlock_bh(&rtwpci->irq_lock);
592  
593  	synchronize_irq(pdev->irq);
594  	rtw_pci_napi_stop(rtwdev);
595  
596  	spin_lock_bh(&rtwpci->irq_lock);
597  	rtw_pci_dma_release(rtwdev, rtwpci);
598  	spin_unlock_bh(&rtwpci->irq_lock);
599  }
600  
601  static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
602  {
603  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
604  	struct rtw_pci_tx_ring *tx_ring;
605  	enum rtw_tx_queue_type queue;
606  	bool tx_empty = true;
607  
608  	if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
609  		goto enter_deep_ps;
610  
611  	lockdep_assert_held(&rtwpci->irq_lock);
612  
613  	/* Deep PS state is not allowed to TX-DMA */
614  	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
615  		/* BCN queue is rsvd page, does not have DMA interrupt
616  		 * H2C queue is managed by firmware
617  		 */
618  		if (queue == RTW_TX_QUEUE_BCN ||
619  		    queue == RTW_TX_QUEUE_H2C)
620  			continue;
621  
622  		tx_ring = &rtwpci->tx_rings[queue];
623  
624  		/* check if there is any skb DMAing */
625  		if (skb_queue_len(&tx_ring->queue)) {
626  			tx_empty = false;
627  			break;
628  		}
629  	}
630  
631  	if (!tx_empty) {
632  		rtw_dbg(rtwdev, RTW_DBG_PS,
633  			"TX path not empty, cannot enter deep power save state\n");
634  		return;
635  	}
636  enter_deep_ps:
637  	set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
638  	rtw_power_mode_change(rtwdev, true);
639  }
640  
641  static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
642  {
643  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
644  
645  	lockdep_assert_held(&rtwpci->irq_lock);
646  
647  	if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
648  		rtw_power_mode_change(rtwdev, false);
649  }
650  
651  static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
652  {
653  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
654  
655  	spin_lock_bh(&rtwpci->irq_lock);
656  
657  	if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
658  		rtw_pci_deep_ps_enter(rtwdev);
659  
660  	if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
661  		rtw_pci_deep_ps_leave(rtwdev);
662  
663  	spin_unlock_bh(&rtwpci->irq_lock);
664  }
665  
666  static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
667  				      struct rtw_pci_tx_ring *ring)
668  {
669  	struct sk_buff *prev = skb_dequeue(&ring->queue);
670  	struct rtw_pci_tx_data *tx_data;
671  	dma_addr_t dma;
672  
673  	if (!prev)
674  		return;
675  
676  	tx_data = rtw_pci_get_tx_data(prev);
677  	dma = tx_data->dma;
678  	dma_unmap_single(&rtwpci->pdev->dev, dma, prev->len, DMA_TO_DEVICE);
679  	dev_kfree_skb_any(prev);
680  }
681  
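/* rtw_pci_dma_check() warns if the value read back from the RX buffer
 * descriptor does not match the driver's rolling rx_tag (incremented modulo
 * RX_TAG_MAX); RX tagging is enabled via BIT_RX_TAG_EN in rtw_pci_dma_reset().
 */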
682  static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
683  			      struct rtw_pci_rx_ring *rx_ring,
684  			      u32 idx)
685  {
686  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
687  	const struct rtw_chip_info *chip = rtwdev->chip;
688  	struct rtw_pci_rx_buffer_desc *buf_desc;
689  	u32 desc_sz = chip->rx_buf_desc_sz;
690  	u16 total_pkt_size;
691  
692  	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
693  						     idx * desc_sz);
694  	total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);
695  
696  	/* rx tag mismatch, throw a warning */
697  	if (total_pkt_size != rtwpci->rx_tag)
698  		rtw_warn(rtwdev, "pci bus timeout, check dma status\n");
699  
700  	rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
701  }
702  
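/* The hardware read pointer of a TX ring lives in the upper 16 bits of the
 * queue's BD index register; mask it with TRX_BD_IDX_MASK to get the index.
 */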
703  static u32 __pci_get_hw_tx_ring_rp(struct rtw_dev *rtwdev, u8 pci_q)
704  {
705  	u32 bd_idx_addr = rtw_pci_tx_queue_idx_addr[pci_q];
706  	u32 bd_idx = rtw_read16(rtwdev, bd_idx_addr + 2);
707  
708  	return FIELD_GET(TRX_BD_IDX_MASK, bd_idx);
709  }
710  
711  static void __pci_flush_queue(struct rtw_dev *rtwdev, u8 pci_q, bool drop)
712  {
713  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
714  	struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[pci_q];
715  	u32 cur_rp;
716  	u8 i;
717  
718  	/* Because the time taken by the I/O in __pci_get_hw_tx_ring_rp() is a
719  	 * bit dynamic, it's hard to define a reasonable fixed total timeout
720  	 * for the read_poll_timeout* helpers. Instead, bound the number of
721  	 * polling attempts by using a plain for loop with udelay() here.
722  	 */
723  	for (i = 0; i < 30; i++) {
724  		cur_rp = __pci_get_hw_tx_ring_rp(rtwdev, pci_q);
725  		if (cur_rp == ring->r.wp)
726  			return;
727  
728  		udelay(1);
729  	}
730  
731  	if (!drop)
732  		rtw_warn(rtwdev, "timed out to flush pci tx ring[%d]\n", pci_q);
733  }
734  
735  static void __rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 pci_queues,
736  				   bool drop)
737  {
738  	u8 q;
739  
740  	for (q = 0; q < RTK_MAX_TX_QUEUE_NUM; q++) {
741  		/* Unnecessary to flush BCN, H2C and HI tx queues. */
742  		if (q == RTW_TX_QUEUE_BCN || q == RTW_TX_QUEUE_H2C ||
743  		    q == RTW_TX_QUEUE_HI0)
744  			continue;
745  
746  		if (pci_queues & BIT(q))
747  			__pci_flush_queue(rtwdev, q, drop);
748  	}
749  }
750  
751  static void rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
752  {
753  	u32 pci_queues = 0;
754  	u8 i;
755  
756  	/* If all of the hardware queues are requested to flush,
757  	 * flush all of the pci queues.
758  	 */
759  	if (queues == BIT(rtwdev->hw->queues) - 1) {
760  		pci_queues = BIT(RTK_MAX_TX_QUEUE_NUM) - 1;
761  	} else {
762  		for (i = 0; i < rtwdev->hw->queues; i++)
763  			if (queues & BIT(i))
764  				pci_queues |= BIT(rtw_tx_ac_to_hwq(i));
765  	}
766  
767  	__rtw_pci_flush_queues(rtwdev, pci_queues, drop);
768  }
769  
770  static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev,
771  				      enum rtw_tx_queue_type queue)
772  {
773  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
774  	struct rtw_pci_tx_ring *ring;
775  	u32 bd_idx;
776  
777  	ring = &rtwpci->tx_rings[queue];
778  	bd_idx = rtw_pci_tx_queue_idx_addr[queue];
779  
780  	spin_lock_bh(&rtwpci->irq_lock);
781  	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
782  		rtw_pci_deep_ps_leave(rtwdev);
783  	rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK);
784  	spin_unlock_bh(&rtwpci->irq_lock);
785  }
786  
787  static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev)
788  {
789  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
790  	enum rtw_tx_queue_type queue;
791  
792  	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++)
793  		if (test_and_clear_bit(queue, rtwpci->tx_queued))
794  			rtw_pci_tx_kick_off_queue(rtwdev, queue);
795  }
796  
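/* Write one frame into a TX ring: push the chip's TX packet descriptor in
 * front of the payload, DMA-map the whole skb once, and describe it with a
 * two-segment buffer descriptor (segment 0 = packet descriptor, segment 1 =
 * payload at dma + tx_pkt_desc_sz).
 */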
797  static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
798  				 struct rtw_tx_pkt_info *pkt_info,
799  				 struct sk_buff *skb,
800  				 enum rtw_tx_queue_type queue)
801  {
802  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
803  	const struct rtw_chip_info *chip = rtwdev->chip;
804  	struct rtw_pci_tx_ring *ring;
805  	struct rtw_pci_tx_data *tx_data;
806  	dma_addr_t dma;
807  	u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
808  	u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
809  	u32 size;
810  	u32 psb_len;
811  	u8 *pkt_desc;
812  	struct rtw_pci_tx_buffer_desc *buf_desc;
813  
814  	ring = &rtwpci->tx_rings[queue];
815  
816  	size = skb->len;
817  
818  	if (queue == RTW_TX_QUEUE_BCN)
819  		rtw_pci_release_rsvd_page(rtwpci, ring);
820  	else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
821  		return -ENOSPC;
822  
823  	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
824  	memset(pkt_desc, 0, tx_pkt_desc_sz);
825  	pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
826  	rtw_tx_fill_tx_desc(pkt_info, skb);
827  	dma = dma_map_single(&rtwpci->pdev->dev, skb->data, skb->len,
828  			     DMA_TO_DEVICE);
829  	if (dma_mapping_error(&rtwpci->pdev->dev, dma))
830  		return -EBUSY;
831  
832  	/* after this the DMA mapping is done, there is no way back */
833  	buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
834  	memset(buf_desc, 0, tx_buf_desc_sz);
835  	psb_len = (skb->len - 1) / 128 + 1;
836  	if (queue == RTW_TX_QUEUE_BCN)
837  		psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;
838  
839  	buf_desc[0].psb_len = cpu_to_le16(psb_len);
840  	buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
841  	buf_desc[0].dma = cpu_to_le32(dma);
842  	buf_desc[1].buf_size = cpu_to_le16(size);
843  	buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);
844  
845  	tx_data = rtw_pci_get_tx_data(skb);
846  	tx_data->dma = dma;
847  	tx_data->sn = pkt_info->sn;
848  
849  	spin_lock_bh(&rtwpci->irq_lock);
850  
851  	skb_queue_tail(&ring->queue, skb);
852  
853  	if (queue == RTW_TX_QUEUE_BCN)
854  		goto out_unlock;
855  
856  	/* update write-index, and kick it off later */
857  	set_bit(queue, rtwpci->tx_queued);
858  	if (++ring->r.wp >= ring->r.len)
859  		ring->r.wp = 0;
860  
861  out_unlock:
862  	spin_unlock_bh(&rtwpci->irq_lock);
863  
864  	return 0;
865  }
866  
867  static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
868  					u32 size)
869  {
870  	struct sk_buff *skb;
871  	struct rtw_tx_pkt_info pkt_info = {0};
872  	u8 reg_bcn_work;
873  	int ret;
874  
875  	skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size);
876  	if (!skb)
877  		return -ENOMEM;
878  
879  	ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
880  	if (ret) {
881  		rtw_err(rtwdev, "failed to write rsvd page data\n");
882  		return ret;
883  	}
884  
885  	/* reserved pages go through beacon queue */
886  	reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
887  	reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
888  	rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
889  
890  	return 0;
891  }
892  
893  static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
894  {
895  	struct sk_buff *skb;
896  	struct rtw_tx_pkt_info pkt_info = {0};
897  	int ret;
898  
899  	skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size);
900  	if (!skb)
901  		return -ENOMEM;
902  
903  	ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
904  	if (ret) {
905  		rtw_err(rtwdev, "failed to write h2c data\n");
906  		return ret;
907  	}
908  
909  	rtw_pci_tx_kick_off_queue(rtwdev, RTW_TX_QUEUE_H2C);
910  
911  	return 0;
912  }
913  
914  static int rtw_pci_tx_write(struct rtw_dev *rtwdev,
915  			    struct rtw_tx_pkt_info *pkt_info,
916  			    struct sk_buff *skb)
917  {
918  	enum rtw_tx_queue_type queue = rtw_tx_queue_mapping(skb);
919  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
920  	struct rtw_pci_tx_ring *ring;
921  	int ret;
922  
923  	ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue);
924  	if (ret)
925  		return ret;
926  
927  	ring = &rtwpci->tx_rings[queue];
928  	spin_lock_bh(&rtwpci->irq_lock);
929  	if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
930  		ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
931  		ring->queue_stopped = true;
932  	}
933  	spin_unlock_bh(&rtwpci->irq_lock);
934  
935  	return 0;
936  }
937  
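/* TX completion: walk the ring from the driver's read pointer up to the
 * hardware read pointer, unmap each skb and report its status to mac80211,
 * or simply free it for H2C command packets.
 */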
938  static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
939  			   u8 hw_queue)
940  {
941  	struct ieee80211_hw *hw = rtwdev->hw;
942  	struct ieee80211_tx_info *info;
943  	struct rtw_pci_tx_ring *ring;
944  	struct rtw_pci_tx_data *tx_data;
945  	struct sk_buff *skb;
946  	u32 count;
947  	u32 bd_idx_addr;
948  	u32 bd_idx, cur_rp, rp_idx;
949  	u16 q_map;
950  
951  	ring = &rtwpci->tx_rings[hw_queue];
952  
953  	bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
954  	bd_idx = rtw_read32(rtwdev, bd_idx_addr);
955  	cur_rp = bd_idx >> 16;
956  	cur_rp &= TRX_BD_IDX_MASK;
957  	rp_idx = ring->r.rp;
958  	if (cur_rp >= ring->r.rp)
959  		count = cur_rp - ring->r.rp;
960  	else
961  		count = ring->r.len - (ring->r.rp - cur_rp);
962  
963  	while (count--) {
964  		skb = skb_dequeue(&ring->queue);
965  		if (!skb) {
966  			rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
967  				count, hw_queue, bd_idx, ring->r.rp, cur_rp);
968  			break;
969  		}
970  		tx_data = rtw_pci_get_tx_data(skb);
971  		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
972  				 DMA_TO_DEVICE);
973  
974  		/* just free command packets from host to card */
975  		if (hw_queue == RTW_TX_QUEUE_H2C) {
976  			dev_kfree_skb_irq(skb);
977  			continue;
978  		}
979  
980  		if (ring->queue_stopped &&
981  		    avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) {
982  			q_map = skb_get_queue_mapping(skb);
983  			ieee80211_wake_queue(hw, q_map);
984  			ring->queue_stopped = false;
985  		}
986  
987  		if (++rp_idx >= ring->r.len)
988  			rp_idx = 0;
989  
990  		skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
991  
992  		info = IEEE80211_SKB_CB(skb);
993  
994  		/* enqueue to wait for tx report */
995  		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
996  			rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
997  			continue;
998  		}
999  
1000  		/* always report ACK for the others so they won't be marked as dropped */
1001  		if (info->flags & IEEE80211_TX_CTL_NO_ACK)
1002  			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
1003  		else
1004  			info->flags |= IEEE80211_TX_STAT_ACK;
1005  
1006  		ieee80211_tx_info_clear_status(info);
1007  		ieee80211_tx_status_irqsafe(hw, skb);
1008  	}
1009  
1010  	ring->r.rp = cur_rp;
1011  }
1012  
1013  static void rtw_pci_rx_isr(struct rtw_dev *rtwdev)
1014  {
1015  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1016  	struct napi_struct *napi = &rtwpci->napi;
1017  
1018  	napi_schedule(napi);
1019  }
1020  
1021  static int rtw_pci_get_hw_rx_ring_nr(struct rtw_dev *rtwdev,
1022  				     struct rtw_pci *rtwpci)
1023  {
1024  	struct rtw_pci_rx_ring *ring;
1025  	int count = 0;
1026  	u32 tmp, cur_wp;
1027  
1028  	ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
1029  	tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
1030  	cur_wp = u32_get_bits(tmp, TRX_BD_HW_IDX_MASK);
1031  	if (cur_wp >= ring->r.wp)
1032  		count = cur_wp - ring->r.wp;
1033  	else
1034  		count = ring->r.len - (ring->r.wp - cur_wp);
1035  
1036  	return count;
1037  }
1038  
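/* RX NAPI body: every received frame is copied into a freshly allocated skb,
 * so the original DMA buffer can be re-armed in place for the hardware via
 * rtw_pci_sync_rx_desc_device() without remapping.
 */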
1039  static u32 rtw_pci_rx_napi(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
1040  			   u8 hw_queue, u32 limit)
1041  {
1042  	const struct rtw_chip_info *chip = rtwdev->chip;
1043  	struct napi_struct *napi = &rtwpci->napi;
1044  	struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
1045  	struct rtw_rx_pkt_stat pkt_stat;
1046  	struct ieee80211_rx_status rx_status;
1047  	struct sk_buff *skb, *new;
1048  	u32 cur_rp = ring->r.rp;
1049  	u32 count, rx_done = 0;
1050  	u32 pkt_offset;
1051  	u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
1052  	u32 buf_desc_sz = chip->rx_buf_desc_sz;
1053  	u32 new_len;
1054  	u8 *rx_desc;
1055  	dma_addr_t dma;
1056  
1057  	count = rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci);
1058  	count = min(count, limit);
1059  
1060  	while (count--) {
1061  		rtw_pci_dma_check(rtwdev, ring, cur_rp);
1062  		skb = ring->buf[cur_rp];
1063  		dma = *((dma_addr_t *)skb->cb);
1064  		dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
1065  					DMA_FROM_DEVICE);
1066  		rx_desc = skb->data;
1067  		chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);
1068  
1069  		/* offset from rx_desc to payload */
1070  		pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
1071  			     pkt_stat.shift;
1072  
1073  		/* allocate a new skb for this frame,
1074  		 * discard the frame if none available
1075  		 */
1076  		new_len = pkt_stat.pkt_len + pkt_offset;
1077  		new = dev_alloc_skb(new_len);
1078  		if (WARN_ONCE(!new, "rx routine starvation\n"))
1079  			goto next_rp;
1080  
1081  		/* put the DMA data including rx_desc from phy to new skb */
1082  		skb_put_data(new, skb->data, new_len);
1083  
1084  		if (pkt_stat.is_c2h) {
1085  			rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
1086  		} else {
1087  			/* remove rx_desc */
1088  			skb_pull(new, pkt_offset);
1089  
1090  			rtw_rx_stats(rtwdev, pkt_stat.vif, new);
1091  			memcpy(new->cb, &rx_status, sizeof(rx_status));
1092  			ieee80211_rx_napi(rtwdev->hw, NULL, new, napi);
1093  			rx_done++;
1094  		}
1095  
1096  next_rp:
1097  		/* new skb delivered to mac80211, re-enable original skb DMA */
1098  		rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
1099  					    buf_desc_sz);
1100  
1101  		/* host read next element in ring */
1102  		if (++cur_rp >= ring->r.len)
1103  			cur_rp = 0;
1104  	}
1105  
1106  	ring->r.rp = cur_rp;
1107  	/* 'rp', the last position we have read, is treated as the previous
1108  	 * position of 'wp', which is used to calculate 'count' next time.
1109  	 */
1110  	ring->r.wp = cur_rp;
1111  	rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
1112  
1113  	return rx_done;
1114  }
1115  
1116  static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
1117  				   struct rtw_pci *rtwpci, u32 *irq_status)
1118  {
1119  	unsigned long flags;
1120  
1121  	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
1122  
1123  	irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
1124  	irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
1125  	if (rtw_chip_wcpu_11ac(rtwdev))
1126  		irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
1127  	else
1128  		irq_status[3] = 0;
1129  	irq_status[0] &= rtwpci->irq_mask[0];
1130  	irq_status[1] &= rtwpci->irq_mask[1];
1131  	irq_status[3] &= rtwpci->irq_mask[3];
1132  	rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
1133  	rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
1134  	if (rtw_chip_wcpu_11ac(rtwdev))
1135  		rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
1136  
1137  	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
1138  }
1139  
1140  static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
1141  {
1142  	struct rtw_dev *rtwdev = dev;
1143  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1144  
1145  	/* Disable the RTW PCI interrupt to avoid further interrupts before the
1146  	 * threaded handler has finished.
1147  	 *
1148  	 * Disable HIMR here to also avoid a new HISR flag being raised before
1149  	 * the HISRs have been Write-1-cleared for MSI. If not all of the HISRs
1150  	 * are cleared, the edge-triggered interrupt will not be generated when
1151  	 * a new HISR flag is set.
1152  	 */
1153  	rtw_pci_disable_interrupt(rtwdev, rtwpci);
1154  
1155  	return IRQ_WAKE_THREAD;
1156  }
1157  
1158  static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
1159  {
1160  	struct rtw_dev *rtwdev = dev;
1161  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1162  	u32 irq_status[4];
1163  	bool rx = false;
1164  
1165  	spin_lock_bh(&rtwpci->irq_lock);
1166  	rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);
1167  
1168  	if (irq_status[0] & IMR_MGNTDOK)
1169  		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
1170  	if (irq_status[0] & IMR_HIGHDOK)
1171  		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
1172  	if (irq_status[0] & IMR_BEDOK)
1173  		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
1174  	if (irq_status[0] & IMR_BKDOK)
1175  		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
1176  	if (irq_status[0] & IMR_VODOK)
1177  		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
1178  	if (irq_status[0] & IMR_VIDOK)
1179  		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
1180  	if (irq_status[3] & IMR_H2CDOK)
1181  		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
1182  	if (irq_status[0] & IMR_ROK) {
1183  		rtw_pci_rx_isr(rtwdev);
1184  		rx = true;
1185  	}
1186  	if (unlikely(irq_status[0] & IMR_C2HCMD))
1187  		rtw_fw_c2h_cmd_isr(rtwdev);
1188  
1189  	/* all of the jobs for this interrupt have been done */
1190  	if (rtwpci->running)
1191  		rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
1192  	spin_unlock_bh(&rtwpci->irq_lock);
1193  
1194  	return IRQ_HANDLED;
1195  }
1196  
1197  static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
1198  			      struct pci_dev *pdev)
1199  {
1200  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1201  	unsigned long len;
1202  	u8 bar_id = 2;
1203  	int ret;
1204  
1205  	ret = pci_request_regions(pdev, KBUILD_MODNAME);
1206  	if (ret) {
1207  		rtw_err(rtwdev, "failed to request pci regions\n");
1208  		return ret;
1209  	}
1210  
1211  	len = pci_resource_len(pdev, bar_id);
1212  	rtwpci->mmap = pci_iomap(pdev, bar_id, len);
1213  	if (!rtwpci->mmap) {
1214  		pci_release_regions(pdev);
1215  		rtw_err(rtwdev, "failed to map pci memory\n");
1216  		return -ENOMEM;
1217  	}
1218  
1219  	return 0;
1220  }
1221  
1222  static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
1223  				 struct pci_dev *pdev)
1224  {
1225  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1226  
1227  	if (rtwpci->mmap) {
1228  		pci_iounmap(pdev, rtwpci->mmap);
1229  		pci_release_regions(pdev);
1230  	}
1231  }
1232  
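/* Indirect register access helpers: rtw_dbi_*() and rtw_mdio_write() program
 * PCIe-side registers through mailbox-style registers and poll a completion
 * flag for at most RTW_PCI_WR_RETRY_CNT iterations.
 */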
1233  static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
1234  {
1235  	u16 write_addr;
1236  	u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK);
1237  	u8 flag;
1238  	u8 cnt;
1239  
1240  	write_addr = addr & BITS_DBI_ADDR_MASK;
1241  	write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN);
1242  	rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
1243  	rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
1244  	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16);
1245  
1246  	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
1247  		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
1248  		if (flag == 0)
1249  			return;
1250  
1251  		udelay(10);
1252  	}
1253  
1254  	WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr);
1255  }
1256  
1257  static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value)
1258  {
1259  	u16 read_addr = addr & BITS_DBI_ADDR_MASK;
1260  	u8 flag;
1261  	u8 cnt;
1262  
1263  	rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr);
1264  	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16);
1265  
1266  	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
1267  		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
1268  		if (flag == 0) {
1269  			read_addr = REG_DBI_RDATA_V1 + (addr & 3);
1270  			*value = rtw_read8(rtwdev, read_addr);
1271  			return 0;
1272  		}
1273  
1274  		udelay(10);
1275  	}
1276  
1277  	WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
1278  	return -EIO;
1279  }
1280  
1281  static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
1282  {
1283  	u8 page;
1284  	u8 wflag;
1285  	u8 cnt;
1286  
1287  	rtw_write16(rtwdev, REG_MDIO_V1, data);
1288  
1289  	page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1;
1290  	page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2;
1291  	rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK);
1292  	rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
1293  	rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);
1294  
1295  	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
1296  		wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
1297  					BIT_MDIO_WFLAG_V1);
1298  		if (wflag == 0)
1299  			return;
1300  
1301  		udelay(10);
1302  	}
1303  
1304  	WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr);
1305  }
1306  
1307  static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
1308  {
1309  	u8 value;
1310  	int ret;
1311  
1312  	if (rtw_pci_disable_aspm)
1313  		return;
1314  
1315  	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
1316  	if (ret) {
1317  		rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
1318  		return;
1319  	}
1320  
1321  	if (enable)
1322  		value |= BIT_CLKREQ_SW_EN;
1323  	else
1324  		value &= ~BIT_CLKREQ_SW_EN;
1325  
1326  	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
1327  }
1328  
1329  static void rtw_pci_clkreq_pad_low(struct rtw_dev *rtwdev, bool enable)
1330  {
1331  	u8 value;
1332  	int ret;
1333  
1334  	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
1335  	if (ret) {
1336  		rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
1337  		return;
1338  	}
1339  
1340  	if (enable)
1341  		value &= ~BIT_CLKREQ_N_PAD;
1342  	else
1343  		value |= BIT_CLKREQ_N_PAD;
1344  
1345  	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
1346  }
1347  
1348  static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
1349  {
1350  	u8 value;
1351  	int ret;
1352  
1353  	if (rtw_pci_disable_aspm)
1354  		return;
1355  
1356  	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
1357  	if (ret) {
1358  		rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
1359  		return;
1360  	}
1361  
1362  	if (enable)
1363  		value |= BIT_L1_SW_EN;
1364  	else
1365  		value &= ~BIT_L1_SW_EN;
1366  
1367  	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
1368  }
1369  
1370  static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
1371  {
1372  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1373  
1374  	/* Like CLKREQ, ASPM is also implemented by two HW modules, and can
1375  	 * only be enabled when the host supports it.
1376  	 *
1377  	 * The ASPM mechanism should only be enabled when the driver/firmware
1378  	 * enters power save mode, i.e. when there is no heavy traffic, because
1379  	 * we have seen interoperability issues where the link tends to enter
1380  	 * the L1 state on the fly even while the driver is sustaining high
1381  	 * throughput. This is probably because the ASPM behavior varies
1382  	 * slightly between SoCs.
1383  	 */
1384  	if (!(rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1))
1385  		return;
1386  
1387  	if ((enter && atomic_dec_if_positive(&rtwpci->link_usage) == 0) ||
1388  	    (!enter && atomic_inc_return(&rtwpci->link_usage) == 1))
1389  		rtw_pci_aspm_set(rtwdev, enter);
1390  }
1391  
1392  static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
1393  {
1394  	const struct rtw_chip_info *chip = rtwdev->chip;
1395  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1396  	struct pci_dev *pdev = rtwpci->pdev;
1397  	u16 link_ctrl;
1398  	int ret;
1399  
1400  	/* RTL8822CE has REFCLK auto calibration enabled, so it does not need
1401  	 * an additional clock delay to cover the REFCLK timing gap.
1402  	 */
1403  	if (chip->id == RTW_CHIP_TYPE_8822C)
1404  		rtw_dbi_write8(rtwdev, RTK_PCIE_CLKDLY_CTRL, 0);
1405  
1406  	/* Although the link control register can be set through the standard
1407  	 * PCIe configuration space, by Realtek's design the driver should
1408  	 * check whether the host supports CLKREQ/ASPM before enabling the HW
1409  	 * module.
1410  	 *
1411  	 * These functions are implemented by two associated HW modules: one is
1412  	 * responsible for accessing the PCIe configuration space to follow the
1413  	 * host settings, and the other performs the CLKREQ/ASPM mechanisms and
1414  	 * is disabled by default. Sometimes the host does not support them,
1415  	 * and wrong settings (e.g. CLKREQ# not bi-directional) could lead to
1416  	 * losing the device if the HW misbehaves on the link.
1417  	 *
1418  	 * Hence the driver should first check that the PCIe configuration
1419  	 * space is synced and enabled, and only then turn on the other module
1420  	 * that actually implements the mechanism.
1421  	 */
1422  	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
1423  	if (ret) {
1424  		rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
1425  		return;
1426  	}
1427  
1428  	if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
1429  		rtw_pci_clkreq_set(rtwdev, true);
1430  
1431  	rtwpci->link_ctrl = link_ctrl;
1432  }
1433  
1434  static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
1435  {
1436  	const struct rtw_chip_info *chip = rtwdev->chip;
1437  
1438  	switch (chip->id) {
1439  	case RTW_CHIP_TYPE_8822C:
1440  		if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D)
1441  			rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG,
1442  					 BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1);
1443  		break;
1444  	default:
1445  		break;
1446  	}
1447  }
1448  
1449  static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
1450  {
1451  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1452  	const struct rtw_chip_info *chip = rtwdev->chip;
1453  	struct pci_dev *pdev = rtwpci->pdev;
1454  	const struct rtw_intf_phy_para *para;
1455  	u16 cut;
1456  	u16 value;
1457  	u16 offset;
1458  	int i;
1459  	int ret;
1460  
1461  	cut = BIT(0) << rtwdev->hal.cut_version;
1462  
1463  	for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
1464  		para = &chip->intf_table->gen1_para[i];
1465  		if (!(para->cut_mask & cut))
1466  			continue;
1467  		if (para->offset == 0xffff)
1468  			break;
1469  		offset = para->offset;
1470  		value = para->value;
1471  		if (para->ip_sel == RTW_IP_SEL_PHY)
1472  			rtw_mdio_write(rtwdev, offset, value, true);
1473  		else
1474  			rtw_dbi_write8(rtwdev, offset, value);
1475  	}
1476  
1477  	for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
1478  		para = &chip->intf_table->gen2_para[i];
1479  		if (!(para->cut_mask & cut))
1480  			continue;
1481  		if (para->offset == 0xffff)
1482  			break;
1483  		offset = para->offset;
1484  		value = para->value;
1485  		if (para->ip_sel == RTW_IP_SEL_PHY)
1486  			rtw_mdio_write(rtwdev, offset, value, false);
1487  		else
1488  			rtw_dbi_write8(rtwdev, offset, value);
1489  	}
1490  
1491  	rtw_pci_link_cfg(rtwdev);
1492  
1493  	/* Disable 8821ce completion timeout by default */
1494  	if (chip->id == RTW_CHIP_TYPE_8821C) {
1495  		ret = pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
1496  					       PCI_EXP_DEVCTL2_COMP_TMOUT_DIS);
1497  		if (ret)
1498  			rtw_err(rtwdev, "failed to set PCI cap, ret = %d\n",
1499  				ret);
1500  	}
1501  }
1502  
1503  static int __maybe_unused rtw_pci_suspend(struct device *dev)
1504  {
1505  	struct ieee80211_hw *hw = dev_get_drvdata(dev);
1506  	struct rtw_dev *rtwdev = hw->priv;
1507  	const struct rtw_chip_info *chip = rtwdev->chip;
1508  	struct rtw_efuse *efuse = &rtwdev->efuse;
1509  
1510  	if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
1511  		rtw_pci_clkreq_pad_low(rtwdev, true);
1512  	return 0;
1513  }
1514  
1515  static int __maybe_unused rtw_pci_resume(struct device *dev)
1516  {
1517  	struct ieee80211_hw *hw = dev_get_drvdata(dev);
1518  	struct rtw_dev *rtwdev = hw->priv;
1519  	const struct rtw_chip_info *chip = rtwdev->chip;
1520  	struct rtw_efuse *efuse = &rtwdev->efuse;
1521  
1522  	if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
1523  		rtw_pci_clkreq_pad_low(rtwdev, false);
1524  	return 0;
1525  }
1526  
1527  SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
1528  EXPORT_SYMBOL(rtw_pm_ops);
1529  
1530  static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1531  {
1532  	int ret;
1533  
1534  	ret = pci_enable_device(pdev);
1535  	if (ret) {
1536  		rtw_err(rtwdev, "failed to enable pci device\n");
1537  		return ret;
1538  	}
1539  
1540  	pci_set_master(pdev);
1541  	pci_set_drvdata(pdev, rtwdev->hw);
1542  	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
1543  
1544  	return 0;
1545  }
1546  
1547  static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1548  {
1549  	pci_disable_device(pdev);
1550  }
1551  
1552  static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1553  {
1554  	struct rtw_pci *rtwpci;
1555  	int ret;
1556  
1557  	rtwpci = (struct rtw_pci *)rtwdev->priv;
1558  	rtwpci->pdev = pdev;
1559  
1560  	/* after this the driver can access hw registers */
1561  	ret = rtw_pci_io_mapping(rtwdev, pdev);
1562  	if (ret) {
1563  		rtw_err(rtwdev, "failed to request pci io region\n");
1564  		goto err_out;
1565  	}
1566  
1567  	ret = rtw_pci_init(rtwdev);
1568  	if (ret) {
1569  		rtw_err(rtwdev, "failed to allocate pci resources\n");
1570  		goto err_io_unmap;
1571  	}
1572  
1573  	return 0;
1574  
1575  err_io_unmap:
1576  	rtw_pci_io_unmapping(rtwdev, pdev);
1577  
1578  err_out:
1579  	return ret;
1580  }
1581  
1582  static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1583  {
1584  	rtw_pci_deinit(rtwdev);
1585  	rtw_pci_io_unmapping(rtwdev, pdev);
1586  }
1587  
1588  static struct rtw_hci_ops rtw_pci_ops = {
1589  	.tx_write = rtw_pci_tx_write,
1590  	.tx_kick_off = rtw_pci_tx_kick_off,
1591  	.flush_queues = rtw_pci_flush_queues,
1592  	.setup = rtw_pci_setup,
1593  	.start = rtw_pci_start,
1594  	.stop = rtw_pci_stop,
1595  	.deep_ps = rtw_pci_deep_ps,
1596  	.link_ps = rtw_pci_link_ps,
1597  	.interface_cfg = rtw_pci_interface_cfg,
1598  
1599  	.read8 = rtw_pci_read8,
1600  	.read16 = rtw_pci_read16,
1601  	.read32 = rtw_pci_read32,
1602  	.write8 = rtw_pci_write8,
1603  	.write16 = rtw_pci_write16,
1604  	.write32 = rtw_pci_write32,
1605  	.write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
1606  	.write_data_h2c = rtw_pci_write_data_h2c,
1607  };
1608  
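/* IRQ setup: a single vector (MSI when available, otherwise legacy INTx) with
 * a threaded handler, so HISR processing runs outside hard-IRQ context.
 */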
1609  static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1610  {
1611  	unsigned int flags = PCI_IRQ_LEGACY;
1612  	int ret;
1613  
1614  	if (!rtw_disable_msi)
1615  		flags |= PCI_IRQ_MSI;
1616  
1617  	ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
1618  	if (ret < 0) {
1619  		rtw_err(rtwdev, "failed to alloc PCI irq vectors\n");
1620  		return ret;
1621  	}
1622  
1623  	ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
1624  					rtw_pci_interrupt_handler,
1625  					rtw_pci_interrupt_threadfn,
1626  					IRQF_SHARED, KBUILD_MODNAME, rtwdev);
1627  	if (ret) {
1628  		rtw_err(rtwdev, "failed to request irq %d\n", ret);
1629  		pci_free_irq_vectors(pdev);
1630  	}
1631  
1632  	return ret;
1633  }
1634  
1635  static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1636  {
1637  	devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
1638  	pci_free_irq_vectors(pdev);
1639  }
1640  
1641  static int rtw_pci_napi_poll(struct napi_struct *napi, int budget)
1642  {
1643  	struct rtw_pci *rtwpci = container_of(napi, struct rtw_pci, napi);
1644  	struct rtw_dev *rtwdev = container_of((void *)rtwpci, struct rtw_dev,
1645  					      priv);
1646  	int work_done = 0;
1647  
1648  	if (rtwpci->rx_no_aspm)
1649  		rtw_pci_link_ps(rtwdev, false);
1650  
1651  	while (work_done < budget) {
1652  		u32 work_done_once;
1653  
1654  		work_done_once = rtw_pci_rx_napi(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU,
1655  						 budget - work_done);
1656  		if (work_done_once == 0)
1657  			break;
1658  		work_done += work_done_once;
1659  	}
1660  	if (work_done < budget) {
1661  		napi_complete_done(napi, work_done);
1662  		spin_lock_bh(&rtwpci->irq_lock);
1663  		if (rtwpci->running)
1664  			rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
1665  		spin_unlock_bh(&rtwpci->irq_lock);
1666  		/* If an ISR fires during polling, before napi_complete() and
1667  		 * while no further data is received, data on the dma ring will
1668  		 * not be processed immediately. Check whether the dma ring is
1669  		 * empty and perform napi_schedule() accordingly.
1670  		 */
1671  		if (rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci))
1672  			napi_schedule(napi);
1673  	}
1674  	if (rtwpci->rx_no_aspm)
1675  		rtw_pci_link_ps(rtwdev, true);
1676  
1677  	return work_done;
1678  }
1679  
1680  static void rtw_pci_napi_init(struct rtw_dev *rtwdev)
1681  {
1682  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1683  
1684  	init_dummy_netdev(&rtwpci->netdev);
1685  	netif_napi_add(&rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll);
1686  }
1687  
1688  static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
1689  {
1690  	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1691  
1692  	rtw_pci_napi_stop(rtwdev);
1693  	netif_napi_del(&rtwpci->napi);
1694  }
1695  
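/* Probe path: allocate ieee80211_hw plus driver private data, claim the PCI
 * device, map BAR 2, set up DMA rings and NAPI, read the chip information,
 * configure the PCIe PHY/link, then register with mac80211 and request the
 * IRQ.
 */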
1696  int rtw_pci_probe(struct pci_dev *pdev,
1697  		  const struct pci_device_id *id)
1698  {
1699  	struct pci_dev *bridge = pci_upstream_bridge(pdev);
1700  	struct ieee80211_hw *hw;
1701  	struct rtw_dev *rtwdev;
1702  	struct rtw_pci *rtwpci;
1703  	int drv_data_size;
1704  	int ret;
1705  
1706  	drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
1707  	hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
1708  	if (!hw) {
1709  		dev_err(&pdev->dev, "failed to allocate hw\n");
1710  		return -ENOMEM;
1711  	}
1712  
1713  	rtwdev = hw->priv;
1714  	rtwdev->hw = hw;
1715  	rtwdev->dev = &pdev->dev;
1716  	rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
1717  	rtwdev->hci.ops = &rtw_pci_ops;
1718  	rtwdev->hci.type = RTW_HCI_TYPE_PCIE;
1719  
1720  	rtwpci = (struct rtw_pci *)rtwdev->priv;
1721  	atomic_set(&rtwpci->link_usage, 1);
1722  
1723  	ret = rtw_core_init(rtwdev);
1724  	if (ret)
1725  		goto err_release_hw;
1726  
1727  	rtw_dbg(rtwdev, RTW_DBG_PCI,
1728  		"rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
1729  		pdev->vendor, pdev->device, pdev->revision);
1730  
1731  	ret = rtw_pci_claim(rtwdev, pdev);
1732  	if (ret) {
1733  		rtw_err(rtwdev, "failed to claim pci device\n");
1734  		goto err_deinit_core;
1735  	}
1736  
1737  	ret = rtw_pci_setup_resource(rtwdev, pdev);
1738  	if (ret) {
1739  		rtw_err(rtwdev, "failed to setup pci resources\n");
1740  		goto err_pci_declaim;
1741  	}
1742  
1743  	rtw_pci_napi_init(rtwdev);
1744  
1745  	ret = rtw_chip_info_setup(rtwdev);
1746  	if (ret) {
1747  		rtw_err(rtwdev, "failed to setup chip information\n");
1748  		goto err_destroy_pci;
1749  	}
1750  
1751  	/* Disable PCIe ASPM L1 while doing NAPI poll for 8821CE */
1752  	if (rtwdev->chip->id == RTW_CHIP_TYPE_8821C && bridge->vendor == PCI_VENDOR_ID_INTEL)
1753  		rtwpci->rx_no_aspm = true;
1754  
1755  	rtw_pci_phy_cfg(rtwdev);
1756  
1757  	ret = rtw_register_hw(rtwdev, hw);
1758  	if (ret) {
1759  		rtw_err(rtwdev, "failed to register hw\n");
1760  		goto err_destroy_pci;
1761  	}
1762  
1763  	ret = rtw_pci_request_irq(rtwdev, pdev);
1764  	if (ret) {
1765  		ieee80211_unregister_hw(hw);
1766  		goto err_destroy_pci;
1767  	}
1768  
1769  	return 0;
1770  
1771  err_destroy_pci:
1772  	rtw_pci_napi_deinit(rtwdev);
1773  	rtw_pci_destroy(rtwdev, pdev);
1774  
1775  err_pci_declaim:
1776  	rtw_pci_declaim(rtwdev, pdev);
1777  
1778  err_deinit_core:
1779  	rtw_core_deinit(rtwdev);
1780  
1781  err_release_hw:
1782  	ieee80211_free_hw(hw);
1783  
1784  	return ret;
1785  }
1786  EXPORT_SYMBOL(rtw_pci_probe);
1787  
1788  void rtw_pci_remove(struct pci_dev *pdev)
1789  {
1790  	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1791  	struct rtw_dev *rtwdev;
1792  	struct rtw_pci *rtwpci;
1793  
1794  	if (!hw)
1795  		return;
1796  
1797  	rtwdev = hw->priv;
1798  	rtwpci = (struct rtw_pci *)rtwdev->priv;
1799  
1800  	rtw_unregister_hw(rtwdev, hw);
1801  	rtw_pci_disable_interrupt(rtwdev, rtwpci);
1802  	rtw_pci_napi_deinit(rtwdev);
1803  	rtw_pci_destroy(rtwdev, pdev);
1804  	rtw_pci_declaim(rtwdev, pdev);
1805  	rtw_pci_free_irq(rtwdev, pdev);
1806  	rtw_core_deinit(rtwdev);
1807  	ieee80211_free_hw(hw);
1808  }
1809  EXPORT_SYMBOL(rtw_pci_remove);
1810  
1811  void rtw_pci_shutdown(struct pci_dev *pdev)
1812  {
1813  	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1814  	struct rtw_dev *rtwdev;
1815  	const struct rtw_chip_info *chip;
1816  
1817  	if (!hw)
1818  		return;
1819  
1820  	rtwdev = hw->priv;
1821  	chip = rtwdev->chip;
1822  
1823  	if (chip->ops->shutdown)
1824  		chip->ops->shutdown(rtwdev);
1825  
1826  	pci_set_power_state(pdev, PCI_D3hot);
1827  }
1828  EXPORT_SYMBOL(rtw_pci_shutdown);
1829  
1830  MODULE_AUTHOR("Realtek Corporation");
1831  MODULE_DESCRIPTION("Realtek PCI 802.11ac wireless driver");
1832  MODULE_LICENSE("Dual BSD/GPL");
1833