// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2020  Realtek Corporation
 */

#include <linux/pci.h>

#include "mac.h"
#include "pci.h"
#include "reg.h"
#include "ser.h"

static bool rtw89_pci_disable_clkreq;
static bool rtw89_pci_disable_aspm_l1;
static bool rtw89_pci_disable_l1ss;
module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644);
module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644);
module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644);
MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");

static int rtw89_pci_rst_bdram_pcie(struct rtw89_dev *rtwdev)
{
	u32 val;
	int ret;

	rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1,
		      rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | B_AX_RST_BDRAM);

	ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM),
				       1, RTW89_PCI_POLL_BDRAM_RST_CNT, false,
				       rtwdev, R_AX_PCIE_INIT_CFG1);

	if (ret)
		return -EBUSY;

	return 0;
}

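/* Compute how many ring entries the hardware has consumed since the last
 * poll. TXBD_HW_IDX_MASK extracts the hardware index from the ring index
 * register; for TX rings the distance is measured from the cached read
 * pointer, for RX rings from the host write pointer, modulo the ring
 * length to handle wrap-around. For example, with len=256, rp=250 and
 * cur_rp=4, the consumed count is 256 - (250 - 4) = 10.
 */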
static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev,
				struct rtw89_pci_dma_ring *bd_ring,
				u32 cur_idx, bool tx)
{
	u32 cnt, cur_rp, wp, rp, len;

	rp = bd_ring->rp;
	wp = bd_ring->wp;
	len = bd_ring->len;

	cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
	if (tx)
		cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp);
	else
		cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp);

	bd_ring->rp = cur_rp;

	return cnt;
}

static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 addr_idx = bd_ring->addr.idx;
	u32 cnt, idx;

	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true);

	return cnt;
}

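/* H2C fwcmd buffers are released in two stages: entries whose TXBD the
 * hardware has consumed are first moved from h2c_queue to
 * h2c_release_queue, then everything except the newest RTW89_PCI_MULTITAG
 * entries is unmapped and freed. Holding back the most recent entries
 * presumably leaves buffers the firmware may still be reading intact;
 * release_all skips this back-off on teardown.
 */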
static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
				    struct rtw89_pci *rtwpci,
				    u32 cnt, bool release_all)
{
	struct rtw89_pci_tx_data *tx_data;
	struct sk_buff *skb;
	u32 qlen;

	while (cnt--) {
		skb = skb_dequeue(&rtwpci->h2c_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to pre-release fwcmd\n");
			return;
		}
		skb_queue_tail(&rtwpci->h2c_release_queue, skb);
	}

	qlen = skb_queue_len(&rtwpci->h2c_release_queue);
	if (!release_all)
		qlen = qlen > RTW89_PCI_MULTITAG ? qlen - RTW89_PCI_MULTITAG : 0;

	while (qlen--) {
		skb = skb_dequeue(&rtwpci->h2c_release_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to release fwcmd\n");
			return;
		}
		tx_data = RTW89_PCI_TX_SKB_CB(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev,
				       struct rtw89_pci *rtwpci)
{
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
	u32 cnt;

	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
	if (!cnt)
		return;
	rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false);
}

static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_rx_ring *rx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 addr_idx = bd_ring->addr.idx;
	u32 cnt, idx;

	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false);

	return cnt;
}

static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info;
	dma_addr_t dma;

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	dma = rx_info->dma;
	dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
}

static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
					  struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info;
	dma_addr_t dma;

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	dma = rx_info->dma;
	dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
}

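/* Parse the rtw89_pci_rxbd_info header the DMA engine writes at the start
 * of each RX buffer, caching the first/last-segment flags, write size and
 * tag in the skb control block.
 */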
static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
				      struct sk_buff *skb)
{
	struct rtw89_pci_rxbd_info *rxbd_info;
	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);

	rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data;
	rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS);
	rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS);
	rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE);
	rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG);

	return 0;
}

static void rtw89_pci_ctrl_txdma_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
	const struct rtw89_reg_def *dma_stop2 = &info->dma_stop2;

	if (enable) {
		rtw89_write32_clr(rtwdev, dma_stop1->addr, dma_stop1->mask);
		if (dma_stop2->addr)
			rtw89_write32_clr(rtwdev, dma_stop2->addr, dma_stop2->mask);
	} else {
		rtw89_write32_set(rtwdev, dma_stop1->addr, dma_stop1->mask);
		if (dma_stop2->addr)
			rtw89_write32_set(rtwdev, dma_stop2->addr, dma_stop2->mask);
	}
}

static void rtw89_pci_ctrl_txdma_fw_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;

	if (enable)
		rtw89_write32_clr(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
	else
		rtw89_write32_set(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
}

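/* Copy one RX segment into the skb being assembled. If the buffer length
 * reported by the RXBD would overflow the target's tailroom, a
 * single-segment frame (fs && ls) is salvaged by trusting
 * desc_info->pkt_size instead; multi-segment frames are dropped.
 */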
static bool
rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
		      struct sk_buff *new,
		      const struct sk_buff *skb, u32 offset,
		      const struct rtw89_pci_rx_info *rx_info,
		      const struct rtw89_rx_desc_info *desc_info)
{
	u32 copy_len = rx_info->len - offset;

	if (unlikely(skb_tailroom(new) < copy_len)) {
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n",
			    rx_info->len, desc_info->pkt_size, offset, fs, ls);
		rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ",
			       skb->data, rx_info->len);
		/* length of a single segment skb is desc_info->pkt_size */
		if (fs && ls) {
			copy_len = desc_info->pkt_size;
		} else {
			rtw89_info(rtwdev, "drop rx data due to invalid length\n");
			return false;
		}
	}

	skb_put_data(new, skb->data + offset, copy_len);

	return true;
}

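/* Deliver the RXBD at the current write pointer. A frame can span several
 * RX buffers: the first segment (fs) carries the RX descriptor and
 * allocates the target skb, later segments only carry an rtw89_pci_rxbd_info
 * header, and the last segment (ls) hands the reassembled skb to the core.
 * Returns the number of RXBDs consumed (currently always one).
 */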
static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
				       struct rtw89_pci_rx_ring *rx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	struct rtw89_pci_rx_info *rx_info;
	struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc;
	struct sk_buff *new = rx_ring->diliver_skb;
	struct sk_buff *skb;
	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
	u32 offset;
	u32 cnt = 1;
	bool fs, ls;
	int ret;

	skb = rx_ring->buf[bd_ring->wp];
	rtw89_pci_sync_skb_for_cpu(rtwdev, skb);

	ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
	if (ret) {
		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
			  bd_ring->wp, ret);
		goto err_sync_device;
	}

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	fs = rx_info->fs;
	ls = rx_info->ls;

	if (fs) {
		if (new) {
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
				    "skb should not be ready before first segment start\n");
			goto err_sync_device;
		}
		if (desc_info->ready) {
			rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n");
			goto err_sync_device;
		}

		rtw89_chip_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size);

		new = rtw89_alloc_skb_for_rx(rtwdev, desc_info->pkt_size);
		if (!new)
			goto err_sync_device;

		rx_ring->diliver_skb = new;

		/* first segment has RX desc */
		offset = desc_info->offset + desc_info->rxd_len;
	} else {
		offset = sizeof(struct rtw89_pci_rxbd_info);
		if (!new) {
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n");
			goto err_sync_device;
		}
	}
	if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info))
		goto err_sync_device;
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);

	if (!desc_info->ready) {
		rtw89_warn(rtwdev, "no rx desc information\n");
		goto err_free_resource;
	}
	if (ls) {
		rtw89_core_rx(rtwdev, desc_info, new);
		rx_ring->diliver_skb = NULL;
		desc_info->ready = false;
	}

	return cnt;

err_sync_device:
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
err_free_resource:
	if (new)
		dev_kfree_skb_any(new);
	rx_ring->diliver_skb = NULL;
	desc_info->ready = false;

	return cnt;
}

static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
				   struct rtw89_pci_rx_ring *rx_ring,
				   u32 cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 rx_cnt;

	while (cnt && rtwdev->napi_budget_countdown > 0) {
		rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring);
		if (!rx_cnt) {
			rtw89_err(rtwdev, "failed to deliver RXBD skb\n");

			/* skip the remaining RXBD buffers */
			rtw89_pci_rxbd_increase(rx_ring, cnt);
			break;
		}

		cnt -= rx_cnt;
	}

	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}

static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci, int budget)
{
	struct rtw89_pci_rx_ring *rx_ring;
	int countdown = rtwdev->napi_budget_countdown;
	u32 cnt;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];

	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
	if (!cnt)
		return 0;

	cnt = min_t(u32, budget, cnt);

	rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt);

	/* When pending SKBs are flushed, the countdown may go below zero. */
	if (rtwdev->napi_budget_countdown <= 0)
		return budget;

	return budget - countdown;
}

static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
				struct rtw89_pci_tx_ring *tx_ring,
				struct sk_buff *skb, u8 tx_status)
{
	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
	struct ieee80211_tx_info *info;

	rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE);

	info = IEEE80211_SKB_CB(skb);
	ieee80211_tx_info_clear_status(info);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
	if (tx_status == RTW89_TX_DONE) {
		info->flags |= IEEE80211_TX_STAT_ACK;
		tx_ring->tx_acked++;
	} else {
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
			rtw89_debug(rtwdev, RTW89_DBG_FW,
				    "failed to TX of status %x\n", tx_status);
		switch (tx_status) {
		case RTW89_TX_RETRY_LIMIT:
			tx_ring->tx_retry_lmt++;
			break;
		case RTW89_TX_LIFE_TIME:
			tx_ring->tx_life_time++;
			break;
		case RTW89_TX_MACID_DROP:
			tx_ring->tx_mac_id_drop++;
			break;
		default:
			rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status);
			break;
		}
	}

	ieee80211_tx_status_ni(rtwdev->hw, skb);
}

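/* Return txwd pages whose TXBDs the hardware has consumed to the free
 * list. A txwd whose skb queue is already empty was completed by an RPP
 * report earlier and can be recycled right away; the others stay off the
 * busy list until their release report arrives.
 */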
static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd *txwd;
	u32 cnt;

	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
	while (cnt--) {
		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
		if (!txwd) {
			rtw89_warn(rtwdev, "No busy txwd pages available\n");
			break;
		}

		list_del_init(&txwd->list);

		/* this skb has been freed by RPP */
		if (skb_queue_len(&txwd->queue) == 0)
			rtw89_pci_enqueue_txwd(tx_ring, txwd);
	}
}

static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev,
					struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	int i;

	for (i = 0; i < wd_ring->page_num; i++) {
		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
		if (!txwd)
			break;

		list_del_init(&txwd->list);
	}
}

static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
				       struct rtw89_pci_tx_ring *tx_ring,
				       struct rtw89_pci_tx_wd *txwd, u16 seq,
				       u8 tx_status)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_data *tx_data;
	struct sk_buff *skb, *tmp;
	u8 txch = tx_ring->txch;

	if (!list_empty(&txwd->list)) {
		rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
		/* In low power mode, an RPP can arrive before the TX BD is
		 * updated. In normal mode this should not happen, so warn
		 * about it.
		 */
		if (!rtwpci->low_power && !list_empty(&txwd->list))
			rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
				   txch, seq);
	}

	skb_queue_walk_safe(&txwd->queue, skb, tmp) {
		skb_unlink(skb, &txwd->queue);

		tx_data = RTW89_PCI_TX_SKB_CB(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);

		rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status);
	}

	if (list_empty(&txwd->list))
		rtw89_pci_enqueue_txwd(tx_ring, txwd);
}

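/* Handle one RPP (TX release report) from the RPQ. The report identifies
 * the txwd page by sequence number and queue selection and carries the
 * final TX status that gets reported back to mac80211. Fwcmd (CH12)
 * frames are completed by TXBD reclaim instead and should never show up
 * here.
 */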
static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
				  struct rtw89_pci_rpp_fmt *rpp)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_tx_wd_ring *wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	u16 seq;
	u8 qsel, tx_status, txch;

	seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
	qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
	tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
	txch = rtw89_core_get_ch_dma(rtwdev, qsel);

	if (txch == RTW89_TXCH_CH12) {
		rtw89_warn(rtwdev, "should no fwcmd release report\n");
		return;
	}

	tx_ring = &rtwpci->tx_rings[txch];
	wd_ring = &tx_ring->wd_ring;
	txwd = &wd_ring->pages[seq];

	rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status);
}

static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
					       struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	int i;

	for (i = 0; i < wd_ring->page_num; i++) {
		txwd = &wd_ring->pages[i];

		if (!list_empty(&txwd->list))
			continue;

		rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP);
	}
}

static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
				     struct rtw89_pci_rx_ring *rx_ring,
				     u32 max_cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	struct rtw89_pci_rx_info *rx_info;
	struct rtw89_pci_rpp_fmt *rpp;
	struct rtw89_rx_desc_info desc_info = {};
	struct sk_buff *skb;
	u32 cnt = 0;
	u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt);
	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
	u32 offset;
	int ret;

	skb = rx_ring->buf[bd_ring->wp];
	rtw89_pci_sync_skb_for_cpu(rtwdev, skb);

	ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
	if (ret) {
		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
			  bd_ring->wp, ret);
		goto err_sync_device;
	}

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	if (!rx_info->fs || !rx_info->ls) {
		rtw89_err(rtwdev, "cannot process RP frame not set FS/LS\n");
		return cnt;
	}

	rtw89_chip_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size);

	/* first segment has RX desc */
	offset = desc_info.offset + desc_info.rxd_len;
	for (; offset + rpp_size <= rx_info->len; offset += rpp_size) {
		rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset);
		rtw89_pci_release_rpp(rtwdev, rpp);
	}

	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
	cnt++;

	return cnt;

err_sync_device:
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	return 0;
}

static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_rx_ring *rx_ring,
				 u32 cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 release_cnt;

	while (cnt) {
		release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt);
		if (!release_cnt) {
			rtw89_err(rtwdev, "failed to release TX skbs\n");

			/* skip the remaining RXBD buffers */
			rtw89_pci_rxbd_increase(rx_ring, cnt);
			break;
		}

		cnt -= release_cnt;
	}

	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}

static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci, int budget)
{
	struct rtw89_pci_rx_ring *rx_ring;
	u32 cnt;
	int work_done;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

	spin_lock_bh(&rtwpci->trx_lock);

	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
	if (cnt == 0)
		goto out_unlock;

	rtw89_pci_release_tx(rtwdev, rx_ring, cnt);

out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);

	/* always release all RPQ */
	work_done = min_t(int, cnt, budget);
	rtwdev->napi_budget_countdown -= work_done;

	return work_done;
}

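/* RDU (RX descriptor unavailable) interrupt: report which RX ring ran dry.
 * A ring is full from the hardware's point of view when advancing the
 * hardware index by one would collide with the host index.
 */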
static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
				      struct rtw89_pci *rtwpci)
{
	struct rtw89_pci_rx_ring *rx_ring;
	struct rtw89_pci_dma_ring *bd_ring;
	u32 reg_idx;
	u16 hw_idx, hw_idx_next, host_idx;
	int i;

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		bd_ring = &rx_ring->bd_ring;

		reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
		hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
		host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
		hw_idx_next = (hw_idx + 1) % bd_ring->len;

		if (hw_idx_next == host_idx)
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i);

		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "%d RXD unavailable, idx=0x%08x, len=%d\n",
			    i, reg_idx, bd_ring->len);
	}
}

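/* Read the pending interrupt status, mask it against the currently enabled
 * interrupts, and acknowledge what was observed; the HISR registers are
 * write-1-to-clear.
 */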
void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
			       struct rtw89_pci *rtwpci,
			       struct rtw89_pci_isrs *isrs)
{
	isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs;
	isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0];
	isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1];

	rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs);

void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci,
				  struct rtw89_pci_isrs *isrs)
{
	isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs;
	isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ?
			      rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0;
	isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ?
			rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0;
	isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ?
			rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0;

	if (isrs->halt_c2h_isrs)
		rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	if (isrs->isrs[0])
		rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]);
	if (isrs->isrs[1])
		rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1);

static void rtw89_pci_clear_isr0(struct rtw89_dev *rtwdev, u32 isr00)
{
	/* write 1 clear */
	rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isr00);
}

void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr);

void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr);

void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs);
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr_v1);

void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v1);

static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	int budget = NAPI_POLL_WEIGHT;

	/* To prevent the RXQ from getting stuck when the budget runs out. */
	rtwdev->napi_budget_countdown = budget;

	rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget);
	rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget);
}

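/* Threaded half of the PCI interrupt. The hard handler below only masks
 * further interrupts and wakes this thread; here the ISRs are decoded, SER
 * events are forwarded, and either NAPI is scheduled (normal mode) or the
 * RPQ/RXQ rings are polled directly (low power mode, which bypasses NAPI).
 */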
static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_isrs isrs;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	if (unlikely(isrs.isrs[0] & B_AX_RDU_INT))
		rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);

	if (unlikely(isrs.halt_c2h_isrs & B_AX_HALT_C2H_INT_EN))
		rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));

	if (unlikely(isrs.halt_c2h_isrs & B_AX_WDT_TIMEOUT_INT_EN))
		rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);

	if (unlikely(rtwpci->under_recovery))
		goto enable_intr;

	if (unlikely(rtwpci->low_power)) {
		rtw89_pci_low_power_interrupt_handler(rtwdev);
		goto enable_intr;
	}

	if (likely(rtwpci->running)) {
		local_bh_disable();
		napi_schedule(&rtwdev->napi);
		local_bh_enable();
	}

	return IRQ_HANDLED;

enable_intr:
	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	if (likely(rtwpci->running))
		rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;
	irqreturn_t irqret = IRQ_WAKE_THREAD;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);

	/* An interrupt event that is already in flight can still fire even
	 * after pci_stop() has turned off the IMR.
	 */
	if (unlikely(!rtwpci->running)) {
		irqret = IRQ_HANDLED;
		goto exit;
	}

	rtw89_chip_disable_intr(rtwdev, rtwpci);
exit:
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	return irqret;
}

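/* The DEF_*CHADDRS macros below build one channel DMA address entry by
 * token-pasting the channel name, and an optional register suffix such as
 * _V1, into the register symbols. For example, DEF_RXCHADDRS(info, RXQ, _V1)
 * produces an initializer that uses R_AX_RXQ_RXBD_NUM_V1,
 * R_AX_RXQ_RXBD_IDX_V1, and so on.
 */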
#define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
	[RTW89_TXCH_##txch] = { \
		.num = R_AX_##txch##_TXBD_NUM ##v, \
		.idx = R_AX_##txch##_TXBD_IDX ##v, \
		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_TXCHADDRS(info, txch, v...) \
	[RTW89_TXCH_##txch] = { \
		.num = R_AX_##txch##_TXBD_NUM, \
		.idx = R_AX_##txch##_TXBD_IDX, \
		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_RXCHADDRS(info, rxch, v...) \
	[RTW89_RXCH_##rxch] = { \
		.num = R_AX_##rxch##_RXBD_NUM ##v, \
		.idx = R_AX_##rxch##_RXBD_IDX ##v, \
		.desa_l = R_AX_##rxch##_RXBD_DESA_L ##v, \
		.desa_h = R_AX_##rxch##_RXBD_DESA_H ##v, \
	}

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
	.tx = {
		DEF_TXCHADDRS(info, ACH0),
		DEF_TXCHADDRS(info, ACH1),
		DEF_TXCHADDRS(info, ACH2),
		DEF_TXCHADDRS(info, ACH3),
		DEF_TXCHADDRS(info, ACH4),
		DEF_TXCHADDRS(info, ACH5),
		DEF_TXCHADDRS(info, ACH6),
		DEF_TXCHADDRS(info, ACH7),
		DEF_TXCHADDRS(info, CH8),
		DEF_TXCHADDRS(info, CH9),
		DEF_TXCHADDRS_TYPE1(info, CH10),
		DEF_TXCHADDRS_TYPE1(info, CH11),
		DEF_TXCHADDRS(info, CH12),
	},
	.rx = {
		DEF_RXCHADDRS(info, RXQ),
		DEF_RXCHADDRS(info, RPQ),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
	.tx = {
		DEF_TXCHADDRS(info, ACH0, _V1),
		DEF_TXCHADDRS(info, ACH1, _V1),
		DEF_TXCHADDRS(info, ACH2, _V1),
		DEF_TXCHADDRS(info, ACH3, _V1),
		DEF_TXCHADDRS(info, ACH4, _V1),
		DEF_TXCHADDRS(info, ACH5, _V1),
		DEF_TXCHADDRS(info, ACH6, _V1),
		DEF_TXCHADDRS(info, ACH7, _V1),
		DEF_TXCHADDRS(info, CH8, _V1),
		DEF_TXCHADDRS(info, CH9, _V1),
		DEF_TXCHADDRS_TYPE1(info, CH10, _V1),
		DEF_TXCHADDRS_TYPE1(info, CH11, _V1),
		DEF_TXCHADDRS(info, CH12, _V1),
	},
	.rx = {
		DEF_RXCHADDRS(info, RXQ, _V1),
		DEF_RXCHADDRS(info, RPQ, _V1),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);

#undef DEF_TXCHADDRS_TYPE1
#undef DEF_TXCHADDRS
#undef DEF_RXCHADDRS

static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
				    enum rtw89_tx_channel txch,
				    const struct rtw89_pci_ch_dma_addr **addr)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (txch >= RTW89_TXCH_NUM)
		return -EINVAL;

	*addr = &info->dma_addr_set->tx[txch];

	return 0;
}

static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev,
				    enum rtw89_rx_channel rxch,
				    const struct rtw89_pci_ch_dma_addr **addr)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (rxch >= RTW89_RXCH_NUM)
		return -EINVAL;

	*addr = &info->dma_addr_set->rx[rxch];

	return 0;
}

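/* Count the free TXBDs in a ring. One slot is deliberately kept unused so
 * that a full ring can be told apart from an empty one (wp == rp); e.g.
 * with len=256 and wp == rp, 255 entries are reported free rather than 256.
 */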
static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;

	/* one desc is reserved to check whether the ring is full */
	if (bd_ring->rp > bd_ring->wp)
		return bd_ring->rp - bd_ring->wp - 1;

	return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1;
}

static
u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
	u32 cnt;

	spin_lock_bh(&rtwpci->trx_lock);
	rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	spin_unlock_bh(&rtwpci->trx_lock);

	return cnt;
}

static
u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
						   u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	u32 cnt;

	spin_lock_bh(&rtwpci->trx_lock);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	if (txch != RTW89_TXCH_CH12)
		cnt = min(cnt, wd_ring->curr_num);
	spin_unlock_bh(&rtwpci->trx_lock);

	return cnt;
}

static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
						     u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 bd_cnt, wd_cnt, min_cnt = 0;
	struct rtw89_pci_rx_ring *rx_ring;
	enum rtw89_debug_mask debug_mask;
	u32 cnt;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

	spin_lock_bh(&rtwpci->trx_lock);
	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	wd_cnt = wd_ring->curr_num;

	if (wd_cnt == 0 || bd_cnt == 0) {
		cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
		if (cnt)
			rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
		else if (wd_cnt == 0)
			goto out_unlock;

		bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
		if (bd_cnt == 0)
			rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
	}

	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	wd_cnt = wd_ring->curr_num;
	min_cnt = min(bd_cnt, wd_cnt);
	if (min_cnt == 0) {
		/* This message can show up frequently in low power mode or
		 * under high traffic with small-FIFO chips, and it has been
		 * recognized as normal behavior, so print it with mask
		 * RTW89_DBG_TXRX in these situations.
		 */
		if (rtwpci->low_power || chip->small_fifo_size)
			debug_mask = RTW89_DBG_TXRX;
		else
			debug_mask = RTW89_DBG_UNEXP;

		rtw89_debug(rtwdev, debug_mask,
			    "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n",
			    wd_cnt, bd_cnt);
	}

out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);

	return min_cnt;
}

static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
						   u8 txch)
{
	if (rtwdev->hci.paused)
		return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch);

	if (txch == RTW89_TXCH_CH12)
		return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev);

	return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch);
}

static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 host_idx, addr;

	spin_lock_bh(&rtwpci->trx_lock);

	addr = bd_ring->addr.idx;
	host_idx = bd_ring->wp;
	rtw89_write16(rtwdev, addr, host_idx);

	spin_unlock_bh(&rtwpci->trx_lock);
}

static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring,
					int n_txbd)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 host_idx, len;

	len = bd_ring->len;
	host_idx = bd_ring->wp + n_txbd;
	host_idx = host_idx < len ? host_idx : host_idx - len;

	bd_ring->wp = host_idx;
}

static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];

	if (rtwdev->hci.paused) {
		set_bit(txch, rtwpci->kick_map);
		return;
	}

	__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
}

static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	int txch;

	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
		if (!test_and_clear_bit(txch, rtwpci->kick_map))
			continue;

		tx_ring = &rtwpci->tx_rings[txch];
		__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
	}
}

static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 cur_idx, cur_rp;
	u8 i;

	/* Because the time taken by the I/O is somewhat dynamic, it's hard
	 * to define a reasonable fixed total timeout for the
	 * read_poll_timeout* helpers. Instead, ensure a reasonable number
	 * of polling iterations with a plain for loop and udelay.
	 */
	for (i = 0; i < 60; i++) {
		cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
		cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
		if (cur_rp == bd_ring->wp)
			return;

		udelay(1);
	}

	if (!drop)
		rtw89_info(rtwdev, "timed out to flush pci txch: %d\n", txch);
}

static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs,
					bool drop)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u8 i;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		/* It may be unnecessary to flush the FWCMD queue. */
		if (i == RTW89_TXCH_CH12)
			continue;
		if (info->tx_dma_ch_mask & BIT(i))
			continue;

		if (txchs & BIT(i))
			__pci_flush_txch(rtwdev, i, drop);
	}
}

static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
				       bool drop)
{
	__rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop);
}

u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
			       void *txaddr_info_addr, u32 total_len,
			       dma_addr_t dma, u8 *add_info_nr)
{
	struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr;

	txaddr_info->length = cpu_to_le16(total_len);
	txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS |
					  RTW89_PCI_ADDR_NUM(1));
	txaddr_info->dma = cpu_to_le32(dma);

	*add_info_nr = 1;

	return sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info);

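/* V1 TX address info: a payload longer than TXADDR_INFO_LENTHG_V1_MAX is
 * split across up to RTW89_TXADDR_INFO_NR_V1 entries, each carrying the
 * 32-bit DMA address in two 16-bit halves, with the LS bit set only on the
 * final entry.
 */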
rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev * rtwdev,void * txaddr_info_addr,u32 total_len,dma_addr_t dma,u8 * add_info_nr)1157  u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
1158  				  void *txaddr_info_addr, u32 total_len,
1159  				  dma_addr_t dma, u8 *add_info_nr)
1160  {
1161  	struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr;
1162  	u32 remain = total_len;
1163  	u32 len;
1164  	u16 length_option;
1165  	int n;
1166  
1167  	for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) {
1168  		len = remain >= TXADDR_INFO_LENTHG_V1_MAX ?
1169  		      TXADDR_INFO_LENTHG_V1_MAX : remain;
1170  		remain -= len;
1171  
1172  		length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) |
1173  				FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) |
1174  				FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0);
1175  		txaddr_info->length_opt = cpu_to_le16(length_option);
1176  		txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma));
1177  		txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma));
1178  
1179  		dma += len;
1180  		txaddr_info++;
1181  	}
1182  
1183  	WARN_ONCE(remain, "length overflow remain=%u total_len=%u",
1184  		  remain, total_len);
1185  
1186  	*add_info_nr = n;
1187  
1188  	return n * sizeof(*txaddr_info);
1189  }
1190  EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1);
1191  
rtw89_pci_txwd_submit(struct rtw89_dev * rtwdev,struct rtw89_pci_tx_ring * tx_ring,struct rtw89_pci_tx_wd * txwd,struct rtw89_core_tx_request * tx_req)1192  static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
1193  				 struct rtw89_pci_tx_ring *tx_ring,
1194  				 struct rtw89_pci_tx_wd *txwd,
1195  				 struct rtw89_core_tx_request *tx_req)
1196  {
1197  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1198  	const struct rtw89_chip_info *chip = rtwdev->chip;
1199  	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
1200  	struct rtw89_txwd_info *txwd_info;
1201  	struct rtw89_pci_tx_wp_info *txwp_info;
1202  	void *txaddr_info_addr;
1203  	struct pci_dev *pdev = rtwpci->pdev;
1204  	struct sk_buff *skb = tx_req->skb;
1205  	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
1206  	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
1207  	bool en_wd_info = desc_info->en_wd_info;
1208  	u32 txwd_len;
1209  	u32 txwp_len;
1210  	u32 txaddr_info_len;
1211  	dma_addr_t dma;
1212  	int ret;
1213  
1214  	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
1215  	if (dma_mapping_error(&pdev->dev, dma)) {
1216  		rtw89_err(rtwdev, "failed to map skb dma data\n");
1217  		ret = -EBUSY;
1218  		goto err;
1219  	}
1220  
1221  	tx_data->dma = dma;
1222  	rcu_assign_pointer(skb_data->wait, NULL);
1223  
1224  	txwp_len = sizeof(*txwp_info);
1225  	txwd_len = chip->txwd_body_size;
1226  	txwd_len += en_wd_info ? sizeof(*txwd_info) : 0;
1227  
1228  	txwp_info = txwd->vaddr + txwd_len;
1229  	txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID);
1230  	txwp_info->seq1 = 0;
1231  	txwp_info->seq2 = 0;
1232  	txwp_info->seq3 = 0;
1233  
1234  	tx_ring->tx_cnt++;
1235  	txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len;
1236  	txaddr_info_len =
1237  		rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len,
1238  					    dma, &desc_info->addr_info_nr);
1239  
1240  	txwd->len = txwd_len + txwp_len + txaddr_info_len;
1241  
1242  	rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr);
1243  
1244  	skb_queue_tail(&txwd->queue, skb);
1245  
1246  	return 0;
1247  
1248  err:
1249  	return ret;
1250  }
1251  
rtw89_pci_fwcmd_submit(struct rtw89_dev * rtwdev,struct rtw89_pci_tx_ring * tx_ring,struct rtw89_pci_tx_bd_32 * txbd,struct rtw89_core_tx_request * tx_req)1252  static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
1253  				  struct rtw89_pci_tx_ring *tx_ring,
1254  				  struct rtw89_pci_tx_bd_32 *txbd,
1255  				  struct rtw89_core_tx_request *tx_req)
1256  {
1257  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1258  	const struct rtw89_chip_info *chip = rtwdev->chip;
1259  	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
1260  	void *txdesc;
1261  	int txdesc_size = chip->h2c_desc_size;
1262  	struct pci_dev *pdev = rtwpci->pdev;
1263  	struct sk_buff *skb = tx_req->skb;
1264  	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
1265  	dma_addr_t dma;
1266  
1267  	txdesc = skb_push(skb, txdesc_size);
1268  	memset(txdesc, 0, txdesc_size);
1269  	rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);
1270  
1271  	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
1272  	if (dma_mapping_error(&pdev->dev, dma)) {
1273  		rtw89_err(rtwdev, "failed to map fwcmd dma data\n");
1274  		return -EBUSY;
1275  	}
1276  
1277  	tx_data->dma = dma;
1278  	txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
1279  	txbd->length = cpu_to_le16(skb->len);
1280  	txbd->dma = cpu_to_le32(tx_data->dma);
1281  	skb_queue_tail(&rtwpci->h2c_queue, skb);
1282  
1283  	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);
1284  
1285  	return 0;
1286  }
1287  
rtw89_pci_txbd_submit(struct rtw89_dev * rtwdev,struct rtw89_pci_tx_ring * tx_ring,struct rtw89_pci_tx_bd_32 * txbd,struct rtw89_core_tx_request * tx_req)1288  static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
1289  				 struct rtw89_pci_tx_ring *tx_ring,
1290  				 struct rtw89_pci_tx_bd_32 *txbd,
1291  				 struct rtw89_core_tx_request *tx_req)
1292  {
1293  	struct rtw89_pci_tx_wd *txwd;
1294  	int ret;
1295  
1296  	/* FWCMD queue doesn't have wd pages. Instead, it submits the CMD
1297  	 * buffer with WD BODY only. So here we don't need to check the free
1298  	 * pages of the wd ring.
1299  	 */
1300  	if (tx_ring->txch == RTW89_TXCH_CH12)
1301  		return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req);
1302  
1303  	txwd = rtw89_pci_dequeue_txwd(tx_ring);
1304  	if (!txwd) {
1305  		rtw89_err(rtwdev, "no available TXWD\n");
1306  		ret = -ENOSPC;
1307  		goto err;
1308  	}
1309  
1310  	ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req);
1311  	if (ret) {
1312  		rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq);
1313  		goto err_enqueue_wd;
1314  	}
1315  
1316  	list_add_tail(&txwd->list, &tx_ring->busy_pages);
1317  
1318  	txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
1319  	txbd->length = cpu_to_le16(txwd->len);
1320  	txbd->dma = cpu_to_le32(txwd->paddr);
1321  
1322  	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);
1323  
1324  	return 0;
1325  
1326  err_enqueue_wd:
1327  	rtw89_pci_enqueue_txwd(tx_ring, txwd);
1328  err:
1329  	return ret;
1330  }
1331  
rtw89_pci_tx_write(struct rtw89_dev * rtwdev,struct rtw89_core_tx_request * tx_req,u8 txch)1332  static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req,
1333  			      u8 txch)
1334  {
1335  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1336  	struct rtw89_pci_tx_ring *tx_ring;
1337  	struct rtw89_pci_tx_bd_32 *txbd;
1338  	u32 n_avail_txbd;
1339  	int ret = 0;
1340  
1341  	/* check the tx type and dma channel for fw cmd queue */
1342  	if ((txch == RTW89_TXCH_CH12 ||
1343  	     tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) &&
1344  	    (txch != RTW89_TXCH_CH12 ||
1345  	     tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) {
1346  		rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n");
1347  		return -EINVAL;
1348  	}
1349  
1350  	tx_ring = &rtwpci->tx_rings[txch];
1351  	spin_lock_bh(&rtwpci->trx_lock);
1352  
1353  	n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
1354  	if (n_avail_txbd == 0) {
1355  		rtw89_err(rtwdev, "no available TXBD\n");
1356  		ret = -ENOSPC;
1357  		goto err_unlock;
1358  	}
1359  
1360  	txbd = rtw89_pci_get_next_txbd(tx_ring);
1361  	ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req);
1362  	if (ret) {
1363  		rtw89_err(rtwdev, "failed to submit TXBD\n");
1364  		goto err_unlock;
1365  	}
1366  
1367  	spin_unlock_bh(&rtwpci->trx_lock);
1368  	return 0;
1369  
1370  err_unlock:
1371  	spin_unlock_bh(&rtwpci->trx_lock);
1372  	return ret;
1373  }
1374  
rtw89_pci_ops_tx_write(struct rtw89_dev * rtwdev,struct rtw89_core_tx_request * tx_req)1375  static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)
1376  {
1377  	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
1378  	int ret;
1379  
1380  	ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma);
1381  	if (ret) {
1382  		rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma);
1383  		return ret;
1384  	}
1385  
1386  	return 0;
1387  }
1388  
1389  const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM] = {
1390  	[RTW89_TXCH_ACH0] = {.start_idx = 0,  .max_num = 5, .min_num = 2},
1391  	[RTW89_TXCH_ACH1] = {.start_idx = 5,  .max_num = 5, .min_num = 2},
1392  	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
1393  	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
1394  	[RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2},
1395  	[RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2},
1396  	[RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2},
1397  	[RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2},
1398  	[RTW89_TXCH_CH8]  = {.start_idx = 40, .max_num = 5, .min_num = 1},
1399  	[RTW89_TXCH_CH9]  = {.start_idx = 45, .max_num = 5, .min_num = 1},
1400  	[RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1},
1401  	[RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1},
1402  	[RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1},
1403  };
1404  EXPORT_SYMBOL(rtw89_bd_ram_table_dual);
1405  
1406  const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = {
1407  	[RTW89_TXCH_ACH0] = {.start_idx = 0,  .max_num = 5, .min_num = 2},
1408  	[RTW89_TXCH_ACH1] = {.start_idx = 5,  .max_num = 5, .min_num = 2},
1409  	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
1410  	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
1411  	[RTW89_TXCH_CH8]  = {.start_idx = 20, .max_num = 4, .min_num = 1},
1412  	[RTW89_TXCH_CH9]  = {.start_idx = 24, .max_num = 4, .min_num = 1},
1413  	[RTW89_TXCH_CH12] = {.start_idx = 28, .max_num = 4, .min_num = 1},
1414  };
1415  EXPORT_SYMBOL(rtw89_bd_ram_table_single);
1416  
rtw89_pci_reset_trx_rings(struct rtw89_dev * rtwdev)1417  static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
1418  {
1419  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1420  	const struct rtw89_pci_info *info = rtwdev->pci_info;
1421  	const struct rtw89_pci_bd_ram *bd_ram_table = *info->bd_ram_table;
1422  	struct rtw89_pci_tx_ring *tx_ring;
1423  	struct rtw89_pci_rx_ring *rx_ring;
1424  	struct rtw89_pci_dma_ring *bd_ring;
1425  	const struct rtw89_pci_bd_ram *bd_ram;
1426  	u32 addr_num;
1427  	u32 addr_bdram;
1428  	u32 addr_desa_l;
1429  	u32 val32;
1430  	int i;
1431  
1432  	for (i = 0; i < RTW89_TXCH_NUM; i++) {
1433  		if (info->tx_dma_ch_mask & BIT(i))
1434  			continue;
1435  
1436  		tx_ring = &rtwpci->tx_rings[i];
1437  		bd_ring = &tx_ring->bd_ring;
1438  		bd_ram = &bd_ram_table[i];
1439  		addr_num = bd_ring->addr.num;
1440  		addr_bdram = bd_ring->addr.bdram;
1441  		addr_desa_l = bd_ring->addr.desa_l;
1442  		bd_ring->wp = 0;
1443  		bd_ring->rp = 0;
1444  
1445  		val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
1446  			FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
1447  			FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num);
1448  
1449  		rtw89_write16(rtwdev, addr_num, bd_ring->len);
1450  		rtw89_write32(rtwdev, addr_bdram, val32);
1451  		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
1452  	}
1453  
1454  	for (i = 0; i < RTW89_RXCH_NUM; i++) {
1455  		rx_ring = &rtwpci->rx_rings[i];
1456  		bd_ring = &rx_ring->bd_ring;
1457  		addr_num = bd_ring->addr.num;
1458  		addr_desa_l = bd_ring->addr.desa_l;
1459  		bd_ring->wp = 0;
1460  		bd_ring->rp = 0;
1461  		rx_ring->diliver_skb = NULL;
1462  		rx_ring->diliver_desc.ready = false;
1463  
1464  		rtw89_write16(rtwdev, addr_num, bd_ring->len);
1465  		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
1466  	}
1467  }
1468  
rtw89_pci_release_tx_ring(struct rtw89_dev * rtwdev,struct rtw89_pci_tx_ring * tx_ring)1469  static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
1470  				      struct rtw89_pci_tx_ring *tx_ring)
1471  {
1472  	rtw89_pci_release_busy_txwd(rtwdev, tx_ring);
1473  	rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring);
1474  }
1475  
rtw89_pci_ops_reset(struct rtw89_dev * rtwdev)1476  static void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
1477  {
1478  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1479  	const struct rtw89_pci_info *info = rtwdev->pci_info;
1480  	int txch;
1481  
1482  	rtw89_pci_reset_trx_rings(rtwdev);
1483  
1484  	spin_lock_bh(&rtwpci->trx_lock);
1485  	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
1486  		if (info->tx_dma_ch_mask & BIT(txch))
1487  			continue;
1488  		if (txch == RTW89_TXCH_CH12) {
1489  			rtw89_pci_release_fwcmd(rtwdev, rtwpci,
1490  						skb_queue_len(&rtwpci->h2c_queue), true);
1491  			continue;
1492  		}
1493  		rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]);
1494  	}
1495  	spin_unlock_bh(&rtwpci->trx_lock);
1496  }
1497  
rtw89_pci_enable_intr_lock(struct rtw89_dev * rtwdev)1498  static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev)
1499  {
1500  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1501  	unsigned long flags;
1502  
1503  	spin_lock_irqsave(&rtwpci->irq_lock, flags);
1504  	rtwpci->running = true;
1505  	rtw89_chip_enable_intr(rtwdev, rtwpci);
1506  	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
1507  }
1508  
rtw89_pci_disable_intr_lock(struct rtw89_dev * rtwdev)1509  static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev)
1510  {
1511  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1512  	unsigned long flags;
1513  
1514  	spin_lock_irqsave(&rtwpci->irq_lock, flags);
1515  	rtwpci->running = false;
1516  	rtw89_chip_disable_intr(rtwdev, rtwpci);
1517  	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
1518  }
1519  
rtw89_pci_ops_start(struct rtw89_dev * rtwdev)1520  static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
1521  {
1522  	rtw89_core_napi_start(rtwdev);
1523  	rtw89_pci_enable_intr_lock(rtwdev);
1524  
1525  	return 0;
1526  }
1527  
1528  static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
1529  {
1530  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1531  	struct pci_dev *pdev = rtwpci->pdev;
1532  
1533  	rtw89_pci_disable_intr_lock(rtwdev);
1534  	synchronize_irq(pdev->irq);
1535  	rtw89_core_napi_stop(rtwdev);
1536  }
1537  
1538  static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause)
1539  {
1540  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1541  	struct pci_dev *pdev = rtwpci->pdev;
1542  
1543  	if (pause) {
1544  		rtw89_pci_disable_intr_lock(rtwdev);
1545  		synchronize_irq(pdev->irq);
1546  		if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
1547  			napi_synchronize(&rtwdev->napi);
1548  	} else {
1549  		rtw89_pci_enable_intr_lock(rtwdev);
1550  		rtw89_pci_tx_kick_off_pending(rtwdev);
1551  	}
1552  }
1553  
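/* In low power mode the BD index registers live at an alternate address
 * set, so repoint the idx register of every TX/RX ring accordingly.
 */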
1554  static
1555  void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power)
1556  {
1557  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1558  	const struct rtw89_pci_info *info = rtwdev->pci_info;
1559  	const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power;
1560  	const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set;
1561  	struct rtw89_pci_tx_ring *tx_ring;
1562  	struct rtw89_pci_rx_ring *rx_ring;
1563  	int i;
1564  
1565  	if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n"))
1566  		return;
1567  
1568  	for (i = 0; i < RTW89_TXCH_NUM; i++) {
1569  		tx_ring = &rtwpci->tx_rings[i];
1570  		tx_ring->bd_ring.addr.idx = low_power ?
1571  					    bd_idx_addr->tx_bd_addrs[i] :
1572  					    dma_addr_set->tx[i].idx;
1573  	}
1574  
1575  	for (i = 0; i < RTW89_RXCH_NUM; i++) {
1576  		rx_ring = &rtwpci->rx_rings[i];
1577  		rx_ring->bd_ring.addr.idx = low_power ?
1578  					    bd_idx_addr->rx_bd_addrs[i] :
1579  					    dma_addr_set->rx[i].idx;
1580  	}
1581  }
1582  
1583  static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
1584  {
1585  	enum rtw89_pci_intr_mask_cfg cfg;
1586  
1587  	WARN(!rtwdev->hci.paused, "HCI isn't paused\n");
1588  
1589  	cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL;
1590  	rtw89_chip_config_intr_mask(rtwdev, cfg);
1591  	rtw89_pci_switch_bd_idx_addr(rtwdev, low_power);
1592  }
1593  
1594  static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data);
1595  
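/* CMAC register reads may return RTW89_R32_DEAD (presumably while the CMAC
 * clocks are gated); retry after forcing the clocks on with
 * B_AX_CMAC_ALLCKEN, up to MAC_REG_POOL_COUNT times.
 */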
1596  static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr)
1597  {
1598  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1599  	u32 val = readl(rtwpci->mmap + addr);
1600  	int count;
1601  
1602  	for (count = 0; ; count++) {
1603  		if (val != RTW89_R32_DEAD)
1604  			return val;
1605  		if (count >= MAC_REG_POOL_COUNT) {
1606  			rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val);
1607  			return RTW89_R32_DEAD;
1608  		}
1609  		rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN);
1610  		val = readl(rtwpci->mmap + addr);
1611  	}
1612  
1613  	return val;
1614  }
1615  
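/* Sub-word reads of CMAC registers go through a 32-bit read of the aligned
 * address and shift out the requested byte or halfword.
 */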
1616  static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr)
1617  {
1618  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1619  	u32 addr32, val32, shift;
1620  
1621  	if (!ACCESS_CMAC(addr))
1622  		return readb(rtwpci->mmap + addr);
1623  
1624  	addr32 = addr & ~0x3;
1625  	shift = (addr & 0x3) * 8;
1626  	val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
1627  	return val32 >> shift;
1628  }
1629  
1630  static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr)
1631  {
1632  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1633  	u32 addr32, val32, shift;
1634  
1635  	if (!ACCESS_CMAC(addr))
1636  		return readw(rtwpci->mmap + addr);
1637  
1638  	addr32 = addr & ~0x3;
1639  	shift = (addr & 0x3) * 8;
1640  	val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
1641  	return val32 >> shift;
1642  }
1643  
1644  static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr)
1645  {
1646  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1647  
1648  	if (!ACCESS_CMAC(addr))
1649  		return readl(rtwpci->mmap + addr);
1650  
1651  	return rtw89_pci_ops_read32_cmac(rtwdev, addr);
1652  }
1653  
1654  static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data)
1655  {
1656  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1657  
1658  	writeb(data, rtwpci->mmap + addr);
1659  }
1660  
1661  static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data)
1662  {
1663  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1664  
1665  	writew(data, rtwpci->mmap + addr);
1666  }
1667  
1668  static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data)
1669  {
1670  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1671  
1672  	writel(data, rtwpci->mmap + addr);
1673  }
1674  
1675  static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable)
1676  {
1677  	const struct rtw89_pci_info *info = rtwdev->pci_info;
1678  
1679  	if (enable)
1680  		rtw89_write32_set(rtwdev, info->init_cfg_reg,
1681  				  info->rxhci_en_bit | info->txhci_en_bit);
1682  	else
1683  		rtw89_write32_clr(rtwdev, info->init_cfg_reg,
1684  				  info->rxhci_en_bit | info->txhci_en_bit);
1685  }
1686  
1687  static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable)
1688  {
1689  	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
1690  	u32 reg, mask;
1691  
1692  	if (chip_id == RTL8852C) {
1693  		reg = R_AX_HAXI_INIT_CFG1;
1694  		mask = B_AX_STOP_AXI_MST;
1695  	} else {
1696  		reg = R_AX_PCIE_DMA_STOP1;
1697  		mask = B_AX_STOP_PCIEIO;
1698  	}
1699  
1700  	if (enable)
1701  		rtw89_write32_clr(rtwdev, reg, mask);
1702  	else
1703  		rtw89_write32_set(rtwdev, reg, mask);
1704  }
1705  
1706  static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
1707  {
1708  	rtw89_pci_ctrl_dma_io(rtwdev, enable);
1709  	rtw89_pci_ctrl_dma_trx(rtwdev, enable);
1710  }
1711  
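/* Run one MDIO transaction against the PCIe PHY: select the register
 * address and page (chosen by PHY rate and address range), kick the
 * read/write flag, and poll until the hardware clears it.
 */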
1712  static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit)
1713  {
1714  	u16 val;
1715  
1716  	rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F);
1717  
1718  	val = rtw89_read16(rtwdev, R_AX_MDIO_CFG);
1719  	switch (speed) {
1720  	case PCIE_PHY_GEN1:
1721  		if (addr < 0x20)
1722  			val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK);
1723  		else
1724  			val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK);
1725  		break;
1726  	case PCIE_PHY_GEN2:
1727  		if (addr < 0x20)
1728  			val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK);
1729  		else
1730  			val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK);
1731  		break;
1732  	default:
1733  		rtw89_err(rtwdev, "[ERR]unsupported speed %d!\n", speed);
1734  		return -EINVAL;
1735  	}
1736  	rtw89_write16(rtwdev, R_AX_MDIO_CFG, val);
1737  	rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit);
1738  
1739  	return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000,
1740  				 false, rtwdev, R_AX_MDIO_CFG);
1741  }
1742  
1743  static int
1744  rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val)
1745  {
1746  	int ret;
1747  
1748  	ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG);
1749  	if (ret) {
1750  		rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret);
1751  		return ret;
1752  	}
1753  	*val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA);
1754  
1755  	return 0;
1756  }
1757  
1758  static int
1759  rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed)
1760  {
1761  	int ret;
1762  
1763  	rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data);
1764  	ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG);
1765  	if (ret) {
1766  		rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret);
1767  		return ret;
1768  	}
1769  
1770  	return 0;
1771  }
1772  
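/* Read-modify-write helpers built on top of the raw MDIO accessors. */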
1773  static int
1774  rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed)
1775  {
1776  	u32 shift;
1777  	int ret;
1778  	u16 val;
1779  
1780  	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
1781  	if (ret)
1782  		return ret;
1783  
1784  	shift = __ffs(mask);
1785  	val &= ~mask;
1786  	val |= ((data << shift) & mask);
1787  
1788  	ret = rtw89_write16_mdio(rtwdev, addr, val, speed);
1789  	if (ret)
1790  		return ret;
1791  
1792  	return 0;
1793  }
1794  
1795  static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
1796  {
1797  	int ret;
1798  	u16 val;
1799  
1800  	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
1801  	if (ret)
1802  		return ret;
1803  	ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed);
1804  	if (ret)
1805  		return ret;
1806  
1807  	return 0;
1808  }
1809  
1810  static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
1811  {
1812  	int ret;
1813  	u16 val;
1814  
1815  	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
1816  	if (ret)
1817  		return ret;
1818  	ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed);
1819  	if (ret)
1820  		return ret;
1821  
1822  	return 0;
1823  }
1824  
1825  static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
1826  				       u8 data)
1827  {
1828  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1829  	struct pci_dev *pdev = rtwpci->pdev;
1830  
1831  	return pci_write_config_byte(pdev, addr, data);
1832  }
1833  
1834  static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
1835  				      u8 *value)
1836  {
1837  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1838  	struct pci_dev *pdev = rtwpci->pdev;
1839  
1840  	return pci_read_config_byte(pdev, addr, value);
1841  }
1842  
1843  static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr,
1844  				     u8 bit)
1845  {
1846  	u8 value;
1847  	int ret;
1848  
1849  	ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
1850  	if (ret)
1851  		return ret;
1852  
1853  	value |= bit;
1854  	ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
1855  
1856  	return ret;
1857  }
1858  
1859  static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr,
1860  				     u8 bit)
1861  {
1862  	u8 value;
1863  	int ret;
1864  
1865  	ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
1866  	if (ret)
1867  		return ret;
1868  
1869  	value &= ~bit;
1870  	ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
1871  
1872  	return ret;
1873  }
1874  
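/* Measure the reference clock calibration target: restart the calibration
 * counter, let it accumulate for 300us, and read back the 12-bit result.
 * An all-zero or all-one value is treated as a failed measurement.
 */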
1875  static int
1876  __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate)
1877  {
1878  	u16 val, tar;
1879  	int ret;
1880  
1881  	/* Toggle the enable bit to restart the calibration counter */
1882  	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val);
1883  	if (ret)
1884  		return ret;
1885  	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
1886  				 phy_rate);
1887  	if (ret)
1888  		return ret;
1889  	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN,
1890  				 phy_rate);
1891  	if (ret)
1892  		return ret;
1893  
1894  	fsleep(300);
1895  
1896  	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar);
1897  	if (ret)
1898  		return ret;
1899  	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
1900  				 phy_rate);
1901  	if (ret)
1902  		return ret;
1903  
1904  	tar = tar & 0x0FFF;
1905  	if (tar == 0 || tar == 0x0FFF) {
1906  		rtw89_err(rtwdev, "[ERR]Get target failed.\n");
1907  		return -EINVAL;
1908  	}
1909  
1910  	*target = tar;
1911  
1912  	return 0;
1913  }
1914  
1915  static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev)
1916  {
1917  	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
1918  	int ret;
1919  
1920  	if (chip_id != RTL8852B && chip_id != RTL8851B)
1921  		return 0;
1922  
1923  	ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK,
1924  				      PCIE_AUTOK_4, PCIE_PHY_GEN1);
1925  	return ret;
1926  }
1927  
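/* Reference clock auto-calibration for RTL8852B/RTL8851B. Without
 * autook_en this only ensures the calibration function is disabled. With
 * it, the target is measured twice (before and after programming the
 * divider), a divider/margin pair is derived and written to
 * RAC_SET_PPR_V1, and calibration is re-enabled. L1 is disabled around
 * the MDIO accesses and restored on exit.
 */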
1928  static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
1929  {
1930  	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
1931  	enum rtw89_pcie_phy phy_rate;
1932  	u16 val16, mgn_set, div_set, tar;
1933  	u8 val8, bdr_ori;
1934  	bool l1_flag = false;
1935  	int ret = 0;
1936  
1937  	if (chip_id != RTL8852B && chip_id != RTL8851B)
1938  		return 0;
1939  
1940  	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
1941  	if (ret) {
1942  		rtw89_err(rtwdev, "[ERR]pci config read %X\n",
1943  			  RTW89_PCIE_PHY_RATE);
1944  		return ret;
1945  	}
1946  
1947  	if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) {
1948  		phy_rate = PCIE_PHY_GEN1;
1949  	} else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) {
1950  		phy_rate = PCIE_PHY_GEN2;
1951  	} else {
1952  		rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not supported\n", val8);
1953  		return -EOPNOTSUPP;
1954  	}
1955  	/* Disable L1BD */
1956  	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
1957  	if (ret) {
1958  		rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL);
1959  		return ret;
1960  	}
1961  
1962  	if (bdr_ori & RTW89_PCIE_BIT_L1) {
1963  		ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
1964  						  bdr_ori & ~RTW89_PCIE_BIT_L1);
1965  		if (ret) {
1966  			rtw89_err(rtwdev, "[ERR]pci config write %X\n",
1967  				  RTW89_PCIE_L1_CTRL);
1968  			return ret;
1969  		}
1970  		l1_flag = true;
1971  	}
1972  
1973  	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
1974  	if (ret) {
1975  		rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
1976  		goto end;
1977  	}
1978  
1979  	if (val16 & B_AX_CALIB_EN) {
1980  		ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1,
1981  					 val16 & ~B_AX_CALIB_EN, phy_rate);
1982  		if (ret) {
1983  			rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
1984  			goto end;
1985  		}
1986  	}
1987  
1988  	if (!autook_en)
1989  		goto end;
1990  	/* Clear div before the first target measurement */
1991  	ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate);
1992  	if (ret) {
1993  		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
1994  		goto end;
1995  	}
1996  
1997  	/* Obtain div and margin */
1998  	ret = __get_target(rtwdev, &tar, phy_rate);
1999  	if (ret) {
2000  		rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret);
2001  		goto end;
2002  	}
2003  
2004  	mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar;
2005  
2006  	if (mgn_set >= 128) {
2007  		div_set = 0x0003;
2008  		mgn_set = 0x000F;
2009  	} else if (mgn_set >= 64) {
2010  		div_set = 0x0003;
2011  		mgn_set >>= 3;
2012  	} else if (mgn_set >= 32) {
2013  		div_set = 0x0002;
2014  		mgn_set >>= 2;
2015  	} else if (mgn_set >= 16) {
2016  		div_set = 0x0001;
2017  		mgn_set >>= 1;
2018  	} else if (mgn_set == 0) {
2019  		rtw89_err(rtwdev, "[ERR]cal mgn is 0, tar = %d\n", tar);
2020  		goto end;
2021  	} else {
2022  		div_set = 0x0000;
2023  	}
2024  
2025  	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
2026  	if (ret) {
2027  		rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
2028  		goto end;
2029  	}
2030  
2031  	val16 |= u16_encode_bits(div_set, B_AX_DIV);
2032  
2033  	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate);
2034  	if (ret) {
2035  		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2036  		goto end;
2037  	}
2038  
2039  	ret = __get_target(rtwdev, &tar, phy_rate);
2040  	if (ret) {
2041  		rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret);
2042  		goto end;
2043  	}
2044  
2045  	rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n",
2046  		    tar, div_set, mgn_set);
2047  	ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1,
2048  				 (tar & 0x0FFF) | (mgn_set << 12), phy_rate);
2049  	if (ret) {
2050  		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1);
2051  		goto end;
2052  	}
2053  
2054  	/* Enable function */
2055  	ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate);
2056  	if (ret) {
2057  		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2058  		goto end;
2059  	}
2060  
2061  	/* CLK delay = 0 */
2062  	ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
2063  					  PCIE_CLKDLY_HW_0);
2064  
2065  end:
2066  	/* Set L1BD to ori */
2067  	if (l1_flag) {
2068  		ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
2069  						  bdr_ori);
2070  		if (ret) {
2071  			rtw89_err(rtwdev, "[ERR]pci config write %X\n",
2072  				  RTW89_PCIE_L1_CTRL);
2073  			return ret;
2074  		}
2075  	}
2076  
2077  	return ret;
2078  }
2079  
2080  static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev)
2081  {
2082  	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2083  	int ret;
2084  
2085  	if (chip_id == RTL8852A) {
2086  		ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
2087  					     PCIE_PHY_GEN1);
2088  		if (ret)
2089  			return ret;
2090  		ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
2091  					     PCIE_PHY_GEN2);
2092  		if (ret)
2093  			return ret;
2094  	} else if (chip_id == RTL8852C) {
2095  		rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2,
2096  				  B_AX_DEGLITCH);
2097  		rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2,
2098  				  B_AX_DEGLITCH);
2099  	}
2100  
2101  	return 0;
2102  }
2103  
2104  static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev)
2105  {
2106  	if (rtwdev->chip->chip_id != RTL8852A)
2107  		return;
2108  
2109  	rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE);
2110  }
2111  
2112  static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev)
2113  {
2114  	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2115  
2116  	if (chip_id != RTL8852A && chip_id != RTL8852B && chip_id != RTL8851B)
2117  		return;
2118  
2119  	rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN);
2120  }
2121  
2122  static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev)
2123  {
2124  	int ret;
2125  
2126  	if (rtwdev->chip->chip_id != RTL8852A)
2127  		return 0;
2128  
2129  	ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
2130  				     PCIE_PHY_GEN1);
2131  	if (ret)
2132  		return ret;
2133  
2134  	ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
2135  				     PCIE_PHY_GEN2);
2136  	if (ret)
2137  		return ret;
2138  
2139  	return 0;
2140  }
2141  
2142  static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev)
2143  {
2144  	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2145  
2146  	if (chip_id != RTL8852A && chip_id != RTL8852B && chip_id != RTL8851B)
2147  		return;
2148  
2149  	rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN);
2150  }
2151  
2152  static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev)
2153  {
2154  	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2155  
2156  	if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
2157  		rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
2158  				  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
2159  		rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
2160  				  B_AX_PCIE_DIS_WLSUS_AFT_PDN);
2161  	} else if (rtwdev->chip->chip_id == RTL8852C) {
2162  		rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
2163  				  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
2164  	}
2165  }
2166  
2167  static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev)
2168  {
2169  	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2170  
2171  	if (chip_id != RTL8852B && chip_id != RTL8851B)
2172  		return 0;
2173  
2174  	return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK,
2175  				       PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1);
2176  }
2177  
2178  static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up)
2179  {
2180  	if (pwr_up)
2181  		rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
2182  	else
2183  		rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
2184  }
2185  
2186  static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev)
2187  {
2188  	if (rtwdev->chip->chip_id != RTL8852C)
2189  		return;
2190  
2191  	rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
2192  	rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
2193  }
2194  
2195  static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev)
2196  {
2197  	if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
2198  		return;
2199  
2200  	rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT);
2201  }
2202  
2203  static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev)
2204  {
2205  	if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
2206  		return;
2207  
2208  	rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2,
2209  			  B_AX_SYSON_DIS_PMCR_AX_WRMSK);
2210  	rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3);
2211  	rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2,
2212  			  B_AX_SYSON_DIS_PMCR_AX_WRMSK);
2213  }
2214  
2215  static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev)
2216  {
2217  	if (rtwdev->chip->chip_id != RTL8852C)
2218  		return;
2219  
2220  	rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1);
2221  }
2222  
2223  static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev)
2224  {
2225  	if (rtwdev->chip->chip_id != RTL8852C)
2226  		return;
2227  
2228  	rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN);
2229  }
2230  
2231  static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev)
2232  {
2233  	if (rtwdev->chip->chip_id == RTL8852C)
2234  		return;
2235  
2236  	rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL,
2237  			  B_AX_SIC_EN_FORCE_CLKREQ);
2238  }
2239  
2240  static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev)
2241  {
2242  	const struct rtw89_pci_info *info = rtwdev->pci_info;
2243  	u32 lbc;
2244  
2245  	if (rtwdev->chip->chip_id == RTL8852C)
2246  		return;
2247  
2248  	lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG);
2249  	if (info->lbc_en == MAC_AX_PCIE_ENABLE) {
2250  		lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER);
2251  		lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN;
2252  		rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc);
2253  	} else {
2254  		lbc &= ~B_AX_LBC_EN;
2255  	}
2256  	rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc);
2257  }
2258  
2259  static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev)
2260  {
2261  	const struct rtw89_pci_info *info = rtwdev->pci_info;
2262  	u32 val32;
2263  
2264  	if (rtwdev->chip->chip_id != RTL8852C)
2265  		return;
2266  
2267  	if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) {
2268  		val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK,
2269  				   info->io_rcy_tmr);
2270  		rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32);
2271  		rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32);
2272  		rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32);
2273  
2274  		rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
2275  		rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
2276  		rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
2277  	} else {
2278  		rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
2279  		rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
2280  		rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
2281  	}
2282  
2283  	rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1);
2284  }
2285  
2286  static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev)
2287  {
2288  	if (rtwdev->chip->chip_id == RTL8852C)
2289  		return;
2290  
2291  	rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL,
2292  			  B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG);
2293  
2294  	if (rtwdev->chip->chip_id == RTL8852A)
2295  		rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL,
2296  				  B_AX_EN_CHKDSC_NO_RX_STUCK);
2297  }
2298  
2299  static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev)
2300  {
2301  	if (rtwdev->chip->chip_id == RTL8852C)
2302  		return;
2303  
2304  	rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
2305  			  B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG);
2306  }
2307  
2308  static void rtw89_pci_clr_idx_all(struct rtw89_dev *rtwdev)
2309  {
2310  	const struct rtw89_pci_info *info = rtwdev->pci_info;
2311  	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2312  	u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX |
2313  		  B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX |
2314  		  B_AX_CLR_CH12_IDX;
2315  	u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg;
2316  	u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg;
2317  
2318  	if (chip_id == RTL8852A || chip_id == RTL8852C)
2319  		val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX |
2320  		       B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX;
2321  	/* clear DMA indexes */
2322  	rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val);
2323  	if (chip_id == RTL8852A || chip_id == RTL8852C)
2324  		rtw89_write32_set(rtwdev, txbd_rwptr_clr2,
2325  				  B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX);
2326  	rtw89_write32_set(rtwdev, rxbd_rwptr_clr,
2327  			  B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX);
2328  }
2329  
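/* Poll the per-channel busy flags until all TX DMA channels go idle;
 * dma_busy2 covers CH10/CH11 on chips that have them.
 */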
2330  static int rtw89_poll_txdma_ch_idle_pcie(struct rtw89_dev *rtwdev)
2331  {
2332  	const struct rtw89_pci_info *info = rtwdev->pci_info;
2333  	u32 ret, check, dma_busy;
2334  	u32 dma_busy1 = info->dma_busy1.addr;
2335  	u32 dma_busy2 = info->dma_busy2_reg;
2336  
2337  	check = info->dma_busy1.mask;
2338  
2339  	ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2340  				10, 100, false, rtwdev, dma_busy1);
2341  	if (ret)
2342  		return ret;
2343  
2344  	if (!dma_busy2)
2345  		return 0;
2346  
2347  	check = B_AX_CH10_BUSY | B_AX_CH11_BUSY;
2348  
2349  	ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2350  				10, 100, false, rtwdev, dma_busy2);
2351  	if (ret)
2352  		return ret;
2353  
2354  	return 0;
2355  }
2356  
2357  static int rtw89_poll_rxdma_ch_idle_pcie(struct rtw89_dev *rtwdev)
2358  {
2359  	const struct rtw89_pci_info *info = rtwdev->pci_info;
2360  	u32 ret, check, dma_busy;
2361  	u32 dma_busy3 = info->dma_busy3_reg;
2362  
2363  	check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY;
2364  
2365  	ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2366  				10, 100, false, rtwdev, dma_busy3);
2367  	if (ret)
2368  		return ret;
2369  
2370  	return 0;
2371  }
2372  
2373  static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev)
2374  {
2375  	u32 ret;
2376  
2377  	ret = rtw89_poll_txdma_ch_idle_pcie(rtwdev);
2378  	if (ret) {
2379  		rtw89_err(rtwdev, "txdma ch busy\n");
2380  		return ret;
2381  	}
2382  
2383  	ret = rtw89_poll_rxdma_ch_idle_pcie(rtwdev);
2384  	if (ret) {
2385  		rtw89_err(rtwdev, "rxdma ch busy\n");
2386  		return ret;
2387  	}
2388  
2389  	return 0;
2390  }
2391  
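/* Program BD truncation mode, RXBD mode, TX/RX burst sizes, tag mode and
 * WD DMA intervals from the per-chip rtw89_pci_info. The register layout
 * differs between 8852A/8852B (PCIE_INIT_CFG*) and 8852C (HAXI_INIT_CFG1).
 */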
2392  static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev)
2393  {
2394  	const struct rtw89_pci_info *info = rtwdev->pci_info;
2395  	enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode;
2396  	enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode;
2397  	enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode;
2398  	enum mac_ax_tag_mode tag_mode = info->tag_mode;
2399  	enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl;
2400  	enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl;
2401  	enum mac_ax_tx_burst tx_burst = info->tx_burst;
2402  	enum mac_ax_rx_burst rx_burst = info->rx_burst;
2403  	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2404  	u8 cv = rtwdev->hal.cv;
2405  	u32 val32;
2406  
2407  	if (txbd_trunc_mode == MAC_AX_BD_TRUNC) {
2408  		if (chip_id == RTL8852A && cv == CHIP_CBV)
2409  			rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE);
2410  	} else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
2411  		if (chip_id == RTL8852A || chip_id == RTL8852B)
2412  			rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE);
2413  	}
2414  
2415  	if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) {
2416  		if (chip_id == RTL8852A && cv == CHIP_CBV)
2417  			rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE);
2418  	} else if (rxbd_trunc_mode == MAC_AX_BD_NORM) {
2419  		if (chip_id == RTL8852A || chip_id == RTL8852B)
2420  			rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE);
2421  	}
2422  
2423  	if (rxbd_mode == MAC_AX_RXBD_PKT) {
2424  		rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit);
2425  	} else if (rxbd_mode == MAC_AX_RXBD_SEP) {
2426  		rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit);
2427  
2428  		if (chip_id == RTL8852A || chip_id == RTL8852B)
2429  			rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2,
2430  					   B_AX_PCIE_RX_APPLEN_MASK, 0);
2431  	}
2432  
2433  	if (chip_id == RTL8852A || chip_id == RTL8852B) {
2434  		rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst);
2435  		rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst);
2436  	} else if (chip_id == RTL8852C) {
2437  		rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst);
2438  		rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst);
2439  	}
2440  
2441  	if (chip_id == RTL8852A || chip_id == RTL8852B) {
2442  		if (tag_mode == MAC_AX_TAG_SGL) {
2443  			val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) &
2444  					    ~B_AX_LATENCY_CONTROL;
2445  			rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
2446  		} else if (tag_mode == MAC_AX_TAG_MULTI) {
2447  			val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) |
2448  					    B_AX_LATENCY_CONTROL;
2449  			rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
2450  		}
2451  	}
2452  
2453  	rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask,
2454  			   info->multi_tag_num);
2455  
2456  	if (chip_id == RTL8852A || chip_id == RTL8852B) {
2457  		rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE,
2458  				   wd_dma_idle_intvl);
2459  		rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT,
2460  				   wd_dma_act_intvl);
2461  	} else if (chip_id == RTL8852C) {
2462  		rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK,
2463  				   wd_dma_idle_intvl);
2464  		rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK,
2465  				   wd_dma_act_intvl);
2466  	}
2467  
2468  	if (txbd_trunc_mode == MAC_AX_BD_TRUNC) {
2469  		rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
2470  				  B_AX_HOST_ADDR_INFO_8B_SEL);
2471  		rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
2472  	} else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
2473  		rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
2474  				  B_AX_HOST_ADDR_INFO_8B_SEL);
2475  		rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
2476  	}
2477  
2478  	return 0;
2479  }
2480  
2481  static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev)
2482  {
2483  	const struct rtw89_pci_info *info = rtwdev->pci_info;
2484  
2485  	if (rtwdev->chip->chip_id == RTL8852A) {
2486  		/* ltr sw trigger */
2487  		rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE);
2488  	}
2489  	info->ltr_set(rtwdev, false);
2490  	rtw89_pci_ctrl_dma_all(rtwdev, false);
2491  	rtw89_pci_clr_idx_all(rtwdev);
2492  
2493  	return 0;
2494  }
2495  
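/* HCI side of MAC power-on: apply the PHY/PCIe quirks and calibrations,
 * stop DMA and wait for all channels to idle, reset rings and BD RAM,
 * then re-enable DMA with only the FW command channel open so firmware
 * can be downloaded.
 */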
2496  static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
2497  {
2498  	const struct rtw89_pci_info *info = rtwdev->pci_info;
2499  	int ret;
2500  
2501  	rtw89_pci_rxdma_prefth(rtwdev);
2502  	rtw89_pci_l1off_pwroff(rtwdev);
2503  	rtw89_pci_deglitch_setting(rtwdev);
2504  	ret = rtw89_pci_l2_rxen_lat(rtwdev);
2505  	if (ret) {
2506  		rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret);
2507  		return ret;
2508  	}
2509  
2510  	rtw89_pci_aphy_pwrcut(rtwdev);
2511  	rtw89_pci_hci_ldo(rtwdev);
2512  	rtw89_pci_dphy_delay(rtwdev);
2513  
2514  	ret = rtw89_pci_autok_x(rtwdev);
2515  	if (ret) {
2516  		rtw89_err(rtwdev, "[ERR] pcie autok_x fail %d\n", ret);
2517  		return ret;
2518  	}
2519  
2520  	ret = rtw89_pci_auto_refclk_cal(rtwdev, false);
2521  	if (ret) {
2522  		rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret);
2523  		return ret;
2524  	}
2525  
2526  	rtw89_pci_power_wake(rtwdev, true);
2527  	rtw89_pci_autoload_hang(rtwdev);
2528  	rtw89_pci_l12_vmain(rtwdev);
2529  	rtw89_pci_gen2_force_ib(rtwdev);
2530  	rtw89_pci_l1_ent_lat(rtwdev);
2531  	rtw89_pci_wd_exit_l1(rtwdev);
2532  	rtw89_pci_set_sic(rtwdev);
2533  	rtw89_pci_set_lbc(rtwdev);
2534  	rtw89_pci_set_io_rcy(rtwdev);
2535  	rtw89_pci_set_dbg(rtwdev);
2536  	rtw89_pci_set_keep_reg(rtwdev);
2537  
2538  	rtw89_write32_set(rtwdev, info->dma_stop1.addr, B_AX_STOP_WPDMA);
2539  
2540  	/* stop DMA activities */
2541  	rtw89_pci_ctrl_dma_all(rtwdev, false);
2542  
2543  	ret = rtw89_pci_poll_dma_all_idle(rtwdev);
2544  	if (ret) {
2545  		rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n");
2546  		return ret;
2547  	}
2548  
2549  	rtw89_pci_clr_idx_all(rtwdev);
2550  	rtw89_pci_mode_op(rtwdev);
2551  
2552  	/* fill TRX BD indexes */
2553  	rtw89_pci_ops_reset(rtwdev);
2554  
2555  	ret = rtw89_pci_rst_bdram_pcie(rtwdev);
2556  	if (ret) {
2557  		rtw89_warn(rtwdev, "reset bdram busy\n");
2558  		return ret;
2559  	}
2560  
2561  	/* disable all channels except the FW CMD channel, to download firmware */
2562  	rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, false);
2563  	rtw89_pci_ctrl_txdma_fw_ch_pcie(rtwdev, true);
2564  
2565  	/* start DMA activities */
2566  	rtw89_pci_ctrl_dma_all(rtwdev, true);
2567  
2568  	return 0;
2569  }
2570  
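/* Configure LTR (Latency Tolerance Reporting). The initial reads only
 * sanity-check that the LTR registers return plausible values before the
 * idle/active latencies are programmed.
 */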
2571  int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en)
2572  {
2573  	u32 val;
2574  
2575  	if (!en)
2576  		return 0;
2577  
2578  	val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
2579  	if (rtw89_pci_ltr_is_err_reg_val(val))
2580  		return -EINVAL;
2581  	val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
2582  	if (rtw89_pci_ltr_is_err_reg_val(val))
2583  		return -EINVAL;
2584  	val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY);
2585  	if (rtw89_pci_ltr_is_err_reg_val(val))
2586  		return -EINVAL;
2587  	val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY);
2588  	if (rtw89_pci_ltr_is_err_reg_val(val))
2589  		return -EINVAL;
2590  
2591  	rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN | B_AX_LTR_EN |
2592  						   B_AX_LTR_WD_NOEMP_CHK);
2593  	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK,
2594  			   PCI_LTR_SPC_500US);
2595  	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
2596  			   PCI_LTR_IDLE_TIMER_3_2MS);
2597  	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
2598  	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
2599  	rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x90039003);
2600  	rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b);
2601  
2602  	return 0;
2603  }
2604  EXPORT_SYMBOL(rtw89_pci_ltr_set);
2605  
2606  int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en)
2607  {
2608  	u32 dec_ctrl;
2609  	u32 val32;
2610  
2611  	val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
2612  	if (rtw89_pci_ltr_is_err_reg_val(val32))
2613  		return -EINVAL;
2614  	val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
2615  	if (rtw89_pci_ltr_is_err_reg_val(val32))
2616  		return -EINVAL;
2617  	dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL);
2618  	if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl))
2619  		return -EINVAL;
2620  	val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3);
2621  	if (rtw89_pci_ltr_is_err_reg_val(val32))
2622  		return -EINVAL;
2623  	val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0);
2624  	if (rtw89_pci_ltr_is_err_reg_val(val32))
2625  		return -EINVAL;
2626  
2627  	if (!en) {
2628  		dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN);
2629  		dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) |
2630  			    B_AX_LTR_REQ_DRV;
2631  	} else {
2632  		dec_ctrl |= B_AX_LTR_HW_DEC_EN;
2633  	}
2634  
2635  	dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK;
2636  	dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US);
2637  
2638  	if (en)
2639  		rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0,
2640  				  B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN);
2641  	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
2642  			   PCI_LTR_IDLE_TIMER_3_2MS);
2643  	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
2644  	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
2645  	rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl);
2646  	rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003);
2647  	rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b);
2648  
2649  	return 0;
2650  }
2651  EXPORT_SYMBOL(rtw89_pci_ltr_set_v1);
2652  
2653  static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev)
2654  {
2655  	const struct rtw89_pci_info *info = rtwdev->pci_info;
2656  	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2657  	int ret;
2658  
2659  	ret = info->ltr_set(rtwdev, true);
2660  	if (ret) {
2661  		rtw89_err(rtwdev, "pci ltr set fail\n");
2662  		return ret;
2663  	}
2664  	if (chip_id == RTL8852A) {
2665  		/* ltr sw trigger */
2666  		rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT);
2667  	}
2668  	if (chip_id == RTL8852A || chip_id == RTL8852B) {
2669  		/* ADDR info 8-byte mode */
2670  		rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
2671  				  B_AX_HOST_ADDR_INFO_8B_SEL);
2672  		rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
2673  	}
2674  
2675  	/* enable DMA for all queues */
2676  	rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, true);
2677  
2678  	/* Release PCI IO */
2679  	rtw89_write32_clr(rtwdev, info->dma_stop1.addr,
2680  			  B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO);
2681  
2682  	return 0;
2683  }
2684  
2685  static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev,
2686  				  struct pci_dev *pdev)
2687  {
2688  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2689  	int ret;
2690  
2691  	ret = pci_enable_device(pdev);
2692  	if (ret) {
2693  		rtw89_err(rtwdev, "failed to enable pci device\n");
2694  		return ret;
2695  	}
2696  
2697  	pci_set_master(pdev);
2698  	pci_set_drvdata(pdev, rtwdev->hw);
2699  
2700  	rtwpci->pdev = pdev;
2701  
2702  	return 0;
2703  }
2704  
2705  static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev,
2706  				     struct pci_dev *pdev)
2707  {
2708  	pci_disable_device(pdev);
2709  }
2710  
2711  static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
2712  				   struct pci_dev *pdev)
2713  {
2714  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2715  	unsigned long resource_len;
2716  	u8 bar_id = 2;
2717  	int ret;
2718  
2719  	ret = pci_request_regions(pdev, KBUILD_MODNAME);
2720  	if (ret) {
2721  		rtw89_err(rtwdev, "failed to request pci regions\n");
2722  		goto err;
2723  	}
2724  
2725  	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2726  	if (ret) {
2727  		rtw89_err(rtwdev, "failed to set dma mask to 32-bit\n");
2728  		goto err_release_regions;
2729  	}
2730  
2731  	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
2732  	if (ret) {
2733  		rtw89_err(rtwdev, "failed to set consistent dma mask to 32-bit\n");
2734  		goto err_release_regions;
2735  	}
2736  
2737  	resource_len = pci_resource_len(pdev, bar_id);
2738  	rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len);
2739  	if (!rtwpci->mmap) {
2740  		rtw89_err(rtwdev, "failed to map pci io\n");
2741  		ret = -EIO;
2742  		goto err_release_regions;
2743  	}
2744  
2745  	return 0;
2746  
2747  err_release_regions:
2748  	pci_release_regions(pdev);
2749  err:
2750  	return ret;
2751  }
2752  
2753  static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev,
2754  				    struct pci_dev *pdev)
2755  {
2756  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2757  
2758  	if (rtwpci->mmap) {
2759  		pci_iounmap(pdev, rtwpci->mmap);
2760  		pci_release_regions(pdev);
2761  	}
2762  }
2763  
2764  static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev,
2765  				      struct pci_dev *pdev,
2766  				      struct rtw89_pci_tx_ring *tx_ring)
2767  {
2768  	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
2769  	u8 *head = wd_ring->head;
2770  	dma_addr_t dma = wd_ring->dma;
2771  	u32 page_size = wd_ring->page_size;
2772  	u32 page_num = wd_ring->page_num;
2773  	u32 ring_sz = page_size * page_num;
2774  
2775  	dma_free_coherent(&pdev->dev, ring_sz, head, dma);
2776  	wd_ring->head = NULL;
2777  }
2778  
2779  static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev,
2780  				   struct pci_dev *pdev,
2781  				   struct rtw89_pci_tx_ring *tx_ring)
2782  {
2783  	int ring_sz;
2784  	u8 *head;
2785  	dma_addr_t dma;
2786  
2787  	head = tx_ring->bd_ring.head;
2788  	dma = tx_ring->bd_ring.dma;
2789  	ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len;
2790  	dma_free_coherent(&pdev->dev, ring_sz, head, dma);
2791  
2792  	tx_ring->bd_ring.head = NULL;
2793  }
2794  
2795  static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev,
2796  				    struct pci_dev *pdev)
2797  {
2798  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2799  	const struct rtw89_pci_info *info = rtwdev->pci_info;
2800  	struct rtw89_pci_tx_ring *tx_ring;
2801  	int i;
2802  
2803  	for (i = 0; i < RTW89_TXCH_NUM; i++) {
2804  		if (info->tx_dma_ch_mask & BIT(i))
2805  			continue;
2806  		tx_ring = &rtwpci->tx_rings[i];
2807  		rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
2808  		rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
2809  	}
2810  }
2811  
2812  static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev,
2813  				   struct pci_dev *pdev,
2814  				   struct rtw89_pci_rx_ring *rx_ring)
2815  {
2816  	struct rtw89_pci_rx_info *rx_info;
2817  	struct sk_buff *skb;
2818  	dma_addr_t dma;
2819  	u32 buf_sz;
2820  	u8 *head;
2821  	int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len;
2822  	int i;
2823  
2824  	buf_sz = rx_ring->buf_sz;
2825  	for (i = 0; i < rx_ring->bd_ring.len; i++) {
2826  		skb = rx_ring->buf[i];
2827  		if (!skb)
2828  			continue;
2829  
2830  		rx_info = RTW89_PCI_RX_SKB_CB(skb);
2831  		dma = rx_info->dma;
2832  		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
2833  		dev_kfree_skb(skb);
2834  		rx_ring->buf[i] = NULL;
2835  	}
2836  
2837  	head = rx_ring->bd_ring.head;
2838  	dma = rx_ring->bd_ring.dma;
2839  	dma_free_coherent(&pdev->dev, ring_sz, head, dma);
2840  
2841  	rx_ring->bd_ring.head = NULL;
2842  }
2843  
2844  static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev,
2845  				    struct pci_dev *pdev)
2846  {
2847  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2848  	struct rtw89_pci_rx_ring *rx_ring;
2849  	int i;
2850  
2851  	for (i = 0; i < RTW89_RXCH_NUM; i++) {
2852  		rx_ring = &rtwpci->rx_rings[i];
2853  		rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
2854  	}
2855  }
2856  
2857  static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev,
2858  				     struct pci_dev *pdev)
2859  {
2860  	rtw89_pci_free_rx_rings(rtwdev, pdev);
2861  	rtw89_pci_free_tx_rings(rtwdev, pdev);
2862  }
2863  
2864  static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev,
2865  				struct rtw89_pci_rx_ring *rx_ring,
2866  				struct sk_buff *skb, int buf_sz, u32 idx)
2867  {
2868  	struct rtw89_pci_rx_info *rx_info;
2869  	struct rtw89_pci_rx_bd_32 *rx_bd;
2870  	dma_addr_t dma;
2871  
2872  	if (!skb)
2873  		return -EINVAL;
2874  
2875  	dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
2876  	if (dma_mapping_error(&pdev->dev, dma))
2877  		return -EBUSY;
2878  
2879  	rx_info = RTW89_PCI_RX_SKB_CB(skb);
2880  	rx_bd = RTW89_PCI_RX_BD(rx_ring, idx);
2881  
2882  	memset(rx_bd, 0, sizeof(*rx_bd));
2883  	rx_bd->buf_size = cpu_to_le16(buf_sz);
2884  	rx_bd->dma = cpu_to_le32(dma);
2885  	rx_info->dma = dma;
2886  
2887  	return 0;
2888  }
2889  
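/* Carve the txwd pages for one TX channel out of a single coherent DMA
 * allocation and put them all on the channel's free list.
 */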
2890  static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev,
2891  				      struct pci_dev *pdev,
2892  				      struct rtw89_pci_tx_ring *tx_ring,
2893  				      enum rtw89_tx_channel txch)
2894  {
2895  	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
2896  	struct rtw89_pci_tx_wd *txwd;
2897  	dma_addr_t dma;
2898  	dma_addr_t cur_paddr;
2899  	u8 *head;
2900  	u8 *cur_vaddr;
2901  	u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE;
2902  	u32 page_num = RTW89_PCI_TXWD_NUM_MAX;
2903  	u32 ring_sz = page_size * page_num;
2904  	u32 page_offset;
2905  	int i;
2906  
2907  	/* the FWCMD queue doesn't use txwd pages */
2908  	if (txch == RTW89_TXCH_CH12)
2909  		return 0;
2910  
2911  	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
2912  	if (!head)
2913  		return -ENOMEM;
2914  
2915  	INIT_LIST_HEAD(&wd_ring->free_pages);
2916  	wd_ring->head = head;
2917  	wd_ring->dma = dma;
2918  	wd_ring->page_size = page_size;
2919  	wd_ring->page_num = page_num;
2920  
2921  	page_offset = 0;
2922  	for (i = 0; i < page_num; i++) {
2923  		txwd = &wd_ring->pages[i];
2924  		cur_paddr = dma + page_offset;
2925  		cur_vaddr = head + page_offset;
2926  
2927  		skb_queue_head_init(&txwd->queue);
2928  		INIT_LIST_HEAD(&txwd->list);
2929  		txwd->paddr = cur_paddr;
2930  		txwd->vaddr = cur_vaddr;
2931  		txwd->len = page_size;
2932  		txwd->seq = i;
2933  		rtw89_pci_enqueue_txwd(tx_ring, txwd);
2934  
2935  		page_offset += page_size;
2936  	}
2937  
2938  	return 0;
2939  }
2940  
2941  static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
2942  				   struct pci_dev *pdev,
2943  				   struct rtw89_pci_tx_ring *tx_ring,
2944  				   u32 desc_size, u32 len,
2945  				   enum rtw89_tx_channel txch)
2946  {
2947  	const struct rtw89_pci_ch_dma_addr *txch_addr;
2948  	int ring_sz = desc_size * len;
2949  	u8 *head;
2950  	dma_addr_t dma;
2951  	int ret;
2952  
2953  	ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch);
2954  	if (ret) {
2955  		rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch);
2956  		goto err;
2957  	}
2958  
2959  	ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr);
2960  	if (ret) {
2961  		rtw89_err(rtwdev, "failed to get address of txch %d\n", txch);
2962  		goto err_free_wd_ring;
2963  	}
2964  
2965  	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
2966  	if (!head) {
2967  		ret = -ENOMEM;
2968  		goto err_free_wd_ring;
2969  	}
2970  
2971  	INIT_LIST_HEAD(&tx_ring->busy_pages);
2972  	tx_ring->bd_ring.head = head;
2973  	tx_ring->bd_ring.dma = dma;
2974  	tx_ring->bd_ring.len = len;
2975  	tx_ring->bd_ring.desc_size = desc_size;
2976  	tx_ring->bd_ring.addr = *txch_addr;
2977  	tx_ring->bd_ring.wp = 0;
2978  	tx_ring->bd_ring.rp = 0;
2979  	tx_ring->txch = txch;
2980  
2981  	return 0;
2982  
2983  err_free_wd_ring:
2984  	rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
2985  err:
2986  	return ret;
2987  }
2988  
2989  static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
2990  				    struct pci_dev *pdev)
2991  {
2992  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2993  	const struct rtw89_pci_info *info = rtwdev->pci_info;
2994  	struct rtw89_pci_tx_ring *tx_ring;
2995  	u32 desc_size;
2996  	u32 len;
2997  	u32 i, tx_allocated;
2998  	int ret;
2999  
3000  	for (i = 0; i < RTW89_TXCH_NUM; i++) {
3001  		if (info->tx_dma_ch_mask & BIT(i))
3002  			continue;
3003  		tx_ring = &rtwpci->tx_rings[i];
3004  		desc_size = sizeof(struct rtw89_pci_tx_bd_32);
3005  		len = RTW89_PCI_TXBD_NUM_MAX;
3006  		ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring,
3007  					      desc_size, len, i);
3008  		if (ret) {
3009  			rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i);
3010  			goto err_free;
3011  		}
3012  	}
3013  
3014  	return 0;
3015  
3016  err_free:
3017  	tx_allocated = i;
3018  	for (i = 0; i < tx_allocated; i++) {
3019  		tx_ring = &rtwpci->tx_rings[i];
3020  		rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
3021  	}
3022  
3023  	return ret;
3024  }
3025  
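/* Allocate one RX ring: a coherent buffer for the BDs plus one DMA-mapped
 * skb per BD. On failure, unmap and free whatever was already set up.
 */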
3026  static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
3027  				   struct pci_dev *pdev,
3028  				   struct rtw89_pci_rx_ring *rx_ring,
3029  				   u32 desc_size, u32 len, u32 rxch)
3030  {
3031  	const struct rtw89_pci_ch_dma_addr *rxch_addr;
3032  	struct sk_buff *skb;
3033  	u8 *head;
3034  	dma_addr_t dma;
3035  	int ring_sz = desc_size * len;
3036  	int buf_sz = RTW89_PCI_RX_BUF_SIZE;
3037  	int i, allocated;
3038  	int ret;
3039  
3040  	ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr);
3041  	if (ret) {
3042  		rtw89_err(rtwdev, "failed to get address of rxch %d\n", rxch);
3043  		return ret;
3044  	}
3045  
3046  	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
3047  	if (!head) {
3048  		ret = -ENOMEM;
3049  		goto err;
3050  	}
3051  
3052  	rx_ring->bd_ring.head = head;
3053  	rx_ring->bd_ring.dma = dma;
3054  	rx_ring->bd_ring.len = len;
3055  	rx_ring->bd_ring.desc_size = desc_size;
3056  	rx_ring->bd_ring.addr = *rxch_addr;
3057  	rx_ring->bd_ring.wp = 0;
3058  	rx_ring->bd_ring.rp = 0;
3059  	rx_ring->buf_sz = buf_sz;
3060  	rx_ring->diliver_skb = NULL;
3061  	rx_ring->diliver_desc.ready = false;
3062  
3063  	for (i = 0; i < len; i++) {
3064  		skb = dev_alloc_skb(buf_sz);
3065  		if (!skb) {
3066  			ret = -ENOMEM;
3067  			goto err_free;
3068  		}
3069  
3070  		memset(skb->data, 0, buf_sz);
3071  		rx_ring->buf[i] = skb;
3072  		ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb,
3073  					   buf_sz, i);
3074  		if (ret) {
3075  			rtw89_err(rtwdev, "failed to init rx buf %d\n", i);
3076  			dev_kfree_skb_any(skb);
3077  			rx_ring->buf[i] = NULL;
3078  			goto err_free;
3079  		}
3080  	}
3081  
3082  	return 0;
3083  
3084  err_free:
3085  	allocated = i;
3086  	for (i = 0; i < allocated; i++) {
3087  		skb = rx_ring->buf[i];
3088  		if (!skb)
3089  			continue;
3090  		dma = *((dma_addr_t *)skb->cb);
3091  		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
3092  		dev_kfree_skb(skb);
3093  		rx_ring->buf[i] = NULL;
3094  	}
3095  
3096  	head = rx_ring->bd_ring.head;
3097  	dma = rx_ring->bd_ring.dma;
3098  	dma_free_coherent(&pdev->dev, ring_sz, head, dma);
3099  
3100  	rx_ring->bd_ring.head = NULL;
3101  err:
3102  	return ret;
3103  }
3104  
3105  static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev,
3106  				    struct pci_dev *pdev)
3107  {
3108  	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3109  	struct rtw89_pci_rx_ring *rx_ring;
3110  	u32 desc_size;
3111  	u32 len;
3112  	int i, rx_allocated;
3113  	int ret;
3114  
3115  	for (i = 0; i < RTW89_RXCH_NUM; i++) {
3116  		rx_ring = &rtwpci->rx_rings[i];
3117  		desc_size = sizeof(struct rtw89_pci_rx_bd_32);
3118  		len = RTW89_PCI_RXBD_NUM_MAX;
3119  		ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring,
3120  					      desc_size, len, i);
3121  		if (ret) {
3122  			rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i);
3123  			goto err_free;
3124  		}
3125  	}
3126  
3127  	return 0;
3128  
3129  err_free:
3130  	rx_allocated = i;
3131  	for (i = 0; i < rx_allocated; i++) {
3132  		rx_ring = &rtwpci->rx_rings[i];
3133  		rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
3134  	}
3135  
3136  	return ret;
3137  }
3138  
3139  static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev,
3140  				     struct pci_dev *pdev)
3141  {
3142  	int ret;
3143  
3144  	ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev);
3145  	if (ret) {
3146  		rtw89_err(rtwdev, "failed to alloc dma tx rings\n");
3147  		goto err;
3148  	}
3149  
3150  	ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev);
3151  	if (ret) {
3152  		rtw89_err(rtwdev, "failed to alloc dma rx rings\n");
3153  		goto err_free_tx_rings;
3154  	}
3155  
3156  	return 0;
3157  
3158  err_free_tx_rings:
3159  	rtw89_pci_free_tx_rings(rtwdev, pdev);
3160  err:
3161  	return ret;
3162  }
3163  
static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev,
			       struct rtw89_pci *rtwpci)
{
	skb_queue_head_init(&rtwpci->h2c_queue);
	skb_queue_head_init(&rtwpci->h2c_release_queue);
}

static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev,
				    struct pci_dev *pdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	int ret;

	ret = rtw89_pci_setup_mapping(rtwdev, pdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to setup pci mapping\n");
		goto err;
	}

	ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to alloc pci trx rings\n");
		goto err_pci_unmap;
	}

	rtw89_pci_h2c_init(rtwdev, rtwpci);

	spin_lock_init(&rtwpci->irq_lock);
	spin_lock_init(&rtwpci->trx_lock);

	return 0;

err_pci_unmap:
	rtw89_pci_clear_mapping(rtwdev, pdev);
err:
	return ret;
}

static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev,
				     struct pci_dev *pdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	rtw89_pci_free_trx_rings(rtwdev, pdev);
	rtw89_pci_clear_mapping(rtwdev, pdev);
	rtw89_pci_release_fwcmd(rtwdev, rtwpci,
				skb_queue_len(&rtwpci->h2c_queue), true);
}

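/* Interrupt mask profile for the v0 interrupt scheme: while SER recovery
 * is in progress, only the HS0 indication interrupt is kept enabled so
 * halt C2H events still get through; otherwise the full set of TX/RX DMA
 * interrupt sources is enabled.
 */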
void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN;

	if (chip->chip_id == RTL8851B)
		hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN_WKARND;

	rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0;

	if (rtwpci->under_recovery) {
		rtwpci->intrs[0] = hs0isr_ind_int_en;
		rtwpci->intrs[1] = 0;
	} else {
		rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
				   B_AX_RXDMA_INT_EN |
				   B_AX_RXP1DMA_INT_EN |
				   B_AX_RPQDMA_INT_EN |
				   B_AX_RXDMA_STUCK_INT_EN |
				   B_AX_RDU_INT_EN |
				   B_AX_RPQBD_FULL_INT_EN |
				   hs0isr_ind_int_en;

		rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN;
	}
}
EXPORT_SYMBOL(rtw89_pci_config_intr_mask);

static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN;
	rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
	rtwpci->intrs[0] = 0;
	rtwpci->intrs[1] = 0;
}

static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN |
			    B_AX_HS1ISR_IND_INT_EN |
			    B_AX_HS0ISR_IND_INT_EN;
	rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
	rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
			   B_AX_RXDMA_INT_EN |
			   B_AX_RXP1DMA_INT_EN |
			   B_AX_RPQDMA_INT_EN |
			   B_AX_RXDMA_STUCK_INT_EN |
			   B_AX_RDU_INT_EN |
			   B_AX_RPQBD_FULL_INT_EN;
	rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
}

static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN |
			    B_AX_HS0ISR_IND_INT_EN;
	rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
	rtwpci->intrs[0] = 0;
	rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
}

void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	if (rtwpci->under_recovery)
		rtw89_pci_recovery_intr_mask_v1(rtwdev);
	else if (rtwpci->low_power)
		rtw89_pci_low_power_intr_mask_v1(rtwdev);
	else
		rtw89_pci_default_intr_mask_v1(rtwdev);
}
EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1);

static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev,
				 struct pci_dev *pdev)
{
	unsigned long flags = 0;
	int ret;

	flags |= PCI_IRQ_LEGACY | PCI_IRQ_MSI;
	ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
	if (ret < 0) {
		rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret);
		goto err;
	}

	ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
					rtw89_pci_interrupt_handler,
					rtw89_pci_interrupt_threadfn,
					IRQF_SHARED, KBUILD_MODNAME, rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to request threaded irq\n");
		goto err_free_vector;
	}

	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET);

	return 0;

err_free_vector:
	pci_free_irq_vectors(pdev);
err:
	return ret;
}

static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev,
			       struct pci_dev *pdev)
{
	devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
	pci_free_irq_vectors(pdev);
}

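/* Decode a Gray-coded register field: each output bit is the XOR of the
 * corresponding input bit and its next more-significant neighbour, while
 * the most significant bit is passed through unchanged. For example, with
 * bit_num = 3, 0b110 becomes 0b101.
 */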
static u16 gray_code_to_bin(u16 gray_code, u32 bit_num)
{
	u16 bin = 0, gray_bit;
	u32 bit_idx;

	for (bit_idx = 0; bit_idx < bit_num; bit_idx++) {
		gray_bit = (gray_code >> bit_idx) & 0x1;
		if (bit_num - bit_idx > 1)
			gray_bit ^= (gray_code >> (bit_idx + 1)) & 0x1;
		bin |= (gray_bit << bit_idx);
	}

	return bin;
}

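/* RTL8852C only: when ASPM L1 is not under hardware control, read back
 * the Gray-coded equalizer FILTER_OUT value from the Gen2 PHY analog
 * registers, decode it and program it back, then enable PCIe PHY power
 * saving. Gen1 links only get the power-save bit; other link speeds are
 * not supported.
 */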
static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;
	u16 val16, filter_out_val;
	u32 val, phy_offset;
	int ret;

	if (rtwdev->chip->chip_id != RTL8852C)
		return 0;

	val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK);
	if (val == B_AX_ASPM_CTRL_L1)
		return 0;

	ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val);
	if (ret)
		return ret;

	val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val);
	if (val == RTW89_PCIE_GEN1_SPEED) {
		phy_offset = R_RAC_DIRECT_OFFSET_G1;
	} else if (val == RTW89_PCIE_GEN2_SPEED) {
		phy_offset = R_RAC_DIRECT_OFFSET_G2;
		val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT);
		rtw89_write16_set(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT,
				  val16 | B_PCIE_BIT_PINOUT_DIS);
		rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT,
				  val16 & ~B_PCIE_BIT_RD_SEL);

		val16 = rtw89_read16_mask(rtwdev,
					  phy_offset + RAC_ANA1F * RAC_MULT,
					  FILTER_OUT_EQ_MASK);
		val16 = gray_code_to_bin(val16, hweight16(val16));
		filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 *
					      RAC_MULT);
		filter_out_val &= ~REG_FILTER_OUT_MASK;
		filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16);

		rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT,
			      filter_out_val);
		rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0A * RAC_MULT,
				  B_BAC_EQ_SEL);
		rtw89_write16_set(rtwdev,
				  R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT,
				  B_PCIE_BIT_PSAVE);
	} else {
		return -EOPNOTSUPP;
	}
	rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT,
			  B_PCIE_BIT_PSAVE);

	return 0;
}

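/* Toggle hardware CLKREQ handling (no-op if the disable_clkreq module
 * parameter is set). The CLKREQ delay is programmed first; the enable bit
 * itself lives in a vendor config byte on 8852A/8852B/8851B and in a MAC
 * register on 8852C.
 */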
static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	int ret;

	if (rtw89_pci_disable_clkreq)
		return;

	ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
					  PCIE_CLKDLY_HW_30US);
	if (ret)
		rtw89_err(rtwdev, "failed to set CLKREQ Delay\n");

	if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
		if (enable)
			ret = rtw89_pci_config_byte_set(rtwdev,
							RTW89_PCIE_L1_CTRL,
							RTW89_PCIE_BIT_CLK);
		else
			ret = rtw89_pci_config_byte_clr(rtwdev,
							RTW89_PCIE_L1_CTRL,
							RTW89_PCIE_BIT_CLK);
		if (ret)
			rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d\n",
				  enable ? "set" : "unset", ret);
	} else if (chip_id == RTL8852C) {
		rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL,
				  B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL);
		if (enable)
			rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL,
					  B_AX_CLK_REQ_N);
		else
			rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL,
					  B_AX_CLK_REQ_N);
	}
}

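/* Toggle hardware-controlled ASPM L1 (no-op if the disable_aspm_l1 module
 * parameter is set). The entry delays are always programmed (16 us for
 * L1, 4 us for L0s); only the location of the enable bit differs between
 * chip generations.
 */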
static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	u8 value = 0;
	int ret;

	if (rtw89_pci_disable_aspm_l1)
		return;

	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value);
	if (ret)
		rtw89_err(rtwdev, "failed to read ASPM Delay\n");

	value &= ~(RTW89_L1DLY_MASK | RTW89_L0DLY_MASK);
	value |= FIELD_PREP(RTW89_L1DLY_MASK, PCIE_L1DLY_16US) |
		 FIELD_PREP(RTW89_L0DLY_MASK, PCIE_L0SDLY_4US);

	ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value);
	if (ret)
		rtw89_err(rtwdev, "failed to write ASPM Delay\n");

	if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
		if (enable)
			ret = rtw89_pci_config_byte_set(rtwdev,
							RTW89_PCIE_L1_CTRL,
							RTW89_PCIE_BIT_L1);
		else
			ret = rtw89_pci_config_byte_clr(rtwdev,
							RTW89_PCIE_L1_CTRL,
							RTW89_PCIE_BIT_L1);
	} else if (chip_id == RTL8852C) {
		if (enable)
			rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
					  B_AX_ASPM_CTRL_L1);
		else
			rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
					  B_AX_ASPM_CTRL_L1);
	}
	if (ret)
		rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d\n",
			  enable ? "set" : "unset", ret);
}

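/* RX interrupt mitigation: under high TX/RX traffic (and not while
 * scanning), delay the RX interrupt until half of the RX BD ring is used
 * or 32 units of 64 us (2048 us) have elapsed; otherwise mitigation is
 * turned off entirely.
 */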
static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev)
{
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv;
	enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv;
	u32 val = 0;

	if (!rtwdev->scanning &&
	    (tx_tfc_lv >= RTW89_TFC_HIGH || rx_tfc_lv >= RTW89_TFC_HIGH))
		val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL |
		      FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) |
		      FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) |
		      FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64);

	rtw89_write32(rtwdev, R_AX_INT_MIT_RX, val);
}

static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;
	u16 link_ctrl;
	int ret;

	/* Although the link control register lives in standard PCIE
	 * configuration space, by Realtek's design the driver must also
	 * check whether the host supports CLKREQ/ASPM before enabling the
	 * HW module.
	 *
	 * These functions are implemented by two associated HW modules:
	 * one accesses PCIE configuration space to follow the host
	 * settings, and the other performs the actual CLKREQ/ASPM
	 * mechanisms and is disabled by default. Some hosts do not support
	 * them, and wrong settings (e.g. CLKREQ# not bi-directional) could
	 * lead to losing the device if the HW misbehaves on the link.
	 *
	 * Hence the driver first checks that the PCIE configuration space
	 * is synced and enabled, and only then turns on the module that
	 * actually implements the mechanism.
	 */
	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
	if (ret) {
		rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
		return;
	}

	if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
		rtw89_pci_clkreq_set(rtwdev, true);

	if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
		rtw89_pci_aspm_set(rtwdev, true);
}

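/* Toggle L1 substates on the device side. On 8852A/8852B/8851B this is a
 * single vendor config bit; on 8852C the ASPM L1.1/PCI-PM L1.1 config
 * bits are cleared first and the MAC-level B_AX_L1SUB_DISABLE bit is used
 * instead.
 */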
static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	int ret;

	if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
		if (enable)
			ret = rtw89_pci_config_byte_set(rtwdev,
							RTW89_PCIE_TIMER_CTRL,
							RTW89_PCIE_BIT_L1SUB);
		else
			ret = rtw89_pci_config_byte_clr(rtwdev,
							RTW89_PCIE_TIMER_CTRL,
							RTW89_PCIE_BIT_L1SUB);
		if (ret)
			rtw89_err(rtwdev, "failed to %s L1SS, ret=%d\n",
				  enable ? "set" : "unset", ret);
	} else if (chip_id == RTL8852C) {
		ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1,
						RTW89_PCIE_BIT_ASPM_L11 |
						RTW89_PCIE_BIT_PCI_L11);
		if (ret)
			rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d\n", ret);
		if (enable)
			rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
					  B_AX_L1SUB_DISABLE);
		else
			rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
					  B_AX_L1SUB_DISABLE);
	}
}

static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;
	u32 l1ss_cap_ptr, l1ss_ctrl;

	if (rtw89_pci_disable_l1ss)
		return;

	l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
	if (!l1ss_cap_ptr)
		return;

	pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl);

	if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK)
		rtw89_pci_l1ss_set(rtwdev, true);
}

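/* Wait (atomically, in 10 us steps, up to 1 ms) for the PCIe IO, TX and
 * RX busy flags to clear.
 */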
static int rtw89_pci_poll_io_idle(struct rtw89_dev *rtwdev)
{
	int ret = 0;
	u32 sts;
	u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY;

	ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0,
				       10, 1000, false, rtwdev,
				       R_AX_PCIE_DMA_BUSY1);
	if (ret) {
		rtw89_err(rtwdev, "pci dmach busy1 0x%X\n",
			  rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1));
		return -EINVAL;
	}
	return ret;
}

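/* Level-1 recovery, step 1: stop all DMA and wait for the PCIe IO to go
 * idle. If it stays busy, kick whichever engine the debug error flags
 * report as stuck and poll once more. Skipped on RTL8852C.
 */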
static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev)
{
	u32 val;
	int ret;

	if (rtwdev->chip->chip_id == RTL8852C)
		return 0;

	rtw89_pci_ctrl_dma_all(rtwdev, false);
	ret = rtw89_pci_poll_io_idle(rtwdev);
	if (ret) {
		val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
		rtw89_debug(rtwdev, RTW89_DBG_HCI,
			    "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n",
			    R_AX_DBG_ERR_FLAG, val);
		if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0)
			rtw89_mac_ctrl_hci_dma_tx(rtwdev, false);
		if (val & B_AX_RX_STUCK)
			rtw89_mac_ctrl_hci_dma_rx(rtwdev, false);
		rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
		ret = rtw89_pci_poll_io_idle(rtwdev);
		val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
		rtw89_debug(rtwdev, RTW89_DBG_HCI,
			    "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n",
			    R_AX_DBG_ERR_FLAG, val);
	}

	return ret;
}

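/* Reset the PCIe buffer descriptor RAM: set the self-clearing
 * B_AX_RST_BDRAM bit and poll (up to 100 us) for the hardware to clear
 * it again.
 */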
static int rtw89_pci_rst_bdram(struct rtw89_dev *rtwdev)
{
	int ret = 0;
	u32 val32, sts;

	val32 = B_AX_RST_BDRAM;
	rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32);

	ret = read_poll_timeout_atomic(rtw89_read32, sts,
				       (sts & B_AX_RST_BDRAM) == 0x0, 1, 100,
				       true, rtwdev, R_AX_PCIE_INIT_CFG1);
	return ret;
}

static int rtw89_pci_lv1rst_start_dma(struct rtw89_dev *rtwdev)
{
	int ret;

	if (rtwdev->chip->chip_id == RTL8852C)
		return 0;

	rtw89_mac_ctrl_hci_dma_trx(rtwdev, false);
	rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
	rtw89_pci_clr_idx_all(rtwdev);

	ret = rtw89_pci_rst_bdram(rtwdev);
	if (ret)
		return ret;

	rtw89_pci_ctrl_dma_all(rtwdev, true);
	return ret;
}

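/* Dispatch the two level-1 recovery steps requested by the SER flow:
 * step 1 quiesces DMA before recovery proceeds, step 2 resets the rings
 * and re-enables DMA afterwards.
 */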
static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev,
					  enum rtw89_lv1_rcvy_step step)
{
	int ret;

	switch (step) {
	case RTW89_LV1_RCVY_STEP_1:
		ret = rtw89_pci_lv1rst_stop_dma(rtwdev);
		if (ret)
			rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n");

		break;

	case RTW89_LV1_RCVY_STEP_2:
		ret = rtw89_pci_lv1rst_start_dma(rtwdev);
		if (ret)
			rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n");
		break;

	default:
		return -EINVAL;
	}

	return ret;
}

static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev)
{
	rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX));
	rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG));
	rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG));
}

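/* NAPI poll: drain the RPQ (TX release reports) first, then the RX
 * queues. The corresponding interrupt sources are re-armed only when the
 * budget is not exhausted and NAPI completes.
 */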
static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget)
{
	struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi);
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;
	int work_done;

	rtwdev->napi_budget_countdown = budget;

	rtw89_pci_clear_isr0(rtwdev, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT);
	work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
	if (work_done == budget)
		return budget;

	rtw89_pci_clear_isr0(rtwdev, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT | B_AX_RDU_INT);
	work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		spin_lock_irqsave(&rtwpci->irq_lock, flags);
		if (likely(rtwpci->running))
			rtw89_chip_enable_intr(rtwdev, rtwpci);
		spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
	}

	return work_done;
}

static int __maybe_unused rtw89_pci_suspend(struct device *dev)
{
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rtw89_dev *rtwdev = hw->priv;
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;

	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
	if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
		rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
				  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
		rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
				  B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
	} else {
		rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
				  B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
	}

	return 0;
}

static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev)
{
	if (rtwdev->chip->chip_id == RTL8852C)
		return;

	/* The hardware requires this register to be written twice for the
	 * setting to take effect.
	 */
	rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
				    RTW89_PCIE_BIT_CFG_RST_MSTATE);
	rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
				    RTW89_PCIE_BIT_CFG_RST_MSTATE);
}

static int __maybe_unused rtw89_pci_resume(struct device *dev)
{
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rtw89_dev *rtwdev = hw->priv;
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;

	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
	if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
		rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
				  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
		rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
				  B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
	} else {
		rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1,
				  B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
		rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
				  B_AX_SEL_REQ_ENTR_L1);
	}
	rtw89_pci_l2_hci_ldo(rtwdev);
	rtw89_pci_filter_out(rtwdev);
	rtw89_pci_link_cfg(rtwdev);
	rtw89_pci_l1ss_cfg(rtwdev);

	return 0;
}

SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume);
EXPORT_SYMBOL(rtw89_pm_ops);

static const struct rtw89_hci_ops rtw89_pci_ops = {
	.tx_write	= rtw89_pci_ops_tx_write,
	.tx_kick_off	= rtw89_pci_ops_tx_kick_off,
	.flush_queues	= rtw89_pci_ops_flush_queues,
	.reset		= rtw89_pci_ops_reset,
	.start		= rtw89_pci_ops_start,
	.stop		= rtw89_pci_ops_stop,
	.pause		= rtw89_pci_ops_pause,
	.switch_mode	= rtw89_pci_ops_switch_mode,
	.recalc_int_mit = rtw89_pci_recalc_int_mit,

	.read8		= rtw89_pci_ops_read8,
	.read16		= rtw89_pci_ops_read16,
	.read32		= rtw89_pci_ops_read32,
	.write8		= rtw89_pci_ops_write8,
	.write16	= rtw89_pci_ops_write16,
	.write32	= rtw89_pci_ops_write32,

	.mac_pre_init	= rtw89_pci_ops_mac_pre_init,
	.mac_post_init	= rtw89_pci_ops_mac_post_init,
	.deinit		= rtw89_pci_ops_deinit,

	.check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource,
	.mac_lv1_rcvy	= rtw89_pci_ops_mac_lv1_recovery,
	.dump_err_status = rtw89_pci_ops_dump_err_status,
	.napi_poll	= rtw89_pci_napi_poll,

	.recovery_start = rtw89_pci_ops_recovery_start,
	.recovery_complete = rtw89_pci_ops_recovery_complete,

	.ctrl_txdma_ch	= rtw89_pci_ctrl_txdma_ch_pcie,
	.ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_pcie,
	.ctrl_trxhci	= rtw89_pci_ctrl_dma_trx,
	.poll_txdma_ch	= rtw89_poll_txdma_ch_idle_pcie,
	.clr_idx_all	= rtw89_pci_clr_idx_all,
	.clear		= rtw89_pci_clear_resource,
	.disable_intr	= rtw89_pci_disable_intr_lock,
	.enable_intr	= rtw89_pci_enable_intr_lock,
	.rst_bdram	= rtw89_pci_rst_bdram_pcie,
};

int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct rtw89_dev *rtwdev;
	const struct rtw89_driver_info *info;
	const struct rtw89_pci_info *pci_info;
	int ret;

	info = (const struct rtw89_driver_info *)id->driver_data;

	rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev,
					  sizeof(struct rtw89_pci),
					  info->chip);
	if (!rtwdev) {
		dev_err(&pdev->dev, "failed to allocate hw\n");
		return -ENOMEM;
	}

	pci_info = info->bus.pci;

	rtwdev->pci_info = info->bus.pci;
	rtwdev->hci.ops = &rtw89_pci_ops;
	rtwdev->hci.type = RTW89_HCI_TYPE_PCIE;
	rtwdev->hci.rpwm_addr = pci_info->rpwm_addr;
	rtwdev->hci.cpwm_addr = pci_info->cpwm_addr;

	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);

	ret = rtw89_core_init(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to initialise core\n");
		goto err_release_hw;
	}

	ret = rtw89_pci_claim_device(rtwdev, pdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to claim pci device\n");
		goto err_core_deinit;
	}

	ret = rtw89_pci_setup_resource(rtwdev, pdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to setup pci resource\n");
		goto err_declaim_pci;
	}

	ret = rtw89_chip_info_setup(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to setup chip information\n");
		goto err_clear_resource;
	}

	rtw89_pci_filter_out(rtwdev);
	rtw89_pci_link_cfg(rtwdev);
	rtw89_pci_l1ss_cfg(rtwdev);

	rtw89_core_napi_init(rtwdev);

	ret = rtw89_pci_request_irq(rtwdev, pdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to request pci irq\n");
		goto err_deinit_napi;
	}

	ret = rtw89_core_register(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to register core\n");
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	rtw89_pci_free_irq(rtwdev, pdev);
err_deinit_napi:
	rtw89_core_napi_deinit(rtwdev);
err_clear_resource:
	rtw89_pci_clear_resource(rtwdev, pdev);
err_declaim_pci:
	rtw89_pci_declaim_device(rtwdev, pdev);
err_core_deinit:
	rtw89_core_deinit(rtwdev);
err_release_hw:
	rtw89_free_ieee80211_hw(rtwdev);

	return ret;
}
EXPORT_SYMBOL(rtw89_pci_probe);

void rtw89_pci_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw89_dev *rtwdev;

	rtwdev = hw->priv;

	rtw89_pci_free_irq(rtwdev, pdev);
	rtw89_core_napi_deinit(rtwdev);
	rtw89_core_unregister(rtwdev);
	rtw89_pci_clear_resource(rtwdev, pdev);
	rtw89_pci_declaim_device(rtwdev, pdev);
	rtw89_core_deinit(rtwdev);
	rtw89_free_ieee80211_hw(rtwdev);
}
EXPORT_SYMBOL(rtw89_pci_remove);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek PCI 802.11ax wireless driver");
MODULE_LICENSE("Dual BSD/GPL");