1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2020  Realtek Corporation
3  */
4 
5 #include <linux/pci.h>
6 
7 #include "mac.h"
8 #include "pci.h"
9 #include "reg.h"
10 #include "ser.h"
11 
12 static bool rtw89_pci_disable_clkreq;
13 static bool rtw89_pci_disable_aspm_l1;
14 static bool rtw89_pci_disable_l1ss;
15 module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644);
16 module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644);
17 module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644);
18 MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
19 MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
20 MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");
21 
22 static int rtw89_pci_rst_bdram_pcie(struct rtw89_dev *rtwdev)
23 {
24 	u32 val;
25 	int ret;
26 
27 	rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1,
28 		      rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | B_AX_RST_BDRAM);
29 
30 	ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM),
31 				       1, RTW89_PCI_POLL_BDRAM_RST_CNT, false,
32 				       rtwdev, R_AX_PCIE_INIT_CFG1);
33 
34 	if (ret)
35 		return -EBUSY;
36 
37 	return 0;
38 }
39 
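/* Work out how many buffer descriptors the hardware has advanced since the
 * host last looked at this ring. For TX rings the distance is measured from
 * the stored read pointer, for RX rings from the host write pointer, and the
 * new hardware index is cached in bd_ring->rp.
 */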
40 static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev,
41 				struct rtw89_pci_dma_ring *bd_ring,
42 				u32 cur_idx, bool tx)
43 {
44 	u32 cnt, cur_rp, wp, rp, len;
45 
46 	rp = bd_ring->rp;
47 	wp = bd_ring->wp;
48 	len = bd_ring->len;
49 
50 	cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
51 	if (tx)
52 		cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp);
53 	else
54 		cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp);
55 
56 	bd_ring->rp = cur_rp;
57 
58 	return cnt;
59 }
60 
61 static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
62 				 struct rtw89_pci_tx_ring *tx_ring)
63 {
64 	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
65 	u32 addr_idx = bd_ring->addr.idx;
66 	u32 cnt, idx;
67 
68 	idx = rtw89_read32(rtwdev, addr_idx);
69 	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true);
70 
71 	return cnt;
72 }
73 
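/* Move completed H2C skbs from h2c_queue onto h2c_release_queue, then unmap
 * and free the queued entries. Unless release_all is set, the newest
 * RTW89_PCI_MULTITAG entries are left on the release queue and only freed on
 * a later pass.
 */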
74 static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
75 				    struct rtw89_pci *rtwpci,
76 				    u32 cnt, bool release_all)
77 {
78 	struct rtw89_pci_tx_data *tx_data;
79 	struct sk_buff *skb;
80 	u32 qlen;
81 
82 	while (cnt--) {
83 		skb = skb_dequeue(&rtwpci->h2c_queue);
84 		if (!skb) {
85 			rtw89_err(rtwdev, "failed to pre-release fwcmd\n");
86 			return;
87 		}
88 		skb_queue_tail(&rtwpci->h2c_release_queue, skb);
89 	}
90 
91 	qlen = skb_queue_len(&rtwpci->h2c_release_queue);
92 	if (!release_all)
93 		qlen = qlen > RTW89_PCI_MULTITAG ? qlen - RTW89_PCI_MULTITAG : 0;
94 
95 	while (qlen--) {
96 		skb = skb_dequeue(&rtwpci->h2c_release_queue);
97 		if (!skb) {
98 			rtw89_err(rtwdev, "failed to release fwcmd\n");
99 			return;
100 		}
101 		tx_data = RTW89_PCI_TX_SKB_CB(skb);
102 		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
103 				 DMA_TO_DEVICE);
104 		dev_kfree_skb_any(skb);
105 	}
106 }
107 
108 static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev,
109 				       struct rtw89_pci *rtwpci)
110 {
111 	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
112 	u32 cnt;
113 
114 	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
115 	if (!cnt)
116 		return;
117 	rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false);
118 }
119 
120 static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
121 				 struct rtw89_pci_rx_ring *rx_ring)
122 {
123 	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
124 	u32 addr_idx = bd_ring->addr.idx;
125 	u32 cnt, idx;
126 
127 	idx = rtw89_read32(rtwdev, addr_idx);
128 	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false);
129 
130 	return cnt;
131 }
132 
133 static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev,
134 				       struct sk_buff *skb)
135 {
136 	struct rtw89_pci_rx_info *rx_info;
137 	dma_addr_t dma;
138 
139 	rx_info = RTW89_PCI_RX_SKB_CB(skb);
140 	dma = rx_info->dma;
141 	dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
142 				DMA_FROM_DEVICE);
143 }
144 
145 static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
146 					  struct sk_buff *skb)
147 {
148 	struct rtw89_pci_rx_info *rx_info;
149 	dma_addr_t dma;
150 
151 	rx_info = RTW89_PCI_RX_SKB_CB(skb);
152 	dma = rx_info->dma;
153 	dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
154 				   DMA_FROM_DEVICE);
155 }
156 
157 static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
158 				      struct sk_buff *skb)
159 {
160 	struct rtw89_pci_rxbd_info *rxbd_info;
161 	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
162 
163 	rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data;
164 	rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS);
165 	rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS);
166 	rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE);
167 	rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG);
168 
169 	return 0;
170 }
171 
172 static bool
173 rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
174 		      struct sk_buff *new,
175 		      const struct sk_buff *skb, u32 offset,
176 		      const struct rtw89_pci_rx_info *rx_info,
177 		      const struct rtw89_rx_desc_info *desc_info)
178 {
179 	u32 copy_len = rx_info->len - offset;
180 
181 	if (unlikely(skb_tailroom(new) < copy_len)) {
182 		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
183 			    "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n",
184 			    rx_info->len, desc_info->pkt_size, offset, fs, ls);
185 		rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ",
186 			       skb->data, rx_info->len);
187 		/* length of a single segment skb is desc_info->pkt_size */
188 		if (fs && ls) {
189 			copy_len = desc_info->pkt_size;
190 		} else {
191 			rtw89_info(rtwdev, "drop rx data due to invalid length\n");
192 			return false;
193 		}
194 	}
195 
196 	skb_put_data(new, skb->data + offset, copy_len);
197 
198 	return true;
199 }
200 
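/* Process one RX buffer descriptor. The first segment (FS) carries the RX
 * descriptor, from which the destination skb is allocated; every segment's
 * payload is copied into it, and on the last segment (LS) the assembled skb
 * is handed to rtw89_core_rx(). Returns the number of RXBDs consumed.
 */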
201 static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
202 				       struct rtw89_pci_rx_ring *rx_ring)
203 {
204 	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
205 	struct rtw89_pci_rx_info *rx_info;
206 	struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc;
207 	struct sk_buff *new = rx_ring->diliver_skb;
208 	struct sk_buff *skb;
209 	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
210 	u32 offset;
211 	u32 cnt = 1;
212 	bool fs, ls;
213 	int ret;
214 
215 	skb = rx_ring->buf[bd_ring->wp];
216 	rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
217 
218 	ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
219 	if (ret) {
220 		rtw89_err(rtwdev, "failed to update RXBD info at %d: %d\n",
221 			  bd_ring->wp, ret);
222 		goto err_sync_device;
223 	}
224 
225 	rx_info = RTW89_PCI_RX_SKB_CB(skb);
226 	fs = rx_info->fs;
227 	ls = rx_info->ls;
228 
229 	if (fs) {
230 		if (new) {
231 			rtw89_err(rtwdev, "skb should not be ready before first segment start\n");
232 			goto err_sync_device;
233 		}
234 		if (desc_info->ready) {
235 			rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n");
236 			goto err_sync_device;
237 		}
238 
239 		rtw89_core_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size);
240 
241 		new = dev_alloc_skb(desc_info->pkt_size);
242 		if (!new)
243 			goto err_sync_device;
244 
245 		rx_ring->diliver_skb = new;
246 
247 		/* first segment has RX desc */
248 		offset = desc_info->offset;
249 		offset += desc_info->long_rxdesc ? sizeof(struct rtw89_rxdesc_long) :
250 			  sizeof(struct rtw89_rxdesc_short);
251 	} else {
252 		offset = sizeof(struct rtw89_pci_rxbd_info);
253 		if (!new) {
254 			rtw89_warn(rtwdev, "no skb started by a first segment\n");
255 			goto err_sync_device;
256 		}
257 	}
258 	if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info))
259 		goto err_sync_device;
260 	rtw89_pci_sync_skb_for_device(rtwdev, skb);
261 	rtw89_pci_rxbd_increase(rx_ring, 1);
262 
263 	if (!desc_info->ready) {
264 		rtw89_warn(rtwdev, "no rx desc information\n");
265 		goto err_free_resource;
266 	}
267 	if (ls) {
268 		rtw89_core_rx(rtwdev, desc_info, new);
269 		rx_ring->diliver_skb = NULL;
270 		desc_info->ready = false;
271 	}
272 
273 	return cnt;
274 
275 err_sync_device:
276 	rtw89_pci_sync_skb_for_device(rtwdev, skb);
277 	rtw89_pci_rxbd_increase(rx_ring, 1);
278 err_free_resource:
279 	if (new)
280 		dev_kfree_skb_any(new);
281 	rx_ring->diliver_skb = NULL;
282 	desc_info->ready = false;
283 
284 	return cnt;
285 }
286 
287 static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
288 				   struct rtw89_pci_rx_ring *rx_ring,
289 				   u32 cnt)
290 {
291 	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
292 	u32 rx_cnt;
293 
294 	while (cnt && rtwdev->napi_budget_countdown > 0) {
295 		rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring);
296 		if (!rx_cnt) {
297 			rtw89_err(rtwdev, "failed to deliver RXBD skb\n");
298 
299 			/* skip the remaining RXBD buffers */
300 			rtw89_pci_rxbd_increase(rx_ring, cnt);
301 			break;
302 		}
303 
304 		cnt -= rx_cnt;
305 	}
306 
307 	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
308 }
309 
310 static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
311 				  struct rtw89_pci *rtwpci, int budget)
312 {
313 	struct rtw89_pci_rx_ring *rx_ring;
314 	int countdown = rtwdev->napi_budget_countdown;
315 	u32 cnt;
316 
317 	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];
318 
319 	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
320 	if (!cnt)
321 		return 0;
322 
323 	cnt = min_t(u32, budget, cnt);
324 
325 	rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt);
326 
327 	/* Flushing pending SKBs may push the countdown to zero or below. */
328 	if (rtwdev->napi_budget_countdown <= 0)
329 		return budget;
330 
331 	return budget - countdown;
332 }
333 
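/* Translate a TX release report status into mac80211 TX info flags and the
 * per-ring counters, then complete the skb via ieee80211_tx_status_ni().
 */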
334 static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
335 				struct rtw89_pci_tx_ring *tx_ring,
336 				struct sk_buff *skb, u8 tx_status)
337 {
338 	struct ieee80211_tx_info *info;
339 
340 	info = IEEE80211_SKB_CB(skb);
341 	ieee80211_tx_info_clear_status(info);
342 
343 	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
344 		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
345 	if (tx_status == RTW89_TX_DONE) {
346 		info->flags |= IEEE80211_TX_STAT_ACK;
347 		tx_ring->tx_acked++;
348 	} else {
349 		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
350 			rtw89_debug(rtwdev, RTW89_DBG_FW,
351 				    "TX failed with status %x\n", tx_status);
352 		switch (tx_status) {
353 		case RTW89_TX_RETRY_LIMIT:
354 			tx_ring->tx_retry_lmt++;
355 			break;
356 		case RTW89_TX_LIFE_TIME:
357 			tx_ring->tx_life_time++;
358 			break;
359 		case RTW89_TX_MACID_DROP:
360 			tx_ring->tx_mac_id_drop++;
361 			break;
362 		default:
363 			rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status);
364 			break;
365 		}
366 	}
367 
368 	ieee80211_tx_status_ni(rtwdev->hw, skb);
369 }
370 
371 static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
372 {
373 	struct rtw89_pci_tx_wd *txwd;
374 	u32 cnt;
375 
376 	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
377 	while (cnt--) {
378 		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
379 		if (!txwd) {
380 			rtw89_warn(rtwdev, "No busy txwd pages available\n");
381 			break;
382 		}
383 
384 		list_del_init(&txwd->list);
385 	}
386 }
387 
388 static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev,
389 					struct rtw89_pci_tx_ring *tx_ring)
390 {
391 	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
392 	struct rtw89_pci_tx_wd *txwd;
393 	int i;
394 
395 	for (i = 0; i < wd_ring->page_num; i++) {
396 		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
397 		if (!txwd)
398 			break;
399 
400 		list_del_init(&txwd->list);
401 	}
402 }
403 
404 static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
405 				       struct rtw89_pci_tx_ring *tx_ring,
406 				       struct rtw89_pci_tx_wd *txwd, u16 seq,
407 				       u8 tx_status)
408 {
409 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
410 	struct rtw89_pci_tx_data *tx_data;
411 	struct sk_buff *skb, *tmp;
412 	u8 txch = tx_ring->txch;
413 
414 	if (!list_empty(&txwd->list)) {
415 		rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
416 			   txch, seq);
417 		return;
418 	}
419 
420 	/* currently, only one frame per txwd is supported */
421 	if (skb_queue_len(&txwd->queue) != 1) {
422 		rtw89_warn(rtwdev, "unexpected pending queue length, queue %d page %d\n",
423 			   txch, seq);
424 		return;
425 	}
426 
427 	skb_queue_walk_safe(&txwd->queue, skb, tmp) {
428 		skb_unlink(skb, &txwd->queue);
429 
430 		tx_data = RTW89_PCI_TX_SKB_CB(skb);
431 		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
432 				 DMA_TO_DEVICE);
433 
434 		rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status);
435 	}
436 
437 	rtw89_pci_enqueue_txwd(tx_ring, txwd);
438 }
439 
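/* Handle a single release report (RPP): map the queue selector back to a TX
 * channel, reclaim its completed TXBDs, and release the skb attached to the
 * reported TXWD page with the reported status.
 */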
440 static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
441 				  struct rtw89_pci_rpp_fmt *rpp)
442 {
443 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
444 	struct rtw89_pci_tx_ring *tx_ring;
445 	struct rtw89_pci_tx_wd_ring *wd_ring;
446 	struct rtw89_pci_tx_wd *txwd;
447 	u16 seq;
448 	u8 qsel, tx_status, txch;
449 
450 	seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
451 	qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
452 	tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
453 	txch = rtw89_core_get_ch_dma(rtwdev, qsel);
454 
455 	if (txch == RTW89_TXCH_CH12) {
456 		rtw89_warn(rtwdev, "should not receive fwcmd release report\n");
457 		return;
458 	}
459 
460 	tx_ring = &rtwpci->tx_rings[txch];
461 	rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
462 	wd_ring = &tx_ring->wd_ring;
463 	txwd = &wd_ring->pages[seq];
464 
465 	rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status);
466 }
467 
468 static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
469 					       struct rtw89_pci_tx_ring *tx_ring)
470 {
471 	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
472 	struct rtw89_pci_tx_wd *txwd;
473 	int i;
474 
475 	for (i = 0; i < wd_ring->page_num; i++) {
476 		txwd = &wd_ring->pages[i];
477 
478 		if (!list_empty(&txwd->list))
479 			continue;
480 
481 		rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP);
482 	}
483 }
484 
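/* An RPQ buffer holds an RX descriptor followed by an array of RPP entries
 * within a single FS+LS segment; walk the entries and release the TX skbs
 * they refer to. Returns the number of RXBDs consumed (0 on error).
 */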
485 static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
486 				     struct rtw89_pci_rx_ring *rx_ring,
487 				     u32 max_cnt)
488 {
489 	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
490 	struct rtw89_pci_rx_info *rx_info;
491 	struct rtw89_pci_rpp_fmt *rpp;
492 	struct rtw89_rx_desc_info desc_info = {};
493 	struct sk_buff *skb;
494 	u32 cnt = 0;
495 	u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt);
496 	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
497 	u32 offset;
498 	int ret;
499 
500 	skb = rx_ring->buf[bd_ring->wp];
501 	rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
502 
503 	ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
504 	if (ret) {
505 		rtw89_err(rtwdev, "failed to update RXBD info at %d: %d\n",
506 			  bd_ring->wp, ret);
507 		goto err_sync_device;
508 	}
509 
510 	rx_info = RTW89_PCI_RX_SKB_CB(skb);
511 	if (!rx_info->fs || !rx_info->ls) {
512 		rtw89_err(rtwdev, "cannot process RP frame without FS/LS set\n");
513 		return cnt;
514 	}
515 
516 	rtw89_core_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size);
517 
518 	/* first segment has RX desc */
519 	offset = desc_info.offset;
520 	offset += desc_info.long_rxdesc ? sizeof(struct rtw89_rxdesc_long) :
521 					  sizeof(struct rtw89_rxdesc_short);
522 	for (; offset + rpp_size <= rx_info->len; offset += rpp_size) {
523 		rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset);
524 		rtw89_pci_release_rpp(rtwdev, rpp);
525 	}
526 
527 	rtw89_pci_sync_skb_for_device(rtwdev, skb);
528 	rtw89_pci_rxbd_increase(rx_ring, 1);
529 	cnt++;
530 
531 	return cnt;
532 
533 err_sync_device:
534 	rtw89_pci_sync_skb_for_device(rtwdev, skb);
535 	return 0;
536 }
537 
538 static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
539 				 struct rtw89_pci_rx_ring *rx_ring,
540 				 u32 cnt)
541 {
542 	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
543 	u32 release_cnt;
544 
545 	while (cnt) {
546 		release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt);
547 		if (!release_cnt) {
548 			rtw89_err(rtwdev, "failed to release TX skbs\n");
549 
550 			/* skip the remaining RXBD buffers */
551 			rtw89_pci_rxbd_increase(rx_ring, cnt);
552 			break;
553 		}
554 
555 		cnt -= release_cnt;
556 	}
557 
558 	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
559 }
560 
561 static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
562 				  struct rtw89_pci *rtwpci, int budget)
563 {
564 	struct rtw89_pci_rx_ring *rx_ring;
565 	u32 cnt;
566 	int work_done;
567 
568 	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
569 
570 	spin_lock_bh(&rtwpci->trx_lock);
571 
572 	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
573 	if (cnt == 0)
574 		goto out_unlock;
575 
576 	rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
577 
578 out_unlock:
579 	spin_unlock_bh(&rtwpci->trx_lock);
580 
581 	/* always release all RPQ */
582 	work_done = min_t(int, cnt, budget);
583 	rtwdev->napi_budget_countdown -= work_done;
584 
585 	return work_done;
586 }
587 
588 static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
589 				      struct rtw89_pci *rtwpci)
590 {
591 	struct rtw89_pci_rx_ring *rx_ring;
592 	struct rtw89_pci_dma_ring *bd_ring;
593 	u32 reg_idx;
594 	u16 hw_idx, hw_idx_next, host_idx;
595 	int i;
596 
597 	for (i = 0; i < RTW89_RXCH_NUM; i++) {
598 		rx_ring = &rtwpci->rx_rings[i];
599 		bd_ring = &rx_ring->bd_ring;
600 
601 		reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
602 		hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
603 		host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
604 		hw_idx_next = (hw_idx + 1) % bd_ring->len;
605 
606 		if (hw_idx_next == host_idx)
607 			rtw89_warn(rtwdev, "%d RXD unavailable\n", i);
608 
609 		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
610 			    "%d RXD unavailable, idx=0x%08x, len=%d\n",
611 			    i, reg_idx, bd_ring->len);
612 	}
613 }
614 
615 static void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
616 				      struct rtw89_pci *rtwpci,
617 				      struct rtw89_pci_isrs *isrs)
618 {
619 	isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs;
620 	isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0];
621 	isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1];
622 
623 	rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
624 	rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]);
625 	rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]);
626 }
627 
628 static void rtw89_pci_clear_isr0(struct rtw89_dev *rtwdev, u32 isr00)
629 {
630 	/* write 1 to clear */
631 	rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isr00);
632 }
633 
634 static void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev,
635 				  struct rtw89_pci *rtwpci)
636 {
637 	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
638 	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
639 	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
640 }
641 
642 static void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev,
643 				   struct rtw89_pci *rtwpci)
644 {
645 	rtw89_write32(rtwdev, R_AX_HIMR0, 0);
646 	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
647 	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
648 }
649 
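/* Interrupt handling is split in two: the hard IRQ handler only masks the
 * IMR and wakes this threaded handler, which acknowledges the ISR bits,
 * handles RX-descriptor-unavailable and halt C2H events, and schedules NAPI.
 */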
650 static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
651 {
652 	struct rtw89_dev *rtwdev = dev;
653 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
654 	struct rtw89_pci_isrs isrs;
655 	unsigned long flags;
656 
657 	spin_lock_irqsave(&rtwpci->irq_lock, flags);
658 	rtw89_pci_recognize_intrs(rtwdev, rtwpci, &isrs);
659 	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
660 
661 	if (unlikely(isrs.isrs[0] & B_AX_RDU_INT))
662 		rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);
663 
664 	if (unlikely(isrs.halt_c2h_isrs & B_AX_HALT_C2H_INT_EN))
665 		rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));
666 
667 	if (likely(rtwpci->running)) {
668 		local_bh_disable();
669 		napi_schedule(&rtwdev->napi);
670 		local_bh_enable();
671 	}
672 
673 	return IRQ_HANDLED;
674 }
675 
676 static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
677 {
678 	struct rtw89_dev *rtwdev = dev;
679 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
680 	unsigned long flags;
681 	irqreturn_t irqret = IRQ_WAKE_THREAD;
682 
683 	spin_lock_irqsave(&rtwpci->irq_lock, flags);
684 
685 	/* An interrupt event that is already in flight can still fire even
686 	 * after pci_stop() has turned off the IMR.
687 	 */
688 	if (unlikely(!rtwpci->running)) {
689 		irqret = IRQ_HANDLED;
690 		goto exit;
691 	}
692 
693 	rtw89_pci_disable_intr(rtwdev, rtwpci);
694 exit:
695 	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
696 
697 	return irqret;
698 }
699 
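/* Helpers to build the per-channel DMA register address tables. The optional
 * trailing argument is a register name suffix (e.g. _V1); the _TYPE1 TX
 * variant applies it to every register, while the plain TX variant leaves
 * the NUM and IDX registers un-suffixed.
 */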
700 #define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
701 	[RTW89_TXCH_##txch] = { \
702 		.num = R_AX_##txch##_TXBD_NUM ##v, \
703 		.idx = R_AX_##txch##_TXBD_IDX ##v, \
704 		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
705 		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
706 		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
707 	}
708 
709 #define DEF_TXCHADDRS(info, txch, v...) \
710 	[RTW89_TXCH_##txch] = { \
711 		.num = R_AX_##txch##_TXBD_NUM, \
712 		.idx = R_AX_##txch##_TXBD_IDX, \
713 		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
714 		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
715 		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
716 	}
717 
718 #define DEF_RXCHADDRS(info, rxch, v...) \
719 	[RTW89_RXCH_##rxch] = { \
720 		.num = R_AX_##rxch##_RXBD_NUM ##v, \
721 		.idx = R_AX_##rxch##_RXBD_IDX ##v, \
722 		.desa_l = R_AX_##rxch##_RXBD_DESA_L ##v, \
723 		.desa_h = R_AX_##rxch##_RXBD_DESA_H ##v, \
724 	}
725 
726 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
727 	.tx = {
728 		DEF_TXCHADDRS(info, ACH0),
729 		DEF_TXCHADDRS(info, ACH1),
730 		DEF_TXCHADDRS(info, ACH2),
731 		DEF_TXCHADDRS(info, ACH3),
732 		DEF_TXCHADDRS(info, ACH4),
733 		DEF_TXCHADDRS(info, ACH5),
734 		DEF_TXCHADDRS(info, ACH6),
735 		DEF_TXCHADDRS(info, ACH7),
736 		DEF_TXCHADDRS(info, CH8),
737 		DEF_TXCHADDRS(info, CH9),
738 		DEF_TXCHADDRS_TYPE1(info, CH10),
739 		DEF_TXCHADDRS_TYPE1(info, CH11),
740 		DEF_TXCHADDRS(info, CH12),
741 	},
742 	.rx = {
743 		DEF_RXCHADDRS(info, RXQ),
744 		DEF_RXCHADDRS(info, RPQ),
745 	},
746 };
747 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);
748 
749 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
750 	.tx = {
751 		DEF_TXCHADDRS(info, ACH0, _V1),
752 		DEF_TXCHADDRS(info, ACH1, _V1),
753 		DEF_TXCHADDRS(info, ACH2, _V1),
754 		DEF_TXCHADDRS(info, ACH3, _V1),
755 		DEF_TXCHADDRS(info, ACH4, _V1),
756 		DEF_TXCHADDRS(info, ACH5, _V1),
757 		DEF_TXCHADDRS(info, ACH6, _V1),
758 		DEF_TXCHADDRS(info, ACH7, _V1),
759 		DEF_TXCHADDRS(info, CH8, _V1),
760 		DEF_TXCHADDRS(info, CH9, _V1),
761 		DEF_TXCHADDRS_TYPE1(info, CH10, _V1),
762 		DEF_TXCHADDRS_TYPE1(info, CH11, _V1),
763 		DEF_TXCHADDRS(info, CH12, _V1),
764 	},
765 	.rx = {
766 		DEF_RXCHADDRS(info, RXQ, _V1),
767 		DEF_RXCHADDRS(info, RPQ, _V1),
768 	},
769 };
770 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);
771 
772 #undef DEF_TXCHADDRS_TYPE1
773 #undef DEF_TXCHADDRS
774 #undef DEF_RXCHADDRS
775 
776 static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
777 				    enum rtw89_tx_channel txch,
778 				    const struct rtw89_pci_ch_dma_addr **addr)
779 {
780 	const struct rtw89_pci_info *info = rtwdev->pci_info;
781 
782 	if (txch >= RTW89_TXCH_NUM)
783 		return -EINVAL;
784 
785 	*addr = &info->dma_addr_set->tx[txch];
786 
787 	return 0;
788 }
789 
790 static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev,
791 				    enum rtw89_rx_channel rxch,
792 				    const struct rtw89_pci_ch_dma_addr **addr)
793 {
794 	const struct rtw89_pci_info *info = rtwdev->pci_info;
795 
796 	if (rxch >= RTW89_RXCH_NUM)
797 		return -EINVAL;
798 
799 	*addr = &info->dma_addr_set->rx[rxch];
800 
801 	return 0;
802 }
803 
804 static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
805 {
806 	struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;
807 
808 	/* one descriptor is reserved to tell a full ring from an empty one */
809 	if (bd_ring->rp > bd_ring->wp)
810 		return bd_ring->rp - bd_ring->wp - 1;
811 
812 	return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1;
813 }
814 
815 static
816 u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
817 {
818 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
819 	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
820 	u32 cnt;
821 
822 	spin_lock_bh(&rtwpci->trx_lock);
823 	rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci);
824 	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
825 	spin_unlock_bh(&rtwpci->trx_lock);
826 
827 	return cnt;
828 }
829 
830 static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
831 						     u8 txch)
832 {
833 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
834 	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
835 	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
836 	u32 bd_cnt, wd_cnt, min_cnt = 0;
837 	struct rtw89_pci_rx_ring *rx_ring;
838 	u32 cnt;
839 
840 	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
841 
842 	spin_lock_bh(&rtwpci->trx_lock);
843 	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
844 	wd_cnt = wd_ring->curr_num;
845 
846 	if (wd_cnt == 0 || bd_cnt == 0) {
847 		cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
848 		if (!cnt)
849 			goto out_unlock;
850 		rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
851 	}
852 
853 	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
854 	wd_cnt = wd_ring->curr_num;
855 	min_cnt = min(bd_cnt, wd_cnt);
856 	if (min_cnt == 0)
857 		rtw89_warn(rtwdev, "still no tx resource after reclaim\n");
858 
859 out_unlock:
860 	spin_unlock_bh(&rtwpci->trx_lock);
861 
862 	return min_cnt;
863 }
864 
865 static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
866 						   u8 txch)
867 {
868 	if (txch == RTW89_TXCH_CH12)
869 		return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev);
870 
871 	return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch);
872 }
873 
874 static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
875 {
876 	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
877 	u32 host_idx, addr;
878 
879 	addr = bd_ring->addr.idx;
880 	host_idx = bd_ring->wp;
881 	rtw89_write16(rtwdev, addr, host_idx);
882 }
883 
884 static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring,
885 					int n_txbd)
886 {
887 	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
888 	u32 host_idx, len;
889 
890 	len = bd_ring->len;
891 	host_idx = bd_ring->wp + n_txbd;
892 	host_idx = host_idx < len ? host_idx : host_idx - len;
893 
894 	bd_ring->wp = host_idx;
895 }
896 
897 static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
898 {
899 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
900 	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
901 
902 	spin_lock_bh(&rtwpci->trx_lock);
903 	__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
904 	spin_unlock_bh(&rtwpci->trx_lock);
905 }
906 
907 static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
908 {
909 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
910 	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
911 	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
912 	u32 cur_idx, cur_rp;
913 	u8 i;
914 
915 	/* Because the time taken by the I/O varies, it is hard to define a
916 	 * reasonable fixed total timeout for the read_poll_timeout* helpers.
917 	 * Instead, bound the number of polls and just use a for loop with
918 	 * udelay here.
919 	 */
920 	for (i = 0; i < 60; i++) {
921 		cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
922 		cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
923 		if (cur_rp == bd_ring->wp)
924 			return;
925 
926 		udelay(1);
927 	}
928 
929 	if (!drop)
930 		rtw89_info(rtwdev, "timed out flushing pci txch: %d\n", txch);
931 }
932 
933 static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs,
934 					bool drop)
935 {
936 	u8 i;
937 
938 	for (i = 0; i < RTW89_TXCH_NUM; i++) {
939 		/* It may be unnecessary to flush FWCMD queue. */
940 		if (i == RTW89_TXCH_CH12)
941 			continue;
942 
943 		if (txchs & BIT(i))
944 			__pci_flush_txch(rtwdev, i, drop);
945 	}
946 }
947 
948 static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
949 				       bool drop)
950 {
951 	__rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop);
952 }
953 
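/* Fill one TXWD page for a data frame: the WD body (plus the optional WD
 * info), a TX WP sequence entry, and a single 32-bit address-info entry
 * that points at the DMA-mapped skb.
 */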
954 static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
955 				 struct rtw89_pci_tx_ring *tx_ring,
956 				 struct rtw89_pci_tx_wd *txwd,
957 				 struct rtw89_core_tx_request *tx_req)
958 {
959 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
960 	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
961 	struct rtw89_txwd_body *txwd_body;
962 	struct rtw89_txwd_info *txwd_info;
963 	struct rtw89_pci_tx_wp_info *txwp_info;
964 	struct rtw89_pci_tx_addr_info_32 *txaddr_info;
965 	struct pci_dev *pdev = rtwpci->pdev;
966 	struct sk_buff *skb = tx_req->skb;
967 	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
968 	bool en_wd_info = desc_info->en_wd_info;
969 	u32 txwd_len;
970 	u32 txwp_len;
971 	u32 txaddr_info_len;
972 	dma_addr_t dma;
973 	int ret;
974 
975 	rtw89_core_fill_txdesc(rtwdev, desc_info, txwd->vaddr);
976 
977 	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
978 	if (dma_mapping_error(&pdev->dev, dma)) {
979 		rtw89_err(rtwdev, "failed to map skb dma data\n");
980 		ret = -EBUSY;
981 		goto err;
982 	}
983 
984 	tx_data->dma = dma;
985 
986 	txaddr_info_len = sizeof(*txaddr_info);
987 	txwp_len = sizeof(*txwp_info);
988 	txwd_len = sizeof(*txwd_body);
989 	txwd_len += en_wd_info ? sizeof(*txwd_info) : 0;
990 
991 	txwp_info = txwd->vaddr + txwd_len;
992 	txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID);
993 	txwp_info->seq1 = 0;
994 	txwp_info->seq2 = 0;
995 	txwp_info->seq3 = 0;
996 
997 	tx_ring->tx_cnt++;
998 	txaddr_info = txwd->vaddr + txwd_len + txwp_len;
999 	txaddr_info->length = cpu_to_le16(skb->len);
1000 	txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS |
1001 					  RTW89_PCI_ADDR_NUM(1));
1002 	txaddr_info->dma = cpu_to_le32(dma);
1003 
1004 	txwd->len = txwd_len + txwp_len + txaddr_info_len;
1005 
1006 	skb_queue_tail(&txwd->queue, skb);
1007 
1008 	return 0;
1009 
1010 err:
1011 	return ret;
1012 }
1013 
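/* Firmware commands do not go through TXWD pages: the WD body is pushed
 * into the skb head itself and the whole buffer is described directly by
 * the TXBD.
 */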
1014 static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
1015 				  struct rtw89_pci_tx_ring *tx_ring,
1016 				  struct rtw89_pci_tx_bd_32 *txbd,
1017 				  struct rtw89_core_tx_request *tx_req)
1018 {
1019 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1020 	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
1021 	struct rtw89_txwd_body *txwd_body;
1022 	struct pci_dev *pdev = rtwpci->pdev;
1023 	struct sk_buff *skb = tx_req->skb;
1024 	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
1025 	dma_addr_t dma;
1026 
1027 	txwd_body = (struct rtw89_txwd_body *)skb_push(skb, sizeof(*txwd_body));
1028 	memset(txwd_body, 0, sizeof(*txwd_body));
1029 	rtw89_core_fill_txdesc(rtwdev, desc_info, txwd_body);
1030 
1031 	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
1032 	if (dma_mapping_error(&pdev->dev, dma)) {
1033 		rtw89_err(rtwdev, "failed to map fwcmd dma data\n");
1034 		return -EBUSY;
1035 	}
1036 
1037 	tx_data->dma = dma;
1038 	txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
1039 	txbd->length = cpu_to_le16(skb->len);
1040 	txbd->dma = cpu_to_le32(tx_data->dma);
1041 	skb_queue_tail(&rtwpci->h2c_queue, skb);
1042 
1043 	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);
1044 
1045 	return 0;
1046 }
1047 
1048 static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
1049 				 struct rtw89_pci_tx_ring *tx_ring,
1050 				 struct rtw89_pci_tx_bd_32 *txbd,
1051 				 struct rtw89_core_tx_request *tx_req)
1052 {
1053 	struct rtw89_pci_tx_wd *txwd;
1054 	int ret;
1055 
1056 	/* The FWCMD queue doesn't use wd pages. Instead, it submits the CMD
1057 	 * buffer with the WD BODY only, so there is no need to check the free
1058 	 * pages of the wd ring here.
1059 	 */
1060 	if (tx_ring->txch == RTW89_TXCH_CH12)
1061 		return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req);
1062 
1063 	txwd = rtw89_pci_dequeue_txwd(tx_ring);
1064 	if (!txwd) {
1065 		rtw89_err(rtwdev, "no available TXWD\n");
1066 		ret = -ENOSPC;
1067 		goto err;
1068 	}
1069 
1070 	ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req);
1071 	if (ret) {
1072 		rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq);
1073 		goto err_enqueue_wd;
1074 	}
1075 
1076 	list_add_tail(&txwd->list, &tx_ring->busy_pages);
1077 
1078 	txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
1079 	txbd->length = cpu_to_le16(txwd->len);
1080 	txbd->dma = cpu_to_le32(txwd->paddr);
1081 
1082 	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);
1083 
1084 	return 0;
1085 
1086 err_enqueue_wd:
1087 	rtw89_pci_enqueue_txwd(tx_ring, txwd);
1088 err:
1089 	return ret;
1090 }
1091 
1092 static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req,
1093 			      u8 txch)
1094 {
1095 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1096 	struct rtw89_pci_tx_ring *tx_ring;
1097 	struct rtw89_pci_tx_bd_32 *txbd;
1098 	u32 n_avail_txbd;
1099 	int ret = 0;
1100 
1101 	/* check the tx type and dma channel for fw cmd queue */
1102 	if ((txch == RTW89_TXCH_CH12 ||
1103 	     tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) &&
1104 	    (txch != RTW89_TXCH_CH12 ||
1105 	     tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) {
1106 		rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n");
1107 		return -EINVAL;
1108 	}
1109 
1110 	tx_ring = &rtwpci->tx_rings[txch];
1111 	spin_lock_bh(&rtwpci->trx_lock);
1112 
1113 	n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
1114 	if (n_avail_txbd == 0) {
1115 		rtw89_err(rtwdev, "no available TXBD\n");
1116 		ret = -ENOSPC;
1117 		goto err_unlock;
1118 	}
1119 
1120 	txbd = rtw89_pci_get_next_txbd(tx_ring);
1121 	ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req);
1122 	if (ret) {
1123 		rtw89_err(rtwdev, "failed to submit TXBD\n");
1124 		goto err_unlock;
1125 	}
1126 
1127 	spin_unlock_bh(&rtwpci->trx_lock);
1128 	return 0;
1129 
1130 err_unlock:
1131 	spin_unlock_bh(&rtwpci->trx_lock);
1132 	return ret;
1133 }
1134 
1135 static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)
1136 {
1137 	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
1138 	int ret;
1139 
1140 	ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma);
1141 	if (ret) {
1142 		rtw89_err(rtwdev, "failed to TX on queue %d\n", desc_info->ch_dma);
1143 		return ret;
1144 	}
1145 
1146 	return 0;
1147 }
1148 
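/* Per-channel BD RAM layout: start cell index plus the maximum and minimum
 * number of cells, programmed into each channel's BDRAM_CTRL register by
 * rtw89_pci_reset_trx_rings().
 */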
1149 static const struct rtw89_pci_bd_ram bd_ram_table[RTW89_TXCH_NUM] = {
1150 	[RTW89_TXCH_ACH0] = {.start_idx = 0,  .max_num = 5, .min_num = 2},
1151 	[RTW89_TXCH_ACH1] = {.start_idx = 5,  .max_num = 5, .min_num = 2},
1152 	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
1153 	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
1154 	[RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2},
1155 	[RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2},
1156 	[RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2},
1157 	[RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2},
1158 	[RTW89_TXCH_CH8]  = {.start_idx = 40, .max_num = 5, .min_num = 1},
1159 	[RTW89_TXCH_CH9]  = {.start_idx = 45, .max_num = 5, .min_num = 1},
1160 	[RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1},
1161 	[RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1},
1162 	[RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1},
1163 };
1164 
1165 static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
1166 {
1167 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1168 	struct rtw89_pci_tx_ring *tx_ring;
1169 	struct rtw89_pci_rx_ring *rx_ring;
1170 	struct rtw89_pci_dma_ring *bd_ring;
1171 	const struct rtw89_pci_bd_ram *bd_ram;
1172 	u32 addr_num;
1173 	u32 addr_bdram;
1174 	u32 addr_desa_l;
1175 	u32 val32;
1176 	int i;
1177 
1178 	for (i = 0; i < RTW89_TXCH_NUM; i++) {
1179 		tx_ring = &rtwpci->tx_rings[i];
1180 		bd_ring = &tx_ring->bd_ring;
1181 		bd_ram = &bd_ram_table[i];
1182 		addr_num = bd_ring->addr.num;
1183 		addr_bdram = bd_ring->addr.bdram;
1184 		addr_desa_l = bd_ring->addr.desa_l;
1185 		bd_ring->wp = 0;
1186 		bd_ring->rp = 0;
1187 
1188 		val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
1189 			FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
1190 			FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num);
1191 
1192 		rtw89_write16(rtwdev, addr_num, bd_ring->len);
1193 		rtw89_write32(rtwdev, addr_bdram, val32);
1194 		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
1195 	}
1196 
1197 	for (i = 0; i < RTW89_RXCH_NUM; i++) {
1198 		rx_ring = &rtwpci->rx_rings[i];
1199 		bd_ring = &rx_ring->bd_ring;
1200 		addr_num = bd_ring->addr.num;
1201 		addr_desa_l = bd_ring->addr.desa_l;
1202 		bd_ring->wp = 0;
1203 		bd_ring->rp = 0;
1204 		rx_ring->diliver_skb = NULL;
1205 		rx_ring->diliver_desc.ready = false;
1206 
1207 		rtw89_write16(rtwdev, addr_num, bd_ring->len);
1208 		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
1209 	}
1210 }
1211 
1212 static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
1213 				      struct rtw89_pci_tx_ring *tx_ring)
1214 {
1215 	rtw89_pci_release_busy_txwd(rtwdev, tx_ring);
1216 	rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring);
1217 }
1218 
1219 static void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
1220 {
1221 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1222 	int txch;
1223 
1224 	rtw89_pci_reset_trx_rings(rtwdev);
1225 
1226 	spin_lock_bh(&rtwpci->trx_lock);
1227 	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
1228 		if (txch == RTW89_TXCH_CH12) {
1229 			rtw89_pci_release_fwcmd(rtwdev, rtwpci,
1230 						skb_queue_len(&rtwpci->h2c_queue), true);
1231 			continue;
1232 		}
1233 		rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]);
1234 	}
1235 	spin_unlock_bh(&rtwpci->trx_lock);
1236 }
1237 
1238 static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
1239 {
1240 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1241 	unsigned long flags;
1242 
1243 	rtw89_core_napi_start(rtwdev);
1244 
1245 	spin_lock_irqsave(&rtwpci->irq_lock, flags);
1246 	rtwpci->running = true;
1247 	rtw89_pci_enable_intr(rtwdev, rtwpci);
1248 	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
1249 
1250 	return 0;
1251 }
1252 
1253 static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
1254 {
1255 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1256 	struct pci_dev *pdev = rtwpci->pdev;
1257 	unsigned long flags;
1258 
1259 	spin_lock_irqsave(&rtwpci->irq_lock, flags);
1260 	rtwpci->running = false;
1261 	rtw89_pci_disable_intr(rtwdev, rtwpci);
1262 	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
1263 
1264 	synchronize_irq(pdev->irq);
1265 	rtw89_core_napi_stop(rtwdev);
1266 }
1267 
1268 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data);
1269 
1270 static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr)
1271 {
1272 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1273 	u32 val = readl(rtwpci->mmap + addr);
1274 	int count;
1275 
1276 	for (count = 0; ; count++) {
1277 		if (val != RTW89_R32_DEAD)
1278 			return val;
1279 		if (count >= MAC_REG_POOL_COUNT) {
1280 			rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val);
1281 			return RTW89_R32_DEAD;
1282 		}
1283 		rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN);
1284 		val = readl(rtwpci->mmap + addr);
1285 	}
1286 
1287 	return val;
1288 }
1289 
1290 static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr)
1291 {
1292 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1293 	u32 addr32, val32, shift;
1294 
1295 	if (!ACCESS_CMAC(addr))
1296 		return readb(rtwpci->mmap + addr);
1297 
1298 	addr32 = addr & ~0x3;
1299 	shift = (addr & 0x3) * 8;
1300 	val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
1301 	return val32 >> shift;
1302 }
1303 
1304 static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr)
1305 {
1306 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1307 	u32 addr32, val32, shift;
1308 
1309 	if (!ACCESS_CMAC(addr))
1310 		return readw(rtwpci->mmap + addr);
1311 
1312 	addr32 = addr & ~0x3;
1313 	shift = (addr & 0x3) * 8;
1314 	val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
1315 	return val32 >> shift;
1316 }
1317 
1318 static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr)
1319 {
1320 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1321 
1322 	if (!ACCESS_CMAC(addr))
1323 		return readl(rtwpci->mmap + addr);
1324 
1325 	return rtw89_pci_ops_read32_cmac(rtwdev, addr);
1326 }
1327 
1328 static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data)
1329 {
1330 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1331 
1332 	writeb(data, rtwpci->mmap + addr);
1333 }
1334 
1335 static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data)
1336 {
1337 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1338 
1339 	writew(data, rtwpci->mmap + addr);
1340 }
1341 
1342 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data)
1343 {
1344 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1345 
1346 	writel(data, rtwpci->mmap + addr);
1347 }
1348 
1349 static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
1350 {
1351 	if (enable) {
1352 		rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
1353 				  B_AX_TXHCI_EN | B_AX_RXHCI_EN);
1354 		rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1,
1355 				  B_AX_STOP_PCIEIO);
1356 	} else {
1357 		rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1,
1358 				  B_AX_STOP_PCIEIO);
1359 		rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
1360 				  B_AX_TXHCI_EN | B_AX_RXHCI_EN);
1361 	}
1362 }
1363 
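/* MDIO access to the PCIe PHY: select the register address and the PHY page
 * that matches the link speed, kick the read/write flag, and poll until the
 * hardware clears it.
 */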
1364 static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit)
1365 {
1366 	u16 val;
1367 
1368 	rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F);
1369 
1370 	val = rtw89_read16(rtwdev, R_AX_MDIO_CFG);
1371 	switch (speed) {
1372 	case PCIE_PHY_GEN1:
1373 		if (addr < 0x20)
1374 			val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK);
1375 		else
1376 			val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK);
1377 		break;
1378 	case PCIE_PHY_GEN2:
1379 		if (addr < 0x20)
1380 			val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK);
1381 		else
1382 			val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK);
1383 		break;
1384 	default:
1385 		rtw89_err(rtwdev, "[ERR]invalid PHY speed %d!\n", speed);
1386 		return -EINVAL;
1387 	}
1388 	rtw89_write16(rtwdev, R_AX_MDIO_CFG, val);
1389 	rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit);
1390 
1391 	return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000,
1392 				 false, rtwdev, R_AX_MDIO_CFG);
1393 }
1394 
1395 static int
1396 rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val)
1397 {
1398 	int ret;
1399 
1400 	ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG);
1401 	if (ret) {
1402 		rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret);
1403 		return ret;
1404 	}
1405 	*val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA);
1406 
1407 	return 0;
1408 }
1409 
1410 static int
1411 rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed)
1412 {
1413 	int ret;
1414 
1415 	rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data);
1416 	ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG);
1417 	if (ret) {
1418 		rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret);
1419 		return ret;
1420 	}
1421 
1422 	return 0;
1423 }
1424 
1425 static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
1426 {
1427 	int ret;
1428 	u16 val;
1429 
1430 	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
1431 	if (ret)
1432 		return ret;
1433 	ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed);
1434 	if (ret)
1435 		return ret;
1436 
1437 	return 0;
1438 }
1439 
1440 static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
1441 {
1442 	int ret;
1443 	u16 val;
1444 
1445 	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
1446 	if (ret)
1447 		return ret;
1448 	ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed);
1449 	if (ret)
1450 		return ret;
1451 
1452 	return 0;
1453 }
1454 
1455 static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
1456 				       u8 data)
1457 {
1458 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1459 	struct pci_dev *pdev = rtwpci->pdev;
1460 
1461 	return pci_write_config_byte(pdev, addr, data);
1462 }
1463 
1464 static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
1465 				      u8 *value)
1466 {
1467 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1468 	struct pci_dev *pdev = rtwpci->pdev;
1469 
1470 	return pci_read_config_byte(pdev, addr, value);
1471 }
1472 
1473 static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr,
1474 				     u8 bit)
1475 {
1476 	u8 value;
1477 	int ret;
1478 
1479 	ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
1480 	if (ret)
1481 		return ret;
1482 
1483 	value |= bit;
1484 	ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
1485 
1486 	return ret;
1487 }
1488 
1489 static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr,
1490 				     u8 bit)
1491 {
1492 	u8 value;
1493 	int ret;
1494 
1495 	ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
1496 	if (ret)
1497 		return ret;
1498 
1499 	value &= ~bit;
1500 	ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
1501 
1502 	return ret;
1503 }
1504 
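/* Measure the reference clock calibration target: pulse the calibration
 * enable bit, wait 300us, read back the 12-bit counter, then disable the
 * counter again. A value of 0 or 0xFFF is treated as an invalid measurement.
 */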
1505 static int
1506 __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate)
1507 {
1508 	u16 val, tar;
1509 	int ret;
1510 
1511 	/* Enable counter */
1512 	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val);
1513 	if (ret)
1514 		return ret;
1515 	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
1516 				 phy_rate);
1517 	if (ret)
1518 		return ret;
1519 	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN,
1520 				 phy_rate);
1521 	if (ret)
1522 		return ret;
1523 
1524 	fsleep(300);
1525 
1526 	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar);
1527 	if (ret)
1528 		return ret;
1529 	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
1530 				 phy_rate);
1531 	if (ret)
1532 		return ret;
1533 
1534 	tar = tar & 0x0FFF;
1535 	if (tar == 0 || tar == 0x0FFF) {
1536 		rtw89_err(rtwdev, "[ERR]Get target failed.\n");
1537 		return -EINVAL;
1538 	}
1539 
1540 	*target = tar;
1541 
1542 	return 0;
1543 }
1544 
1545 static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
1546 {
1547 	enum rtw89_pcie_phy phy_rate;
1548 	u16 val16, mgn_set, div_set, tar;
1549 	u8 val8, bdr_ori;
1550 	bool l1_flag = false;
1551 	int ret = 0;
1552 
1553 	if ((rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV) ||
1554 	    rtwdev->chip->chip_id == RTL8852C)
1555 		return 0;
1556 
1557 	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
1558 	if (ret) {
1559 		rtw89_err(rtwdev, "[ERR]pci config read %X\n",
1560 			  RTW89_PCIE_PHY_RATE);
1561 		return ret;
1562 	}
1563 
1564 	if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) {
1565 		phy_rate = PCIE_PHY_GEN1;
1566 	} else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) {
1567 		phy_rate = PCIE_PHY_GEN2;
1568 	} else {
1569 		rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not supported\n", val8);
1570 		return -EOPNOTSUPP;
1571 	}
1572 	/* Disable L1BD */
1573 	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
1574 	if (ret) {
1575 		rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL);
1576 		return ret;
1577 	}
1578 
1579 	if (bdr_ori & RTW89_PCIE_BIT_L1) {
1580 		ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
1581 						  bdr_ori & ~RTW89_PCIE_BIT_L1);
1582 		if (ret) {
1583 			rtw89_err(rtwdev, "[ERR]pci config write %X\n",
1584 				  RTW89_PCIE_L1_CTRL);
1585 			return ret;
1586 		}
1587 		l1_flag = true;
1588 	}
1589 
1590 	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
1591 	if (ret) {
1592 		rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
1593 		goto end;
1594 	}
1595 
1596 	if (val16 & B_AX_CALIB_EN) {
1597 		ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1,
1598 					 val16 & ~B_AX_CALIB_EN, phy_rate);
1599 		if (ret) {
1600 			rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
1601 			goto end;
1602 		}
1603 	}
1604 
1605 	if (!autook_en)
1606 		goto end;
1607 	/* Set div */
1608 	ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate);
1609 	if (ret) {
1610 		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
1611 		goto end;
1612 	}
1613 
1614 	/* Obtain div and margin */
1615 	ret = __get_target(rtwdev, &tar, phy_rate);
1616 	if (ret) {
1617 		rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret);
1618 		goto end;
1619 	}
1620 
1621 	mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar;
1622 
1623 	if (mgn_set >= 128) {
1624 		div_set = 0x0003;
1625 		mgn_set = 0x000F;
1626 	} else if (mgn_set >= 64) {
1627 		div_set = 0x0003;
1628 		mgn_set >>= 3;
1629 	} else if (mgn_set >= 32) {
1630 		div_set = 0x0002;
1631 		mgn_set >>= 2;
1632 	} else if (mgn_set >= 16) {
1633 		div_set = 0x0001;
1634 		mgn_set >>= 1;
1635 	} else if (mgn_set == 0) {
1636 		rtw89_err(rtwdev, "[ERR]cal mgn is 0, tar = %d\n", tar);
1637 		goto end;
1638 	} else {
1639 		div_set = 0x0000;
1640 	}
1641 
1642 	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
1643 	if (ret) {
1644 		rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
1645 		goto end;
1646 	}
1647 
1648 	val16 |= u16_encode_bits(div_set, B_AX_DIV);
1649 
1650 	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate);
1651 	if (ret) {
1652 		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
1653 		goto end;
1654 	}
1655 
1656 	ret = __get_target(rtwdev, &tar, phy_rate);
1657 	if (ret) {
1658 		rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret);
1659 		goto end;
1660 	}
1661 
1662 	rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n",
1663 		    tar, div_set, mgn_set);
1664 	ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1,
1665 				 (tar & 0x0FFF) | (mgn_set << 12), phy_rate);
1666 	if (ret) {
1667 		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1);
1668 		goto end;
1669 	}
1670 
1671 	/* Enable function */
1672 	ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate);
1673 	if (ret) {
1674 		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
1675 		goto end;
1676 	}
1677 
1678 	/* CLK delay = 0 */
1679 	ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
1680 					  PCIE_CLKDLY_HW_0);
1681 
1682 end:
1683 	/* Restore L1BD to its original setting */
1684 	if (l1_flag) {
1685 		ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
1686 						  bdr_ori);
1687 		if (ret) {
1688 			rtw89_err(rtwdev, "[ERR]pci config write %X\n",
1689 				  RTW89_PCIE_L1_CTRL);
1690 			return ret;
1691 		}
1692 	}
1693 
1694 	return ret;
1695 }
1696 
1697 static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev)
1698 {
1699 	int ret;
1700 
1701 	if (rtwdev->chip->chip_id != RTL8852A)
1702 		return 0;
1703 
1704 	ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
1705 				     PCIE_PHY_GEN1);
1706 	if (ret)
1707 		return ret;
1708 	ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
1709 				     PCIE_PHY_GEN2);
1710 	if (ret)
1711 		return ret;
1712 
1713 	return 0;
1714 }
1715 
1716 static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev)
1717 {
1718 	rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE);
1719 }
1720 
1721 static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev)
1722 {
1723 	if (rtwdev->chip->chip_id == RTL8852C)
1724 		return;
1725 
1726 	rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN);
1727 }
1728 
1729 static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev)
1730 {
1731 	int ret;
1732 
1733 	if (rtwdev->chip->chip_id == RTL8852C)
1734 		return 0;
1735 
1736 	ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
1737 				     PCIE_PHY_GEN1);
1738 	if (ret)
1739 		return ret;
1740 
1741 	ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
1742 				     PCIE_PHY_GEN2);
1743 	if (ret)
1744 		return ret;
1745 
1746 	return 0;
1747 }
1748 
1749 static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev)
1750 {
1751 	if (rtwdev->chip->chip_id != RTL8852A)
1752 		return;
1753 
1754 	rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN);
1755 }
1756 
1757 static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev)
1758 {
1759 	if (rtwdev->chip->chip_id != RTL8852A)
1760 		return;
1761 
1762 	rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
1763 			  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
1764 	rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
1765 			  B_AX_PCIE_DIS_WLSUS_AFT_PDN);
1766 }
1767 
1768 static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev)
1769 {
1770 	if (rtwdev->chip->chip_id == RTL8852C)
1771 		return;
1772 
1773 	rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL,
1774 			  B_AX_SIC_EN_FORCE_CLKREQ);
1775 }
1776 
1777 static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev)
1778 {
1779 	if (rtwdev->chip->chip_id == RTL8852C)
1780 		return;
1781 
1782 	rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL,
1783 			  B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG);
1784 
1785 	if (rtwdev->chip->chip_id == RTL8852A)
1786 		rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL,
1787 				  B_AX_EN_CHKDSC_NO_RX_STUCK);
1788 }
1789 
1790 static void rtw89_pci_clr_idx_all(struct rtw89_dev *rtwdev)
1791 {
1792 	u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX |
1793 		  B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX |
1794 		  B_AX_CLR_CH12_IDX;
1795 
1796 	if (rtwdev->chip->chip_id == RTL8852A)
1797 		val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX |
1798 		       B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX;
1799 	/* clear DMA indexes */
1800 	rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val);
1801 	if (rtwdev->chip->chip_id == RTL8852A)
1802 		rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR2,
1803 				  B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX);
1804 	rtw89_write32_set(rtwdev, R_AX_RXBD_RWPTR_CLR,
1805 			  B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX);
1806 }
1807 
1808 static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev)
1809 {
1810 	if (rtwdev->chip->chip_id == RTL8852A) {
1811 		/* ltr sw trigger */
1812 		rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE);
1813 	}
1814 	rtw89_pci_ctrl_dma_all(rtwdev, false);
1815 	rtw89_pci_clr_idx_all(rtwdev);
1816 
1817 	return 0;
1818 }
1819 
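/* PCIe setup done before MAC init: apply the PHY quirks, run the reference
 * clock calibration, stop all DMA, wait for the bus to go idle, reset the
 * TRX rings and BD RAM, and finally enable only the FWCMD channel so that
 * firmware can be downloaded.
 */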
1820 static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
1821 {
1822 	u32 dma_busy;
1823 	u32 check;
1824 	u32 lbc;
1825 	int ret;
1826 
1827 	rtw89_pci_rxdma_prefth(rtwdev);
1828 	rtw89_pci_l1off_pwroff(rtwdev);
1829 	rtw89_pci_deglitch_setting(rtwdev);
1830 	ret = rtw89_pci_l2_rxen_lat(rtwdev);
1831 	if (ret) {
1832 		rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret);
1833 		return ret;
1834 	}
1835 
1836 	rtw89_pci_aphy_pwrcut(rtwdev);
1837 	rtw89_pci_hci_ldo(rtwdev);
1838 
1839 	ret = rtw89_pci_auto_refclk_cal(rtwdev, false);
1840 	if (ret) {
1841 		rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret);
1842 		return ret;
1843 	}
1844 
1845 	rtw89_pci_set_sic(rtwdev);
1846 	rtw89_pci_set_dbg(rtwdev);
1847 
1848 	if (rtwdev->chip->chip_id == RTL8852A)
1849 		rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
1850 				  B_AX_PCIE_AUXCLK_GATE);
1851 
1852 	lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG);
1853 	lbc = u32_replace_bits(lbc, RTW89_MAC_LBC_TMR_128US, B_AX_LBC_TIMER);
1854 	lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN;
1855 	rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc);
1856 
1857 	rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
1858 			  B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG);
1859 	rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_STOP_WPDMA);
1860 
1861 	/* stop DMA activities */
1862 	rtw89_pci_ctrl_dma_all(rtwdev, false);
1863 
1864 	/* check PCI at idle state */
1865 	check = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY;
1866 	ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
1867 				100, 3000, false, rtwdev, R_AX_PCIE_DMA_BUSY1);
1868 	if (ret) {
1869 		rtw89_err(rtwdev, "failed to poll io busy\n");
1870 		return ret;
1871 	}
1872 
1873 	rtw89_pci_clr_idx_all(rtwdev);
1874 
1875 	/* configure TX/RX op modes */
1876 	rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE |
1877 						       B_AX_RX_TRUNC_MODE);
1878 	rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RXBD_MODE);
1879 	rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, 7);
1880 	rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, 3);
1881 	/* multi-tag mode */
1882 	rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_LATENCY_CONTROL);
1883 	rtw89_write32_mask(rtwdev, R_AX_PCIE_EXP_CTRL, B_AX_MAX_TAG_NUM,
1884 			   RTW89_MAC_TAG_NUM_8);
1885 	rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE,
1886 			   RTW89_MAC_WD_DMA_INTVL_256NS);
1887 	rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT,
1888 			   RTW89_MAC_WD_DMA_INTVL_256NS);
1889 
1890 	/* fill TRX BD indexes */
1891 	rtw89_pci_ops_reset(rtwdev);
1892 
1893 	ret = rtw89_pci_rst_bdram_pcie(rtwdev);
1894 	if (ret) {
1895 		rtw89_warn(rtwdev, "reset bdram busy\n");
1896 		return ret;
1897 	}
1898 
1899 	/* enable FW CMD queue to download firmware */
1900 	rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_ALL);
1901 	rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_STOP_CH12);
1902 	rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP2, B_AX_TX_STOP2_ALL);
1903 
1904 	/* start DMA activities */
1905 	rtw89_pci_ctrl_dma_all(rtwdev, true);
1906 
1907 	return 0;
1908 }
1909 
1910 static int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev)
1911 {
1912 	u32 val;
1913 
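	/* refuse to touch LTR if any control/latency register reads back an
	 * error value
	 */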
1914 	val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
1915 	if (rtw89_pci_ltr_is_err_reg_val(val))
1916 		return -EINVAL;
1917 	val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
1918 	if (rtw89_pci_ltr_is_err_reg_val(val))
1919 		return -EINVAL;
1920 	val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY);
1921 	if (rtw89_pci_ltr_is_err_reg_val(val))
1922 		return -EINVAL;
1923 	val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY);
1924 	if (rtw89_pci_ltr_is_err_reg_val(val))
1925 		return -EINVAL;
1926 
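	/* enable LTR with the HW-controlled mode disabled: 500us space index,
	 * 800us idle timer, fixed RX thresholds and idle/active latencies
	 */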
1927 	rtw89_write32_clr(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN);
1928 	rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_EN);
1929 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK,
1930 			   PCI_LTR_SPC_500US);
1931 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
1932 			   PCI_LTR_IDLE_TIMER_800US);
1933 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
1934 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
1935 	rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x88e088e0);
1936 	rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b);
1937 
1938 	return 0;
1939 }
1940 
1941 static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev)
1942 {
1943 	int ret;
1944 
1945 	ret = rtw89_pci_ltr_set(rtwdev);
1946 	if (ret) {
1947 		rtw89_err(rtwdev, "pci ltr set fail\n");
1948 		return ret;
1949 	}
1950 	if (rtwdev->chip->chip_id == RTL8852A) {
1951 		/* ltr sw trigger */
1952 		rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT);
1953 	}
1954 	/* ADDR info 8-byte mode */
1955 	rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
1956 			  B_AX_HOST_ADDR_INFO_8B_SEL);
1957 	rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
1958 
1959 	/* enable DMA for all queues */
1960 	rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_ALL);
1961 	rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP2, B_AX_TX_STOP2_ALL);
1962 
1963 	/* Release PCI IO */
1964 	rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1,
1965 			  B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO);
1966 
1967 	return 0;
1968 }
1969 
1970 static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev,
1971 				  struct pci_dev *pdev)
1972 {
1973 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1974 	int ret;
1975 
1976 	ret = pci_enable_device(pdev);
1977 	if (ret) {
1978 		rtw89_err(rtwdev, "failed to enable pci device\n");
1979 		return ret;
1980 	}
1981 
1982 	pci_set_master(pdev);
1983 	pci_set_drvdata(pdev, rtwdev->hw);
1984 
1985 	rtwpci->pdev = pdev;
1986 
1987 	return 0;
1988 }
1989 
1990 static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev,
1991 				     struct pci_dev *pdev)
1992 {
1993 	pci_clear_master(pdev);
1994 	pci_disable_device(pdev);
1995 }
1996 
1997 static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
1998 				   struct pci_dev *pdev)
1999 {
2000 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2001 	unsigned long resource_len;
2002 	u8 bar_id = 2;
2003 	int ret;
2004 
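	/* map the register space behind BAR 2 and restrict DMA to 32-bit */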
2005 	ret = pci_request_regions(pdev, KBUILD_MODNAME);
2006 	if (ret) {
2007 		rtw89_err(rtwdev, "failed to request pci regions\n");
2008 		goto err;
2009 	}
2010 
2011 	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2012 	if (ret) {
2013 		rtw89_err(rtwdev, "failed to set dma mask to 32-bit\n");
2014 		goto err_release_regions;
2015 	}
2016 
2017 	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
2018 	if (ret) {
2019 		rtw89_err(rtwdev, "failed to set consistent dma mask to 32-bit\n");
2020 		goto err_release_regions;
2021 	}
2022 
2023 	resource_len = pci_resource_len(pdev, bar_id);
2024 	rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len);
2025 	if (!rtwpci->mmap) {
2026 		rtw89_err(rtwdev, "failed to map pci io\n");
2027 		ret = -EIO;
2028 		goto err_release_regions;
2029 	}
2030 
2031 	return 0;
2032 
2033 err_release_regions:
2034 	pci_release_regions(pdev);
2035 err:
2036 	return ret;
2037 }
2038 
2039 static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev,
2040 				    struct pci_dev *pdev)
2041 {
2042 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2043 
2044 	if (rtwpci->mmap) {
2045 		pci_iounmap(pdev, rtwpci->mmap);
2046 		pci_release_regions(pdev);
2047 	}
2048 }
2049 
2050 static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev,
2051 				      struct pci_dev *pdev,
2052 				      struct rtw89_pci_tx_ring *tx_ring)
2053 {
2054 	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
2055 	u8 *head = wd_ring->head;
2056 	dma_addr_t dma = wd_ring->dma;
2057 	u32 page_size = wd_ring->page_size;
2058 	u32 page_num = wd_ring->page_num;
2059 	u32 ring_sz = page_size * page_num;
2060 
2061 	dma_free_coherent(&pdev->dev, ring_sz, head, dma);
2062 	wd_ring->head = NULL;
2063 }
2064 
2065 static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev,
2066 				   struct pci_dev *pdev,
2067 				   struct rtw89_pci_tx_ring *tx_ring)
2068 {
2069 	int ring_sz;
2070 	u8 *head;
2071 	dma_addr_t dma;
2072 
2073 	head = tx_ring->bd_ring.head;
2074 	dma = tx_ring->bd_ring.dma;
2075 	ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len;
2076 	dma_free_coherent(&pdev->dev, ring_sz, head, dma);
2077 
2078 	tx_ring->bd_ring.head = NULL;
2079 }
2080 
2081 static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev,
2082 				    struct pci_dev *pdev)
2083 {
2084 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2085 	struct rtw89_pci_tx_ring *tx_ring;
2086 	int i;
2087 
2088 	for (i = 0; i < RTW89_TXCH_NUM; i++) {
2089 		tx_ring = &rtwpci->tx_rings[i];
2090 		rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
2091 		rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
2092 	}
2093 }
2094 
2095 static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev,
2096 				   struct pci_dev *pdev,
2097 				   struct rtw89_pci_rx_ring *rx_ring)
2098 {
2099 	struct rtw89_pci_rx_info *rx_info;
2100 	struct sk_buff *skb;
2101 	dma_addr_t dma;
2102 	u32 buf_sz;
2103 	u8 *head;
2104 	int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len;
2105 	int i;
2106 
2107 	buf_sz = rx_ring->buf_sz;
2108 	for (i = 0; i < rx_ring->bd_ring.len; i++) {
2109 		skb = rx_ring->buf[i];
2110 		if (!skb)
2111 			continue;
2112 
2113 		rx_info = RTW89_PCI_RX_SKB_CB(skb);
2114 		dma = rx_info->dma;
2115 		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
2116 		dev_kfree_skb(skb);
2117 		rx_ring->buf[i] = NULL;
2118 	}
2119 
2120 	head = rx_ring->bd_ring.head;
2121 	dma = rx_ring->bd_ring.dma;
2122 	dma_free_coherent(&pdev->dev, ring_sz, head, dma);
2123 
2124 	rx_ring->bd_ring.head = NULL;
2125 }
2126 
2127 static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev,
2128 				    struct pci_dev *pdev)
2129 {
2130 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2131 	struct rtw89_pci_rx_ring *rx_ring;
2132 	int i;
2133 
2134 	for (i = 0; i < RTW89_RXCH_NUM; i++) {
2135 		rx_ring = &rtwpci->rx_rings[i];
2136 		rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
2137 	}
2138 }
2139 
2140 static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev,
2141 				     struct pci_dev *pdev)
2142 {
2143 	rtw89_pci_free_rx_rings(rtwdev, pdev);
2144 	rtw89_pci_free_tx_rings(rtwdev, pdev);
2145 }
2146 
2147 static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev,
2148 				struct rtw89_pci_rx_ring *rx_ring,
2149 				struct sk_buff *skb, int buf_sz, u32 idx)
2150 {
2151 	struct rtw89_pci_rx_info *rx_info;
2152 	struct rtw89_pci_rx_bd_32 *rx_bd;
2153 	dma_addr_t dma;
2154 
2155 	if (!skb)
2156 		return -EINVAL;
2157 
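	/* map the RX buffer for DMA and publish its address and size in the
	 * RX buffer descriptor
	 */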
2158 	dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
2159 	if (dma_mapping_error(&pdev->dev, dma))
2160 		return -EBUSY;
2161 
2162 	rx_info = RTW89_PCI_RX_SKB_CB(skb);
2163 	rx_bd = RTW89_PCI_RX_BD(rx_ring, idx);
2164 
2165 	memset(rx_bd, 0, sizeof(*rx_bd));
2166 	rx_bd->buf_size = cpu_to_le16(buf_sz);
2167 	rx_bd->dma = cpu_to_le32(dma);
2168 	rx_info->dma = dma;
2169 
2170 	return 0;
2171 }
2172 
2173 static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev,
2174 				      struct pci_dev *pdev,
2175 				      struct rtw89_pci_tx_ring *tx_ring,
2176 				      enum rtw89_tx_channel txch)
2177 {
2178 	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
2179 	struct rtw89_pci_tx_wd *txwd;
2180 	dma_addr_t dma;
2181 	dma_addr_t cur_paddr;
2182 	u8 *head;
2183 	u8 *cur_vaddr;
2184 	u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE;
2185 	u32 page_num = RTW89_PCI_TXWD_NUM_MAX;
2186 	u32 ring_sz = page_size * page_num;
2187 	u32 page_offset;
2188 	int i;
2189 
2190 	/* FWCMD queue doesn't use txwd as pages */
2191 	if (txch == RTW89_TXCH_CH12)
2192 		return 0;
2193 
2194 	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
2195 	if (!head)
2196 		return -ENOMEM;
2197 
2198 	INIT_LIST_HEAD(&wd_ring->free_pages);
2199 	wd_ring->head = head;
2200 	wd_ring->dma = dma;
2201 	wd_ring->page_size = page_size;
2202 	wd_ring->page_num = page_num;
2203 
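	/* carve the coherent buffer into fixed-size TXWD pages and enqueue
	 * each page for later TX use
	 */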
2204 	page_offset = 0;
2205 	for (i = 0; i < page_num; i++) {
2206 		txwd = &wd_ring->pages[i];
2207 		cur_paddr = dma + page_offset;
2208 		cur_vaddr = head + page_offset;
2209 
2210 		skb_queue_head_init(&txwd->queue);
2211 		INIT_LIST_HEAD(&txwd->list);
2212 		txwd->paddr = cur_paddr;
2213 		txwd->vaddr = cur_vaddr;
2214 		txwd->len = page_size;
2215 		txwd->seq = i;
2216 		rtw89_pci_enqueue_txwd(tx_ring, txwd);
2217 
2218 		page_offset += page_size;
2219 	}
2220 
2221 	return 0;
2222 }
2223 
2224 static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
2225 				   struct pci_dev *pdev,
2226 				   struct rtw89_pci_tx_ring *tx_ring,
2227 				   u32 desc_size, u32 len,
2228 				   enum rtw89_tx_channel txch)
2229 {
2230 	const struct rtw89_pci_ch_dma_addr *txch_addr;
2231 	int ring_sz = desc_size * len;
2232 	u8 *head;
2233 	dma_addr_t dma;
2234 	int ret;
2235 
2236 	ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch);
2237 	if (ret) {
2238 		rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch);
2239 		goto err;
2240 	}
2241 
2242 	ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr);
2243 	if (ret) {
		rtw89_err(rtwdev, "failed to get address of txch %d\n", txch);
2245 		goto err_free_wd_ring;
2246 	}
2247 
2248 	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
2249 	if (!head) {
2250 		ret = -ENOMEM;
2251 		goto err_free_wd_ring;
2252 	}
2253 
2254 	INIT_LIST_HEAD(&tx_ring->busy_pages);
2255 	tx_ring->bd_ring.head = head;
2256 	tx_ring->bd_ring.dma = dma;
2257 	tx_ring->bd_ring.len = len;
2258 	tx_ring->bd_ring.desc_size = desc_size;
2259 	tx_ring->bd_ring.addr = *txch_addr;
2260 	tx_ring->bd_ring.wp = 0;
2261 	tx_ring->bd_ring.rp = 0;
2262 	tx_ring->txch = txch;
2263 
2264 	return 0;
2265 
2266 err_free_wd_ring:
2267 	rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
2268 err:
2269 	return ret;
2270 }
2271 
2272 static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
2273 				    struct pci_dev *pdev)
2274 {
2275 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2276 	struct rtw89_pci_tx_ring *tx_ring;
2277 	u32 desc_size;
2278 	u32 len;
2279 	u32 i, tx_allocated;
2280 	int ret;
2281 
2282 	for (i = 0; i < RTW89_TXCH_NUM; i++) {
2283 		tx_ring = &rtwpci->tx_rings[i];
2284 		desc_size = sizeof(struct rtw89_pci_tx_bd_32);
2285 		len = RTW89_PCI_TXBD_NUM_MAX;
2286 		ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring,
2287 					      desc_size, len, i);
2288 		if (ret) {
2289 			rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i);
2290 			goto err_free;
2291 		}
2292 	}
2293 
2294 	return 0;
2295 
2296 err_free:
2297 	tx_allocated = i;
2298 	for (i = 0; i < tx_allocated; i++) {
2299 		tx_ring = &rtwpci->tx_rings[i];
2300 		rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
2301 	}
2302 
2303 	return ret;
2304 }
2305 
2306 static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
2307 				   struct pci_dev *pdev,
2308 				   struct rtw89_pci_rx_ring *rx_ring,
2309 				   u32 desc_size, u32 len, u32 rxch)
2310 {
2311 	const struct rtw89_pci_ch_dma_addr *rxch_addr;
2312 	struct sk_buff *skb;
2313 	u8 *head;
2314 	dma_addr_t dma;
2315 	int ring_sz = desc_size * len;
2316 	int buf_sz = RTW89_PCI_RX_BUF_SIZE;
2317 	int i, allocated;
2318 	int ret;
2319 
2320 	ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr);
2321 	if (ret) {
		rtw89_err(rtwdev, "failed to get address of rxch %d\n", rxch);
2323 		return ret;
2324 	}
2325 
2326 	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
2327 	if (!head) {
2328 		ret = -ENOMEM;
2329 		goto err;
2330 	}
2331 
2332 	rx_ring->bd_ring.head = head;
2333 	rx_ring->bd_ring.dma = dma;
2334 	rx_ring->bd_ring.len = len;
2335 	rx_ring->bd_ring.desc_size = desc_size;
2336 	rx_ring->bd_ring.addr = *rxch_addr;
2337 	rx_ring->bd_ring.wp = 0;
2338 	rx_ring->bd_ring.rp = 0;
2339 	rx_ring->buf_sz = buf_sz;
2340 	rx_ring->diliver_skb = NULL;
2341 	rx_ring->diliver_desc.ready = false;
2342 
2343 	for (i = 0; i < len; i++) {
2344 		skb = dev_alloc_skb(buf_sz);
2345 		if (!skb) {
2346 			ret = -ENOMEM;
2347 			goto err_free;
2348 		}
2349 
2350 		memset(skb->data, 0, buf_sz);
2351 		rx_ring->buf[i] = skb;
2352 		ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb,
2353 					   buf_sz, i);
2354 		if (ret) {
2355 			rtw89_err(rtwdev, "failed to init rx buf %d\n", i);
2356 			dev_kfree_skb_any(skb);
2357 			rx_ring->buf[i] = NULL;
2358 			goto err_free;
2359 		}
2360 	}
2361 
2362 	return 0;
2363 
2364 err_free:
2365 	allocated = i;
2366 	for (i = 0; i < allocated; i++) {
2367 		skb = rx_ring->buf[i];
2368 		if (!skb)
2369 			continue;
2370 		dma = *((dma_addr_t *)skb->cb);
2371 		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
2372 		dev_kfree_skb(skb);
2373 		rx_ring->buf[i] = NULL;
2374 	}
2375 
2376 	head = rx_ring->bd_ring.head;
2377 	dma = rx_ring->bd_ring.dma;
2378 	dma_free_coherent(&pdev->dev, ring_sz, head, dma);
2379 
2380 	rx_ring->bd_ring.head = NULL;
2381 err:
2382 	return ret;
2383 }
2384 
2385 static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev,
2386 				    struct pci_dev *pdev)
2387 {
2388 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2389 	struct rtw89_pci_rx_ring *rx_ring;
2390 	u32 desc_size;
2391 	u32 len;
2392 	int i, rx_allocated;
2393 	int ret;
2394 
2395 	for (i = 0; i < RTW89_RXCH_NUM; i++) {
2396 		rx_ring = &rtwpci->rx_rings[i];
2397 		desc_size = sizeof(struct rtw89_pci_rx_bd_32);
2398 		len = RTW89_PCI_RXBD_NUM_MAX;
2399 		ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring,
2400 					      desc_size, len, i);
2401 		if (ret) {
2402 			rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i);
2403 			goto err_free;
2404 		}
2405 	}
2406 
2407 	return 0;
2408 
2409 err_free:
2410 	rx_allocated = i;
2411 	for (i = 0; i < rx_allocated; i++) {
2412 		rx_ring = &rtwpci->rx_rings[i];
2413 		rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
2414 	}
2415 
2416 	return ret;
2417 }
2418 
2419 static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev,
2420 				     struct pci_dev *pdev)
2421 {
2422 	int ret;
2423 
2424 	ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev);
2425 	if (ret) {
2426 		rtw89_err(rtwdev, "failed to alloc dma tx rings\n");
2427 		goto err;
2428 	}
2429 
2430 	ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev);
2431 	if (ret) {
2432 		rtw89_err(rtwdev, "failed to alloc dma rx rings\n");
2433 		goto err_free_tx_rings;
2434 	}
2435 
2436 	return 0;
2437 
2438 err_free_tx_rings:
2439 	rtw89_pci_free_tx_rings(rtwdev, pdev);
2440 err:
2441 	return ret;
2442 }
2443 
2444 static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev,
2445 			       struct rtw89_pci *rtwpci)
2446 {
2447 	skb_queue_head_init(&rtwpci->h2c_queue);
2448 	skb_queue_head_init(&rtwpci->h2c_release_queue);
2449 }
2450 
2451 static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev,
2452 				    struct pci_dev *pdev)
2453 {
2454 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2455 	int ret;
2456 
2457 	ret = rtw89_pci_setup_mapping(rtwdev, pdev);
2458 	if (ret) {
2459 		rtw89_err(rtwdev, "failed to setup pci mapping\n");
2460 		goto err;
2461 	}
2462 
2463 	ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev);
2464 	if (ret) {
2465 		rtw89_err(rtwdev, "failed to alloc pci trx rings\n");
2466 		goto err_pci_unmap;
2467 	}
2468 
2469 	rtw89_pci_h2c_init(rtwdev, rtwpci);
2470 
2471 	spin_lock_init(&rtwpci->irq_lock);
2472 	spin_lock_init(&rtwpci->trx_lock);
2473 
2474 	return 0;
2475 
2476 err_pci_unmap:
2477 	rtw89_pci_clear_mapping(rtwdev, pdev);
2478 err:
2479 	return ret;
2480 }
2481 
2482 static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev,
2483 				     struct pci_dev *pdev)
2484 {
2485 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2486 
2487 	rtw89_pci_free_trx_rings(rtwdev, pdev);
2488 	rtw89_pci_clear_mapping(rtwdev, pdev);
2489 	rtw89_pci_release_fwcmd(rtwdev, rtwpci,
2490 				skb_queue_len(&rtwpci->h2c_queue), true);
2491 }
2492 
2493 static void rtw89_pci_default_intr_mask(struct rtw89_dev *rtwdev)
2494 {
2495 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2496 
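	/* default interrupt enable masks used when interrupts are (re)armed */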
2497 	rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0;
2498 	rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
2499 			   B_AX_RXDMA_INT_EN |
2500 			   B_AX_RXP1DMA_INT_EN |
2501 			   B_AX_RPQDMA_INT_EN |
2502 			   B_AX_RXDMA_STUCK_INT_EN |
2503 			   B_AX_RDU_INT_EN |
2504 			   B_AX_RPQBD_FULL_INT_EN |
2505 			   B_AX_HS0ISR_IND_INT_EN;
2506 
2507 	rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN;
2508 }
2509 
2510 static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev,
2511 				 struct pci_dev *pdev)
2512 {
2513 	unsigned long flags = 0;
2514 	int ret;
2515 
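	/* one vector is enough; prefer MSI and fall back to legacy INTx */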
2516 	flags |= PCI_IRQ_LEGACY | PCI_IRQ_MSI;
2517 	ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
2518 	if (ret < 0) {
2519 		rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret);
2520 		goto err;
2521 	}
2522 
2523 	ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
2524 					rtw89_pci_interrupt_handler,
2525 					rtw89_pci_interrupt_threadfn,
2526 					IRQF_SHARED, KBUILD_MODNAME, rtwdev);
2527 	if (ret) {
2528 		rtw89_err(rtwdev, "failed to request threaded irq\n");
2529 		goto err_free_vector;
2530 	}
2531 
2532 	rtw89_pci_default_intr_mask(rtwdev);
2533 
2534 	return 0;
2535 
2536 err_free_vector:
2537 	pci_free_irq_vectors(pdev);
2538 err:
2539 	return ret;
2540 }
2541 
2542 static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev,
2543 			       struct pci_dev *pdev)
2544 {
2545 	devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
2546 	pci_free_irq_vectors(pdev);
2547 }
2548 
2549 static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
2550 {
2551 	int ret;
2552 
2553 	if (rtw89_pci_disable_clkreq)
2554 		return;
2555 
2556 	ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
2557 					  PCIE_CLKDLY_HW_30US);
2558 	if (ret)
2559 		rtw89_err(rtwdev, "failed to set CLKREQ Delay\n");
2560 
2561 	if (enable)
2562 		ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL,
2563 						RTW89_PCIE_BIT_CLK);
2564 	else
2565 		ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1_CTRL,
2566 						RTW89_PCIE_BIT_CLK);
2567 	if (ret)
		rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d\n",
			  enable ? "set" : "unset", ret);
2570 }
2571 
2572 static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
2573 {
2574 	u8 value = 0;
2575 	int ret;
2576 
2577 	if (rtw89_pci_disable_aspm_l1)
2578 		return;
2579 
2580 	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value);
2581 	if (ret)
2582 		rtw89_err(rtwdev, "failed to read ASPM Delay\n");
2583 
2584 	value &= ~(RTW89_L1DLY_MASK | RTW89_L0DLY_MASK);
2585 	value |= FIELD_PREP(RTW89_L1DLY_MASK, PCIE_L1DLY_16US) |
2586 		 FIELD_PREP(RTW89_L0DLY_MASK, PCIE_L0SDLY_4US);
2587 
2588 	ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value);
2589 	if (ret)
		rtw89_err(rtwdev, "failed to set ASPM Delay\n");
2591 
2592 	if (enable)
2593 		ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL,
2594 						RTW89_PCIE_BIT_L1);
2595 	else
2596 		ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1_CTRL,
2597 						RTW89_PCIE_BIT_L1);
2598 	if (ret)
		rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d\n",
			  enable ? "set" : "unset", ret);
2601 }
2602 
2603 static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev)
2604 {
2605 	struct rtw89_traffic_stats *stats = &rtwdev->stats;
2606 	enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv;
2607 	enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv;
2608 	u32 val = 0;
2609 
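	/* enable RX interrupt mitigation only under high traffic while not
	 * scanning: coalesce up to half of the RX BD ring or a 2048us timer
	 */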
2610 	if (!rtwdev->scanning &&
2611 	    (tx_tfc_lv >= RTW89_TFC_HIGH || rx_tfc_lv >= RTW89_TFC_HIGH))
2612 		val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL |
2613 		      FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) |
2614 		      FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) |
2615 		      FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64);
2616 
2617 	rtw89_write32(rtwdev, R_AX_INT_MIT_RX, val);
2618 }
2619 
2620 static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev)
2621 {
2622 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2623 	struct pci_dev *pdev = rtwpci->pdev;
2624 	u16 link_ctrl;
2625 	int ret;
2626 
	/* Although the PCIe link control register in standard configuration
	 * space can configure these features, Realtek's design requires the
	 * driver to check whether the host enables CLKREQ/ASPM before
	 * turning on the corresponding HW module.
	 *
	 * These features are implemented by two associated HW modules: one
	 * accesses the PCIe configuration space to follow the host settings,
	 * and the other carries out the actual CLKREQ/ASPM mechanisms and is
	 * disabled by default. If the host does not support them, or they
	 * are misconfigured (e.g. CLKREQ# is not bi-directional), the device
	 * could be lost when the HW misbehaves on the link.
	 *
	 * Hence the driver first checks that the settings in the PCIe
	 * configuration space are synced and enabled, and only then turns on
	 * the module that actually performs the mechanism.
	 */
2643 	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
2644 	if (ret) {
2645 		rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
2646 		return;
2647 	}
2648 
2649 	if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
2650 		rtw89_pci_clkreq_set(rtwdev, true);
2651 
2652 	if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
2653 		rtw89_pci_aspm_set(rtwdev, true);
2654 }
2655 
2656 static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable)
2657 {
2658 	int ret;
2659 
2660 	if (enable)
2661 		ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_TIMER_CTRL,
2662 						RTW89_PCIE_BIT_L1SUB);
2663 	else
2664 		ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_TIMER_CTRL,
2665 						RTW89_PCIE_BIT_L1SUB);
2666 	if (ret)
		rtw89_err(rtwdev, "failed to %s L1SS, ret=%d\n",
			  enable ? "set" : "unset", ret);
2669 }
2670 
2671 static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev)
2672 {
2673 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2674 	struct pci_dev *pdev = rtwpci->pdev;
2675 	u32 l1ss_cap_ptr, l1ss_ctrl;
2676 
2677 	if (rtw89_pci_disable_l1ss)
2678 		return;
2679 
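	/* only enable the HW L1SS control if the host already enables an L1
	 * substate in the L1SS extended capability
	 */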
2680 	l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
2681 	if (!l1ss_cap_ptr)
2682 		return;
2683 
2684 	pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl);
2685 
2686 	if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK)
2687 		rtw89_pci_l1ss_set(rtwdev, true);
2688 }
2689 
2690 static void rtw89_pci_ctrl_dma_all_pcie(struct rtw89_dev *rtwdev, u8 en)
2691 {
2692 	u32 val32;
2693 
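	/* enable: release the PCIe IO stop bit and turn on TX/RX HCI DMA;
	 * disable: stop PCIe IO and turn both HCI DMA engines off
	 */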
2694 	if (en == MAC_AX_FUNC_EN) {
2695 		val32 = B_AX_STOP_PCIEIO;
2696 		rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, val32);
2697 
2698 		val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN;
2699 		rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
2700 	} else {
2701 		val32 = B_AX_STOP_PCIEIO;
2702 		rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1, val32);
2703 
2704 		val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN;
2705 		rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
2706 	}
2707 }
2708 
2709 static int rtw89_pci_poll_io_idle(struct rtw89_dev *rtwdev)
2710 {
2711 	int ret = 0;
2712 	u32 sts;
2713 	u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY;
2714 
2715 	ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0,
2716 				       10, 1000, false, rtwdev,
2717 				       R_AX_PCIE_DMA_BUSY1);
2718 	if (ret) {
2719 		rtw89_err(rtwdev, "pci dmach busy1 0x%X\n",
2720 			  rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1));
2721 		return -EINVAL;
2722 	}
2723 	return ret;
2724 }
2725 
2726 static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev)
2727 {
2728 	u32 val, dma_rst = 0;
2729 	int ret;
2730 
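	/* level-1 recovery, step 1: stop PCIe DMA and wait for IO to go idle;
	 * if it stays busy, toggle the stuck TX/RX HCI DMA engines once and
	 * poll again
	 */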
2731 	rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_DIS);
2732 	ret = rtw89_pci_poll_io_idle(rtwdev);
2733 	if (ret) {
2734 		val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
2735 		rtw89_debug(rtwdev, RTW89_DBG_HCI,
2736 			    "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n",
2737 			    R_AX_DBG_ERR_FLAG, val);
2738 		if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0)
2739 			dma_rst |= B_AX_HCI_TXDMA_EN;
2740 		if (val & B_AX_RX_STUCK)
2741 			dma_rst |= B_AX_HCI_RXDMA_EN;
2742 		val = rtw89_read32(rtwdev, R_AX_HCI_FUNC_EN);
2743 		rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val & ~dma_rst);
2744 		rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val | dma_rst);
2745 		ret = rtw89_pci_poll_io_idle(rtwdev);
2746 		val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
2747 		rtw89_debug(rtwdev, RTW89_DBG_HCI,
2748 			    "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n",
2749 			    R_AX_DBG_ERR_FLAG, val);
2750 	}
2751 
2752 	return ret;
2753 }
2754 
2755 static void rtw89_pci_ctrl_hci_dma_en(struct rtw89_dev *rtwdev, u8 en)
2756 {
2757 	u32 val32;
2758 
2759 	if (en == MAC_AX_FUNC_EN) {
2760 		val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN;
2761 		rtw89_write32_set(rtwdev, R_AX_HCI_FUNC_EN, val32);
2762 	} else {
2763 		val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN;
2764 		rtw89_write32_clr(rtwdev, R_AX_HCI_FUNC_EN, val32);
2765 	}
2766 }
2767 
2768 static int rtw89_pci_rst_bdram(struct rtw89_dev *rtwdev)
2769 {
2770 	int ret = 0;
2771 	u32 val32, sts;
2772 
2773 	val32 = B_AX_RST_BDRAM;
2774 	rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
2775 
2776 	ret = read_poll_timeout_atomic(rtw89_read32, sts,
2777 				       (sts & B_AX_RST_BDRAM) == 0x0, 1, 100,
2778 				       true, rtwdev, R_AX_PCIE_INIT_CFG1);
2779 	return ret;
2780 }
2781 
2782 static int rtw89_pci_lv1rst_start_dma(struct rtw89_dev *rtwdev)
2783 {
	int ret;
2785 
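	/* level-1 recovery, step 2: reset the HCI DMA engines, clear all BD
	 * indexes, reset BD RAM, then re-enable PCIe DMA
	 */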
2786 	rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_DIS);
2787 	rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_EN);
2788 	rtw89_pci_clr_idx_all(rtwdev);
2789 
2790 	ret = rtw89_pci_rst_bdram(rtwdev);
2791 	if (ret)
2792 		return ret;
2793 
2794 	rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_EN);
2795 	return ret;
2796 }
2797 
2798 static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev,
2799 					  enum rtw89_lv1_rcvy_step step)
2800 {
2801 	int ret;
2802 
2803 	switch (step) {
2804 	case RTW89_LV1_RCVY_STEP_1:
2805 		ret = rtw89_pci_lv1rst_stop_dma(rtwdev);
2806 		if (ret)
2807 			rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n");
2808 
2809 		break;
2810 
2811 	case RTW89_LV1_RCVY_STEP_2:
2812 		ret = rtw89_pci_lv1rst_start_dma(rtwdev);
2813 		if (ret)
2814 			rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n");
2815 		break;
2816 
2817 	default:
2818 		return -EINVAL;
2819 	}
2820 
2821 	return ret;
2822 }
2823 
2824 static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev)
2825 {
2826 	rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX =0x%08x\n",
2827 		   rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX));
2828 	rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
2829 		   rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG));
2830 	rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
2831 		   rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG));
2832 }
2833 
2834 static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget)
2835 {
2836 	struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi);
2837 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2838 	unsigned long flags;
2839 	int work_done;
2840 
2841 	rtwdev->napi_budget_countdown = budget;
2842 
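	/* poll the RPQ first, then the RX queue; interrupts are re-armed only
	 * when the budget was not exhausted and NAPI completes
	 */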
2843 	rtw89_pci_clear_isr0(rtwdev, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT);
2844 	work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
2845 	if (work_done == budget)
2846 		return budget;
2847 
2848 	rtw89_pci_clear_isr0(rtwdev, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT | B_AX_RDU_INT);
2849 	work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
2850 	if (work_done < budget && napi_complete_done(napi, work_done)) {
2851 		spin_lock_irqsave(&rtwpci->irq_lock, flags);
2852 		if (likely(rtwpci->running))
2853 			rtw89_pci_enable_intr(rtwdev, rtwpci);
2854 		spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
2855 	}
2856 
2857 	return work_done;
2858 }
2859 
2860 static int __maybe_unused rtw89_pci_suspend(struct device *dev)
2861 {
2862 	struct ieee80211_hw *hw = dev_get_drvdata(dev);
2863 	struct rtw89_dev *rtwdev = hw->priv;
2864 
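	/* adjust power-related settings before suspend; rtw89_pci_resume()
	 * applies the inverse settings
	 */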
2865 	rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
2866 			  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
2867 	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
2868 	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
2869 	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
2870 	rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
2871 			  B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
2872 
2873 	return 0;
2874 }
2875 
2876 static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev)
2877 {
2878 	if (rtwdev->chip->chip_id == RTL8852C)
2879 		return;
2880 
	/* The hardware requires this register to be written twice for the
	 * setting to take effect.
	 */
2882 	rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
2883 				    RTW89_PCIE_BIT_CFG_RST_MSTATE);
2884 	rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
2885 				    RTW89_PCIE_BIT_CFG_RST_MSTATE);
2886 }
2887 
2888 static int __maybe_unused rtw89_pci_resume(struct device *dev)
2889 {
2890 	struct ieee80211_hw *hw = dev_get_drvdata(dev);
2891 	struct rtw89_dev *rtwdev = hw->priv;
2892 
2893 	rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
2894 			  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
2895 	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
2896 	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
2897 	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
2898 	rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
2899 			  B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
2900 	rtw89_pci_l2_hci_ldo(rtwdev);
2901 	rtw89_pci_link_cfg(rtwdev);
2902 	rtw89_pci_l1ss_cfg(rtwdev);
2903 
2904 	return 0;
2905 }
2906 
2907 SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume);
2908 EXPORT_SYMBOL(rtw89_pm_ops);
2909 
2910 static const struct rtw89_hci_ops rtw89_pci_ops = {
2911 	.tx_write	= rtw89_pci_ops_tx_write,
2912 	.tx_kick_off	= rtw89_pci_ops_tx_kick_off,
2913 	.flush_queues	= rtw89_pci_ops_flush_queues,
2914 	.reset		= rtw89_pci_ops_reset,
2915 	.start		= rtw89_pci_ops_start,
2916 	.stop		= rtw89_pci_ops_stop,
2917 	.recalc_int_mit = rtw89_pci_recalc_int_mit,
2918 
2919 	.read8		= rtw89_pci_ops_read8,
2920 	.read16		= rtw89_pci_ops_read16,
2921 	.read32		= rtw89_pci_ops_read32,
2922 	.write8		= rtw89_pci_ops_write8,
2923 	.write16	= rtw89_pci_ops_write16,
2924 	.write32	= rtw89_pci_ops_write32,
2925 
2926 	.mac_pre_init	= rtw89_pci_ops_mac_pre_init,
2927 	.mac_post_init	= rtw89_pci_ops_mac_post_init,
2928 	.deinit		= rtw89_pci_ops_deinit,
2929 
2930 	.check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource,
2931 	.mac_lv1_rcvy	= rtw89_pci_ops_mac_lv1_recovery,
2932 	.dump_err_status = rtw89_pci_ops_dump_err_status,
2933 	.napi_poll	= rtw89_pci_napi_poll,
2934 };
2935 
2936 int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2937 {
2938 	struct ieee80211_hw *hw;
2939 	struct rtw89_dev *rtwdev;
2940 	const struct rtw89_driver_info *info;
2941 	int driver_data_size;
2942 	int ret;
2943 
2944 	driver_data_size = sizeof(struct rtw89_dev) + sizeof(struct rtw89_pci);
2945 	hw = ieee80211_alloc_hw(driver_data_size, &rtw89_ops);
2946 	if (!hw) {
2947 		dev_err(&pdev->dev, "failed to allocate hw\n");
2948 		return -ENOMEM;
2949 	}
2950 
2951 	rtwdev = hw->priv;
2952 	rtwdev->hw = hw;
2953 	rtwdev->dev = &pdev->dev;
2954 	rtwdev->hci.ops = &rtw89_pci_ops;
2955 	rtwdev->hci.type = RTW89_HCI_TYPE_PCIE;
2956 	rtwdev->hci.rpwm_addr = R_AX_PCIE_HRPWM;
2957 	rtwdev->hci.cpwm_addr = R_AX_CPWM;
2958 
2959 	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
2960 
2961 	info = (const struct rtw89_driver_info *)id->driver_data;
2962 	rtwdev->chip = info->chip;
2963 	rtwdev->pci_info = info->bus.pci;
2964 
2965 	ret = rtw89_core_init(rtwdev);
2966 	if (ret) {
2967 		rtw89_err(rtwdev, "failed to initialise core\n");
2968 		goto err_release_hw;
2969 	}
2970 
2971 	ret = rtw89_pci_claim_device(rtwdev, pdev);
2972 	if (ret) {
2973 		rtw89_err(rtwdev, "failed to claim pci device\n");
2974 		goto err_core_deinit;
2975 	}
2976 
2977 	ret = rtw89_pci_setup_resource(rtwdev, pdev);
2978 	if (ret) {
2979 		rtw89_err(rtwdev, "failed to setup pci resource\n");
2980 		goto err_declaim_pci;
2981 	}
2982 
2983 	ret = rtw89_chip_info_setup(rtwdev);
2984 	if (ret) {
2985 		rtw89_err(rtwdev, "failed to setup chip information\n");
2986 		goto err_clear_resource;
2987 	}
2988 
2989 	rtw89_pci_link_cfg(rtwdev);
2990 	rtw89_pci_l1ss_cfg(rtwdev);
2991 
2992 	ret = rtw89_core_register(rtwdev);
2993 	if (ret) {
2994 		rtw89_err(rtwdev, "failed to register core\n");
2995 		goto err_clear_resource;
2996 	}
2997 
2998 	rtw89_core_napi_init(rtwdev);
2999 
3000 	ret = rtw89_pci_request_irq(rtwdev, pdev);
3001 	if (ret) {
3002 		rtw89_err(rtwdev, "failed to request pci irq\n");
3003 		goto err_unregister;
3004 	}
3005 
3006 	return 0;
3007 
3008 err_unregister:
3009 	rtw89_core_napi_deinit(rtwdev);
3010 	rtw89_core_unregister(rtwdev);
3011 err_clear_resource:
3012 	rtw89_pci_clear_resource(rtwdev, pdev);
3013 err_declaim_pci:
3014 	rtw89_pci_declaim_device(rtwdev, pdev);
3015 err_core_deinit:
3016 	rtw89_core_deinit(rtwdev);
3017 err_release_hw:
3018 	ieee80211_free_hw(hw);
3019 
3020 	return ret;
3021 }
3022 EXPORT_SYMBOL(rtw89_pci_probe);
3023 
3024 void rtw89_pci_remove(struct pci_dev *pdev)
3025 {
3026 	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
3027 	struct rtw89_dev *rtwdev;
3028 
3029 	rtwdev = hw->priv;
3030 
3031 	rtw89_pci_free_irq(rtwdev, pdev);
3032 	rtw89_core_napi_deinit(rtwdev);
3033 	rtw89_core_unregister(rtwdev);
3034 	rtw89_pci_clear_resource(rtwdev, pdev);
3035 	rtw89_pci_declaim_device(rtwdev, pdev);
3036 	rtw89_core_deinit(rtwdev);
3037 	ieee80211_free_hw(hw);
3038 }
3039 EXPORT_SYMBOL(rtw89_pci_remove);
3040 
3041 MODULE_AUTHOR("Realtek Corporation");
3042 MODULE_DESCRIPTION("Realtek 802.11ax wireless PCI driver");
3043 MODULE_LICENSE("Dual BSD/GPL");
3044