// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2020 Realtek Corporation
 */

#include <linux/pci.h>

#include "mac.h"
#include "pci.h"
#include "reg.h"
#include "ser.h"

static bool rtw89_pci_disable_clkreq;
static bool rtw89_pci_disable_aspm_l1;
static bool rtw89_pci_disable_l1ss;
module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644);
module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644);
module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644);
MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");

static int rtw89_pci_rst_bdram_pcie(struct rtw89_dev *rtwdev)
{
	u32 val;
	int ret;

	rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1,
		      rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | B_AX_RST_BDRAM);

	ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM),
				       1, RTW89_PCI_POLL_BDRAM_RST_CNT, false,
				       rtwdev, R_AX_PCIE_INIT_CFG1);

	if (ret)
		return -EBUSY;

	return 0;
}

static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev,
				struct rtw89_pci_dma_ring *bd_ring,
				u32 cur_idx, bool tx)
{
	u32 cnt, cur_rp, wp, rp, len;

	rp = bd_ring->rp;
	wp = bd_ring->wp;
	len = bd_ring->len;

	cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
	if (tx)
		cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp);
	else
		cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp);

	bd_ring->rp = cur_rp;

	return cnt;
}

static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 addr_idx = bd_ring->addr.idx;
	u32 cnt, idx;

	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true);

	return cnt;
}

static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
				    struct rtw89_pci *rtwpci,
				    u32 cnt, bool release_all)
{
	struct rtw89_pci_tx_data *tx_data;
	struct sk_buff *skb;
	u32 qlen;

	while (cnt--) {
		skb = skb_dequeue(&rtwpci->h2c_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to pre-release fwcmd\n");
			return;
		}
		skb_queue_tail(&rtwpci->h2c_release_queue, skb);
	}

	qlen = skb_queue_len(&rtwpci->h2c_release_queue);
	if (!release_all)
		qlen = qlen > RTW89_PCI_MULTITAG ?
qlen - RTW89_PCI_MULTITAG : 0; 94 95 while (qlen--) { 96 skb = skb_dequeue(&rtwpci->h2c_release_queue); 97 if (!skb) { 98 rtw89_err(rtwdev, "failed to release fwcmd\n"); 99 return; 100 } 101 tx_data = RTW89_PCI_TX_SKB_CB(skb); 102 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, 103 DMA_TO_DEVICE); 104 dev_kfree_skb_any(skb); 105 } 106 } 107 108 static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev, 109 struct rtw89_pci *rtwpci) 110 { 111 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12]; 112 u32 cnt; 113 114 cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring); 115 if (!cnt) 116 return; 117 rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false); 118 } 119 120 static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev, 121 struct rtw89_pci_rx_ring *rx_ring) 122 { 123 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 124 u32 addr_idx = bd_ring->addr.idx; 125 u32 cnt, idx; 126 127 idx = rtw89_read32(rtwdev, addr_idx); 128 cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false); 129 130 return cnt; 131 } 132 133 static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev, 134 struct sk_buff *skb) 135 { 136 struct rtw89_pci_rx_info *rx_info; 137 dma_addr_t dma; 138 139 rx_info = RTW89_PCI_RX_SKB_CB(skb); 140 dma = rx_info->dma; 141 dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE, 142 DMA_FROM_DEVICE); 143 } 144 145 static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev, 146 struct sk_buff *skb) 147 { 148 struct rtw89_pci_rx_info *rx_info; 149 dma_addr_t dma; 150 151 rx_info = RTW89_PCI_RX_SKB_CB(skb); 152 dma = rx_info->dma; 153 dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE, 154 DMA_FROM_DEVICE); 155 } 156 157 static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev, 158 struct sk_buff *skb) 159 { 160 struct rtw89_pci_rxbd_info *rxbd_info; 161 struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); 162 163 rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data; 164 rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS); 165 rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS); 166 rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE); 167 rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG); 168 169 return 0; 170 } 171 172 static bool 173 rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls, 174 struct sk_buff *new, 175 const struct sk_buff *skb, u32 offset, 176 const struct rtw89_pci_rx_info *rx_info, 177 const struct rtw89_rx_desc_info *desc_info) 178 { 179 u32 copy_len = rx_info->len - offset; 180 181 if (unlikely(skb_tailroom(new) < copy_len)) { 182 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 183 "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n", 184 rx_info->len, desc_info->pkt_size, offset, fs, ls); 185 rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ", 186 skb->data, rx_info->len); 187 /* length of a single segment skb is desc_info->pkt_size */ 188 if (fs && ls) { 189 copy_len = desc_info->pkt_size; 190 } else { 191 rtw89_info(rtwdev, "drop rx data due to invalid length\n"); 192 return false; 193 } 194 } 195 196 skb_put_data(new, skb->data + offset, copy_len); 197 198 return true; 199 } 200 201 static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev, 202 struct rtw89_pci_rx_ring *rx_ring) 203 { 204 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 205 struct rtw89_pci_rx_info *rx_info; 206 struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc; 207 struct sk_buff *new = 
rx_ring->diliver_skb; 208 struct sk_buff *skb; 209 u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info); 210 u32 offset; 211 u32 cnt = 1; 212 bool fs, ls; 213 int ret; 214 215 skb = rx_ring->buf[bd_ring->wp]; 216 rtw89_pci_sync_skb_for_cpu(rtwdev, skb); 217 218 ret = rtw89_pci_rxbd_info_update(rtwdev, skb); 219 if (ret) { 220 rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n", 221 bd_ring->wp, ret); 222 goto err_sync_device; 223 } 224 225 rx_info = RTW89_PCI_RX_SKB_CB(skb); 226 fs = rx_info->fs; 227 ls = rx_info->ls; 228 229 if (fs) { 230 if (new) { 231 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, 232 "skb should not be ready before first segment start\n"); 233 goto err_sync_device; 234 } 235 if (desc_info->ready) { 236 rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n"); 237 goto err_sync_device; 238 } 239 240 rtw89_core_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size); 241 242 new = dev_alloc_skb(desc_info->pkt_size); 243 if (!new) 244 goto err_sync_device; 245 246 rx_ring->diliver_skb = new; 247 248 /* first segment has RX desc */ 249 offset = desc_info->offset; 250 offset += desc_info->long_rxdesc ? sizeof(struct rtw89_rxdesc_long) : 251 sizeof(struct rtw89_rxdesc_short); 252 } else { 253 offset = sizeof(struct rtw89_pci_rxbd_info); 254 if (!new) { 255 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n"); 256 goto err_sync_device; 257 } 258 } 259 if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info)) 260 goto err_sync_device; 261 rtw89_pci_sync_skb_for_device(rtwdev, skb); 262 rtw89_pci_rxbd_increase(rx_ring, 1); 263 264 if (!desc_info->ready) { 265 rtw89_warn(rtwdev, "no rx desc information\n"); 266 goto err_free_resource; 267 } 268 if (ls) { 269 rtw89_core_rx(rtwdev, desc_info, new); 270 rx_ring->diliver_skb = NULL; 271 desc_info->ready = false; 272 } 273 274 return cnt; 275 276 err_sync_device: 277 rtw89_pci_sync_skb_for_device(rtwdev, skb); 278 rtw89_pci_rxbd_increase(rx_ring, 1); 279 err_free_resource: 280 if (new) 281 dev_kfree_skb_any(new); 282 rx_ring->diliver_skb = NULL; 283 desc_info->ready = false; 284 285 return cnt; 286 } 287 288 static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev, 289 struct rtw89_pci_rx_ring *rx_ring, 290 u32 cnt) 291 { 292 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 293 u32 rx_cnt; 294 295 while (cnt && rtwdev->napi_budget_countdown > 0) { 296 rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring); 297 if (!rx_cnt) { 298 rtw89_err(rtwdev, "failed to deliver RXBD skb\n"); 299 300 /* skip the rest RXBD bufs */ 301 rtw89_pci_rxbd_increase(rx_ring, cnt); 302 break; 303 } 304 305 cnt -= rx_cnt; 306 } 307 308 rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp); 309 } 310 311 static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev, 312 struct rtw89_pci *rtwpci, int budget) 313 { 314 struct rtw89_pci_rx_ring *rx_ring; 315 int countdown = rtwdev->napi_budget_countdown; 316 u32 cnt; 317 318 rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ]; 319 320 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); 321 if (!cnt) 322 return 0; 323 324 cnt = min_t(u32, budget, cnt); 325 326 rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt); 327 328 /* In case of flushing pending SKBs, the countdown may exceed. 
*/ 329 if (rtwdev->napi_budget_countdown <= 0) 330 return budget; 331 332 return budget - countdown; 333 } 334 335 static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev, 336 struct rtw89_pci_tx_ring *tx_ring, 337 struct sk_buff *skb, u8 tx_status) 338 { 339 struct ieee80211_tx_info *info; 340 341 info = IEEE80211_SKB_CB(skb); 342 ieee80211_tx_info_clear_status(info); 343 344 if (info->flags & IEEE80211_TX_CTL_NO_ACK) 345 info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; 346 if (tx_status == RTW89_TX_DONE) { 347 info->flags |= IEEE80211_TX_STAT_ACK; 348 tx_ring->tx_acked++; 349 } else { 350 if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) 351 rtw89_debug(rtwdev, RTW89_DBG_FW, 352 "failed to TX of status %x\n", tx_status); 353 switch (tx_status) { 354 case RTW89_TX_RETRY_LIMIT: 355 tx_ring->tx_retry_lmt++; 356 break; 357 case RTW89_TX_LIFE_TIME: 358 tx_ring->tx_life_time++; 359 break; 360 case RTW89_TX_MACID_DROP: 361 tx_ring->tx_mac_id_drop++; 362 break; 363 default: 364 rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status); 365 break; 366 } 367 } 368 369 ieee80211_tx_status_ni(rtwdev->hw, skb); 370 } 371 372 static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring) 373 { 374 struct rtw89_pci_tx_wd *txwd; 375 u32 cnt; 376 377 cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring); 378 while (cnt--) { 379 txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list); 380 if (!txwd) { 381 rtw89_warn(rtwdev, "No busy txwd pages available\n"); 382 break; 383 } 384 385 list_del_init(&txwd->list); 386 387 /* this skb has been freed by RPP */ 388 if (skb_queue_len(&txwd->queue) == 0) 389 rtw89_pci_enqueue_txwd(tx_ring, txwd); 390 } 391 } 392 393 static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev, 394 struct rtw89_pci_tx_ring *tx_ring) 395 { 396 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 397 struct rtw89_pci_tx_wd *txwd; 398 int i; 399 400 for (i = 0; i < wd_ring->page_num; i++) { 401 txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list); 402 if (!txwd) 403 break; 404 405 list_del_init(&txwd->list); 406 } 407 } 408 409 static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev, 410 struct rtw89_pci_tx_ring *tx_ring, 411 struct rtw89_pci_tx_wd *txwd, u16 seq, 412 u8 tx_status) 413 { 414 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 415 struct rtw89_pci_tx_data *tx_data; 416 struct sk_buff *skb, *tmp; 417 u8 txch = tx_ring->txch; 418 419 if (!list_empty(&txwd->list)) { 420 rtw89_pci_reclaim_txbd(rtwdev, tx_ring); 421 /* In low power mode, RPP can receive before updating of TX BD. 422 * In normal mode, it should not happen so give it a warning. 
423 */ 424 if (!rtwpci->low_power && !list_empty(&txwd->list)) 425 rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n", 426 txch, seq); 427 } 428 429 skb_queue_walk_safe(&txwd->queue, skb, tmp) { 430 skb_unlink(skb, &txwd->queue); 431 432 tx_data = RTW89_PCI_TX_SKB_CB(skb); 433 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, 434 DMA_TO_DEVICE); 435 436 rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status); 437 } 438 439 if (list_empty(&txwd->list)) 440 rtw89_pci_enqueue_txwd(tx_ring, txwd); 441 } 442 443 static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev, 444 struct rtw89_pci_rpp_fmt *rpp) 445 { 446 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 447 struct rtw89_pci_tx_ring *tx_ring; 448 struct rtw89_pci_tx_wd_ring *wd_ring; 449 struct rtw89_pci_tx_wd *txwd; 450 u16 seq; 451 u8 qsel, tx_status, txch; 452 453 seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ); 454 qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL); 455 tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS); 456 txch = rtw89_core_get_ch_dma(rtwdev, qsel); 457 458 if (txch == RTW89_TXCH_CH12) { 459 rtw89_warn(rtwdev, "should no fwcmd release report\n"); 460 return; 461 } 462 463 tx_ring = &rtwpci->tx_rings[txch]; 464 wd_ring = &tx_ring->wd_ring; 465 txwd = &wd_ring->pages[seq]; 466 467 rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status); 468 } 469 470 static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev, 471 struct rtw89_pci_tx_ring *tx_ring) 472 { 473 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 474 struct rtw89_pci_tx_wd *txwd; 475 int i; 476 477 for (i = 0; i < wd_ring->page_num; i++) { 478 txwd = &wd_ring->pages[i]; 479 480 if (!list_empty(&txwd->list)) 481 continue; 482 483 rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP); 484 } 485 } 486 487 static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev, 488 struct rtw89_pci_rx_ring *rx_ring, 489 u32 max_cnt) 490 { 491 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 492 struct rtw89_pci_rx_info *rx_info; 493 struct rtw89_pci_rpp_fmt *rpp; 494 struct rtw89_rx_desc_info desc_info = {}; 495 struct sk_buff *skb; 496 u32 cnt = 0; 497 u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt); 498 u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info); 499 u32 offset; 500 int ret; 501 502 skb = rx_ring->buf[bd_ring->wp]; 503 rtw89_pci_sync_skb_for_cpu(rtwdev, skb); 504 505 ret = rtw89_pci_rxbd_info_update(rtwdev, skb); 506 if (ret) { 507 rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n", 508 bd_ring->wp, ret); 509 goto err_sync_device; 510 } 511 512 rx_info = RTW89_PCI_RX_SKB_CB(skb); 513 if (!rx_info->fs || !rx_info->ls) { 514 rtw89_err(rtwdev, "cannot process RP frame not set FS/LS\n"); 515 return cnt; 516 } 517 518 rtw89_core_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size); 519 520 /* first segment has RX desc */ 521 offset = desc_info.offset; 522 offset += desc_info.long_rxdesc ? 
sizeof(struct rtw89_rxdesc_long) : 523 sizeof(struct rtw89_rxdesc_short); 524 for (; offset + rpp_size <= rx_info->len; offset += rpp_size) { 525 rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset); 526 rtw89_pci_release_rpp(rtwdev, rpp); 527 } 528 529 rtw89_pci_sync_skb_for_device(rtwdev, skb); 530 rtw89_pci_rxbd_increase(rx_ring, 1); 531 cnt++; 532 533 return cnt; 534 535 err_sync_device: 536 rtw89_pci_sync_skb_for_device(rtwdev, skb); 537 return 0; 538 } 539 540 static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev, 541 struct rtw89_pci_rx_ring *rx_ring, 542 u32 cnt) 543 { 544 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 545 u32 release_cnt; 546 547 while (cnt) { 548 release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt); 549 if (!release_cnt) { 550 rtw89_err(rtwdev, "failed to release TX skbs\n"); 551 552 /* skip the rest RXBD bufs */ 553 rtw89_pci_rxbd_increase(rx_ring, cnt); 554 break; 555 } 556 557 cnt -= release_cnt; 558 } 559 560 rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp); 561 } 562 563 static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev, 564 struct rtw89_pci *rtwpci, int budget) 565 { 566 struct rtw89_pci_rx_ring *rx_ring; 567 u32 cnt; 568 int work_done; 569 570 rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ]; 571 572 spin_lock_bh(&rtwpci->trx_lock); 573 574 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); 575 if (cnt == 0) 576 goto out_unlock; 577 578 rtw89_pci_release_tx(rtwdev, rx_ring, cnt); 579 580 out_unlock: 581 spin_unlock_bh(&rtwpci->trx_lock); 582 583 /* always release all RPQ */ 584 work_done = min_t(int, cnt, budget); 585 rtwdev->napi_budget_countdown -= work_done; 586 587 return work_done; 588 } 589 590 static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev, 591 struct rtw89_pci *rtwpci) 592 { 593 struct rtw89_pci_rx_ring *rx_ring; 594 struct rtw89_pci_dma_ring *bd_ring; 595 u32 reg_idx; 596 u16 hw_idx, hw_idx_next, host_idx; 597 int i; 598 599 for (i = 0; i < RTW89_RXCH_NUM; i++) { 600 rx_ring = &rtwpci->rx_rings[i]; 601 bd_ring = &rx_ring->bd_ring; 602 603 reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx); 604 hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx); 605 host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx); 606 hw_idx_next = (hw_idx + 1) % bd_ring->len; 607 608 if (hw_idx_next == host_idx) 609 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i); 610 611 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 612 "%d RXD unavailable, idx=0x%08x, len=%d\n", 613 i, reg_idx, bd_ring->len); 614 } 615 } 616 617 void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev, 618 struct rtw89_pci *rtwpci, 619 struct rtw89_pci_isrs *isrs) 620 { 621 isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs; 622 isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0]; 623 isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1]; 624 625 rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs); 626 rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]); 627 rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]); 628 } 629 EXPORT_SYMBOL(rtw89_pci_recognize_intrs); 630 631 void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev, 632 struct rtw89_pci *rtwpci, 633 struct rtw89_pci_isrs *isrs) 634 { 635 isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs; 636 isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ? 637 rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0; 638 isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ? 
			rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0;
	isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ?
			rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0;

	if (isrs->halt_c2h_isrs)
		rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	if (isrs->isrs[0])
		rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]);
	if (isrs->isrs[1])
		rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1);

static void rtw89_pci_clear_isr0(struct rtw89_dev *rtwdev, u32 isr00)
{
	/* write 1 clear */
	rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isr00);
}

void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr);

void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr);

void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs);
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr_v1);

void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v1);

static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	int budget = NAPI_POLL_WEIGHT;

	/* Reset the budget so the RXQ does not get stuck after it runs out. */
	rtwdev->napi_budget_countdown = budget;

	rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget);
	rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget);
}

static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_isrs isrs;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	if (unlikely(isrs.isrs[0] & B_AX_RDU_INT))
		rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);

	if (unlikely(isrs.halt_c2h_isrs & B_AX_HALT_C2H_INT_EN))
		rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));

	if (unlikely(isrs.halt_c2h_isrs & B_AX_WDT_TIMEOUT_INT_EN))
		rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);

	if (unlikely(rtwpci->under_recovery))
		goto enable_intr;

	if (unlikely(rtwpci->low_power)) {
		rtw89_pci_low_power_interrupt_handler(rtwdev);
		goto enable_intr;
	}

	if (likely(rtwpci->running)) {
		local_bh_disable();
		napi_schedule(&rtwdev->napi);
		local_bh_enable();
	}

	return IRQ_HANDLED;

enable_intr:
	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;
	irqreturn_t irqret = IRQ_WAKE_THREAD;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);

	/* An interrupt event that was already in flight can still trigger
	 * this handler even after pci_stop() has turned off the IMR.
	 */
	if (unlikely(!rtwpci->running)) {
		irqret = IRQ_HANDLED;
		goto exit;
	}

	rtw89_chip_disable_intr(rtwdev, rtwpci);
exit:
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	return irqret;
}

#define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
	[RTW89_TXCH_##txch] = { \
		.num = R_AX_##txch##_TXBD_NUM ##v, \
		.idx = R_AX_##txch##_TXBD_IDX ##v, \
		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_TXCHADDRS(info, txch, v...) \
	[RTW89_TXCH_##txch] = { \
		.num = R_AX_##txch##_TXBD_NUM, \
		.idx = R_AX_##txch##_TXBD_IDX, \
		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_RXCHADDRS(info, rxch, v...)
\ 811 [RTW89_RXCH_##rxch] = { \ 812 .num = R_AX_##rxch##_RXBD_NUM ##v, \ 813 .idx = R_AX_##rxch##_RXBD_IDX ##v, \ 814 .desa_l = R_AX_##rxch##_RXBD_DESA_L ##v, \ 815 .desa_h = R_AX_##rxch##_RXBD_DESA_H ##v, \ 816 } 817 818 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = { 819 .tx = { 820 DEF_TXCHADDRS(info, ACH0), 821 DEF_TXCHADDRS(info, ACH1), 822 DEF_TXCHADDRS(info, ACH2), 823 DEF_TXCHADDRS(info, ACH3), 824 DEF_TXCHADDRS(info, ACH4), 825 DEF_TXCHADDRS(info, ACH5), 826 DEF_TXCHADDRS(info, ACH6), 827 DEF_TXCHADDRS(info, ACH7), 828 DEF_TXCHADDRS(info, CH8), 829 DEF_TXCHADDRS(info, CH9), 830 DEF_TXCHADDRS_TYPE1(info, CH10), 831 DEF_TXCHADDRS_TYPE1(info, CH11), 832 DEF_TXCHADDRS(info, CH12), 833 }, 834 .rx = { 835 DEF_RXCHADDRS(info, RXQ), 836 DEF_RXCHADDRS(info, RPQ), 837 }, 838 }; 839 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set); 840 841 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = { 842 .tx = { 843 DEF_TXCHADDRS(info, ACH0, _V1), 844 DEF_TXCHADDRS(info, ACH1, _V1), 845 DEF_TXCHADDRS(info, ACH2, _V1), 846 DEF_TXCHADDRS(info, ACH3, _V1), 847 DEF_TXCHADDRS(info, ACH4, _V1), 848 DEF_TXCHADDRS(info, ACH5, _V1), 849 DEF_TXCHADDRS(info, ACH6, _V1), 850 DEF_TXCHADDRS(info, ACH7, _V1), 851 DEF_TXCHADDRS(info, CH8, _V1), 852 DEF_TXCHADDRS(info, CH9, _V1), 853 DEF_TXCHADDRS_TYPE1(info, CH10, _V1), 854 DEF_TXCHADDRS_TYPE1(info, CH11, _V1), 855 DEF_TXCHADDRS(info, CH12, _V1), 856 }, 857 .rx = { 858 DEF_RXCHADDRS(info, RXQ, _V1), 859 DEF_RXCHADDRS(info, RPQ, _V1), 860 }, 861 }; 862 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1); 863 864 #undef DEF_TXCHADDRS_TYPE1 865 #undef DEF_TXCHADDRS 866 #undef DEF_RXCHADDRS 867 868 static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev, 869 enum rtw89_tx_channel txch, 870 const struct rtw89_pci_ch_dma_addr **addr) 871 { 872 const struct rtw89_pci_info *info = rtwdev->pci_info; 873 874 if (txch >= RTW89_TXCH_NUM) 875 return -EINVAL; 876 877 *addr = &info->dma_addr_set->tx[txch]; 878 879 return 0; 880 } 881 882 static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev, 883 enum rtw89_rx_channel rxch, 884 const struct rtw89_pci_ch_dma_addr **addr) 885 { 886 const struct rtw89_pci_info *info = rtwdev->pci_info; 887 888 if (rxch >= RTW89_RXCH_NUM) 889 return -EINVAL; 890 891 *addr = &info->dma_addr_set->rx[rxch]; 892 893 return 0; 894 } 895 896 static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring) 897 { 898 struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring; 899 900 /* reserved 1 desc check ring is full or not */ 901 if (bd_ring->rp > bd_ring->wp) 902 return bd_ring->rp - bd_ring->wp - 1; 903 904 return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1; 905 } 906 907 static 908 u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev) 909 { 910 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 911 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12]; 912 u32 cnt; 913 914 spin_lock_bh(&rtwpci->trx_lock); 915 rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci); 916 cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 917 spin_unlock_bh(&rtwpci->trx_lock); 918 919 return cnt; 920 } 921 922 static 923 u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev, 924 u8 txch) 925 { 926 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 927 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 928 u32 cnt; 929 930 spin_lock_bh(&rtwpci->trx_lock); 931 cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 932 spin_unlock_bh(&rtwpci->trx_lock); 933 934 return cnt; 
935 } 936 937 static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, 938 u8 txch) 939 { 940 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 941 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 942 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 943 u32 bd_cnt, wd_cnt, min_cnt = 0; 944 struct rtw89_pci_rx_ring *rx_ring; 945 u32 cnt; 946 947 rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ]; 948 949 spin_lock_bh(&rtwpci->trx_lock); 950 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 951 wd_cnt = wd_ring->curr_num; 952 953 if (wd_cnt == 0 || bd_cnt == 0) { 954 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); 955 if (cnt) 956 rtw89_pci_release_tx(rtwdev, rx_ring, cnt); 957 else if (wd_cnt == 0) 958 goto out_unlock; 959 960 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 961 if (bd_cnt == 0) 962 rtw89_pci_reclaim_txbd(rtwdev, tx_ring); 963 } 964 965 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 966 wd_cnt = wd_ring->curr_num; 967 min_cnt = min(bd_cnt, wd_cnt); 968 if (min_cnt == 0) 969 rtw89_debug(rtwdev, rtwpci->low_power ? RTW89_DBG_TXRX : RTW89_DBG_UNEXP, 970 "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n", 971 wd_cnt, bd_cnt); 972 973 out_unlock: 974 spin_unlock_bh(&rtwpci->trx_lock); 975 976 return min_cnt; 977 } 978 979 static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, 980 u8 txch) 981 { 982 if (rtwdev->hci.paused) 983 return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch); 984 985 if (txch == RTW89_TXCH_CH12) 986 return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev); 987 988 return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch); 989 } 990 991 static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring) 992 { 993 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 994 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 995 u32 host_idx, addr; 996 997 spin_lock_bh(&rtwpci->trx_lock); 998 999 addr = bd_ring->addr.idx; 1000 host_idx = bd_ring->wp; 1001 rtw89_write16(rtwdev, addr, host_idx); 1002 1003 spin_unlock_bh(&rtwpci->trx_lock); 1004 } 1005 1006 static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring, 1007 int n_txbd) 1008 { 1009 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 1010 u32 host_idx, len; 1011 1012 len = bd_ring->len; 1013 host_idx = bd_ring->wp + n_txbd; 1014 host_idx = host_idx < len ? 
host_idx : host_idx - len; 1015 1016 bd_ring->wp = host_idx; 1017 } 1018 1019 static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch) 1020 { 1021 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1022 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 1023 1024 if (rtwdev->hci.paused) { 1025 set_bit(txch, rtwpci->kick_map); 1026 return; 1027 } 1028 1029 __rtw89_pci_tx_kick_off(rtwdev, tx_ring); 1030 } 1031 1032 static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev) 1033 { 1034 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1035 struct rtw89_pci_tx_ring *tx_ring; 1036 int txch; 1037 1038 for (txch = 0; txch < RTW89_TXCH_NUM; txch++) { 1039 if (!test_and_clear_bit(txch, rtwpci->kick_map)) 1040 continue; 1041 1042 tx_ring = &rtwpci->tx_rings[txch]; 1043 __rtw89_pci_tx_kick_off(rtwdev, tx_ring); 1044 } 1045 } 1046 1047 static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop) 1048 { 1049 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1050 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 1051 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 1052 u32 cur_idx, cur_rp; 1053 u8 i; 1054 1055 /* Because the time taked by the I/O is a bit dynamic, it's hard to 1056 * define a reasonable fixed total timeout to use read_poll_timeout* 1057 * helper. Instead, we can ensure a reasonable polling times, so we 1058 * just use for loop with udelay here. 1059 */ 1060 for (i = 0; i < 60; i++) { 1061 cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx); 1062 cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx); 1063 if (cur_rp == bd_ring->wp) 1064 return; 1065 1066 udelay(1); 1067 } 1068 1069 if (!drop) 1070 rtw89_info(rtwdev, "timed out to flush pci txch: %d\n", txch); 1071 } 1072 1073 static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs, 1074 bool drop) 1075 { 1076 u8 i; 1077 1078 for (i = 0; i < RTW89_TXCH_NUM; i++) { 1079 /* It may be unnecessary to flush FWCMD queue. */ 1080 if (i == RTW89_TXCH_CH12) 1081 continue; 1082 1083 if (txchs & BIT(i)) 1084 __pci_flush_txch(rtwdev, i, drop); 1085 } 1086 } 1087 1088 static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues, 1089 bool drop) 1090 { 1091 __rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop); 1092 } 1093 1094 u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev, 1095 void *txaddr_info_addr, u32 total_len, 1096 dma_addr_t dma, u8 *add_info_nr) 1097 { 1098 struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr; 1099 1100 txaddr_info->length = cpu_to_le16(total_len); 1101 txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | 1102 RTW89_PCI_ADDR_NUM(1)); 1103 txaddr_info->dma = cpu_to_le32(dma); 1104 1105 *add_info_nr = 1; 1106 1107 return sizeof(*txaddr_info); 1108 } 1109 EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info); 1110 1111 u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev, 1112 void *txaddr_info_addr, u32 total_len, 1113 dma_addr_t dma, u8 *add_info_nr) 1114 { 1115 struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr; 1116 u32 remain = total_len; 1117 u32 len; 1118 u16 length_option; 1119 int n; 1120 1121 for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) { 1122 len = remain >= TXADDR_INFO_LENTHG_V1_MAX ? 
1123 TXADDR_INFO_LENTHG_V1_MAX : remain; 1124 remain -= len; 1125 1126 length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) | 1127 FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) | 1128 FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0); 1129 txaddr_info->length_opt = cpu_to_le16(length_option); 1130 txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma)); 1131 txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma)); 1132 1133 dma += len; 1134 txaddr_info++; 1135 } 1136 1137 WARN_ONCE(remain, "length overflow remain=%u total_len=%u", 1138 remain, total_len); 1139 1140 *add_info_nr = n; 1141 1142 return n * sizeof(*txaddr_info); 1143 } 1144 EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1); 1145 1146 static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, 1147 struct rtw89_pci_tx_ring *tx_ring, 1148 struct rtw89_pci_tx_wd *txwd, 1149 struct rtw89_core_tx_request *tx_req) 1150 { 1151 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1152 const struct rtw89_chip_info *chip = rtwdev->chip; 1153 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 1154 struct rtw89_txwd_info *txwd_info; 1155 struct rtw89_pci_tx_wp_info *txwp_info; 1156 void *txaddr_info_addr; 1157 struct pci_dev *pdev = rtwpci->pdev; 1158 struct sk_buff *skb = tx_req->skb; 1159 struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); 1160 bool en_wd_info = desc_info->en_wd_info; 1161 u32 txwd_len; 1162 u32 txwp_len; 1163 u32 txaddr_info_len; 1164 dma_addr_t dma; 1165 int ret; 1166 1167 dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); 1168 if (dma_mapping_error(&pdev->dev, dma)) { 1169 rtw89_err(rtwdev, "failed to map skb dma data\n"); 1170 ret = -EBUSY; 1171 goto err; 1172 } 1173 1174 tx_data->dma = dma; 1175 1176 txwp_len = sizeof(*txwp_info); 1177 txwd_len = chip->txwd_body_size; 1178 txwd_len += en_wd_info ? 
sizeof(*txwd_info) : 0; 1179 1180 txwp_info = txwd->vaddr + txwd_len; 1181 txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID); 1182 txwp_info->seq1 = 0; 1183 txwp_info->seq2 = 0; 1184 txwp_info->seq3 = 0; 1185 1186 tx_ring->tx_cnt++; 1187 txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len; 1188 txaddr_info_len = 1189 rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len, 1190 dma, &desc_info->addr_info_nr); 1191 1192 txwd->len = txwd_len + txwp_len + txaddr_info_len; 1193 1194 rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr); 1195 1196 skb_queue_tail(&txwd->queue, skb); 1197 1198 return 0; 1199 1200 err: 1201 return ret; 1202 } 1203 1204 static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev, 1205 struct rtw89_pci_tx_ring *tx_ring, 1206 struct rtw89_pci_tx_bd_32 *txbd, 1207 struct rtw89_core_tx_request *tx_req) 1208 { 1209 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1210 const struct rtw89_chip_info *chip = rtwdev->chip; 1211 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 1212 void *txdesc; 1213 int txdesc_size = chip->h2c_desc_size; 1214 struct pci_dev *pdev = rtwpci->pdev; 1215 struct sk_buff *skb = tx_req->skb; 1216 struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); 1217 dma_addr_t dma; 1218 1219 txdesc = skb_push(skb, txdesc_size); 1220 memset(txdesc, 0, txdesc_size); 1221 rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc); 1222 1223 dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); 1224 if (dma_mapping_error(&pdev->dev, dma)) { 1225 rtw89_err(rtwdev, "failed to map fwcmd dma data\n"); 1226 return -EBUSY; 1227 } 1228 1229 tx_data->dma = dma; 1230 txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS); 1231 txbd->length = cpu_to_le16(skb->len); 1232 txbd->dma = cpu_to_le32(tx_data->dma); 1233 skb_queue_tail(&rtwpci->h2c_queue, skb); 1234 1235 rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1); 1236 1237 return 0; 1238 } 1239 1240 static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev, 1241 struct rtw89_pci_tx_ring *tx_ring, 1242 struct rtw89_pci_tx_bd_32 *txbd, 1243 struct rtw89_core_tx_request *tx_req) 1244 { 1245 struct rtw89_pci_tx_wd *txwd; 1246 int ret; 1247 1248 /* FWCMD queue doesn't have wd pages. Instead, it submits the CMD 1249 * buffer with WD BODY only. So here we don't need to check the free 1250 * pages of the wd ring. 
	 */
	if (tx_ring->txch == RTW89_TXCH_CH12)
		return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req);

	txwd = rtw89_pci_dequeue_txwd(tx_ring);
	if (!txwd) {
		rtw89_err(rtwdev, "no available TXWD\n");
		ret = -ENOSPC;
		goto err;
	}

	ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req);
	if (ret) {
		rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq);
		goto err_enqueue_wd;
	}

	list_add_tail(&txwd->list, &tx_ring->busy_pages);

	txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
	txbd->length = cpu_to_le16(txwd->len);
	txbd->dma = cpu_to_le32(txwd->paddr);

	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);

	return 0;

err_enqueue_wd:
	rtw89_pci_enqueue_txwd(tx_ring, txwd);
err:
	return ret;
}

static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req,
			      u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_tx_bd_32 *txbd;
	u32 n_avail_txbd;
	int ret = 0;

	/* check the tx type and dma channel for fw cmd queue */
	if ((txch == RTW89_TXCH_CH12 ||
	     tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) &&
	    (txch != RTW89_TXCH_CH12 ||
	     tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) {
		rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n");
		return -EINVAL;
	}

	tx_ring = &rtwpci->tx_rings[txch];
	spin_lock_bh(&rtwpci->trx_lock);

	n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
	if (n_avail_txbd == 0) {
		rtw89_err(rtwdev, "no available TXBD\n");
		ret = -ENOSPC;
		goto err_unlock;
	}

	txbd = rtw89_pci_get_next_txbd(tx_ring);
	ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req);
	if (ret) {
		rtw89_err(rtwdev, "failed to submit TXBD\n");
		goto err_unlock;
	}

	spin_unlock_bh(&rtwpci->trx_lock);
	return 0;

err_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);
	return ret;
}

static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	int ret;

	ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma);
	if (ret) {
		rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma);
		return ret;
	}

	return 0;
}

static const struct rtw89_pci_bd_ram bd_ram_table[RTW89_TXCH_NUM] = {
	[RTW89_TXCH_ACH0] = {.start_idx = 0,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH1] = {.start_idx = 5,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_CH8]  = {.start_idx = 40, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH9]  = {.start_idx = 45, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1},
};

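/* Layout of the shared TX BD RAM (as the field names suggest): start_idx is
 * the first entry of each channel's slice, max_num the largest number of
 * entries the channel may use, and min_num the entries kept reserved for it.
 * rtw89_pci_reset_trx_rings() below writes these values into each channel's
 * BDRAM control register along with the ring length and ring base address.
 */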
1357 static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev) 1358 { 1359 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1360 struct rtw89_pci_tx_ring *tx_ring; 1361 struct rtw89_pci_rx_ring *rx_ring; 1362 struct rtw89_pci_dma_ring *bd_ring; 1363 const struct rtw89_pci_bd_ram *bd_ram; 1364 u32 addr_num; 1365 u32 addr_bdram; 1366 u32 addr_desa_l; 1367 u32 val32; 1368 int i; 1369 1370 for (i = 0; i < RTW89_TXCH_NUM; i++) { 1371 tx_ring = &rtwpci->tx_rings[i]; 1372 bd_ring = &tx_ring->bd_ring; 1373 bd_ram = &bd_ram_table[i]; 1374 addr_num = bd_ring->addr.num; 1375 addr_bdram = bd_ring->addr.bdram; 1376 addr_desa_l = bd_ring->addr.desa_l; 1377 bd_ring->wp = 0; 1378 bd_ring->rp = 0; 1379 1380 val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) | 1381 FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) | 1382 FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num); 1383 1384 rtw89_write16(rtwdev, addr_num, bd_ring->len); 1385 rtw89_write32(rtwdev, addr_bdram, val32); 1386 rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); 1387 } 1388 1389 for (i = 0; i < RTW89_RXCH_NUM; i++) { 1390 rx_ring = &rtwpci->rx_rings[i]; 1391 bd_ring = &rx_ring->bd_ring; 1392 addr_num = bd_ring->addr.num; 1393 addr_desa_l = bd_ring->addr.desa_l; 1394 bd_ring->wp = 0; 1395 bd_ring->rp = 0; 1396 rx_ring->diliver_skb = NULL; 1397 rx_ring->diliver_desc.ready = false; 1398 1399 rtw89_write16(rtwdev, addr_num, bd_ring->len); 1400 rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); 1401 } 1402 } 1403 1404 static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev, 1405 struct rtw89_pci_tx_ring *tx_ring) 1406 { 1407 rtw89_pci_release_busy_txwd(rtwdev, tx_ring); 1408 rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring); 1409 } 1410 1411 static void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev) 1412 { 1413 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1414 int txch; 1415 1416 rtw89_pci_reset_trx_rings(rtwdev); 1417 1418 spin_lock_bh(&rtwpci->trx_lock); 1419 for (txch = 0; txch < RTW89_TXCH_NUM; txch++) { 1420 if (txch == RTW89_TXCH_CH12) { 1421 rtw89_pci_release_fwcmd(rtwdev, rtwpci, 1422 skb_queue_len(&rtwpci->h2c_queue), true); 1423 continue; 1424 } 1425 rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]); 1426 } 1427 spin_unlock_bh(&rtwpci->trx_lock); 1428 } 1429 1430 static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev) 1431 { 1432 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1433 unsigned long flags; 1434 1435 spin_lock_irqsave(&rtwpci->irq_lock, flags); 1436 rtwpci->running = true; 1437 rtw89_chip_enable_intr(rtwdev, rtwpci); 1438 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 1439 } 1440 1441 static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev) 1442 { 1443 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1444 unsigned long flags; 1445 1446 spin_lock_irqsave(&rtwpci->irq_lock, flags); 1447 rtwpci->running = false; 1448 rtw89_chip_disable_intr(rtwdev, rtwpci); 1449 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 1450 } 1451 1452 static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev) 1453 { 1454 rtw89_core_napi_start(rtwdev); 1455 rtw89_pci_enable_intr_lock(rtwdev); 1456 1457 return 0; 1458 } 1459 1460 static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev) 1461 { 1462 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1463 struct pci_dev *pdev = rtwpci->pdev; 1464 1465 rtw89_pci_disable_intr_lock(rtwdev); 1466 synchronize_irq(pdev->irq); 1467 rtw89_core_napi_stop(rtwdev); 1468 } 1469 1470 static void rtw89_pci_ops_pause(struct 
rtw89_dev *rtwdev, bool pause) 1471 { 1472 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1473 struct pci_dev *pdev = rtwpci->pdev; 1474 1475 if (pause) { 1476 rtw89_pci_disable_intr_lock(rtwdev); 1477 synchronize_irq(pdev->irq); 1478 if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags)) 1479 napi_synchronize(&rtwdev->napi); 1480 } else { 1481 rtw89_pci_enable_intr_lock(rtwdev); 1482 rtw89_pci_tx_kick_off_pending(rtwdev); 1483 } 1484 } 1485 1486 static 1487 void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power) 1488 { 1489 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1490 const struct rtw89_pci_info *info = rtwdev->pci_info; 1491 const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power; 1492 const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set; 1493 struct rtw89_pci_tx_ring *tx_ring; 1494 struct rtw89_pci_rx_ring *rx_ring; 1495 int i; 1496 1497 if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n")) 1498 return; 1499 1500 for (i = 0; i < RTW89_TXCH_NUM; i++) { 1501 tx_ring = &rtwpci->tx_rings[i]; 1502 tx_ring->bd_ring.addr.idx = low_power ? 1503 bd_idx_addr->tx_bd_addrs[i] : 1504 dma_addr_set->tx[i].idx; 1505 } 1506 1507 for (i = 0; i < RTW89_RXCH_NUM; i++) { 1508 rx_ring = &rtwpci->rx_rings[i]; 1509 rx_ring->bd_ring.addr.idx = low_power ? 1510 bd_idx_addr->rx_bd_addrs[i] : 1511 dma_addr_set->rx[i].idx; 1512 } 1513 } 1514 1515 static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power) 1516 { 1517 enum rtw89_pci_intr_mask_cfg cfg; 1518 1519 WARN(!rtwdev->hci.paused, "HCI isn't paused\n"); 1520 1521 cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL; 1522 rtw89_chip_config_intr_mask(rtwdev, cfg); 1523 rtw89_pci_switch_bd_idx_addr(rtwdev, low_power); 1524 } 1525 1526 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data); 1527 1528 static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr) 1529 { 1530 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1531 u32 val = readl(rtwpci->mmap + addr); 1532 int count; 1533 1534 for (count = 0; ; count++) { 1535 if (val != RTW89_R32_DEAD) 1536 return val; 1537 if (count >= MAC_REG_POOL_COUNT) { 1538 rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val); 1539 return RTW89_R32_DEAD; 1540 } 1541 rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN); 1542 val = readl(rtwpci->mmap + addr); 1543 } 1544 1545 return val; 1546 } 1547 1548 static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr) 1549 { 1550 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1551 u32 addr32, val32, shift; 1552 1553 if (!ACCESS_CMAC(addr)) 1554 return readb(rtwpci->mmap + addr); 1555 1556 addr32 = addr & ~0x3; 1557 shift = (addr & 0x3) * 8; 1558 val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32); 1559 return val32 >> shift; 1560 } 1561 1562 static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr) 1563 { 1564 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1565 u32 addr32, val32, shift; 1566 1567 if (!ACCESS_CMAC(addr)) 1568 return readw(rtwpci->mmap + addr); 1569 1570 addr32 = addr & ~0x3; 1571 shift = (addr & 0x3) * 8; 1572 val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32); 1573 return val32 >> shift; 1574 } 1575 1576 static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr) 1577 { 1578 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1579 1580 if (!ACCESS_CMAC(addr)) 1581 return readl(rtwpci->mmap + addr); 1582 
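	/* CMAC registers can read back as RTW89_R32_DEAD; rtw89_pci_ops_read32_cmac()
	 * enables the CMAC clocks via R_AX_CK_EN (B_AX_CMAC_ALLCKEN) and retries,
	 * presumably because the range is unreadable while those clocks are gated.
	 */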
1583 return rtw89_pci_ops_read32_cmac(rtwdev, addr); 1584 } 1585 1586 static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data) 1587 { 1588 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1589 1590 writeb(data, rtwpci->mmap + addr); 1591 } 1592 1593 static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data) 1594 { 1595 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1596 1597 writew(data, rtwpci->mmap + addr); 1598 } 1599 1600 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data) 1601 { 1602 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1603 1604 writel(data, rtwpci->mmap + addr); 1605 } 1606 1607 static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable) 1608 { 1609 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 1610 const struct rtw89_pci_info *info = rtwdev->pci_info; 1611 u32 txhci_en = info->txhci_en_bit; 1612 u32 rxhci_en = info->rxhci_en_bit; 1613 1614 if (enable) { 1615 if (chip_id != RTL8852C) 1616 rtw89_write32_clr(rtwdev, info->dma_stop1_reg, 1617 B_AX_STOP_PCIEIO); 1618 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 1619 txhci_en | rxhci_en); 1620 if (chip_id == RTL8852C) 1621 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, 1622 B_AX_STOP_AXI_MST); 1623 } else { 1624 if (chip_id != RTL8852C) 1625 rtw89_write32_set(rtwdev, info->dma_stop1_reg, 1626 B_AX_STOP_PCIEIO); 1627 else 1628 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, 1629 B_AX_STOP_AXI_MST); 1630 if (chip_id == RTL8852C) 1631 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 1632 B_AX_STOP_AXI_MST); 1633 } 1634 } 1635 1636 static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit) 1637 { 1638 u16 val; 1639 1640 rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F); 1641 1642 val = rtw89_read16(rtwdev, R_AX_MDIO_CFG); 1643 switch (speed) { 1644 case PCIE_PHY_GEN1: 1645 if (addr < 0x20) 1646 val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK); 1647 else 1648 val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK); 1649 break; 1650 case PCIE_PHY_GEN2: 1651 if (addr < 0x20) 1652 val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK); 1653 else 1654 val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK); 1655 break; 1656 default: 1657 rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed); 1658 return -EINVAL; 1659 } 1660 rtw89_write16(rtwdev, R_AX_MDIO_CFG, val); 1661 rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit); 1662 1663 return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000, 1664 false, rtwdev, R_AX_MDIO_CFG); 1665 } 1666 1667 static int 1668 rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val) 1669 { 1670 int ret; 1671 1672 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG); 1673 if (ret) { 1674 rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret); 1675 return ret; 1676 } 1677 *val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA); 1678 1679 return 0; 1680 } 1681 1682 static int 1683 rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed) 1684 { 1685 int ret; 1686 1687 rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data); 1688 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG); 1689 if (ret) { 1690 rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret); 1691 return ret; 1692 } 1693 1694 return 0; 1695 } 1696 1697 static int 1698 rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed) 1699 { 
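	/* Read-modify-write over MDIO: read the register, clear the bits covered
	 * by 'mask', shift 'data' into that field and write the result back.
	 */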
1700 u32 shift; 1701 int ret; 1702 u16 val; 1703 1704 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 1705 if (ret) 1706 return ret; 1707 1708 shift = __ffs(mask); 1709 val &= ~mask; 1710 val |= ((data << shift) & mask); 1711 1712 ret = rtw89_write16_mdio(rtwdev, addr, val, speed); 1713 if (ret) 1714 return ret; 1715 1716 return 0; 1717 } 1718 1719 static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) 1720 { 1721 int ret; 1722 u16 val; 1723 1724 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 1725 if (ret) 1726 return ret; 1727 ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed); 1728 if (ret) 1729 return ret; 1730 1731 return 0; 1732 } 1733 1734 static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) 1735 { 1736 int ret; 1737 u16 val; 1738 1739 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 1740 if (ret) 1741 return ret; 1742 ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed); 1743 if (ret) 1744 return ret; 1745 1746 return 0; 1747 } 1748 1749 static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr, 1750 u8 data) 1751 { 1752 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1753 struct pci_dev *pdev = rtwpci->pdev; 1754 1755 return pci_write_config_byte(pdev, addr, data); 1756 } 1757 1758 static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr, 1759 u8 *value) 1760 { 1761 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1762 struct pci_dev *pdev = rtwpci->pdev; 1763 1764 return pci_read_config_byte(pdev, addr, value); 1765 } 1766 1767 static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr, 1768 u8 bit) 1769 { 1770 u8 value; 1771 int ret; 1772 1773 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value); 1774 if (ret) 1775 return ret; 1776 1777 value |= bit; 1778 ret = rtw89_pci_write_config_byte(rtwdev, addr, value); 1779 1780 return ret; 1781 } 1782 1783 static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr, 1784 u8 bit) 1785 { 1786 u8 value; 1787 int ret; 1788 1789 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value); 1790 if (ret) 1791 return ret; 1792 1793 value &= ~bit; 1794 ret = rtw89_pci_write_config_byte(rtwdev, addr, value); 1795 1796 return ret; 1797 } 1798 1799 static int 1800 __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate) 1801 { 1802 u16 val, tar; 1803 int ret; 1804 1805 /* Enable counter */ 1806 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val); 1807 if (ret) 1808 return ret; 1809 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, 1810 phy_rate); 1811 if (ret) 1812 return ret; 1813 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN, 1814 phy_rate); 1815 if (ret) 1816 return ret; 1817 1818 fsleep(300); 1819 1820 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar); 1821 if (ret) 1822 return ret; 1823 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, 1824 phy_rate); 1825 if (ret) 1826 return ret; 1827 1828 tar = tar & 0x0FFF; 1829 if (tar == 0 || tar == 0x0FFF) { 1830 rtw89_err(rtwdev, "[ERR]Get target failed.\n"); 1831 return -EINVAL; 1832 } 1833 1834 *target = tar; 1835 1836 return 0; 1837 } 1838 1839 static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en) 1840 { 1841 enum rtw89_pcie_phy phy_rate; 1842 u16 val16, mgn_set, div_set, tar; 1843 u8 val8, bdr_ori; 1844 bool l1_flag = false; 1845 int ret = 0; 1846 1847 if 
(rtwdev->chip->chip_id != RTL8852B) 1848 return 0; 1849 1850 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8); 1851 if (ret) { 1852 rtw89_err(rtwdev, "[ERR]pci config read %X\n", 1853 RTW89_PCIE_PHY_RATE); 1854 return ret; 1855 } 1856 1857 if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) { 1858 phy_rate = PCIE_PHY_GEN1; 1859 } else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) { 1860 phy_rate = PCIE_PHY_GEN2; 1861 } else { 1862 rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not support\n", val8); 1863 return -EOPNOTSUPP; 1864 } 1865 /* Disable L1BD */ 1866 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori); 1867 if (ret) { 1868 rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL); 1869 return ret; 1870 } 1871 1872 if (bdr_ori & RTW89_PCIE_BIT_L1) { 1873 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 1874 bdr_ori & ~RTW89_PCIE_BIT_L1); 1875 if (ret) { 1876 rtw89_err(rtwdev, "[ERR]pci config write %X\n", 1877 RTW89_PCIE_L1_CTRL); 1878 return ret; 1879 } 1880 l1_flag = true; 1881 } 1882 1883 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); 1884 if (ret) { 1885 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 1886 goto end; 1887 } 1888 1889 if (val16 & B_AX_CALIB_EN) { 1890 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, 1891 val16 & ~B_AX_CALIB_EN, phy_rate); 1892 if (ret) { 1893 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 1894 goto end; 1895 } 1896 } 1897 1898 if (!autook_en) 1899 goto end; 1900 /* Set div */ 1901 ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate); 1902 if (ret) { 1903 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 1904 goto end; 1905 } 1906 1907 /* Obtain div and margin */ 1908 ret = __get_target(rtwdev, &tar, phy_rate); 1909 if (ret) { 1910 rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret); 1911 goto end; 1912 } 1913 1914 mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar; 1915 1916 if (mgn_set >= 128) { 1917 div_set = 0x0003; 1918 mgn_set = 0x000F; 1919 } else if (mgn_set >= 64) { 1920 div_set = 0x0003; 1921 mgn_set >>= 3; 1922 } else if (mgn_set >= 32) { 1923 div_set = 0x0002; 1924 mgn_set >>= 2; 1925 } else if (mgn_set >= 16) { 1926 div_set = 0x0001; 1927 mgn_set >>= 1; 1928 } else if (mgn_set == 0) { 1929 rtw89_err(rtwdev, "[ERR]cal mgn is 0,tar = %d\n", tar); 1930 goto end; 1931 } else { 1932 div_set = 0x0000; 1933 } 1934 1935 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); 1936 if (ret) { 1937 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 1938 goto end; 1939 } 1940 1941 val16 |= u16_encode_bits(div_set, B_AX_DIV); 1942 1943 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate); 1944 if (ret) { 1945 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 1946 goto end; 1947 } 1948 1949 ret = __get_target(rtwdev, &tar, phy_rate); 1950 if (ret) { 1951 rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret); 1952 goto end; 1953 } 1954 1955 rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n", 1956 tar, div_set, mgn_set); 1957 ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1, 1958 (tar & 0x0FFF) | (mgn_set << 12), phy_rate); 1959 if (ret) { 1960 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1); 1961 goto end; 1962 } 1963 1964 /* Enable function */ 1965 ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate); 1966 if (ret) { 1967 rtw89_err(rtwdev, 
"[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 1968 goto end; 1969 } 1970 1971 /* CLK delay = 0 */ 1972 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 1973 PCIE_CLKDLY_HW_0); 1974 1975 end: 1976 /* Set L1BD to ori */ 1977 if (l1_flag) { 1978 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 1979 bdr_ori); 1980 if (ret) { 1981 rtw89_err(rtwdev, "[ERR]pci config write %X\n", 1982 RTW89_PCIE_L1_CTRL); 1983 return ret; 1984 } 1985 } 1986 1987 return ret; 1988 } 1989 1990 static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev) 1991 { 1992 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 1993 int ret; 1994 1995 if (chip_id == RTL8852A) { 1996 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 1997 PCIE_PHY_GEN1); 1998 if (ret) 1999 return ret; 2000 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2001 PCIE_PHY_GEN2); 2002 if (ret) 2003 return ret; 2004 } else if (chip_id == RTL8852C) { 2005 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2, 2006 B_AX_DEGLITCH); 2007 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2, 2008 B_AX_DEGLITCH); 2009 } 2010 2011 return 0; 2012 } 2013 2014 static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev) 2015 { 2016 if (rtwdev->chip->chip_id != RTL8852A) 2017 return; 2018 2019 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE); 2020 } 2021 2022 static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev) 2023 { 2024 if (rtwdev->chip->chip_id != RTL8852A && rtwdev->chip->chip_id != RTL8852B) 2025 return; 2026 2027 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN); 2028 } 2029 2030 static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev) 2031 { 2032 int ret; 2033 2034 if (rtwdev->chip->chip_id != RTL8852A) 2035 return 0; 2036 2037 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2038 PCIE_PHY_GEN1); 2039 if (ret) 2040 return ret; 2041 2042 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2043 PCIE_PHY_GEN2); 2044 if (ret) 2045 return ret; 2046 2047 return 0; 2048 } 2049 2050 static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev) 2051 { 2052 if (rtwdev->chip->chip_id != RTL8852A) 2053 return; 2054 2055 rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN); 2056 } 2057 2058 static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev) 2059 { 2060 if (rtwdev->chip->chip_id == RTL8852A || 2061 rtwdev->chip->chip_id == RTL8852B) { 2062 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 2063 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2064 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2065 B_AX_PCIE_DIS_WLSUS_AFT_PDN); 2066 } else if (rtwdev->chip->chip_id == RTL8852C) { 2067 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2068 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2069 } 2070 } 2071 2072 static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev) 2073 { 2074 if (rtwdev->chip->chip_id != RTL8852B) 2075 return 0; 2076 2077 return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK, 2078 PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1); 2079 } 2080 2081 static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up) 2082 { 2083 if (pwr_up) 2084 rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2085 else 2086 rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2087 } 2088 2089 static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev) 2090 { 2091 if (rtwdev->chip->chip_id != RTL8852C) 2092 return; 2093 2094 rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2095 rtw89_write32_clr(rtwdev, 
R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2096 } 2097 2098 static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev) 2099 { 2100 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2101 return; 2102 2103 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT); 2104 } 2105 2106 static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev) 2107 { 2108 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2109 return; 2110 2111 rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, 2112 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2113 rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3); 2114 rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, 2115 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2116 } 2117 2118 static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev) 2119 { 2120 if (rtwdev->chip->chip_id != RTL8852C) 2121 return; 2122 2123 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1); 2124 } 2125 2126 static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev) 2127 { 2128 if (rtwdev->chip->chip_id != RTL8852C) 2129 return; 2130 2131 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN); 2132 } 2133 2134 static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev) 2135 { 2136 if (rtwdev->chip->chip_id == RTL8852C) 2137 return; 2138 2139 rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL, 2140 B_AX_SIC_EN_FORCE_CLKREQ); 2141 } 2142 2143 static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev) 2144 { 2145 const struct rtw89_pci_info *info = rtwdev->pci_info; 2146 u32 lbc; 2147 2148 if (rtwdev->chip->chip_id == RTL8852C) 2149 return; 2150 2151 lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG); 2152 if (info->lbc_en == MAC_AX_PCIE_ENABLE) { 2153 lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER); 2154 lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN; 2155 rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2156 } else { 2157 lbc &= ~B_AX_LBC_EN; 2158 } 2159 rtw89_write32_set(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2160 } 2161 2162 static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev) 2163 { 2164 const struct rtw89_pci_info *info = rtwdev->pci_info; 2165 u32 val32; 2166 2167 if (rtwdev->chip->chip_id != RTL8852C) 2168 return; 2169 2170 if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) { 2171 val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK, 2172 info->io_rcy_tmr); 2173 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32); 2174 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32); 2175 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32); 2176 2177 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2178 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2179 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2180 } else { 2181 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2182 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2183 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2184 } 2185 2186 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1); 2187 } 2188 2189 static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev) 2190 { 2191 if (rtwdev->chip->chip_id == RTL8852C) 2192 return; 2193 2194 rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL, 2195 B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG); 2196 2197 if (rtwdev->chip->chip_id == RTL8852A) 2198 rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL, 2199 B_AX_EN_CHKDSC_NO_RX_STUCK); 2200 } 2201 2202 static void rtw89_pci_set_keep_reg(struct 
rtw89_dev *rtwdev) 2203 { 2204 if (rtwdev->chip->chip_id == RTL8852C) 2205 return; 2206 2207 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 2208 B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG); 2209 } 2210 2211 static void rtw89_pci_clr_idx_all(struct rtw89_dev *rtwdev) 2212 { 2213 const struct rtw89_pci_info *info = rtwdev->pci_info; 2214 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2215 u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX | 2216 B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX | 2217 B_AX_CLR_CH12_IDX; 2218 u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg; 2219 u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg; 2220 2221 if (chip_id == RTL8852A || chip_id == RTL8852C) 2222 val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX | 2223 B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX; 2224 /* clear DMA indexes */ 2225 rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val); 2226 if (chip_id == RTL8852A || chip_id == RTL8852C) 2227 rtw89_write32_set(rtwdev, txbd_rwptr_clr2, 2228 B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX); 2229 rtw89_write32_set(rtwdev, rxbd_rwptr_clr, 2230 B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX); 2231 } 2232 2233 static int rtw89_poll_txdma_ch_idle_pcie(struct rtw89_dev *rtwdev) 2234 { 2235 const struct rtw89_pci_info *info = rtwdev->pci_info; 2236 u32 ret, check, dma_busy; 2237 u32 dma_busy1 = info->dma_busy1_reg; 2238 u32 dma_busy2 = info->dma_busy2_reg; 2239 2240 check = B_AX_ACH0_BUSY | B_AX_ACH1_BUSY | B_AX_ACH2_BUSY | 2241 B_AX_ACH3_BUSY | B_AX_ACH4_BUSY | B_AX_ACH5_BUSY | 2242 B_AX_ACH6_BUSY | B_AX_ACH7_BUSY | B_AX_CH8_BUSY | 2243 B_AX_CH9_BUSY | B_AX_CH12_BUSY; 2244 2245 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2246 10, 100, false, rtwdev, dma_busy1); 2247 if (ret) 2248 return ret; 2249 2250 check = B_AX_CH10_BUSY | B_AX_CH11_BUSY; 2251 2252 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2253 10, 100, false, rtwdev, dma_busy2); 2254 if (ret) 2255 return ret; 2256 2257 return 0; 2258 } 2259 2260 static int rtw89_poll_rxdma_ch_idle_pcie(struct rtw89_dev *rtwdev) 2261 { 2262 const struct rtw89_pci_info *info = rtwdev->pci_info; 2263 u32 ret, check, dma_busy; 2264 u32 dma_busy3 = info->dma_busy3_reg; 2265 2266 check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY; 2267 2268 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2269 10, 100, false, rtwdev, dma_busy3); 2270 if (ret) 2271 return ret; 2272 2273 return 0; 2274 } 2275 2276 static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev) 2277 { 2278 u32 ret; 2279 2280 ret = rtw89_poll_txdma_ch_idle_pcie(rtwdev); 2281 if (ret) { 2282 rtw89_err(rtwdev, "txdma ch busy\n"); 2283 return ret; 2284 } 2285 2286 ret = rtw89_poll_rxdma_ch_idle_pcie(rtwdev); 2287 if (ret) { 2288 rtw89_err(rtwdev, "rxdma ch busy\n"); 2289 return ret; 2290 } 2291 2292 return 0; 2293 } 2294 2295 static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) 2296 { 2297 const struct rtw89_pci_info *info = rtwdev->pci_info; 2298 enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode; 2299 enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode; 2300 enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode; 2301 enum mac_ax_tag_mode tag_mode = info->tag_mode; 2302 enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl; 2303 enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl; 2304 enum mac_ax_tx_burst tx_burst = info->tx_burst; 2305 enum mac_ax_rx_burst rx_burst = info->rx_burst; 2306 enum rtw89_core_chip_id chip_id = 
rtwdev->chip->chip_id; 2307 u8 cv = rtwdev->hal.cv; 2308 u32 val32; 2309 2310 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2311 if (chip_id == RTL8852A && cv == CHIP_CBV) 2312 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2313 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2314 if (chip_id == RTL8852A || chip_id == RTL8852B) 2315 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2316 } 2317 2318 if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) { 2319 if (chip_id == RTL8852A && cv == CHIP_CBV) 2320 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2321 } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) { 2322 if (chip_id == RTL8852A || chip_id == RTL8852B) 2323 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2324 } 2325 2326 if (rxbd_mode == MAC_AX_RXBD_PKT) { 2327 rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2328 } else if (rxbd_mode == MAC_AX_RXBD_SEP) { 2329 rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2330 2331 if (chip_id == RTL8852A || chip_id == RTL8852B) 2332 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, 2333 B_AX_PCIE_RX_APPLEN_MASK, 0); 2334 } 2335 2336 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2337 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst); 2338 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst); 2339 } else if (chip_id == RTL8852C) { 2340 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst); 2341 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst); 2342 } 2343 2344 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2345 if (tag_mode == MAC_AX_TAG_SGL) { 2346 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) & 2347 ~B_AX_LATENCY_CONTROL; 2348 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2349 } else if (tag_mode == MAC_AX_TAG_MULTI) { 2350 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | 2351 B_AX_LATENCY_CONTROL; 2352 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2353 } 2354 } 2355 2356 rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask, 2357 info->multi_tag_num); 2358 2359 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2360 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE, 2361 wd_dma_idle_intvl); 2362 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT, 2363 wd_dma_act_intvl); 2364 } else if (chip_id == RTL8852C) { 2365 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK, 2366 wd_dma_idle_intvl); 2367 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK, 2368 wd_dma_act_intvl); 2369 } 2370 2371 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2372 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2373 B_AX_HOST_ADDR_INFO_8B_SEL); 2374 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2375 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2376 rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2377 B_AX_HOST_ADDR_INFO_8B_SEL); 2378 rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2379 } 2380 2381 return 0; 2382 } 2383 2384 static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev) 2385 { 2386 const struct rtw89_pci_info *info = rtwdev->pci_info; 2387 2388 if (rtwdev->chip->chip_id == RTL8852A) { 2389 /* ltr sw trigger */ 2390 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE); 2391 } 2392 info->ltr_set(rtwdev, false); 2393 rtw89_pci_ctrl_dma_all(rtwdev, 
false); 2394 rtw89_pci_clr_idx_all(rtwdev); 2395 2396 return 0; 2397 } 2398 2399 static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev) 2400 { 2401 const struct rtw89_pci_info *info = rtwdev->pci_info; 2402 int ret; 2403 2404 rtw89_pci_rxdma_prefth(rtwdev); 2405 rtw89_pci_l1off_pwroff(rtwdev); 2406 rtw89_pci_deglitch_setting(rtwdev); 2407 ret = rtw89_pci_l2_rxen_lat(rtwdev); 2408 if (ret) { 2409 rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret); 2410 return ret; 2411 } 2412 2413 rtw89_pci_aphy_pwrcut(rtwdev); 2414 rtw89_pci_hci_ldo(rtwdev); 2415 rtw89_pci_dphy_delay(rtwdev); 2416 2417 ret = rtw89_pci_auto_refclk_cal(rtwdev, false); 2418 if (ret) { 2419 rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret); 2420 return ret; 2421 } 2422 2423 rtw89_pci_power_wake(rtwdev, true); 2424 rtw89_pci_autoload_hang(rtwdev); 2425 rtw89_pci_l12_vmain(rtwdev); 2426 rtw89_pci_gen2_force_ib(rtwdev); 2427 rtw89_pci_l1_ent_lat(rtwdev); 2428 rtw89_pci_wd_exit_l1(rtwdev); 2429 rtw89_pci_set_sic(rtwdev); 2430 rtw89_pci_set_lbc(rtwdev); 2431 rtw89_pci_set_io_rcy(rtwdev); 2432 rtw89_pci_set_dbg(rtwdev); 2433 rtw89_pci_set_keep_reg(rtwdev); 2434 2435 rtw89_write32_set(rtwdev, info->dma_stop1_reg, B_AX_STOP_WPDMA); 2436 2437 /* stop DMA activities */ 2438 rtw89_pci_ctrl_dma_all(rtwdev, false); 2439 2440 ret = rtw89_pci_poll_dma_all_idle(rtwdev); 2441 if (ret) { 2442 rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n"); 2443 return ret; 2444 } 2445 2446 rtw89_pci_clr_idx_all(rtwdev); 2447 rtw89_pci_mode_op(rtwdev); 2448 2449 /* fill TRX BD indexes */ 2450 rtw89_pci_ops_reset(rtwdev); 2451 2452 ret = rtw89_pci_rst_bdram_pcie(rtwdev); 2453 if (ret) { 2454 rtw89_warn(rtwdev, "reset bdram busy\n"); 2455 return ret; 2456 } 2457 2458 /* enable FW CMD queue to download firmware */ 2459 rtw89_write32_set(rtwdev, info->dma_stop1_reg, B_AX_TX_STOP1_ALL); 2460 rtw89_write32_clr(rtwdev, info->dma_stop1_reg, B_AX_STOP_CH12); 2461 rtw89_write32_set(rtwdev, info->dma_stop2_reg, B_AX_TX_STOP2_ALL); 2462 2463 /* start DMA activities */ 2464 rtw89_pci_ctrl_dma_all(rtwdev, true); 2465 2466 return 0; 2467 } 2468 2469 int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en) 2470 { 2471 u32 val; 2472 2473 if (!en) 2474 return 0; 2475 2476 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 2477 if (rtw89_pci_ltr_is_err_reg_val(val)) 2478 return -EINVAL; 2479 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 2480 if (rtw89_pci_ltr_is_err_reg_val(val)) 2481 return -EINVAL; 2482 val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY); 2483 if (rtw89_pci_ltr_is_err_reg_val(val)) 2484 return -EINVAL; 2485 val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY); 2486 if (rtw89_pci_ltr_is_err_reg_val(val)) 2487 return -EINVAL; 2488 2489 rtw89_write32_clr(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN); 2490 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_EN); 2491 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK, 2492 PCI_LTR_SPC_500US); 2493 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 2494 PCI_LTR_IDLE_TIMER_800US); 2495 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 2496 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 2497 rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x88e088e0); 2498 rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b); 2499 2500 return 0; 2501 } 2502 EXPORT_SYMBOL(rtw89_pci_ltr_set); 2503 2504 int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en) 2505 { 2506 u32 dec_ctrl; 2507 u32 val32; 2508 2509 val32 = rtw89_read32(rtwdev, 
R_AX_LTR_CTRL_0); 2510 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2511 return -EINVAL; 2512 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 2513 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2514 return -EINVAL; 2515 dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL); 2516 if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl)) 2517 return -EINVAL; 2518 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3); 2519 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2520 return -EINVAL; 2521 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0); 2522 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2523 return -EINVAL; 2524 2525 if (!en) { 2526 dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN); 2527 dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) | 2528 B_AX_LTR_REQ_DRV; 2529 } else { 2530 dec_ctrl |= B_AX_LTR_HW_DEC_EN; 2531 } 2532 2533 dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK; 2534 dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US); 2535 2536 if (en) 2537 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, 2538 B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN); 2539 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 2540 PCI_LTR_IDLE_TIMER_3_2MS); 2541 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 2542 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 2543 rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl); 2544 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003); 2545 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b); 2546 2547 return 0; 2548 } 2549 EXPORT_SYMBOL(rtw89_pci_ltr_set_v1); 2550 2551 static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev) 2552 { 2553 const struct rtw89_pci_info *info = rtwdev->pci_info; 2554 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2555 int ret; 2556 2557 ret = info->ltr_set(rtwdev, true); 2558 if (ret) { 2559 rtw89_err(rtwdev, "pci ltr set fail\n"); 2560 return ret; 2561 } 2562 if (chip_id == RTL8852A) { 2563 /* ltr sw trigger */ 2564 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT); 2565 } 2566 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2567 /* ADDR info 8-byte mode */ 2568 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2569 B_AX_HOST_ADDR_INFO_8B_SEL); 2570 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2571 } 2572 2573 /* enable DMA for all queues */ 2574 rtw89_write32_clr(rtwdev, info->dma_stop1_reg, B_AX_TX_STOP1_ALL); 2575 rtw89_write32_clr(rtwdev, info->dma_stop2_reg, B_AX_TX_STOP2_ALL); 2576 2577 /* Release PCI IO */ 2578 rtw89_write32_clr(rtwdev, info->dma_stop1_reg, 2579 B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO); 2580 2581 return 0; 2582 } 2583 2584 static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev, 2585 struct pci_dev *pdev) 2586 { 2587 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2588 int ret; 2589 2590 ret = pci_enable_device(pdev); 2591 if (ret) { 2592 rtw89_err(rtwdev, "failed to enable pci device\n"); 2593 return ret; 2594 } 2595 2596 pci_set_master(pdev); 2597 pci_set_drvdata(pdev, rtwdev->hw); 2598 2599 rtwpci->pdev = pdev; 2600 2601 return 0; 2602 } 2603 2604 static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev, 2605 struct pci_dev *pdev) 2606 { 2607 pci_clear_master(pdev); 2608 pci_disable_device(pdev); 2609 } 2610 2611 static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, 2612 struct pci_dev *pdev) 2613 { 2614 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2615 unsigned long resource_len; 2616 u8 bar_id = 2; 2617 int ret; 2618 
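/* Reserve the PCI BAR regions, restrict both the streaming and coherent
 * DMA masks to 32-bit addressing, then iomap BAR 2 (bar_id), which backs
 * all subsequent register accesses through rtwpci->mmap.
 */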
2619 ret = pci_request_regions(pdev, KBUILD_MODNAME); 2620 if (ret) { 2621 rtw89_err(rtwdev, "failed to request pci regions\n"); 2622 goto err; 2623 } 2624 2625 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 2626 if (ret) { 2627 rtw89_err(rtwdev, "failed to set dma mask to 32-bit\n"); 2628 goto err_release_regions; 2629 } 2630 2631 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 2632 if (ret) { 2633 rtw89_err(rtwdev, "failed to set consistent dma mask to 32-bit\n"); 2634 goto err_release_regions; 2635 } 2636 2637 resource_len = pci_resource_len(pdev, bar_id); 2638 rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len); 2639 if (!rtwpci->mmap) { 2640 rtw89_err(rtwdev, "failed to map pci io\n"); 2641 ret = -EIO; 2642 goto err_release_regions; 2643 } 2644 2645 return 0; 2646 2647 err_release_regions: 2648 pci_release_regions(pdev); 2649 err: 2650 return ret; 2651 } 2652 2653 static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev, 2654 struct pci_dev *pdev) 2655 { 2656 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2657 2658 if (rtwpci->mmap) { 2659 pci_iounmap(pdev, rtwpci->mmap); 2660 pci_release_regions(pdev); 2661 } 2662 } 2663 2664 static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev, 2665 struct pci_dev *pdev, 2666 struct rtw89_pci_tx_ring *tx_ring) 2667 { 2668 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 2669 u8 *head = wd_ring->head; 2670 dma_addr_t dma = wd_ring->dma; 2671 u32 page_size = wd_ring->page_size; 2672 u32 page_num = wd_ring->page_num; 2673 u32 ring_sz = page_size * page_num; 2674 2675 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2676 wd_ring->head = NULL; 2677 } 2678 2679 static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev, 2680 struct pci_dev *pdev, 2681 struct rtw89_pci_tx_ring *tx_ring) 2682 { 2683 int ring_sz; 2684 u8 *head; 2685 dma_addr_t dma; 2686 2687 head = tx_ring->bd_ring.head; 2688 dma = tx_ring->bd_ring.dma; 2689 ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len; 2690 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2691 2692 tx_ring->bd_ring.head = NULL; 2693 } 2694 2695 static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev, 2696 struct pci_dev *pdev) 2697 { 2698 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2699 struct rtw89_pci_tx_ring *tx_ring; 2700 int i; 2701 2702 for (i = 0; i < RTW89_TXCH_NUM; i++) { 2703 tx_ring = &rtwpci->tx_rings[i]; 2704 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 2705 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 2706 } 2707 } 2708 2709 static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev, 2710 struct pci_dev *pdev, 2711 struct rtw89_pci_rx_ring *rx_ring) 2712 { 2713 struct rtw89_pci_rx_info *rx_info; 2714 struct sk_buff *skb; 2715 dma_addr_t dma; 2716 u32 buf_sz; 2717 u8 *head; 2718 int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len; 2719 int i; 2720 2721 buf_sz = rx_ring->buf_sz; 2722 for (i = 0; i < rx_ring->bd_ring.len; i++) { 2723 skb = rx_ring->buf[i]; 2724 if (!skb) 2725 continue; 2726 2727 rx_info = RTW89_PCI_RX_SKB_CB(skb); 2728 dma = rx_info->dma; 2729 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 2730 dev_kfree_skb(skb); 2731 rx_ring->buf[i] = NULL; 2732 } 2733 2734 head = rx_ring->bd_ring.head; 2735 dma = rx_ring->bd_ring.dma; 2736 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2737 2738 rx_ring->bd_ring.head = NULL; 2739 } 2740 2741 static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev, 2742 struct pci_dev *pdev) 2743 { 2744 struct rtw89_pci *rtwpci = (struct 
rtw89_pci *)rtwdev->priv; 2745 struct rtw89_pci_rx_ring *rx_ring; 2746 int i; 2747 2748 for (i = 0; i < RTW89_RXCH_NUM; i++) { 2749 rx_ring = &rtwpci->rx_rings[i]; 2750 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 2751 } 2752 } 2753 2754 static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev, 2755 struct pci_dev *pdev) 2756 { 2757 rtw89_pci_free_rx_rings(rtwdev, pdev); 2758 rtw89_pci_free_tx_rings(rtwdev, pdev); 2759 } 2760 2761 static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev, 2762 struct rtw89_pci_rx_ring *rx_ring, 2763 struct sk_buff *skb, int buf_sz, u32 idx) 2764 { 2765 struct rtw89_pci_rx_info *rx_info; 2766 struct rtw89_pci_rx_bd_32 *rx_bd; 2767 dma_addr_t dma; 2768 2769 if (!skb) 2770 return -EINVAL; 2771 2772 dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE); 2773 if (dma_mapping_error(&pdev->dev, dma)) 2774 return -EBUSY; 2775 2776 rx_info = RTW89_PCI_RX_SKB_CB(skb); 2777 rx_bd = RTW89_PCI_RX_BD(rx_ring, idx); 2778 2779 memset(rx_bd, 0, sizeof(*rx_bd)); 2780 rx_bd->buf_size = cpu_to_le16(buf_sz); 2781 rx_bd->dma = cpu_to_le32(dma); 2782 rx_info->dma = dma; 2783 2784 return 0; 2785 } 2786 2787 static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev, 2788 struct pci_dev *pdev, 2789 struct rtw89_pci_tx_ring *tx_ring, 2790 enum rtw89_tx_channel txch) 2791 { 2792 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 2793 struct rtw89_pci_tx_wd *txwd; 2794 dma_addr_t dma; 2795 dma_addr_t cur_paddr; 2796 u8 *head; 2797 u8 *cur_vaddr; 2798 u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE; 2799 u32 page_num = RTW89_PCI_TXWD_NUM_MAX; 2800 u32 ring_sz = page_size * page_num; 2801 u32 page_offset; 2802 int i; 2803 2804 /* FWCMD queue doesn't use txwd as pages */ 2805 if (txch == RTW89_TXCH_CH12) 2806 return 0; 2807 2808 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 2809 if (!head) 2810 return -ENOMEM; 2811 2812 INIT_LIST_HEAD(&wd_ring->free_pages); 2813 wd_ring->head = head; 2814 wd_ring->dma = dma; 2815 wd_ring->page_size = page_size; 2816 wd_ring->page_num = page_num; 2817 2818 page_offset = 0; 2819 for (i = 0; i < page_num; i++) { 2820 txwd = &wd_ring->pages[i]; 2821 cur_paddr = dma + page_offset; 2822 cur_vaddr = head + page_offset; 2823 2824 skb_queue_head_init(&txwd->queue); 2825 INIT_LIST_HEAD(&txwd->list); 2826 txwd->paddr = cur_paddr; 2827 txwd->vaddr = cur_vaddr; 2828 txwd->len = page_size; 2829 txwd->seq = i; 2830 rtw89_pci_enqueue_txwd(tx_ring, txwd); 2831 2832 page_offset += page_size; 2833 } 2834 2835 return 0; 2836 } 2837 2838 static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev, 2839 struct pci_dev *pdev, 2840 struct rtw89_pci_tx_ring *tx_ring, 2841 u32 desc_size, u32 len, 2842 enum rtw89_tx_channel txch) 2843 { 2844 const struct rtw89_pci_ch_dma_addr *txch_addr; 2845 int ring_sz = desc_size * len; 2846 u8 *head; 2847 dma_addr_t dma; 2848 int ret; 2849 2850 ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch); 2851 if (ret) { 2852 rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch); 2853 goto err; 2854 } 2855 2856 ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr); 2857 if (ret) { 2858 rtw89_err(rtwdev, "failed to get address of txch %d", txch); 2859 goto err_free_wd_ring; 2860 } 2861 2862 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 2863 if (!head) { 2864 ret = -ENOMEM; 2865 goto err_free_wd_ring; 2866 } 2867 2868 INIT_LIST_HEAD(&tx_ring->busy_pages); 2869 tx_ring->bd_ring.head = head; 2870 tx_ring->bd_ring.dma = dma; 2871 
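/* bd_ring holds the host view of this TX BD ring: the coherent buffer
 * (head/dma), its geometry (len/desc_size), the per-channel HW register
 * addresses (addr), and the software write/read pointers (wp/rp), which
 * start from zero.
 */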
tx_ring->bd_ring.len = len; 2872 tx_ring->bd_ring.desc_size = desc_size; 2873 tx_ring->bd_ring.addr = *txch_addr; 2874 tx_ring->bd_ring.wp = 0; 2875 tx_ring->bd_ring.rp = 0; 2876 tx_ring->txch = txch; 2877 2878 return 0; 2879 2880 err_free_wd_ring: 2881 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 2882 err: 2883 return ret; 2884 } 2885 2886 static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev, 2887 struct pci_dev *pdev) 2888 { 2889 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2890 struct rtw89_pci_tx_ring *tx_ring; 2891 u32 desc_size; 2892 u32 len; 2893 u32 i, tx_allocated; 2894 int ret; 2895 2896 for (i = 0; i < RTW89_TXCH_NUM; i++) { 2897 tx_ring = &rtwpci->tx_rings[i]; 2898 desc_size = sizeof(struct rtw89_pci_tx_bd_32); 2899 len = RTW89_PCI_TXBD_NUM_MAX; 2900 ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring, 2901 desc_size, len, i); 2902 if (ret) { 2903 rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i); 2904 goto err_free; 2905 } 2906 } 2907 2908 return 0; 2909 2910 err_free: 2911 tx_allocated = i; 2912 for (i = 0; i < tx_allocated; i++) { 2913 tx_ring = &rtwpci->tx_rings[i]; 2914 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 2915 } 2916 2917 return ret; 2918 } 2919 2920 static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev, 2921 struct pci_dev *pdev, 2922 struct rtw89_pci_rx_ring *rx_ring, 2923 u32 desc_size, u32 len, u32 rxch) 2924 { 2925 const struct rtw89_pci_ch_dma_addr *rxch_addr; 2926 struct sk_buff *skb; 2927 u8 *head; 2928 dma_addr_t dma; 2929 int ring_sz = desc_size * len; 2930 int buf_sz = RTW89_PCI_RX_BUF_SIZE; 2931 int i, allocated; 2932 int ret; 2933 2934 ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr); 2935 if (ret) { 2936 rtw89_err(rtwdev, "failed to get address of rxch %d", rxch); 2937 return ret; 2938 } 2939 2940 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 2941 if (!head) { 2942 ret = -ENOMEM; 2943 goto err; 2944 } 2945 2946 rx_ring->bd_ring.head = head; 2947 rx_ring->bd_ring.dma = dma; 2948 rx_ring->bd_ring.len = len; 2949 rx_ring->bd_ring.desc_size = desc_size; 2950 rx_ring->bd_ring.addr = *rxch_addr; 2951 rx_ring->bd_ring.wp = 0; 2952 rx_ring->bd_ring.rp = 0; 2953 rx_ring->buf_sz = buf_sz; 2954 rx_ring->diliver_skb = NULL; 2955 rx_ring->diliver_desc.ready = false; 2956 2957 for (i = 0; i < len; i++) { 2958 skb = dev_alloc_skb(buf_sz); 2959 if (!skb) { 2960 ret = -ENOMEM; 2961 goto err_free; 2962 } 2963 2964 memset(skb->data, 0, buf_sz); 2965 rx_ring->buf[i] = skb; 2966 ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb, 2967 buf_sz, i); 2968 if (ret) { 2969 rtw89_err(rtwdev, "failed to init rx buf %d\n", i); 2970 dev_kfree_skb_any(skb); 2971 rx_ring->buf[i] = NULL; 2972 goto err_free; 2973 } 2974 } 2975 2976 return 0; 2977 2978 err_free: 2979 allocated = i; 2980 for (i = 0; i < allocated; i++) { 2981 skb = rx_ring->buf[i]; 2982 if (!skb) 2983 continue; 2984 dma = *((dma_addr_t *)skb->cb); 2985 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 2986 dev_kfree_skb(skb); 2987 rx_ring->buf[i] = NULL; 2988 } 2989 2990 head = rx_ring->bd_ring.head; 2991 dma = rx_ring->bd_ring.dma; 2992 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2993 2994 rx_ring->bd_ring.head = NULL; 2995 err: 2996 return ret; 2997 } 2998 2999 static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev, 3000 struct pci_dev *pdev) 3001 { 3002 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3003 struct rtw89_pci_rx_ring *rx_ring; 3004 u32 desc_size; 3005 u32 len; 3006 int i, rx_allocated; 3007 int ret; 
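/* Each RX channel gets a ring of rtw89_pci_rx_bd_32 descriptors with
 * RTW89_PCI_RXBD_NUM_MAX entries; every descriptor is backed by a
 * pre-mapped skb of RTW89_PCI_RX_BUF_SIZE bytes. Rings that were already
 * allocated are freed again if any later allocation fails.
 */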
3008 3009 for (i = 0; i < RTW89_RXCH_NUM; i++) { 3010 rx_ring = &rtwpci->rx_rings[i]; 3011 desc_size = sizeof(struct rtw89_pci_rx_bd_32); 3012 len = RTW89_PCI_RXBD_NUM_MAX; 3013 ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring, 3014 desc_size, len, i); 3015 if (ret) { 3016 rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i); 3017 goto err_free; 3018 } 3019 } 3020 3021 return 0; 3022 3023 err_free: 3024 rx_allocated = i; 3025 for (i = 0; i < rx_allocated; i++) { 3026 rx_ring = &rtwpci->rx_rings[i]; 3027 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 3028 } 3029 3030 return ret; 3031 } 3032 3033 static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev, 3034 struct pci_dev *pdev) 3035 { 3036 int ret; 3037 3038 ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev); 3039 if (ret) { 3040 rtw89_err(rtwdev, "failed to alloc dma tx rings\n"); 3041 goto err; 3042 } 3043 3044 ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev); 3045 if (ret) { 3046 rtw89_err(rtwdev, "failed to alloc dma rx rings\n"); 3047 goto err_free_tx_rings; 3048 } 3049 3050 return 0; 3051 3052 err_free_tx_rings: 3053 rtw89_pci_free_tx_rings(rtwdev, pdev); 3054 err: 3055 return ret; 3056 } 3057 3058 static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev, 3059 struct rtw89_pci *rtwpci) 3060 { 3061 skb_queue_head_init(&rtwpci->h2c_queue); 3062 skb_queue_head_init(&rtwpci->h2c_release_queue); 3063 } 3064 3065 static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev, 3066 struct pci_dev *pdev) 3067 { 3068 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3069 int ret; 3070 3071 ret = rtw89_pci_setup_mapping(rtwdev, pdev); 3072 if (ret) { 3073 rtw89_err(rtwdev, "failed to setup pci mapping\n"); 3074 goto err; 3075 } 3076 3077 ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev); 3078 if (ret) { 3079 rtw89_err(rtwdev, "failed to alloc pci trx rings\n"); 3080 goto err_pci_unmap; 3081 } 3082 3083 rtw89_pci_h2c_init(rtwdev, rtwpci); 3084 3085 spin_lock_init(&rtwpci->irq_lock); 3086 spin_lock_init(&rtwpci->trx_lock); 3087 3088 return 0; 3089 3090 err_pci_unmap: 3091 rtw89_pci_clear_mapping(rtwdev, pdev); 3092 err: 3093 return ret; 3094 } 3095 3096 static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev, 3097 struct pci_dev *pdev) 3098 { 3099 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3100 3101 rtw89_pci_free_trx_rings(rtwdev, pdev); 3102 rtw89_pci_clear_mapping(rtwdev, pdev); 3103 rtw89_pci_release_fwcmd(rtwdev, rtwpci, 3104 skb_queue_len(&rtwpci->h2c_queue), true); 3105 } 3106 3107 void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev) 3108 { 3109 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3110 3111 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0; 3112 3113 if (rtwpci->under_recovery) { 3114 rtwpci->intrs[0] = B_AX_HS0ISR_IND_INT_EN; 3115 rtwpci->intrs[1] = 0; 3116 } else { 3117 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3118 B_AX_RXDMA_INT_EN | 3119 B_AX_RXP1DMA_INT_EN | 3120 B_AX_RPQDMA_INT_EN | 3121 B_AX_RXDMA_STUCK_INT_EN | 3122 B_AX_RDU_INT_EN | 3123 B_AX_RPQBD_FULL_INT_EN | 3124 B_AX_HS0ISR_IND_INT_EN; 3125 3126 rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN; 3127 } 3128 } 3129 EXPORT_SYMBOL(rtw89_pci_config_intr_mask); 3130 3131 static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev) 3132 { 3133 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3134 3135 rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN; 3136 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3137 rtwpci->intrs[0] = 0; 3138 rtwpci->intrs[1] = 0; 3139 } 3140 3141 static void 
rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev) 3142 { 3143 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3144 3145 rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN | 3146 B_AX_HS1ISR_IND_INT_EN | 3147 B_AX_HS0ISR_IND_INT_EN; 3148 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3149 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3150 B_AX_RXDMA_INT_EN | 3151 B_AX_RXP1DMA_INT_EN | 3152 B_AX_RPQDMA_INT_EN | 3153 B_AX_RXDMA_STUCK_INT_EN | 3154 B_AX_RDU_INT_EN | 3155 B_AX_RPQBD_FULL_INT_EN; 3156 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3157 } 3158 3159 static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev) 3160 { 3161 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3162 3163 rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN | 3164 B_AX_HS0ISR_IND_INT_EN; 3165 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3166 rtwpci->intrs[0] = 0; 3167 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3168 } 3169 3170 void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev) 3171 { 3172 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3173 3174 if (rtwpci->under_recovery) 3175 rtw89_pci_recovery_intr_mask_v1(rtwdev); 3176 else if (rtwpci->low_power) 3177 rtw89_pci_low_power_intr_mask_v1(rtwdev); 3178 else 3179 rtw89_pci_default_intr_mask_v1(rtwdev); 3180 } 3181 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1); 3182 3183 static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev, 3184 struct pci_dev *pdev) 3185 { 3186 unsigned long flags = 0; 3187 int ret; 3188 3189 flags |= PCI_IRQ_LEGACY | PCI_IRQ_MSI; 3190 ret = pci_alloc_irq_vectors(pdev, 1, 1, flags); 3191 if (ret < 0) { 3192 rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret); 3193 goto err; 3194 } 3195 3196 ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq, 3197 rtw89_pci_interrupt_handler, 3198 rtw89_pci_interrupt_threadfn, 3199 IRQF_SHARED, KBUILD_MODNAME, rtwdev); 3200 if (ret) { 3201 rtw89_err(rtwdev, "failed to request threaded irq\n"); 3202 goto err_free_vector; 3203 } 3204 3205 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET); 3206 3207 return 0; 3208 3209 err_free_vector: 3210 pci_free_irq_vectors(pdev); 3211 err: 3212 return ret; 3213 } 3214 3215 static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev, 3216 struct pci_dev *pdev) 3217 { 3218 devm_free_irq(rtwdev->dev, pdev->irq, rtwdev); 3219 pci_free_irq_vectors(pdev); 3220 } 3221 3222 static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable) 3223 { 3224 int ret; 3225 3226 if (rtw89_pci_disable_clkreq) 3227 return; 3228 3229 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 3230 PCIE_CLKDLY_HW_30US); 3231 if (ret) 3232 rtw89_err(rtwdev, "failed to set CLKREQ Delay\n"); 3233 3234 if (enable) 3235 ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL, 3236 RTW89_PCIE_BIT_CLK); 3237 else 3238 ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1_CTRL, 3239 RTW89_PCIE_BIT_CLK); 3240 if (ret) 3241 rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d", 3242 enable ? 
"set" : "unset", ret); 3243 } 3244 3245 static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable) 3246 { 3247 u8 value = 0; 3248 int ret; 3249 3250 if (rtw89_pci_disable_aspm_l1) 3251 return; 3252 3253 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value); 3254 if (ret) 3255 rtw89_err(rtwdev, "failed to read ASPM Delay\n"); 3256 3257 value &= ~(RTW89_L1DLY_MASK | RTW89_L0DLY_MASK); 3258 value |= FIELD_PREP(RTW89_L1DLY_MASK, PCIE_L1DLY_16US) | 3259 FIELD_PREP(RTW89_L0DLY_MASK, PCIE_L0SDLY_4US); 3260 3261 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value); 3262 if (ret) 3263 rtw89_err(rtwdev, "failed to read ASPM Delay\n"); 3264 3265 if (enable) 3266 ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL, 3267 RTW89_PCIE_BIT_L1); 3268 else 3269 ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1_CTRL, 3270 RTW89_PCIE_BIT_L1); 3271 if (ret) 3272 rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d", 3273 enable ? "set" : "unset", ret); 3274 } 3275 3276 static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev) 3277 { 3278 struct rtw89_traffic_stats *stats = &rtwdev->stats; 3279 enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv; 3280 enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv; 3281 u32 val = 0; 3282 3283 if (!rtwdev->scanning && 3284 (tx_tfc_lv >= RTW89_TFC_HIGH || rx_tfc_lv >= RTW89_TFC_HIGH)) 3285 val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL | 3286 FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) | 3287 FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) | 3288 FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64); 3289 3290 rtw89_write32(rtwdev, R_AX_INT_MIT_RX, val); 3291 } 3292 3293 static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev) 3294 { 3295 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3296 struct pci_dev *pdev = rtwpci->pdev; 3297 u16 link_ctrl; 3298 int ret; 3299 3300 /* Though there is standard PCIE configuration space to set the 3301 * link control register, but by Realtek's design, driver should 3302 * check if host supports CLKREQ/ASPM to enable the HW module. 3303 * 3304 * These functions are implemented by two HW modules associated, 3305 * one is responsible to access PCIE configuration space to 3306 * follow the host settings, and another is in charge of doing 3307 * CLKREQ/ASPM mechanisms, it is default disabled. Because sometimes 3308 * the host does not support it, and due to some reasons or wrong 3309 * settings (ex. CLKREQ# not Bi-Direction), it could lead to device 3310 * loss if HW misbehaves on the link. 3311 * 3312 * Hence it's designed that driver should first check the PCIE 3313 * configuration space is sync'ed and enabled, then driver can turn 3314 * on the other module that is actually working on the mechanism. 3315 */ 3316 ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl); 3317 if (ret) { 3318 rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret); 3319 return; 3320 } 3321 3322 if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN) 3323 rtw89_pci_clkreq_set(rtwdev, true); 3324 3325 if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1) 3326 rtw89_pci_aspm_set(rtwdev, true); 3327 } 3328 3329 static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable) 3330 { 3331 int ret; 3332 3333 if (enable) 3334 ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_TIMER_CTRL, 3335 RTW89_PCIE_BIT_L1SUB); 3336 else 3337 ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_TIMER_CTRL, 3338 RTW89_PCIE_BIT_L1SUB); 3339 if (ret) 3340 rtw89_err(rtwdev, "failed to %s L1SS, ret=%d", 3341 enable ? 
"set" : "unset", ret); 3342 } 3343 3344 static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev) 3345 { 3346 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3347 struct pci_dev *pdev = rtwpci->pdev; 3348 u32 l1ss_cap_ptr, l1ss_ctrl; 3349 3350 if (rtw89_pci_disable_l1ss) 3351 return; 3352 3353 l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); 3354 if (!l1ss_cap_ptr) 3355 return; 3356 3357 pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl); 3358 3359 if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK) 3360 rtw89_pci_l1ss_set(rtwdev, true); 3361 } 3362 3363 static void rtw89_pci_ctrl_dma_all_pcie(struct rtw89_dev *rtwdev, u8 en) 3364 { 3365 const struct rtw89_pci_info *info = rtwdev->pci_info; 3366 u32 val32; 3367 3368 if (en == MAC_AX_FUNC_EN) { 3369 val32 = B_AX_STOP_PCIEIO; 3370 rtw89_write32_clr(rtwdev, info->dma_stop1_reg, val32); 3371 3372 val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN; 3373 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 3374 } else { 3375 val32 = B_AX_STOP_PCIEIO; 3376 rtw89_write32_set(rtwdev, info->dma_stop1_reg, val32); 3377 3378 val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN; 3379 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 3380 } 3381 } 3382 3383 static int rtw89_pci_poll_io_idle(struct rtw89_dev *rtwdev) 3384 { 3385 int ret = 0; 3386 u32 sts; 3387 u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY; 3388 3389 ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0, 3390 10, 1000, false, rtwdev, 3391 R_AX_PCIE_DMA_BUSY1); 3392 if (ret) { 3393 rtw89_err(rtwdev, "pci dmach busy1 0x%X\n", 3394 rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1)); 3395 return -EINVAL; 3396 } 3397 return ret; 3398 } 3399 3400 static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev) 3401 { 3402 u32 val, dma_rst = 0; 3403 int ret; 3404 3405 rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_DIS); 3406 ret = rtw89_pci_poll_io_idle(rtwdev); 3407 if (ret) { 3408 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 3409 rtw89_debug(rtwdev, RTW89_DBG_HCI, 3410 "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n", 3411 R_AX_DBG_ERR_FLAG, val); 3412 if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0) 3413 dma_rst |= B_AX_HCI_TXDMA_EN; 3414 if (val & B_AX_RX_STUCK) 3415 dma_rst |= B_AX_HCI_RXDMA_EN; 3416 val = rtw89_read32(rtwdev, R_AX_HCI_FUNC_EN); 3417 rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val & ~dma_rst); 3418 rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val | dma_rst); 3419 ret = rtw89_pci_poll_io_idle(rtwdev); 3420 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 3421 rtw89_debug(rtwdev, RTW89_DBG_HCI, 3422 "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n", 3423 R_AX_DBG_ERR_FLAG, val); 3424 } 3425 3426 return ret; 3427 } 3428 3429 static void rtw89_pci_ctrl_hci_dma_en(struct rtw89_dev *rtwdev, u8 en) 3430 { 3431 u32 val32; 3432 3433 if (en == MAC_AX_FUNC_EN) { 3434 val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN; 3435 rtw89_write32_set(rtwdev, R_AX_HCI_FUNC_EN, val32); 3436 } else { 3437 val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN; 3438 rtw89_write32_clr(rtwdev, R_AX_HCI_FUNC_EN, val32); 3439 } 3440 } 3441 3442 static int rtw89_pci_rst_bdram(struct rtw89_dev *rtwdev) 3443 { 3444 int ret = 0; 3445 u32 val32, sts; 3446 3447 val32 = B_AX_RST_BDRAM; 3448 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 3449 3450 ret = read_poll_timeout_atomic(rtw89_read32, sts, 3451 (sts & B_AX_RST_BDRAM) == 0x0, 1, 100, 3452 true, rtwdev, R_AX_PCIE_INIT_CFG1); 3453 return ret; 3454 } 3455 3456 static int rtw89_pci_lv1rst_start_dma(struct rtw89_dev 
*rtwdev) 3457 { 3458 u32 ret; 3459 3460 rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_DIS); 3461 rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_EN); 3462 rtw89_pci_clr_idx_all(rtwdev); 3463 3464 ret = rtw89_pci_rst_bdram(rtwdev); 3465 if (ret) 3466 return ret; 3467 3468 rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_EN); 3469 return ret; 3470 } 3471 3472 static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev, 3473 enum rtw89_lv1_rcvy_step step) 3474 { 3475 int ret; 3476 3477 switch (step) { 3478 case RTW89_LV1_RCVY_STEP_1: 3479 ret = rtw89_pci_lv1rst_stop_dma(rtwdev); 3480 if (ret) 3481 rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n"); 3482 3483 break; 3484 3485 case RTW89_LV1_RCVY_STEP_2: 3486 ret = rtw89_pci_lv1rst_start_dma(rtwdev); 3487 if (ret) 3488 rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n"); 3489 break; 3490 3491 default: 3492 return -EINVAL; 3493 } 3494 3495 return ret; 3496 } 3497 3498 static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev) 3499 { 3500 rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX =0x%08x\n", 3501 rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX)); 3502 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n", 3503 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG)); 3504 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n", 3505 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG)); 3506 } 3507 3508 static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget) 3509 { 3510 struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi); 3511 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3512 unsigned long flags; 3513 int work_done; 3514 3515 rtwdev->napi_budget_countdown = budget; 3516 3517 rtw89_pci_clear_isr0(rtwdev, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT); 3518 work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 3519 if (work_done == budget) 3520 return budget; 3521 3522 rtw89_pci_clear_isr0(rtwdev, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT | B_AX_RDU_INT); 3523 work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 3524 if (work_done < budget && napi_complete_done(napi, work_done)) { 3525 spin_lock_irqsave(&rtwpci->irq_lock, flags); 3526 if (likely(rtwpci->running)) 3527 rtw89_chip_enable_intr(rtwdev, rtwpci); 3528 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 3529 } 3530 3531 return work_done; 3532 } 3533 3534 static int __maybe_unused rtw89_pci_suspend(struct device *dev) 3535 { 3536 struct ieee80211_hw *hw = dev_get_drvdata(dev); 3537 struct rtw89_dev *rtwdev = hw->priv; 3538 3539 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 3540 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 3541 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3542 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); 3543 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3544 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 3545 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); 3546 3547 return 0; 3548 } 3549 3550 static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev) 3551 { 3552 if (rtwdev->chip->chip_id == RTL8852C) 3553 return; 3554 3555 /* Hardware need write the reg twice to ensure the setting work */ 3556 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE, 3557 RTW89_PCIE_BIT_CFG_RST_MSTATE); 3558 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE, 3559 RTW89_PCIE_BIT_CFG_RST_MSTATE); 3560 } 3561 3562 static int __maybe_unused rtw89_pci_resume(struct device *dev) 3563 { 3564 struct ieee80211_hw *hw = dev_get_drvdata(dev); 3565 struct rtw89_dev *rtwdev = hw->priv; 3566 3567 
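/* Reverse the register settings applied at suspend time, rewrite the
 * PCIe reset machine state via rtw89_pci_l2_hci_ldo(), and re-apply the
 * CLKREQ/ASPM and L1SS link configuration.
 */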
rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 3568 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 3569 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3570 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); 3571 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3572 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, 3573 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); 3574 rtw89_pci_l2_hci_ldo(rtwdev); 3575 rtw89_pci_link_cfg(rtwdev); 3576 rtw89_pci_l1ss_cfg(rtwdev); 3577 3578 return 0; 3579 } 3580 3581 SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume); 3582 EXPORT_SYMBOL(rtw89_pm_ops); 3583 3584 static const struct rtw89_hci_ops rtw89_pci_ops = { 3585 .tx_write = rtw89_pci_ops_tx_write, 3586 .tx_kick_off = rtw89_pci_ops_tx_kick_off, 3587 .flush_queues = rtw89_pci_ops_flush_queues, 3588 .reset = rtw89_pci_ops_reset, 3589 .start = rtw89_pci_ops_start, 3590 .stop = rtw89_pci_ops_stop, 3591 .pause = rtw89_pci_ops_pause, 3592 .switch_mode = rtw89_pci_ops_switch_mode, 3593 .recalc_int_mit = rtw89_pci_recalc_int_mit, 3594 3595 .read8 = rtw89_pci_ops_read8, 3596 .read16 = rtw89_pci_ops_read16, 3597 .read32 = rtw89_pci_ops_read32, 3598 .write8 = rtw89_pci_ops_write8, 3599 .write16 = rtw89_pci_ops_write16, 3600 .write32 = rtw89_pci_ops_write32, 3601 3602 .mac_pre_init = rtw89_pci_ops_mac_pre_init, 3603 .mac_post_init = rtw89_pci_ops_mac_post_init, 3604 .deinit = rtw89_pci_ops_deinit, 3605 3606 .check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource, 3607 .mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery, 3608 .dump_err_status = rtw89_pci_ops_dump_err_status, 3609 .napi_poll = rtw89_pci_napi_poll, 3610 3611 .recovery_start = rtw89_pci_ops_recovery_start, 3612 .recovery_complete = rtw89_pci_ops_recovery_complete, 3613 }; 3614 3615 int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 3616 { 3617 struct ieee80211_hw *hw; 3618 struct rtw89_dev *rtwdev; 3619 const struct rtw89_driver_info *info; 3620 const struct rtw89_pci_info *pci_info; 3621 int driver_data_size; 3622 int ret; 3623 3624 driver_data_size = sizeof(struct rtw89_dev) + sizeof(struct rtw89_pci); 3625 hw = ieee80211_alloc_hw(driver_data_size, &rtw89_ops); 3626 if (!hw) { 3627 dev_err(&pdev->dev, "failed to allocate hw\n"); 3628 return -ENOMEM; 3629 } 3630 3631 info = (const struct rtw89_driver_info *)id->driver_data; 3632 pci_info = info->bus.pci; 3633 3634 rtwdev = hw->priv; 3635 rtwdev->hw = hw; 3636 rtwdev->dev = &pdev->dev; 3637 rtwdev->chip = info->chip; 3638 rtwdev->pci_info = info->bus.pci; 3639 rtwdev->hci.ops = &rtw89_pci_ops; 3640 rtwdev->hci.type = RTW89_HCI_TYPE_PCIE; 3641 rtwdev->hci.rpwm_addr = pci_info->rpwm_addr; 3642 rtwdev->hci.cpwm_addr = pci_info->cpwm_addr; 3643 3644 SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev); 3645 3646 ret = rtw89_core_init(rtwdev); 3647 if (ret) { 3648 rtw89_err(rtwdev, "failed to initialise core\n"); 3649 goto err_release_hw; 3650 } 3651 3652 ret = rtw89_pci_claim_device(rtwdev, pdev); 3653 if (ret) { 3654 rtw89_err(rtwdev, "failed to claim pci device\n"); 3655 goto err_core_deinit; 3656 } 3657 3658 ret = rtw89_pci_setup_resource(rtwdev, pdev); 3659 if (ret) { 3660 rtw89_err(rtwdev, "failed to setup pci resource\n"); 3661 goto err_declaim_pci; 3662 } 3663 3664 ret = rtw89_chip_info_setup(rtwdev); 3665 if (ret) { 3666 rtw89_err(rtwdev, "failed to setup chip information\n"); 3667 goto err_clear_resource; 3668 } 3669 3670 rtw89_pci_link_cfg(rtwdev); 3671 rtw89_pci_l1ss_cfg(rtwdev); 3672 3673 ret = rtw89_core_register(rtwdev); 3674 
if (ret) { 3675 rtw89_err(rtwdev, "failed to register core\n"); 3676 goto err_clear_resource; 3677 } 3678 3679 rtw89_core_napi_init(rtwdev); 3680 3681 ret = rtw89_pci_request_irq(rtwdev, pdev); 3682 if (ret) { 3683 rtw89_err(rtwdev, "failed to request pci irq\n"); 3684 goto err_unregister; 3685 } 3686 3687 return 0; 3688 3689 err_unregister: 3690 rtw89_core_napi_deinit(rtwdev); 3691 rtw89_core_unregister(rtwdev); 3692 err_clear_resource: 3693 rtw89_pci_clear_resource(rtwdev, pdev); 3694 err_declaim_pci: 3695 rtw89_pci_declaim_device(rtwdev, pdev); 3696 err_core_deinit: 3697 rtw89_core_deinit(rtwdev); 3698 err_release_hw: 3699 ieee80211_free_hw(hw); 3700 3701 return ret; 3702 } 3703 EXPORT_SYMBOL(rtw89_pci_probe); 3704 3705 void rtw89_pci_remove(struct pci_dev *pdev) 3706 { 3707 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 3708 struct rtw89_dev *rtwdev; 3709 3710 rtwdev = hw->priv; 3711 3712 rtw89_pci_free_irq(rtwdev, pdev); 3713 rtw89_core_napi_deinit(rtwdev); 3714 rtw89_core_unregister(rtwdev); 3715 rtw89_pci_clear_resource(rtwdev, pdev); 3716 rtw89_pci_declaim_device(rtwdev, pdev); 3717 rtw89_core_deinit(rtwdev); 3718 ieee80211_free_hw(hw); 3719 } 3720 EXPORT_SYMBOL(rtw89_pci_remove); 3721 3722 MODULE_AUTHOR("Realtek Corporation"); 3723 MODULE_DESCRIPTION("Realtek 802.11ax wireless PCI driver"); 3724 MODULE_LICENSE("Dual BSD/GPL"); 3725