// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2020 Realtek Corporation
 */

#include <linux/pci.h>

#include "mac.h"
#include "pci.h"
#include "reg.h"
#include "ser.h"

static bool rtw89_pci_disable_clkreq;
static bool rtw89_pci_disable_aspm_l1;
static bool rtw89_pci_disable_l1ss;
module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644);
module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644);
module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644);
MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");

static int rtw89_pci_rst_bdram_pcie(struct rtw89_dev *rtwdev)
{
	u32 val;
	int ret;

	rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1,
		      rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | B_AX_RST_BDRAM);

	ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM),
				       1, RTW89_PCI_POLL_BDRAM_RST_CNT, false,
				       rtwdev, R_AX_PCIE_INIT_CFG1);

	if (ret)
		return -EBUSY;

	return 0;
}

static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev,
				struct rtw89_pci_dma_ring *bd_ring,
				u32 cur_idx, bool tx)
{
	u32 cnt, cur_rp, wp, rp, len;

	rp = bd_ring->rp;
	wp = bd_ring->wp;
	len = bd_ring->len;

	cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
	if (tx)
		cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp);
	else
		cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp);

	bd_ring->rp = cur_rp;

	return cnt;
}

static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 addr_idx = bd_ring->addr.idx;
	u32 cnt, idx;

	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true);

	return cnt;
}

static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
				    struct rtw89_pci *rtwpci,
				    u32 cnt, bool release_all)
{
	struct rtw89_pci_tx_data *tx_data;
	struct sk_buff *skb;
	u32 qlen;

	while (cnt--) {
		skb = skb_dequeue(&rtwpci->h2c_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to pre-release fwcmd\n");
			return;
		}
		skb_queue_tail(&rtwpci->h2c_release_queue, skb);
	}

	qlen = skb_queue_len(&rtwpci->h2c_release_queue);
	if (!release_all)
		qlen = qlen > RTW89_PCI_MULTITAG ?
		       qlen - RTW89_PCI_MULTITAG : 0;

	while (qlen--) {
		skb = skb_dequeue(&rtwpci->h2c_release_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to release fwcmd\n");
			return;
		}
		tx_data = RTW89_PCI_TX_SKB_CB(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev,
				       struct rtw89_pci *rtwpci)
{
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
	u32 cnt;

	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
	if (!cnt)
		return;
	rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false);
}

static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_rx_ring *rx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 addr_idx = bd_ring->addr.idx;
	u32 cnt, idx;

	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false);

	return cnt;
}

static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info;
	dma_addr_t dma;

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	dma = rx_info->dma;
	dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
}

static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
					  struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info;
	dma_addr_t dma;

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	dma = rx_info->dma;
	dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
}

static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
				      struct sk_buff *skb)
{
	struct rtw89_pci_rxbd_info *rxbd_info;
	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);

	rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data;
	rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS);
	rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS);
	rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE);
	rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG);

	return 0;
}

static bool
rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
		      struct sk_buff *new,
		      const struct sk_buff *skb, u32 offset,
		      const struct rtw89_pci_rx_info *rx_info,
		      const struct rtw89_rx_desc_info *desc_info)
{
	u32 copy_len = rx_info->len - offset;

	if (unlikely(skb_tailroom(new) < copy_len)) {
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n",
			    rx_info->len, desc_info->pkt_size, offset, fs, ls);
		rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ",
			       skb->data, rx_info->len);
		/* length of a single segment skb is desc_info->pkt_size */
		if (fs && ls) {
			copy_len = desc_info->pkt_size;
		} else {
			rtw89_info(rtwdev, "drop rx data due to invalid length\n");
			return false;
		}
	}

	skb_put_data(new, skb->data + offset, copy_len);

	return true;
}

static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
				       struct rtw89_pci_rx_ring *rx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	struct rtw89_pci_rx_info *rx_info;
	struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc;
	struct sk_buff *new = rx_ring->diliver_skb;
	struct sk_buff *skb;
	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
	u32 offset;
	u32 cnt = 1;
	bool fs, ls;
	int ret;

	skb = rx_ring->buf[bd_ring->wp];
	rtw89_pci_sync_skb_for_cpu(rtwdev, skb);

	ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
	if (ret) {
		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
			  bd_ring->wp, ret);
		goto err_sync_device;
	}

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	fs = rx_info->fs;
	ls = rx_info->ls;

	if (fs) {
		if (new) {
			rtw89_err(rtwdev, "skb should not be ready before first segment start\n");
			goto err_sync_device;
		}
		if (desc_info->ready) {
			rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n");
			goto err_sync_device;
		}

		rtw89_core_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size);

		new = dev_alloc_skb(desc_info->pkt_size);
		if (!new)
			goto err_sync_device;

		rx_ring->diliver_skb = new;

		/* first segment has RX desc */
		offset = desc_info->offset;
		offset += desc_info->long_rxdesc ? sizeof(struct rtw89_rxdesc_long) :
						   sizeof(struct rtw89_rxdesc_short);
	} else {
		offset = sizeof(struct rtw89_pci_rxbd_info);
		if (!new) {
			rtw89_warn(rtwdev, "no last skb\n");
			goto err_sync_device;
		}
	}
	if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info))
		goto err_sync_device;
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);

	if (!desc_info->ready) {
		rtw89_warn(rtwdev, "no rx desc information\n");
		goto err_free_resource;
	}
	if (ls) {
		rtw89_core_rx(rtwdev, desc_info, new);
		rx_ring->diliver_skb = NULL;
		desc_info->ready = false;
	}

	return cnt;

err_sync_device:
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
err_free_resource:
	if (new)
		dev_kfree_skb_any(new);
	rx_ring->diliver_skb = NULL;
	desc_info->ready = false;

	return cnt;
}

static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
				   struct rtw89_pci_rx_ring *rx_ring,
				   u32 cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 rx_cnt;

	while (cnt && rtwdev->napi_budget_countdown > 0) {
		rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring);
		if (!rx_cnt) {
			rtw89_err(rtwdev, "failed to deliver RXBD skb\n");

			/* skip the rest RXBD bufs */
			rtw89_pci_rxbd_increase(rx_ring, cnt);
			break;
		}

		cnt -= rx_cnt;
	}

	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}

static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci, int budget)
{
	struct rtw89_pci_rx_ring *rx_ring;
	int countdown = rtwdev->napi_budget_countdown;
	u32 cnt;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];

	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
	if (!cnt)
		return 0;

	cnt = min_t(u32, budget, cnt);

	rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt);

	/* When pending SKBs are flushed, the countdown may be exceeded. */
	if (rtwdev->napi_budget_countdown <= 0)
		return budget;

	return budget - countdown;
}

static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
				struct rtw89_pci_tx_ring *tx_ring,
				struct sk_buff *skb, u8 tx_status)
{
	struct ieee80211_tx_info *info;

	info = IEEE80211_SKB_CB(skb);
	ieee80211_tx_info_clear_status(info);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
	if (tx_status == RTW89_TX_DONE) {
		info->flags |= IEEE80211_TX_STAT_ACK;
		tx_ring->tx_acked++;
	} else {
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
			rtw89_debug(rtwdev, RTW89_DBG_FW,
				    "failed to TX of status %x\n", tx_status);
		switch (tx_status) {
		case RTW89_TX_RETRY_LIMIT:
			tx_ring->tx_retry_lmt++;
			break;
		case RTW89_TX_LIFE_TIME:
			tx_ring->tx_life_time++;
			break;
		case RTW89_TX_MACID_DROP:
			tx_ring->tx_mac_id_drop++;
			break;
		default:
			rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status);
			break;
		}
	}

	ieee80211_tx_status_ni(rtwdev->hw, skb);
}

static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd *txwd;
	u32 cnt;

	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
	while (cnt--) {
		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
		if (!txwd) {
			rtw89_warn(rtwdev, "No busy txwd pages available\n");
			break;
		}

		list_del_init(&txwd->list);

		/* this skb has been freed by RPP */
		if (skb_queue_len(&txwd->queue) == 0)
			rtw89_pci_enqueue_txwd(tx_ring, txwd);
	}
}

static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev,
					struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	int i;

	for (i = 0; i < wd_ring->page_num; i++) {
		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
		if (!txwd)
			break;

		list_del_init(&txwd->list);
	}
}

static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
				       struct rtw89_pci_tx_ring *tx_ring,
				       struct rtw89_pci_tx_wd *txwd, u16 seq,
				       u8 tx_status)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_data *tx_data;
	struct sk_buff *skb, *tmp;
	u8 txch = tx_ring->txch;

	if (!list_empty(&txwd->list)) {
		rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
		/* In low power mode, the RPP can be received before the TX BD
		 * is updated. In normal mode, this should not happen, so warn
		 * about it.
		 */
		if (!rtwpci->low_power && !list_empty(&txwd->list))
			rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
				   txch, seq);
	}

	skb_queue_walk_safe(&txwd->queue, skb, tmp) {
		skb_unlink(skb, &txwd->queue);

		tx_data = RTW89_PCI_TX_SKB_CB(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);

		rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status);
	}

	if (list_empty(&txwd->list))
		rtw89_pci_enqueue_txwd(tx_ring, txwd);
}

static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
				  struct rtw89_pci_rpp_fmt *rpp)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_tx_wd_ring *wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	u16 seq;
	u8 qsel, tx_status, txch;

	seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
	qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
	tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
	txch = rtw89_core_get_ch_dma(rtwdev, qsel);

	if (txch == RTW89_TXCH_CH12) {
		rtw89_warn(rtwdev, "should no fwcmd release report\n");
		return;
	}

	tx_ring = &rtwpci->tx_rings[txch];
	wd_ring = &tx_ring->wd_ring;
	txwd = &wd_ring->pages[seq];

	rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status);
}

static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
						struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	int i;

	for (i = 0; i < wd_ring->page_num; i++) {
		txwd = &wd_ring->pages[i];

		if (!list_empty(&txwd->list))
			continue;

		rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP);
	}
}

static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
				     struct rtw89_pci_rx_ring *rx_ring,
				     u32 max_cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	struct rtw89_pci_rx_info *rx_info;
	struct rtw89_pci_rpp_fmt *rpp;
	struct rtw89_rx_desc_info desc_info = {};
	struct sk_buff *skb;
	u32 cnt = 0;
	u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt);
	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
	u32 offset;
	int ret;

	skb = rx_ring->buf[bd_ring->wp];
	rtw89_pci_sync_skb_for_cpu(rtwdev, skb);

	ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
	if (ret) {
		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
			  bd_ring->wp, ret);
		goto err_sync_device;
	}

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	if (!rx_info->fs || !rx_info->ls) {
		rtw89_err(rtwdev, "cannot process RP frame not set FS/LS\n");
		return cnt;
	}

	rtw89_core_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size);

	/* first segment has RX desc */
	offset = desc_info.offset;
	offset += desc_info.long_rxdesc ? sizeof(struct rtw89_rxdesc_long) :
					  sizeof(struct rtw89_rxdesc_short);
	for (; offset + rpp_size <= rx_info->len; offset += rpp_size) {
		rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset);
		rtw89_pci_release_rpp(rtwdev, rpp);
	}

	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
	cnt++;

	return cnt;

err_sync_device:
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	return 0;
}

static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_rx_ring *rx_ring,
				 u32 cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 release_cnt;

	while (cnt) {
		release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt);
		if (!release_cnt) {
			rtw89_err(rtwdev, "failed to release TX skbs\n");

			/* skip the rest RXBD bufs */
			rtw89_pci_rxbd_increase(rx_ring, cnt);
			break;
		}

		cnt -= release_cnt;
	}

	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}

static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci, int budget)
{
	struct rtw89_pci_rx_ring *rx_ring;
	u32 cnt;
	int work_done;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

	spin_lock_bh(&rtwpci->trx_lock);

	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
	if (cnt == 0)
		goto out_unlock;

	rtw89_pci_release_tx(rtwdev, rx_ring, cnt);

out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);

	/* always release all RPQ */
	work_done = min_t(int, cnt, budget);
	rtwdev->napi_budget_countdown -= work_done;

	return work_done;
}

static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
				      struct rtw89_pci *rtwpci)
{
	struct rtw89_pci_rx_ring *rx_ring;
	struct rtw89_pci_dma_ring *bd_ring;
	u32 reg_idx;
	u16 hw_idx, hw_idx_next, host_idx;
	int i;

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		bd_ring = &rx_ring->bd_ring;

		reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
		hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
		host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
		hw_idx_next = (hw_idx + 1) % bd_ring->len;

		if (hw_idx_next == host_idx)
			rtw89_warn(rtwdev, "%d RXD unavailable\n", i);

		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "%d RXD unavailable, idx=0x%08x, len=%d\n",
			    i, reg_idx, bd_ring->len);
	}
}

void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
			       struct rtw89_pci *rtwpci,
			       struct rtw89_pci_isrs *isrs)
{
	isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs;
	isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0];
	isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1];

	rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs);

void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci,
				  struct rtw89_pci_isrs *isrs)
{
	isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs;
	isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ?
			      rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0;
	isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ?
			rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0;
	isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ?
			rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0;

	if (isrs->halt_c2h_isrs)
		rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	if (isrs->isrs[0])
		rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]);
	if (isrs->isrs[1])
		rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1);

static void rtw89_pci_clear_isr0(struct rtw89_dev *rtwdev, u32 isr00)
{
	/* write 1 clear */
	rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isr00);
}

void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr);

void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr);

void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs);
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr_v1);

void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v1);

static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	int budget = NAPI_POLL_WEIGHT;

	/* Prevent the RXQ from getting stuck by running out of budget. */
	rtwdev->napi_budget_countdown = budget;

	rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget);
	rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget);
}

static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_isrs isrs;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	if (unlikely(isrs.isrs[0] & B_AX_RDU_INT))
		rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);

	if (unlikely(isrs.halt_c2h_isrs & B_AX_HALT_C2H_INT_EN))
		rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));

	if (unlikely(isrs.halt_c2h_isrs & B_AX_WDT_TIMEOUT_INT_EN))
		rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);

	if (unlikely(rtwpci->under_recovery))
		goto enable_intr;

	if (unlikely(rtwpci->low_power)) {
		rtw89_pci_low_power_interrupt_handler(rtwdev);
		goto enable_intr;
	}

	if (likely(rtwpci->running)) {
		local_bh_disable();
		napi_schedule(&rtwdev->napi);
		local_bh_enable();
	}

	return IRQ_HANDLED;

enable_intr:
	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;
	irqreturn_t irqret = IRQ_WAKE_THREAD;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);

	/* If an interrupt event is already in flight, it can still trigger
	 * this handler even after pci_stop() has turned off the IMR.
	 */
	if (unlikely(!rtwpci->running)) {
		irqret = IRQ_HANDLED;
		goto exit;
	}

	rtw89_chip_disable_intr(rtwdev, rtwpci);
exit:
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	return irqret;
}

#define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
	[RTW89_TXCH_##txch] = { \
		.num = R_AX_##txch##_TXBD_NUM ##v, \
		.idx = R_AX_##txch##_TXBD_IDX ##v, \
		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_TXCHADDRS(info, txch, v...) \
	[RTW89_TXCH_##txch] = { \
		.num = R_AX_##txch##_TXBD_NUM, \
		.idx = R_AX_##txch##_TXBD_IDX, \
		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_RXCHADDRS(info, rxch, v...) \
	[RTW89_RXCH_##rxch] = { \
		.num = R_AX_##rxch##_RXBD_NUM ##v, \
		.idx = R_AX_##rxch##_RXBD_IDX ##v, \
		.desa_l = R_AX_##rxch##_RXBD_DESA_L ##v, \
		.desa_h = R_AX_##rxch##_RXBD_DESA_H ##v, \
	}

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
	.tx = {
		DEF_TXCHADDRS(info, ACH0),
		DEF_TXCHADDRS(info, ACH1),
		DEF_TXCHADDRS(info, ACH2),
		DEF_TXCHADDRS(info, ACH3),
		DEF_TXCHADDRS(info, ACH4),
		DEF_TXCHADDRS(info, ACH5),
		DEF_TXCHADDRS(info, ACH6),
		DEF_TXCHADDRS(info, ACH7),
		DEF_TXCHADDRS(info, CH8),
		DEF_TXCHADDRS(info, CH9),
		DEF_TXCHADDRS_TYPE1(info, CH10),
		DEF_TXCHADDRS_TYPE1(info, CH11),
		DEF_TXCHADDRS(info, CH12),
	},
	.rx = {
		DEF_RXCHADDRS(info, RXQ),
		DEF_RXCHADDRS(info, RPQ),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
	.tx = {
		DEF_TXCHADDRS(info, ACH0, _V1),
		DEF_TXCHADDRS(info, ACH1, _V1),
		DEF_TXCHADDRS(info, ACH2, _V1),
		DEF_TXCHADDRS(info, ACH3, _V1),
		DEF_TXCHADDRS(info, ACH4, _V1),
		DEF_TXCHADDRS(info, ACH5, _V1),
		DEF_TXCHADDRS(info, ACH6, _V1),
		DEF_TXCHADDRS(info, ACH7, _V1),
		DEF_TXCHADDRS(info, CH8, _V1),
		DEF_TXCHADDRS(info, CH9, _V1),
		DEF_TXCHADDRS_TYPE1(info, CH10, _V1),
		DEF_TXCHADDRS_TYPE1(info, CH11, _V1),
		DEF_TXCHADDRS(info, CH12, _V1),
	},
	.rx = {
		DEF_RXCHADDRS(info, RXQ, _V1),
		DEF_RXCHADDRS(info, RPQ, _V1),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);

#undef DEF_TXCHADDRS_TYPE1
#undef DEF_TXCHADDRS
#undef DEF_RXCHADDRS

static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
				    enum rtw89_tx_channel txch,
				    const struct rtw89_pci_ch_dma_addr **addr)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (txch >= RTW89_TXCH_NUM)
		return -EINVAL;

	*addr = &info->dma_addr_set->tx[txch];

	return 0;
}

static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev,
				    enum rtw89_rx_channel rxch,
				    const struct rtw89_pci_ch_dma_addr **addr)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (rxch >= RTW89_RXCH_NUM)
		return -EINVAL;

	*addr = &info->dma_addr_set->rx[rxch];

	return 0;
}

static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;

	/* reserve one desc to check whether the ring is full */
	if (bd_ring->rp > bd_ring->wp)
		return bd_ring->rp - bd_ring->wp - 1;

	return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1;
}

static
u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
	u32 cnt;

	spin_lock_bh(&rtwpci->trx_lock);
	rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	spin_unlock_bh(&rtwpci->trx_lock);

	return cnt;
}

static
u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
						   u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	u32 cnt;

	spin_lock_bh(&rtwpci->trx_lock);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	spin_unlock_bh(&rtwpci->trx_lock);

	return cnt;
}

static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
						     u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	u32 bd_cnt, wd_cnt, min_cnt = 0;
	struct rtw89_pci_rx_ring *rx_ring;
	u32 cnt;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

	spin_lock_bh(&rtwpci->trx_lock);
	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	wd_cnt = wd_ring->curr_num;

	if (wd_cnt == 0 || bd_cnt == 0) {
		cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
		if (!cnt)
			goto out_unlock;
		rtw89_pci_release_tx(rtwdev, rx_ring, cnt);

		bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
		if (bd_cnt == 0)
			rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
	}

	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	wd_cnt = wd_ring->curr_num;
	min_cnt = min(bd_cnt, wd_cnt);
	if (min_cnt == 0)
		rtw89_warn(rtwdev, "still no tx resource after reclaim\n");

out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);

	return min_cnt;
}

static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
						   u8 txch)
{
	if (rtwdev->hci.paused)
		return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch);

	if (txch == RTW89_TXCH_CH12)
		return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev);

	return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch);
}

static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 host_idx, addr;

	spin_lock_bh(&rtwpci->trx_lock);

	addr = bd_ring->addr.idx;
	host_idx = bd_ring->wp;
	rtw89_write16(rtwdev, addr, host_idx);

	spin_unlock_bh(&rtwpci->trx_lock);
}

static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring,
					int n_txbd)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 host_idx, len;

	len = bd_ring->len;
	host_idx = bd_ring->wp + n_txbd;
	host_idx = host_idx < len ? host_idx : host_idx - len;
	bd_ring->wp = host_idx;
}

static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];

	if (rtwdev->hci.paused) {
		set_bit(txch, rtwpci->kick_map);
		return;
	}

	__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
}

static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	int txch;

	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
		if (!test_and_clear_bit(txch, rtwpci->kick_map))
			continue;

		tx_ring = &rtwpci->tx_rings[txch];
		__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
	}
}

static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 cur_idx, cur_rp;
	u8 i;

	/* Because the time taken by the I/O is somewhat dynamic, it's hard to
	 * define a reasonable fixed total timeout for a read_poll_timeout*
	 * helper. Instead, bound the number of polls and just use a for loop
	 * with udelay here.
	 */
	for (i = 0; i < 60; i++) {
		cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
		cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
		if (cur_rp == bd_ring->wp)
			return;

		udelay(1);
	}

	if (!drop)
		rtw89_info(rtwdev, "timed out to flush pci txch: %d\n", txch);
}

static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs,
					bool drop)
{
	u8 i;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		/* It may be unnecessary to flush the FWCMD queue. */
		if (i == RTW89_TXCH_CH12)
			continue;

		if (txchs & BIT(i))
			__pci_flush_txch(rtwdev, i, drop);
	}
}

static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
				       bool drop)
{
	__rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop);
}

u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
			       void *txaddr_info_addr, u32 total_len,
			       dma_addr_t dma, u8 *add_info_nr)
{
	struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr;

	txaddr_info->length = cpu_to_le16(total_len);
	txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS |
					  RTW89_PCI_ADDR_NUM(1));
	txaddr_info->dma = cpu_to_le32(dma);

	*add_info_nr = 1;

	return sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info);

u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
				  void *txaddr_info_addr, u32 total_len,
				  dma_addr_t dma, u8 *add_info_nr)
{
	struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr;
	u32 remain = total_len;
	u32 len;
	u16 length_option;
	int n;

	for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) {
		len = remain >= TXADDR_INFO_LENTHG_V1_MAX ?
		      TXADDR_INFO_LENTHG_V1_MAX : remain;
		remain -= len;

		length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) |
				FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) |
				FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0);
		txaddr_info->length_opt = cpu_to_le16(length_option);
		txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma));
		txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma));

		dma += len;
		txaddr_info++;
	}

	WARN_ONCE(remain, "length overflow remain=%u total_len=%u",
		  remain, total_len);

	*add_info_nr = n;

	return n * sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1);

static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring,
				 struct rtw89_pci_tx_wd *txwd,
				 struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	struct rtw89_txwd_info *txwd_info;
	struct rtw89_pci_tx_wp_info *txwp_info;
	void *txaddr_info_addr;
	struct pci_dev *pdev = rtwpci->pdev;
	struct sk_buff *skb = tx_req->skb;
	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
	bool en_wd_info = desc_info->en_wd_info;
	u32 txwd_len;
	u32 txwp_len;
	u32 txaddr_info_len;
	dma_addr_t dma;
	int ret;

	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		rtw89_err(rtwdev, "failed to map skb dma data\n");
		ret = -EBUSY;
		goto err;
	}

	tx_data->dma = dma;

	txwp_len = sizeof(*txwp_info);
	txwd_len = chip->txwd_body_size;
	txwd_len += en_wd_info ? sizeof(*txwd_info) : 0;
	txwp_info = txwd->vaddr + txwd_len;
	txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID);
	txwp_info->seq1 = 0;
	txwp_info->seq2 = 0;
	txwp_info->seq3 = 0;

	tx_ring->tx_cnt++;
	txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len;
	txaddr_info_len =
		rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len,
					    dma, &desc_info->addr_info_nr);

	txwd->len = txwd_len + txwp_len + txaddr_info_len;

	rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr);

	skb_queue_tail(&txwd->queue, skb);

	return 0;

err:
	return ret;
}

static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
				  struct rtw89_pci_tx_ring *tx_ring,
				  struct rtw89_pci_tx_bd_32 *txbd,
				  struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	void *txdesc;
	int txdesc_size = chip->h2c_desc_size;
	struct pci_dev *pdev = rtwpci->pdev;
	struct sk_buff *skb = tx_req->skb;
	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
	dma_addr_t dma;

	txdesc = skb_push(skb, txdesc_size);
	memset(txdesc, 0, txdesc_size);
	rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);

	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		rtw89_err(rtwdev, "failed to map fwcmd dma data\n");
		return -EBUSY;
	}

	tx_data->dma = dma;
	txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
	txbd->length = cpu_to_le16(skb->len);
	txbd->dma = cpu_to_le32(tx_data->dma);
	skb_queue_tail(&rtwpci->h2c_queue, skb);

	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);

	return 0;
}

static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring,
				 struct rtw89_pci_tx_bd_32 *txbd,
				 struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci_tx_wd *txwd;
	int ret;

	/* The FWCMD queue doesn't use WD pages. Instead, it submits the CMD
	 * buffer with the WD BODY only, so we don't need to check the free
	 * pages of the WD ring here.
	 */
	if (tx_ring->txch == RTW89_TXCH_CH12)
		return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req);

	txwd = rtw89_pci_dequeue_txwd(tx_ring);
	if (!txwd) {
		rtw89_err(rtwdev, "no available TXWD\n");
		ret = -ENOSPC;
		goto err;
	}

	ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req);
	if (ret) {
		rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq);
		goto err_enqueue_wd;
	}

	list_add_tail(&txwd->list, &tx_ring->busy_pages);

	txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
	txbd->length = cpu_to_le16(txwd->len);
	txbd->dma = cpu_to_le32(txwd->paddr);

	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);

	return 0;

err_enqueue_wd:
	rtw89_pci_enqueue_txwd(tx_ring, txwd);
err:
	return ret;
}

static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req,
			      u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_tx_bd_32 *txbd;
	u32 n_avail_txbd;
	int ret = 0;

	/* check the tx type and dma channel for fw cmd queue */
	if ((txch == RTW89_TXCH_CH12 ||
	     tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) &&
	    (txch != RTW89_TXCH_CH12 ||
	     tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) {
		rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n");
		return -EINVAL;
	}

	tx_ring = &rtwpci->tx_rings[txch];
	spin_lock_bh(&rtwpci->trx_lock);

	n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
	if (n_avail_txbd == 0) {
		rtw89_err(rtwdev, "no available TXBD\n");
		ret = -ENOSPC;
		goto err_unlock;
	}

	txbd = rtw89_pci_get_next_txbd(tx_ring);
	ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req);
	if (ret) {
		rtw89_err(rtwdev, "failed to submit TXBD\n");
		goto err_unlock;
	}

	spin_unlock_bh(&rtwpci->trx_lock);
	return 0;

err_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);
	return ret;
}

static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	int ret;

	ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma);
	if (ret) {
		rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma);
		return ret;
	}

	return 0;
}

static const struct rtw89_pci_bd_ram bd_ram_table[RTW89_TXCH_NUM] = {
	[RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_CH8] = {.start_idx = 40, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH9] = {.start_idx = 45, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1},
};
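
/* Descriptive note (an assumption, not taken from vendor documentation): each
 * bd_ram_table entry appears to describe a TX channel's BD RAM quota, where
 * start_idx is presumably the first cell, max_num the most cells the channel
 * may occupy, and min_num its reserved minimum. These values are programmed
 * into the per-channel BDRAM control register in rtw89_pci_reset_trx_rings()
 * below.
 */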

static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_rx_ring *rx_ring;
	struct rtw89_pci_dma_ring *bd_ring;
	const struct rtw89_pci_bd_ram *bd_ram;
	u32 addr_num;
	u32 addr_bdram;
	u32 addr_desa_l;
	u32 val32;
	int i;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		bd_ring = &tx_ring->bd_ring;
		bd_ram = &bd_ram_table[i];
		addr_num = bd_ring->addr.num;
		addr_bdram = bd_ring->addr.bdram;
		addr_desa_l = bd_ring->addr.desa_l;
		bd_ring->wp = 0;
		bd_ring->rp = 0;

		val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
			FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
			FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num);

		rtw89_write16(rtwdev, addr_num, bd_ring->len);
		rtw89_write32(rtwdev, addr_bdram, val32);
		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
	}

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		bd_ring = &rx_ring->bd_ring;
		addr_num = bd_ring->addr.num;
		addr_desa_l = bd_ring->addr.desa_l;
		bd_ring->wp = 0;
		bd_ring->rp = 0;
		rx_ring->diliver_skb = NULL;
		rx_ring->diliver_desc.ready = false;

		rtw89_write16(rtwdev, addr_num, bd_ring->len);
		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
	}
}

static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
				      struct rtw89_pci_tx_ring *tx_ring)
{
	rtw89_pci_release_busy_txwd(rtwdev, tx_ring);
	rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring);
}

static void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	int txch;

	rtw89_pci_reset_trx_rings(rtwdev);

	spin_lock_bh(&rtwpci->trx_lock);
	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
		if (txch == RTW89_TXCH_CH12) {
			rtw89_pci_release_fwcmd(rtwdev, rtwpci,
						skb_queue_len(&rtwpci->h2c_queue), true);
			continue;
		}
		rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]);
	}
	spin_unlock_bh(&rtwpci->trx_lock);
}

static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtwpci->running = true;
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtwpci->running = false;
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
{
	rtw89_core_napi_start(rtwdev);
	rtw89_pci_enable_intr_lock(rtwdev);

	return 0;
}

static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	rtw89_pci_disable_intr_lock(rtwdev);
	synchronize_irq(pdev->irq);
	rtw89_core_napi_stop(rtwdev);
}

static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	if (pause) {
		rtw89_pci_disable_intr_lock(rtwdev);
		synchronize_irq(pdev->irq);
		if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
			napi_synchronize(&rtwdev->napi);
	} else {
		rtw89_pci_enable_intr_lock(rtwdev);
		rtw89_pci_tx_kick_off_pending(rtwdev);
	}
}

static
void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power;
	const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_rx_ring *rx_ring;
	int i;

	if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n"))
		return;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		tx_ring->bd_ring.addr.idx = low_power ?
					    bd_idx_addr->tx_bd_addrs[i] :
					    dma_addr_set->tx[i].idx;
	}

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		rx_ring->bd_ring.addr.idx = low_power ?
					    bd_idx_addr->rx_bd_addrs[i] :
					    dma_addr_set->rx[i].idx;
	}
}

static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
{
	enum rtw89_pci_intr_mask_cfg cfg;

	WARN(!rtwdev->hci.paused, "HCI isn't paused\n");

	cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL;
	rtw89_chip_config_intr_mask(rtwdev, cfg);
	rtw89_pci_switch_bd_idx_addr(rtwdev, low_power);
}

static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data);

static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	u32 val = readl(rtwpci->mmap + addr);
	int count;

	for (count = 0; ; count++) {
		if (val != RTW89_R32_DEAD)
			return val;
		if (count >= MAC_REG_POOL_COUNT) {
			rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val);
			return RTW89_R32_DEAD;
		}
		rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN);
		val = readl(rtwpci->mmap + addr);
	}

	return val;
}

static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	u32 addr32, val32, shift;

	if (!ACCESS_CMAC(addr))
		return readb(rtwpci->mmap + addr);

	addr32 = addr & ~0x3;
	shift = (addr & 0x3) * 8;
	val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
	return val32 >> shift;
}

static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	u32 addr32, val32, shift;

	if (!ACCESS_CMAC(addr))
		return readw(rtwpci->mmap + addr);

	addr32 = addr & ~0x3;
	shift = (addr & 0x3) * 8;
	val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
	return val32 >> shift;
}

static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	if (!ACCESS_CMAC(addr))
		return readl(rtwpci->mmap + addr);

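	/* CMAC registers can read back as RTW89_R32_DEAD until the CMAC
	 * clock is enabled; rtw89_pci_ops_read32_cmac() retries the read
	 * after enabling it.
	 */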
	return rtw89_pci_ops_read32_cmac(rtwdev, addr);
}

static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	writeb(data, rtwpci->mmap + addr);
}

static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	writew(data, rtwpci->mmap + addr);
}

static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	writel(data, rtwpci->mmap + addr);
}

static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u32 txhci_en = info->txhci_en_bit;
	u32 rxhci_en = info->rxhci_en_bit;

	if (enable) {
		if (chip_id != RTL8852C)
			rtw89_write32_clr(rtwdev, info->dma_stop1_reg,
					  B_AX_STOP_PCIEIO);
		rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
				  txhci_en | rxhci_en);
		if (chip_id == RTL8852C)
			rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
					  B_AX_STOP_AXI_MST);
	} else {
		if (chip_id != RTL8852C)
			rtw89_write32_set(rtwdev, info->dma_stop1_reg,
					  B_AX_STOP_PCIEIO);
		else
			rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
					  B_AX_STOP_AXI_MST);
		if (chip_id == RTL8852C)
			rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
					  B_AX_STOP_AXI_MST);
	}
}

static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit)
{
	u16 val;

	rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F);

	val = rtw89_read16(rtwdev, R_AX_MDIO_CFG);
	switch (speed) {
	case PCIE_PHY_GEN1:
		if (addr < 0x20)
			val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK);
		else
			val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK);
		break;
	case PCIE_PHY_GEN2:
		if (addr < 0x20)
			val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK);
		else
			val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK);
		break;
	default:
		rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed);
		return -EINVAL;
	}
	rtw89_write16(rtwdev, R_AX_MDIO_CFG, val);
	rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit);

	return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000,
				 false, rtwdev, R_AX_MDIO_CFG);
}

static int
rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val)
{
	int ret;

	ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret);
		return ret;
	}
	*val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA);

	return 0;
}

static int
rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed)
{
	int ret;

	rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data);
	ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret);
		return ret;
	}

	return 0;
}

static int
rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed)
{
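	/* Read-modify-write: update only the field selected by mask. */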
	u32 shift;
	int ret;
	u16 val;

	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
	if (ret)
		return ret;

	shift = __ffs(mask);
	val &= ~mask;
	val |= ((data << shift) & mask);

	ret = rtw89_write16_mdio(rtwdev, addr, val, speed);
	if (ret)
		return ret;

	return 0;
}

static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
{
	int ret;
	u16 val;

	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed);
	if (ret)
		return ret;

	return 0;
}

static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
{
	int ret;
	u16 val;

	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed);
	if (ret)
		return ret;

	return 0;
}

static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
					u8 data)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	return pci_write_config_byte(pdev, addr, data);
}

static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
				       u8 *value)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	return pci_read_config_byte(pdev, addr, value);
}

static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr,
				     u8 bit)
{
	u8 value;
	int ret;

	ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
	if (ret)
		return ret;

	value |= bit;
	ret = rtw89_pci_write_config_byte(rtwdev, addr, value);

	return ret;
}

static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr,
				     u8 bit)
{
	u8 value;
	int ret;

	ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
	if (ret)
		return ret;

	value &= ~bit;
	ret = rtw89_pci_write_config_byte(rtwdev, addr, value);

	return ret;
}

static int
__get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate)
{
	u16 val, tar;
	int ret;

	/* Enable counter */
	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
				 phy_rate);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN,
				 phy_rate);
	if (ret)
		return ret;

	fsleep(300);

	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
				 phy_rate);
	if (ret)
		return ret;

	tar = tar & 0x0FFF;
	if (tar == 0 || tar == 0x0FFF) {
		rtw89_err(rtwdev, "[ERR]Get target failed.\n");
		return -EINVAL;
	}

	*target = tar;

	return 0;
}

static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
{
	enum rtw89_pcie_phy phy_rate;
	u16 val16, mgn_set, div_set, tar;
	u8 val8, bdr_ori;
	bool l1_flag = false;
	int ret = 0;

	if (rtwdev->chip->chip_id != RTL8852B)
		return 0;

	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]pci config read %X\n",
			  RTW89_PCIE_PHY_RATE);
		return ret;
	}

	if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) {
		phy_rate = PCIE_PHY_GEN1;
	} else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) {
		phy_rate = PCIE_PHY_GEN2;
	} else {
		rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not support\n", val8);
		return -EOPNOTSUPP;
	}
	/* Disable L1BD */
	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL);
		return ret;
	}

	if (bdr_ori & RTW89_PCIE_BIT_L1) {
		ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
						  bdr_ori & ~RTW89_PCIE_BIT_L1);
		if (ret) {
			rtw89_err(rtwdev, "[ERR]pci config write %X\n",
				  RTW89_PCIE_L1_CTRL);
			return ret;
		}
		l1_flag = true;
	}

	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
		goto end;
	}

	if (val16 & B_AX_CALIB_EN) {
		ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1,
					 val16 & ~B_AX_CALIB_EN, phy_rate);
		if (ret) {
			rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
			goto end;
		}
	}

	if (!autook_en)
		goto end;
	/* Set div */
	ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
		goto end;
	}

	/* Obtain div and margin */
	ret = __get_target(rtwdev, &tar, phy_rate);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret);
		goto end;
	}

	mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar;

	if (mgn_set >= 128) {
		div_set = 0x0003;
		mgn_set = 0x000F;
	} else if (mgn_set >= 64) {
		div_set = 0x0003;
		mgn_set >>= 3;
	} else if (mgn_set >= 32) {
		div_set = 0x0002;
		mgn_set >>= 2;
	} else if (mgn_set >= 16) {
		div_set = 0x0001;
		mgn_set >>= 1;
	} else if (mgn_set == 0) {
		rtw89_err(rtwdev, "[ERR]cal mgn is 0,tar = %d\n", tar);
		goto end;
	} else {
		div_set = 0x0000;
	}

	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
		goto end;
	}

	val16 |= u16_encode_bits(div_set, B_AX_DIV);

	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
		goto end;
	}

	ret = __get_target(rtwdev, &tar, phy_rate);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret);
		goto end;
	}

	rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n",
		    tar, div_set, mgn_set);
	ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1,
				 (tar & 0x0FFF) | (mgn_set << 12), phy_rate);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1);
		goto end;
	}

	/* Enable function */
	ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
"[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 1964 goto end; 1965 } 1966 1967 /* CLK delay = 0 */ 1968 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 1969 PCIE_CLKDLY_HW_0); 1970 1971 end: 1972 /* Set L1BD to ori */ 1973 if (l1_flag) { 1974 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 1975 bdr_ori); 1976 if (ret) { 1977 rtw89_err(rtwdev, "[ERR]pci config write %X\n", 1978 RTW89_PCIE_L1_CTRL); 1979 return ret; 1980 } 1981 } 1982 1983 return ret; 1984 } 1985 1986 static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev) 1987 { 1988 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 1989 int ret; 1990 1991 if (chip_id == RTL8852A) { 1992 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 1993 PCIE_PHY_GEN1); 1994 if (ret) 1995 return ret; 1996 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 1997 PCIE_PHY_GEN2); 1998 if (ret) 1999 return ret; 2000 } else if (chip_id == RTL8852C) { 2001 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2, 2002 B_AX_DEGLITCH); 2003 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2, 2004 B_AX_DEGLITCH); 2005 } 2006 2007 return 0; 2008 } 2009 2010 static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev) 2011 { 2012 if (rtwdev->chip->chip_id != RTL8852A) 2013 return; 2014 2015 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE); 2016 } 2017 2018 static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev) 2019 { 2020 if (rtwdev->chip->chip_id != RTL8852A && rtwdev->chip->chip_id != RTL8852B) 2021 return; 2022 2023 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN); 2024 } 2025 2026 static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev) 2027 { 2028 int ret; 2029 2030 if (rtwdev->chip->chip_id != RTL8852A) 2031 return 0; 2032 2033 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2034 PCIE_PHY_GEN1); 2035 if (ret) 2036 return ret; 2037 2038 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2039 PCIE_PHY_GEN2); 2040 if (ret) 2041 return ret; 2042 2043 return 0; 2044 } 2045 2046 static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev) 2047 { 2048 if (rtwdev->chip->chip_id != RTL8852A) 2049 return; 2050 2051 rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN); 2052 } 2053 2054 static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev) 2055 { 2056 if (rtwdev->chip->chip_id == RTL8852A || 2057 rtwdev->chip->chip_id == RTL8852B) { 2058 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 2059 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2060 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2061 B_AX_PCIE_DIS_WLSUS_AFT_PDN); 2062 } else if (rtwdev->chip->chip_id == RTL8852C) { 2063 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2064 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2065 } 2066 } 2067 2068 static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev) 2069 { 2070 if (rtwdev->chip->chip_id != RTL8852B) 2071 return 0; 2072 2073 return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK, 2074 PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1); 2075 } 2076 2077 static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up) 2078 { 2079 if (pwr_up) 2080 rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2081 else 2082 rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2083 } 2084 2085 static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev) 2086 { 2087 if (rtwdev->chip->chip_id != RTL8852C) 2088 return; 2089 2090 rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2091 rtw89_write32_clr(rtwdev, 
R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2092 } 2093 2094 static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev) 2095 { 2096 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2097 return; 2098 2099 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT); 2100 } 2101 2102 static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev) 2103 { 2104 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2105 return; 2106 2107 rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, 2108 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2109 rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3); 2110 rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, 2111 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2112 } 2113 2114 static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev) 2115 { 2116 if (rtwdev->chip->chip_id != RTL8852C) 2117 return; 2118 2119 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1); 2120 } 2121 2122 static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev) 2123 { 2124 if (rtwdev->chip->chip_id != RTL8852C) 2125 return; 2126 2127 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN); 2128 } 2129 2130 static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev) 2131 { 2132 if (rtwdev->chip->chip_id == RTL8852C) 2133 return; 2134 2135 rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL, 2136 B_AX_SIC_EN_FORCE_CLKREQ); 2137 } 2138 2139 static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev) 2140 { 2141 const struct rtw89_pci_info *info = rtwdev->pci_info; 2142 u32 lbc; 2143 2144 if (rtwdev->chip->chip_id == RTL8852C) 2145 return; 2146 2147 lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG); 2148 if (info->lbc_en == MAC_AX_PCIE_ENABLE) { 2149 lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER); 2150 lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN; 2151 rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2152 } else { 2153 lbc &= ~B_AX_LBC_EN; 2154 } 2155 rtw89_write32_set(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2156 } 2157 2158 static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev) 2159 { 2160 const struct rtw89_pci_info *info = rtwdev->pci_info; 2161 u32 val32; 2162 2163 if (rtwdev->chip->chip_id != RTL8852C) 2164 return; 2165 2166 if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) { 2167 val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK, 2168 info->io_rcy_tmr); 2169 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32); 2170 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32); 2171 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32); 2172 2173 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2174 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2175 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2176 } else { 2177 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2178 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2179 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2180 } 2181 2182 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1); 2183 } 2184 2185 static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev) 2186 { 2187 if (rtwdev->chip->chip_id == RTL8852C) 2188 return; 2189 2190 rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL, 2191 B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG); 2192 2193 if (rtwdev->chip->chip_id == RTL8852A) 2194 rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL, 2195 B_AX_EN_CHKDSC_NO_RX_STUCK); 2196 } 2197 2198 static void rtw89_pci_set_keep_reg(struct 
rtw89_dev *rtwdev) 2199 { 2200 if (rtwdev->chip->chip_id == RTL8852C) 2201 return; 2202 2203 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 2204 B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG); 2205 } 2206 2207 static void rtw89_pci_clr_idx_all(struct rtw89_dev *rtwdev) 2208 { 2209 const struct rtw89_pci_info *info = rtwdev->pci_info; 2210 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2211 u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX | 2212 B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX | 2213 B_AX_CLR_CH12_IDX; 2214 u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg; 2215 u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg; 2216 2217 if (chip_id == RTL8852A || chip_id == RTL8852C) 2218 val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX | 2219 B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX; 2220 /* clear DMA indexes */ 2221 rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val); 2222 if (chip_id == RTL8852A || chip_id == RTL8852C) 2223 rtw89_write32_set(rtwdev, txbd_rwptr_clr2, 2224 B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX); 2225 rtw89_write32_set(rtwdev, rxbd_rwptr_clr, 2226 B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX); 2227 } 2228 2229 static int rtw89_poll_txdma_ch_idle_pcie(struct rtw89_dev *rtwdev) 2230 { 2231 const struct rtw89_pci_info *info = rtwdev->pci_info; 2232 u32 ret, check, dma_busy; 2233 u32 dma_busy1 = info->dma_busy1_reg; 2234 u32 dma_busy2 = info->dma_busy2_reg; 2235 2236 check = B_AX_ACH0_BUSY | B_AX_ACH1_BUSY | B_AX_ACH2_BUSY | 2237 B_AX_ACH3_BUSY | B_AX_ACH4_BUSY | B_AX_ACH5_BUSY | 2238 B_AX_ACH6_BUSY | B_AX_ACH7_BUSY | B_AX_CH8_BUSY | 2239 B_AX_CH9_BUSY | B_AX_CH12_BUSY; 2240 2241 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2242 10, 100, false, rtwdev, dma_busy1); 2243 if (ret) 2244 return ret; 2245 2246 check = B_AX_CH10_BUSY | B_AX_CH11_BUSY; 2247 2248 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2249 10, 100, false, rtwdev, dma_busy2); 2250 if (ret) 2251 return ret; 2252 2253 return 0; 2254 } 2255 2256 static int rtw89_poll_rxdma_ch_idle_pcie(struct rtw89_dev *rtwdev) 2257 { 2258 const struct rtw89_pci_info *info = rtwdev->pci_info; 2259 u32 ret, check, dma_busy; 2260 u32 dma_busy3 = info->dma_busy3_reg; 2261 2262 check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY; 2263 2264 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2265 10, 100, false, rtwdev, dma_busy3); 2266 if (ret) 2267 return ret; 2268 2269 return 0; 2270 } 2271 2272 static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev) 2273 { 2274 u32 ret; 2275 2276 ret = rtw89_poll_txdma_ch_idle_pcie(rtwdev); 2277 if (ret) { 2278 rtw89_err(rtwdev, "txdma ch busy\n"); 2279 return ret; 2280 } 2281 2282 ret = rtw89_poll_rxdma_ch_idle_pcie(rtwdev); 2283 if (ret) { 2284 rtw89_err(rtwdev, "rxdma ch busy\n"); 2285 return ret; 2286 } 2287 2288 return 0; 2289 } 2290 2291 static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) 2292 { 2293 const struct rtw89_pci_info *info = rtwdev->pci_info; 2294 enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode; 2295 enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode; 2296 enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode; 2297 enum mac_ax_tag_mode tag_mode = info->tag_mode; 2298 enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl; 2299 enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl; 2300 enum mac_ax_tx_burst tx_burst = info->tx_burst; 2301 enum mac_ax_rx_burst rx_burst = info->rx_burst; 2302 enum rtw89_core_chip_id chip_id = 
rtwdev->chip->chip_id; 2303 u8 cv = rtwdev->hal.cv; 2304 u32 val32; 2305 2306 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2307 if (chip_id == RTL8852A && cv == CHIP_CBV) 2308 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2309 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2310 if (chip_id == RTL8852A || chip_id == RTL8852B) 2311 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2312 } 2313 2314 if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) { 2315 if (chip_id == RTL8852A && cv == CHIP_CBV) 2316 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2317 } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) { 2318 if (chip_id == RTL8852A || chip_id == RTL8852B) 2319 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2320 } 2321 2322 if (rxbd_mode == MAC_AX_RXBD_PKT) { 2323 rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2324 } else if (rxbd_mode == MAC_AX_RXBD_SEP) { 2325 rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2326 2327 if (chip_id == RTL8852A || chip_id == RTL8852B) 2328 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, 2329 B_AX_PCIE_RX_APPLEN_MASK, 0); 2330 } 2331 2332 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2333 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst); 2334 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst); 2335 } else if (chip_id == RTL8852C) { 2336 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst); 2337 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst); 2338 } 2339 2340 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2341 if (tag_mode == MAC_AX_TAG_SGL) { 2342 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) & 2343 ~B_AX_LATENCY_CONTROL; 2344 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2345 } else if (tag_mode == MAC_AX_TAG_MULTI) { 2346 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | 2347 B_AX_LATENCY_CONTROL; 2348 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2349 } 2350 } 2351 2352 rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask, 2353 info->multi_tag_num); 2354 2355 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2356 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE, 2357 wd_dma_idle_intvl); 2358 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT, 2359 wd_dma_act_intvl); 2360 } else if (chip_id == RTL8852C) { 2361 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK, 2362 wd_dma_idle_intvl); 2363 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK, 2364 wd_dma_act_intvl); 2365 } 2366 2367 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2368 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2369 B_AX_HOST_ADDR_INFO_8B_SEL); 2370 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2371 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2372 rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2373 B_AX_HOST_ADDR_INFO_8B_SEL); 2374 rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2375 } 2376 2377 return 0; 2378 } 2379 2380 static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev) 2381 { 2382 const struct rtw89_pci_info *info = rtwdev->pci_info; 2383 2384 if (rtwdev->chip->chip_id == RTL8852A) { 2385 /* ltr sw trigger */ 2386 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE); 2387 } 2388 info->ltr_set(rtwdev, false); 2389 rtw89_pci_ctrl_dma_all(rtwdev, 
false); 2390 rtw89_pci_clr_idx_all(rtwdev); 2391 2392 return 0; 2393 } 2394 2395 static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev) 2396 { 2397 const struct rtw89_pci_info *info = rtwdev->pci_info; 2398 int ret; 2399 2400 rtw89_pci_rxdma_prefth(rtwdev); 2401 rtw89_pci_l1off_pwroff(rtwdev); 2402 rtw89_pci_deglitch_setting(rtwdev); 2403 ret = rtw89_pci_l2_rxen_lat(rtwdev); 2404 if (ret) { 2405 rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret); 2406 return ret; 2407 } 2408 2409 rtw89_pci_aphy_pwrcut(rtwdev); 2410 rtw89_pci_hci_ldo(rtwdev); 2411 rtw89_pci_dphy_delay(rtwdev); 2412 2413 ret = rtw89_pci_auto_refclk_cal(rtwdev, false); 2414 if (ret) { 2415 rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret); 2416 return ret; 2417 } 2418 2419 rtw89_pci_power_wake(rtwdev, true); 2420 rtw89_pci_autoload_hang(rtwdev); 2421 rtw89_pci_l12_vmain(rtwdev); 2422 rtw89_pci_gen2_force_ib(rtwdev); 2423 rtw89_pci_l1_ent_lat(rtwdev); 2424 rtw89_pci_wd_exit_l1(rtwdev); 2425 rtw89_pci_set_sic(rtwdev); 2426 rtw89_pci_set_lbc(rtwdev); 2427 rtw89_pci_set_io_rcy(rtwdev); 2428 rtw89_pci_set_dbg(rtwdev); 2429 rtw89_pci_set_keep_reg(rtwdev); 2430 2431 rtw89_write32_set(rtwdev, info->dma_stop1_reg, B_AX_STOP_WPDMA); 2432 2433 /* stop DMA activities */ 2434 rtw89_pci_ctrl_dma_all(rtwdev, false); 2435 2436 ret = rtw89_pci_poll_dma_all_idle(rtwdev); 2437 if (ret) { 2438 rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n"); 2439 return ret; 2440 } 2441 2442 rtw89_pci_clr_idx_all(rtwdev); 2443 rtw89_pci_mode_op(rtwdev); 2444 2445 /* fill TRX BD indexes */ 2446 rtw89_pci_ops_reset(rtwdev); 2447 2448 ret = rtw89_pci_rst_bdram_pcie(rtwdev); 2449 if (ret) { 2450 rtw89_warn(rtwdev, "reset bdram busy\n"); 2451 return ret; 2452 } 2453 2454 /* enable FW CMD queue to download firmware */ 2455 rtw89_write32_set(rtwdev, info->dma_stop1_reg, B_AX_TX_STOP1_ALL); 2456 rtw89_write32_clr(rtwdev, info->dma_stop1_reg, B_AX_STOP_CH12); 2457 rtw89_write32_set(rtwdev, info->dma_stop2_reg, B_AX_TX_STOP2_ALL); 2458 2459 /* start DMA activities */ 2460 rtw89_pci_ctrl_dma_all(rtwdev, true); 2461 2462 return 0; 2463 } 2464 2465 int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en) 2466 { 2467 u32 val; 2468 2469 if (!en) 2470 return 0; 2471 2472 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 2473 if (rtw89_pci_ltr_is_err_reg_val(val)) 2474 return -EINVAL; 2475 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 2476 if (rtw89_pci_ltr_is_err_reg_val(val)) 2477 return -EINVAL; 2478 val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY); 2479 if (rtw89_pci_ltr_is_err_reg_val(val)) 2480 return -EINVAL; 2481 val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY); 2482 if (rtw89_pci_ltr_is_err_reg_val(val)) 2483 return -EINVAL; 2484 2485 rtw89_write32_clr(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN); 2486 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_EN); 2487 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK, 2488 PCI_LTR_SPC_500US); 2489 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 2490 PCI_LTR_IDLE_TIMER_800US); 2491 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 2492 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 2493 rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x88e088e0); 2494 rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b); 2495 2496 return 0; 2497 } 2498 EXPORT_SYMBOL(rtw89_pci_ltr_set); 2499 2500 int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en) 2501 { 2502 u32 dec_ctrl; 2503 u32 val32; 2504 2505 val32 = rtw89_read32(rtwdev, 
R_AX_LTR_CTRL_0); 2506 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2507 return -EINVAL; 2508 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 2509 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2510 return -EINVAL; 2511 dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL); 2512 if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl)) 2513 return -EINVAL; 2514 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3); 2515 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2516 return -EINVAL; 2517 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0); 2518 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2519 return -EINVAL; 2520 2521 if (!en) { 2522 dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN); 2523 dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) | 2524 B_AX_LTR_REQ_DRV; 2525 } else { 2526 dec_ctrl |= B_AX_LTR_HW_DEC_EN; 2527 } 2528 2529 dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK; 2530 dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US); 2531 2532 if (en) 2533 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, 2534 B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN); 2535 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 2536 PCI_LTR_IDLE_TIMER_3_2MS); 2537 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 2538 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 2539 rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl); 2540 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003); 2541 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b); 2542 2543 return 0; 2544 } 2545 EXPORT_SYMBOL(rtw89_pci_ltr_set_v1); 2546 2547 static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev) 2548 { 2549 const struct rtw89_pci_info *info = rtwdev->pci_info; 2550 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2551 int ret; 2552 2553 ret = info->ltr_set(rtwdev, true); 2554 if (ret) { 2555 rtw89_err(rtwdev, "pci ltr set fail\n"); 2556 return ret; 2557 } 2558 if (chip_id == RTL8852A) { 2559 /* ltr sw trigger */ 2560 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT); 2561 } 2562 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2563 /* ADDR info 8-byte mode */ 2564 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2565 B_AX_HOST_ADDR_INFO_8B_SEL); 2566 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2567 } 2568 2569 /* enable DMA for all queues */ 2570 rtw89_write32_clr(rtwdev, info->dma_stop1_reg, B_AX_TX_STOP1_ALL); 2571 rtw89_write32_clr(rtwdev, info->dma_stop2_reg, B_AX_TX_STOP2_ALL); 2572 2573 /* Release PCI IO */ 2574 rtw89_write32_clr(rtwdev, info->dma_stop1_reg, 2575 B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO); 2576 2577 return 0; 2578 } 2579 2580 static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev, 2581 struct pci_dev *pdev) 2582 { 2583 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2584 int ret; 2585 2586 ret = pci_enable_device(pdev); 2587 if (ret) { 2588 rtw89_err(rtwdev, "failed to enable pci device\n"); 2589 return ret; 2590 } 2591 2592 pci_set_master(pdev); 2593 pci_set_drvdata(pdev, rtwdev->hw); 2594 2595 rtwpci->pdev = pdev; 2596 2597 return 0; 2598 } 2599 2600 static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev, 2601 struct pci_dev *pdev) 2602 { 2603 pci_clear_master(pdev); 2604 pci_disable_device(pdev); 2605 } 2606 2607 static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, 2608 struct pci_dev *pdev) 2609 { 2610 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2611 unsigned long resource_len; 2612 u8 bar_id = 2; 2613 int ret; 2614 
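/* Claim the PCI regions, restrict streaming and coherent DMA to 32-bit addresses (the buffer descriptors carry 32-bit DMA addresses), then iomap BAR 2 for MMIO register access. */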
2615 ret = pci_request_regions(pdev, KBUILD_MODNAME); 2616 if (ret) { 2617 rtw89_err(rtwdev, "failed to request pci regions\n"); 2618 goto err; 2619 } 2620 2621 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 2622 if (ret) { 2623 rtw89_err(rtwdev, "failed to set dma mask to 32-bit\n"); 2624 goto err_release_regions; 2625 } 2626 2627 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 2628 if (ret) { 2629 rtw89_err(rtwdev, "failed to set consistent dma mask to 32-bit\n"); 2630 goto err_release_regions; 2631 } 2632 2633 resource_len = pci_resource_len(pdev, bar_id); 2634 rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len); 2635 if (!rtwpci->mmap) { 2636 rtw89_err(rtwdev, "failed to map pci io\n"); 2637 ret = -EIO; 2638 goto err_release_regions; 2639 } 2640 2641 return 0; 2642 2643 err_release_regions: 2644 pci_release_regions(pdev); 2645 err: 2646 return ret; 2647 } 2648 2649 static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev, 2650 struct pci_dev *pdev) 2651 { 2652 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2653 2654 if (rtwpci->mmap) { 2655 pci_iounmap(pdev, rtwpci->mmap); 2656 pci_release_regions(pdev); 2657 } 2658 } 2659 2660 static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev, 2661 struct pci_dev *pdev, 2662 struct rtw89_pci_tx_ring *tx_ring) 2663 { 2664 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 2665 u8 *head = wd_ring->head; 2666 dma_addr_t dma = wd_ring->dma; 2667 u32 page_size = wd_ring->page_size; 2668 u32 page_num = wd_ring->page_num; 2669 u32 ring_sz = page_size * page_num; 2670 2671 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2672 wd_ring->head = NULL; 2673 } 2674 2675 static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev, 2676 struct pci_dev *pdev, 2677 struct rtw89_pci_tx_ring *tx_ring) 2678 { 2679 int ring_sz; 2680 u8 *head; 2681 dma_addr_t dma; 2682 2683 head = tx_ring->bd_ring.head; 2684 dma = tx_ring->bd_ring.dma; 2685 ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len; 2686 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2687 2688 tx_ring->bd_ring.head = NULL; 2689 } 2690 2691 static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev, 2692 struct pci_dev *pdev) 2693 { 2694 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2695 struct rtw89_pci_tx_ring *tx_ring; 2696 int i; 2697 2698 for (i = 0; i < RTW89_TXCH_NUM; i++) { 2699 tx_ring = &rtwpci->tx_rings[i]; 2700 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 2701 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 2702 } 2703 } 2704 2705 static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev, 2706 struct pci_dev *pdev, 2707 struct rtw89_pci_rx_ring *rx_ring) 2708 { 2709 struct rtw89_pci_rx_info *rx_info; 2710 struct sk_buff *skb; 2711 dma_addr_t dma; 2712 u32 buf_sz; 2713 u8 *head; 2714 int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len; 2715 int i; 2716 2717 buf_sz = rx_ring->buf_sz; 2718 for (i = 0; i < rx_ring->bd_ring.len; i++) { 2719 skb = rx_ring->buf[i]; 2720 if (!skb) 2721 continue; 2722 2723 rx_info = RTW89_PCI_RX_SKB_CB(skb); 2724 dma = rx_info->dma; 2725 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 2726 dev_kfree_skb(skb); 2727 rx_ring->buf[i] = NULL; 2728 } 2729 2730 head = rx_ring->bd_ring.head; 2731 dma = rx_ring->bd_ring.dma; 2732 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2733 2734 rx_ring->bd_ring.head = NULL; 2735 } 2736 2737 static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev, 2738 struct pci_dev *pdev) 2739 { 2740 struct rtw89_pci *rtwpci = (struct 
rtw89_pci *)rtwdev->priv; 2741 struct rtw89_pci_rx_ring *rx_ring; 2742 int i; 2743 2744 for (i = 0; i < RTW89_RXCH_NUM; i++) { 2745 rx_ring = &rtwpci->rx_rings[i]; 2746 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 2747 } 2748 } 2749 2750 static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev, 2751 struct pci_dev *pdev) 2752 { 2753 rtw89_pci_free_rx_rings(rtwdev, pdev); 2754 rtw89_pci_free_tx_rings(rtwdev, pdev); 2755 } 2756 2757 static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev, 2758 struct rtw89_pci_rx_ring *rx_ring, 2759 struct sk_buff *skb, int buf_sz, u32 idx) 2760 { 2761 struct rtw89_pci_rx_info *rx_info; 2762 struct rtw89_pci_rx_bd_32 *rx_bd; 2763 dma_addr_t dma; 2764 2765 if (!skb) 2766 return -EINVAL; 2767 2768 dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE); 2769 if (dma_mapping_error(&pdev->dev, dma)) 2770 return -EBUSY; 2771 2772 rx_info = RTW89_PCI_RX_SKB_CB(skb); 2773 rx_bd = RTW89_PCI_RX_BD(rx_ring, idx); 2774 2775 memset(rx_bd, 0, sizeof(*rx_bd)); 2776 rx_bd->buf_size = cpu_to_le16(buf_sz); 2777 rx_bd->dma = cpu_to_le32(dma); 2778 rx_info->dma = dma; 2779 2780 return 0; 2781 } 2782 2783 static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev, 2784 struct pci_dev *pdev, 2785 struct rtw89_pci_tx_ring *tx_ring, 2786 enum rtw89_tx_channel txch) 2787 { 2788 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 2789 struct rtw89_pci_tx_wd *txwd; 2790 dma_addr_t dma; 2791 dma_addr_t cur_paddr; 2792 u8 *head; 2793 u8 *cur_vaddr; 2794 u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE; 2795 u32 page_num = RTW89_PCI_TXWD_NUM_MAX; 2796 u32 ring_sz = page_size * page_num; 2797 u32 page_offset; 2798 int i; 2799 2800 /* FWCMD queue doesn't use txwd as pages */ 2801 if (txch == RTW89_TXCH_CH12) 2802 return 0; 2803 2804 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 2805 if (!head) 2806 return -ENOMEM; 2807 2808 INIT_LIST_HEAD(&wd_ring->free_pages); 2809 wd_ring->head = head; 2810 wd_ring->dma = dma; 2811 wd_ring->page_size = page_size; 2812 wd_ring->page_num = page_num; 2813 2814 page_offset = 0; 2815 for (i = 0; i < page_num; i++) { 2816 txwd = &wd_ring->pages[i]; 2817 cur_paddr = dma + page_offset; 2818 cur_vaddr = head + page_offset; 2819 2820 skb_queue_head_init(&txwd->queue); 2821 INIT_LIST_HEAD(&txwd->list); 2822 txwd->paddr = cur_paddr; 2823 txwd->vaddr = cur_vaddr; 2824 txwd->len = page_size; 2825 txwd->seq = i; 2826 rtw89_pci_enqueue_txwd(tx_ring, txwd); 2827 2828 page_offset += page_size; 2829 } 2830 2831 return 0; 2832 } 2833 2834 static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev, 2835 struct pci_dev *pdev, 2836 struct rtw89_pci_tx_ring *tx_ring, 2837 u32 desc_size, u32 len, 2838 enum rtw89_tx_channel txch) 2839 { 2840 const struct rtw89_pci_ch_dma_addr *txch_addr; 2841 int ring_sz = desc_size * len; 2842 u8 *head; 2843 dma_addr_t dma; 2844 int ret; 2845 2846 ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch); 2847 if (ret) { 2848 rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch); 2849 goto err; 2850 } 2851 2852 ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr); 2853 if (ret) { 2854 rtw89_err(rtwdev, "failed to get address of txch %d", txch); 2855 goto err_free_wd_ring; 2856 } 2857 2858 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 2859 if (!head) { 2860 ret = -ENOMEM; 2861 goto err_free_wd_ring; 2862 } 2863 2864 INIT_LIST_HEAD(&tx_ring->busy_pages); 2865 tx_ring->bd_ring.head = head; 2866 tx_ring->bd_ring.dma = dma; 2867 
tx_ring->bd_ring.len = len; 2868 tx_ring->bd_ring.desc_size = desc_size; 2869 tx_ring->bd_ring.addr = *txch_addr; 2870 tx_ring->bd_ring.wp = 0; 2871 tx_ring->bd_ring.rp = 0; 2872 tx_ring->txch = txch; 2873 2874 return 0; 2875 2876 err_free_wd_ring: 2877 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 2878 err: 2879 return ret; 2880 } 2881 2882 static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev, 2883 struct pci_dev *pdev) 2884 { 2885 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2886 struct rtw89_pci_tx_ring *tx_ring; 2887 u32 desc_size; 2888 u32 len; 2889 u32 i, tx_allocated; 2890 int ret; 2891 2892 for (i = 0; i < RTW89_TXCH_NUM; i++) { 2893 tx_ring = &rtwpci->tx_rings[i]; 2894 desc_size = sizeof(struct rtw89_pci_tx_bd_32); 2895 len = RTW89_PCI_TXBD_NUM_MAX; 2896 ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring, 2897 desc_size, len, i); 2898 if (ret) { 2899 rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i); 2900 goto err_free; 2901 } 2902 } 2903 2904 return 0; 2905 2906 err_free: 2907 tx_allocated = i; 2908 for (i = 0; i < tx_allocated; i++) { 2909 tx_ring = &rtwpci->tx_rings[i]; 2910 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 2911 } 2912 2913 return ret; 2914 } 2915 2916 static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev, 2917 struct pci_dev *pdev, 2918 struct rtw89_pci_rx_ring *rx_ring, 2919 u32 desc_size, u32 len, u32 rxch) 2920 { 2921 const struct rtw89_pci_ch_dma_addr *rxch_addr; 2922 struct sk_buff *skb; 2923 u8 *head; 2924 dma_addr_t dma; 2925 int ring_sz = desc_size * len; 2926 int buf_sz = RTW89_PCI_RX_BUF_SIZE; 2927 int i, allocated; 2928 int ret; 2929 2930 ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr); 2931 if (ret) { 2932 rtw89_err(rtwdev, "failed to get address of rxch %d", rxch); 2933 return ret; 2934 } 2935 2936 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 2937 if (!head) { 2938 ret = -ENOMEM; 2939 goto err; 2940 } 2941 2942 rx_ring->bd_ring.head = head; 2943 rx_ring->bd_ring.dma = dma; 2944 rx_ring->bd_ring.len = len; 2945 rx_ring->bd_ring.desc_size = desc_size; 2946 rx_ring->bd_ring.addr = *rxch_addr; 2947 rx_ring->bd_ring.wp = 0; 2948 rx_ring->bd_ring.rp = 0; 2949 rx_ring->buf_sz = buf_sz; 2950 rx_ring->diliver_skb = NULL; 2951 rx_ring->diliver_desc.ready = false; 2952 2953 for (i = 0; i < len; i++) { 2954 skb = dev_alloc_skb(buf_sz); 2955 if (!skb) { 2956 ret = -ENOMEM; 2957 goto err_free; 2958 } 2959 2960 memset(skb->data, 0, buf_sz); 2961 rx_ring->buf[i] = skb; 2962 ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb, 2963 buf_sz, i); 2964 if (ret) { 2965 rtw89_err(rtwdev, "failed to init rx buf %d\n", i); 2966 dev_kfree_skb_any(skb); 2967 rx_ring->buf[i] = NULL; 2968 goto err_free; 2969 } 2970 } 2971 2972 return 0; 2973 2974 err_free: 2975 allocated = i; 2976 for (i = 0; i < allocated; i++) { 2977 skb = rx_ring->buf[i]; 2978 if (!skb) 2979 continue; 2980 dma = *((dma_addr_t *)skb->cb); 2981 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 2982 dev_kfree_skb(skb); 2983 rx_ring->buf[i] = NULL; 2984 } 2985 2986 head = rx_ring->bd_ring.head; 2987 dma = rx_ring->bd_ring.dma; 2988 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2989 2990 rx_ring->bd_ring.head = NULL; 2991 err: 2992 return ret; 2993 } 2994 2995 static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev, 2996 struct pci_dev *pdev) 2997 { 2998 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2999 struct rtw89_pci_rx_ring *rx_ring; 3000 u32 desc_size; 3001 u32 len; 3002 int i, rx_allocated; 3003 int ret; 
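/* Set up one BD ring with RTW89_PCI_RXBD_NUM_MAX descriptors, each backed by a DMA-mapped skb, for every RX channel; on failure, free the rings that were already allocated. */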
3004 3005 for (i = 0; i < RTW89_RXCH_NUM; i++) { 3006 rx_ring = &rtwpci->rx_rings[i]; 3007 desc_size = sizeof(struct rtw89_pci_rx_bd_32); 3008 len = RTW89_PCI_RXBD_NUM_MAX; 3009 ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring, 3010 desc_size, len, i); 3011 if (ret) { 3012 rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i); 3013 goto err_free; 3014 } 3015 } 3016 3017 return 0; 3018 3019 err_free: 3020 rx_allocated = i; 3021 for (i = 0; i < rx_allocated; i++) { 3022 rx_ring = &rtwpci->rx_rings[i]; 3023 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 3024 } 3025 3026 return ret; 3027 } 3028 3029 static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev, 3030 struct pci_dev *pdev) 3031 { 3032 int ret; 3033 3034 ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev); 3035 if (ret) { 3036 rtw89_err(rtwdev, "failed to alloc dma tx rings\n"); 3037 goto err; 3038 } 3039 3040 ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev); 3041 if (ret) { 3042 rtw89_err(rtwdev, "failed to alloc dma rx rings\n"); 3043 goto err_free_tx_rings; 3044 } 3045 3046 return 0; 3047 3048 err_free_tx_rings: 3049 rtw89_pci_free_tx_rings(rtwdev, pdev); 3050 err: 3051 return ret; 3052 } 3053 3054 static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev, 3055 struct rtw89_pci *rtwpci) 3056 { 3057 skb_queue_head_init(&rtwpci->h2c_queue); 3058 skb_queue_head_init(&rtwpci->h2c_release_queue); 3059 } 3060 3061 static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev, 3062 struct pci_dev *pdev) 3063 { 3064 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3065 int ret; 3066 3067 ret = rtw89_pci_setup_mapping(rtwdev, pdev); 3068 if (ret) { 3069 rtw89_err(rtwdev, "failed to setup pci mapping\n"); 3070 goto err; 3071 } 3072 3073 ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev); 3074 if (ret) { 3075 rtw89_err(rtwdev, "failed to alloc pci trx rings\n"); 3076 goto err_pci_unmap; 3077 } 3078 3079 rtw89_pci_h2c_init(rtwdev, rtwpci); 3080 3081 spin_lock_init(&rtwpci->irq_lock); 3082 spin_lock_init(&rtwpci->trx_lock); 3083 3084 return 0; 3085 3086 err_pci_unmap: 3087 rtw89_pci_clear_mapping(rtwdev, pdev); 3088 err: 3089 return ret; 3090 } 3091 3092 static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev, 3093 struct pci_dev *pdev) 3094 { 3095 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3096 3097 rtw89_pci_free_trx_rings(rtwdev, pdev); 3098 rtw89_pci_clear_mapping(rtwdev, pdev); 3099 rtw89_pci_release_fwcmd(rtwdev, rtwpci, 3100 skb_queue_len(&rtwpci->h2c_queue), true); 3101 } 3102 3103 void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev) 3104 { 3105 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3106 3107 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0; 3108 3109 if (rtwpci->under_recovery) { 3110 rtwpci->intrs[0] = 0; 3111 rtwpci->intrs[1] = 0; 3112 } else { 3113 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3114 B_AX_RXDMA_INT_EN | 3115 B_AX_RXP1DMA_INT_EN | 3116 B_AX_RPQDMA_INT_EN | 3117 B_AX_RXDMA_STUCK_INT_EN | 3118 B_AX_RDU_INT_EN | 3119 B_AX_RPQBD_FULL_INT_EN | 3120 B_AX_HS0ISR_IND_INT_EN; 3121 3122 rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN; 3123 } 3124 } 3125 EXPORT_SYMBOL(rtw89_pci_config_intr_mask); 3126 3127 static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev) 3128 { 3129 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3130 3131 rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN; 3132 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3133 rtwpci->intrs[0] = 0; 3134 rtwpci->intrs[1] = 0; 3135 } 3136 3137 static void 
rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev) 3138 { 3139 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3140 3141 rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN | 3142 B_AX_HS1ISR_IND_INT_EN | 3143 B_AX_HS0ISR_IND_INT_EN; 3144 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3145 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3146 B_AX_RXDMA_INT_EN | 3147 B_AX_RXP1DMA_INT_EN | 3148 B_AX_RPQDMA_INT_EN | 3149 B_AX_RXDMA_STUCK_INT_EN | 3150 B_AX_RDU_INT_EN | 3151 B_AX_RPQBD_FULL_INT_EN; 3152 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3153 } 3154
3155 static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev) 3156 { 3157 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3158 3159 rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN | 3160 B_AX_HS0ISR_IND_INT_EN; 3161 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3162 rtwpci->intrs[0] = 0; 3163 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3164 } 3165
3166 void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev) 3167 { 3168 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3169 3170 if (rtwpci->under_recovery) 3171 rtw89_pci_recovery_intr_mask_v1(rtwdev); 3172 else if (rtwpci->low_power) 3173 rtw89_pci_low_power_intr_mask_v1(rtwdev); 3174 else 3175 rtw89_pci_default_intr_mask_v1(rtwdev); 3176 } 3177 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1); 3178
3179 static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev, 3180 struct pci_dev *pdev) 3181 { 3182 unsigned long flags = 0; 3183 int ret; 3184 3185 flags |= PCI_IRQ_LEGACY | PCI_IRQ_MSI; 3186 ret = pci_alloc_irq_vectors(pdev, 1, 1, flags); 3187 if (ret < 0) { 3188 rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret); 3189 goto err; 3190 } 3191 3192 ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq, 3193 rtw89_pci_interrupt_handler, 3194 rtw89_pci_interrupt_threadfn, 3195 IRQF_SHARED, KBUILD_MODNAME, rtwdev); 3196 if (ret) { 3197 rtw89_err(rtwdev, "failed to request threaded irq\n"); 3198 goto err_free_vector; 3199 } 3200 3201 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET); 3202 3203 return 0; 3204 3205 err_free_vector: 3206 pci_free_irq_vectors(pdev); 3207 err: 3208 return ret; 3209 } 3210
3211 static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev, 3212 struct pci_dev *pdev) 3213 { 3214 devm_free_irq(rtwdev->dev, pdev->irq, rtwdev); 3215 pci_free_irq_vectors(pdev); 3216 } 3217
3218 static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable) 3219 { 3220 int ret; 3221 3222 if (rtw89_pci_disable_clkreq) 3223 return; 3224 3225 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 3226 PCIE_CLKDLY_HW_30US); 3227 if (ret) 3228 rtw89_err(rtwdev, "failed to set CLKREQ Delay\n"); 3229 3230 if (enable) 3231 ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL, 3232 RTW89_PCIE_BIT_CLK); 3233 else 3234 ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1_CTRL, 3235 RTW89_PCIE_BIT_CLK); 3236 if (ret) 3237 rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d", 3238 enable ?
"set" : "unset", ret); 3239 } 3240
3241 static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable) 3242 { 3243 u8 value = 0; 3244 int ret; 3245 3246 if (rtw89_pci_disable_aspm_l1) 3247 return; 3248 3249 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value); 3250 if (ret) 3251 rtw89_err(rtwdev, "failed to read ASPM Delay\n"); 3252 3253 value &= ~(RTW89_L1DLY_MASK | RTW89_L0DLY_MASK); 3254 value |= FIELD_PREP(RTW89_L1DLY_MASK, PCIE_L1DLY_16US) | 3255 FIELD_PREP(RTW89_L0DLY_MASK, PCIE_L0SDLY_4US); 3256 3257 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value); 3258 if (ret) 3259 rtw89_err(rtwdev, "failed to write ASPM Delay\n"); 3260 3261 if (enable) 3262 ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL, 3263 RTW89_PCIE_BIT_L1); 3264 else 3265 ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1_CTRL, 3266 RTW89_PCIE_BIT_L1); 3267 if (ret) 3268 rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d", 3269 enable ? "set" : "unset", ret); 3270 } 3271
3272 static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev) 3273 { 3274 struct rtw89_traffic_stats *stats = &rtwdev->stats; 3275 enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv; 3276 enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv; 3277 u32 val = 0; 3278 3279 if (!rtwdev->scanning && 3280 (tx_tfc_lv >= RTW89_TFC_HIGH || rx_tfc_lv >= RTW89_TFC_HIGH)) 3281 val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL | 3282 FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) | 3283 FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) | 3284 FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64); 3285 3286 rtw89_write32(rtwdev, R_AX_INT_MIT_RX, val); 3287 } 3288
3289 static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev) 3290 { 3291 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3292 struct pci_dev *pdev = rtwpci->pdev; 3293 u16 link_ctrl; 3294 int ret; 3295 3296 /* Though the standard PCIe configuration space provides a link 3297 * control register, by Realtek's design the driver must also check 3298 * whether the host supports CLKREQ/ASPM before enabling the HW module. 3299 * 3300 * These features are implemented by two associated HW modules: one 3301 * is responsible for accessing the PCIe configuration space to 3302 * follow the host settings, and the other performs the actual 3303 * CLKREQ/ASPM mechanisms and is disabled by default, because some 3304 * hosts do not support them, and wrong settings (e.g. CLKREQ# not 3305 * bi-directional) could lead to losing the device if the HW 3306 * misbehaves on the link. 3307 * 3308 * Hence the driver first checks that the PCIe configuration space 3309 * is synced and enabled, and only then turns on the module that 3310 * actually carries out the mechanism. 3311 */ 3312 ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl); 3313 if (ret) { 3314 rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret); 3315 return; 3316 } 3317 3318 if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN) 3319 rtw89_pci_clkreq_set(rtwdev, true); 3320 3321 if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1) 3322 rtw89_pci_aspm_set(rtwdev, true); 3323 } 3324
3325 static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable) 3326 { 3327 int ret; 3328 3329 if (enable) 3330 ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_TIMER_CTRL, 3331 RTW89_PCIE_BIT_L1SUB); 3332 else 3333 ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_TIMER_CTRL, 3334 RTW89_PCIE_BIT_L1SUB); 3335 if (ret) 3336 rtw89_err(rtwdev, "failed to %s L1SS, ret=%d", 3337 enable ?
"set" : "unset", ret); 3338 } 3339 3340 static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev) 3341 { 3342 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3343 struct pci_dev *pdev = rtwpci->pdev; 3344 u32 l1ss_cap_ptr, l1ss_ctrl; 3345 3346 if (rtw89_pci_disable_l1ss) 3347 return; 3348 3349 l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); 3350 if (!l1ss_cap_ptr) 3351 return; 3352 3353 pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl); 3354 3355 if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK) 3356 rtw89_pci_l1ss_set(rtwdev, true); 3357 } 3358 3359 static void rtw89_pci_ctrl_dma_all_pcie(struct rtw89_dev *rtwdev, u8 en) 3360 { 3361 const struct rtw89_pci_info *info = rtwdev->pci_info; 3362 u32 val32; 3363 3364 if (en == MAC_AX_FUNC_EN) { 3365 val32 = B_AX_STOP_PCIEIO; 3366 rtw89_write32_clr(rtwdev, info->dma_stop1_reg, val32); 3367 3368 val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN; 3369 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 3370 } else { 3371 val32 = B_AX_STOP_PCIEIO; 3372 rtw89_write32_set(rtwdev, info->dma_stop1_reg, val32); 3373 3374 val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN; 3375 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 3376 } 3377 } 3378 3379 static int rtw89_pci_poll_io_idle(struct rtw89_dev *rtwdev) 3380 { 3381 int ret = 0; 3382 u32 sts; 3383 u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY; 3384 3385 ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0, 3386 10, 1000, false, rtwdev, 3387 R_AX_PCIE_DMA_BUSY1); 3388 if (ret) { 3389 rtw89_err(rtwdev, "pci dmach busy1 0x%X\n", 3390 rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1)); 3391 return -EINVAL; 3392 } 3393 return ret; 3394 } 3395 3396 static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev) 3397 { 3398 u32 val, dma_rst = 0; 3399 int ret; 3400 3401 rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_DIS); 3402 ret = rtw89_pci_poll_io_idle(rtwdev); 3403 if (ret) { 3404 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 3405 rtw89_debug(rtwdev, RTW89_DBG_HCI, 3406 "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n", 3407 R_AX_DBG_ERR_FLAG, val); 3408 if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0) 3409 dma_rst |= B_AX_HCI_TXDMA_EN; 3410 if (val & B_AX_RX_STUCK) 3411 dma_rst |= B_AX_HCI_RXDMA_EN; 3412 val = rtw89_read32(rtwdev, R_AX_HCI_FUNC_EN); 3413 rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val & ~dma_rst); 3414 rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val | dma_rst); 3415 ret = rtw89_pci_poll_io_idle(rtwdev); 3416 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 3417 rtw89_debug(rtwdev, RTW89_DBG_HCI, 3418 "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n", 3419 R_AX_DBG_ERR_FLAG, val); 3420 } 3421 3422 return ret; 3423 } 3424 3425 static void rtw89_pci_ctrl_hci_dma_en(struct rtw89_dev *rtwdev, u8 en) 3426 { 3427 u32 val32; 3428 3429 if (en == MAC_AX_FUNC_EN) { 3430 val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN; 3431 rtw89_write32_set(rtwdev, R_AX_HCI_FUNC_EN, val32); 3432 } else { 3433 val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN; 3434 rtw89_write32_clr(rtwdev, R_AX_HCI_FUNC_EN, val32); 3435 } 3436 } 3437 3438 static int rtw89_pci_rst_bdram(struct rtw89_dev *rtwdev) 3439 { 3440 int ret = 0; 3441 u32 val32, sts; 3442 3443 val32 = B_AX_RST_BDRAM; 3444 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 3445 3446 ret = read_poll_timeout_atomic(rtw89_read32, sts, 3447 (sts & B_AX_RST_BDRAM) == 0x0, 1, 100, 3448 true, rtwdev, R_AX_PCIE_INIT_CFG1); 3449 return ret; 3450 } 3451 3452 static int rtw89_pci_lv1rst_start_dma(struct rtw89_dev 
*rtwdev) 3453 { 3454 u32 ret; 3455 3456 rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_DIS); 3457 rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_EN); 3458 rtw89_pci_clr_idx_all(rtwdev); 3459 3460 ret = rtw89_pci_rst_bdram(rtwdev); 3461 if (ret) 3462 return ret; 3463 3464 rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_EN); 3465 return ret; 3466 } 3467
3468 static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev, 3469 enum rtw89_lv1_rcvy_step step) 3470 { 3471 int ret; 3472 3473 switch (step) { 3474 case RTW89_LV1_RCVY_STEP_1: 3475 ret = rtw89_pci_lv1rst_stop_dma(rtwdev); 3476 if (ret) 3477 rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n"); 3478 3479 break; 3480 3481 case RTW89_LV1_RCVY_STEP_2: 3482 ret = rtw89_pci_lv1rst_start_dma(rtwdev); 3483 if (ret) 3484 rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n"); 3485 break; 3486 3487 default: 3488 return -EINVAL; 3489 } 3490 3491 return ret; 3492 } 3493
3494 static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev) 3495 { 3496 rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX =0x%08x\n", 3497 rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX)); 3498 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n", 3499 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG)); 3500 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n", 3501 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG)); 3502 } 3503
3504 static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget) 3505 { 3506 struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi); 3507 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3508 unsigned long flags; 3509 int work_done; 3510 3511 rtwdev->napi_budget_countdown = budget; 3512 3513 rtw89_pci_clear_isr0(rtwdev, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT); 3514 work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 3515 if (work_done == budget) 3516 return budget; 3517 3518 rtw89_pci_clear_isr0(rtwdev, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT | B_AX_RDU_INT); 3519 work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 3520 if (work_done < budget && napi_complete_done(napi, work_done)) { 3521 spin_lock_irqsave(&rtwpci->irq_lock, flags); 3522 if (likely(rtwpci->running)) 3523 rtw89_chip_enable_intr(rtwdev, rtwpci); 3524 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 3525 } 3526 3527 return work_done; 3528 } 3529
3530 static int __maybe_unused rtw89_pci_suspend(struct device *dev) 3531 { 3532 struct ieee80211_hw *hw = dev_get_drvdata(dev); 3533 struct rtw89_dev *rtwdev = hw->priv; 3534 3535 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 3536 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 3537 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3538 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); 3539 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3540 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 3541 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); 3542 3543 return 0; 3544 } 3545
3546 static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev) 3547 { 3548 if (rtwdev->chip->chip_id == RTL8852C) 3549 return; 3550 3551 /* The hardware needs this register written twice for the setting to take effect */ 3552 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE, 3553 RTW89_PCIE_BIT_CFG_RST_MSTATE); 3554 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE, 3555 RTW89_PCIE_BIT_CFG_RST_MSTATE); 3556 } 3557
3558 static int __maybe_unused rtw89_pci_resume(struct device *dev) 3559 { 3560 struct ieee80211_hw *hw = dev_get_drvdata(dev); 3561 struct rtw89_dev *rtwdev = hw->priv; 3562 3563
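/* Resume path: undo the suspend-time register settings, then restore the HCI LDO workaround and re-apply CLKREQ/ASPM and L1SS link configuration once the bus is back. */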
rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 3564 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 3565 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3566 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); 3567 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3568 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, 3569 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); 3570 rtw89_pci_l2_hci_ldo(rtwdev); 3571 rtw89_pci_link_cfg(rtwdev); 3572 rtw89_pci_l1ss_cfg(rtwdev); 3573 3574 return 0; 3575 } 3576 3577 SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume); 3578 EXPORT_SYMBOL(rtw89_pm_ops); 3579 3580 static const struct rtw89_hci_ops rtw89_pci_ops = { 3581 .tx_write = rtw89_pci_ops_tx_write, 3582 .tx_kick_off = rtw89_pci_ops_tx_kick_off, 3583 .flush_queues = rtw89_pci_ops_flush_queues, 3584 .reset = rtw89_pci_ops_reset, 3585 .start = rtw89_pci_ops_start, 3586 .stop = rtw89_pci_ops_stop, 3587 .pause = rtw89_pci_ops_pause, 3588 .switch_mode = rtw89_pci_ops_switch_mode, 3589 .recalc_int_mit = rtw89_pci_recalc_int_mit, 3590 3591 .read8 = rtw89_pci_ops_read8, 3592 .read16 = rtw89_pci_ops_read16, 3593 .read32 = rtw89_pci_ops_read32, 3594 .write8 = rtw89_pci_ops_write8, 3595 .write16 = rtw89_pci_ops_write16, 3596 .write32 = rtw89_pci_ops_write32, 3597 3598 .mac_pre_init = rtw89_pci_ops_mac_pre_init, 3599 .mac_post_init = rtw89_pci_ops_mac_post_init, 3600 .deinit = rtw89_pci_ops_deinit, 3601 3602 .check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource, 3603 .mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery, 3604 .dump_err_status = rtw89_pci_ops_dump_err_status, 3605 .napi_poll = rtw89_pci_napi_poll, 3606 3607 .recovery_start = rtw89_pci_ops_recovery_start, 3608 .recovery_complete = rtw89_pci_ops_recovery_complete, 3609 }; 3610 3611 int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 3612 { 3613 struct ieee80211_hw *hw; 3614 struct rtw89_dev *rtwdev; 3615 const struct rtw89_driver_info *info; 3616 const struct rtw89_pci_info *pci_info; 3617 int driver_data_size; 3618 int ret; 3619 3620 driver_data_size = sizeof(struct rtw89_dev) + sizeof(struct rtw89_pci); 3621 hw = ieee80211_alloc_hw(driver_data_size, &rtw89_ops); 3622 if (!hw) { 3623 dev_err(&pdev->dev, "failed to allocate hw\n"); 3624 return -ENOMEM; 3625 } 3626 3627 info = (const struct rtw89_driver_info *)id->driver_data; 3628 pci_info = info->bus.pci; 3629 3630 rtwdev = hw->priv; 3631 rtwdev->hw = hw; 3632 rtwdev->dev = &pdev->dev; 3633 rtwdev->chip = info->chip; 3634 rtwdev->pci_info = info->bus.pci; 3635 rtwdev->hci.ops = &rtw89_pci_ops; 3636 rtwdev->hci.type = RTW89_HCI_TYPE_PCIE; 3637 rtwdev->hci.rpwm_addr = pci_info->rpwm_addr; 3638 rtwdev->hci.cpwm_addr = pci_info->cpwm_addr; 3639 3640 SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev); 3641 3642 ret = rtw89_core_init(rtwdev); 3643 if (ret) { 3644 rtw89_err(rtwdev, "failed to initialise core\n"); 3645 goto err_release_hw; 3646 } 3647 3648 ret = rtw89_pci_claim_device(rtwdev, pdev); 3649 if (ret) { 3650 rtw89_err(rtwdev, "failed to claim pci device\n"); 3651 goto err_core_deinit; 3652 } 3653 3654 ret = rtw89_pci_setup_resource(rtwdev, pdev); 3655 if (ret) { 3656 rtw89_err(rtwdev, "failed to setup pci resource\n"); 3657 goto err_declaim_pci; 3658 } 3659 3660 ret = rtw89_chip_info_setup(rtwdev); 3661 if (ret) { 3662 rtw89_err(rtwdev, "failed to setup chip information\n"); 3663 goto err_clear_resource; 3664 } 3665 3666 rtw89_pci_link_cfg(rtwdev); 3667 rtw89_pci_l1ss_cfg(rtwdev); 3668 3669 ret = rtw89_core_register(rtwdev); 3670 
if (ret) { 3671 rtw89_err(rtwdev, "failed to register core\n"); 3672 goto err_clear_resource; 3673 } 3674 3675 rtw89_core_napi_init(rtwdev); 3676 3677 ret = rtw89_pci_request_irq(rtwdev, pdev); 3678 if (ret) { 3679 rtw89_err(rtwdev, "failed to request pci irq\n"); 3680 goto err_unregister; 3681 } 3682 3683 return 0; 3684 3685 err_unregister: 3686 rtw89_core_napi_deinit(rtwdev); 3687 rtw89_core_unregister(rtwdev); 3688 err_clear_resource: 3689 rtw89_pci_clear_resource(rtwdev, pdev); 3690 err_declaim_pci: 3691 rtw89_pci_declaim_device(rtwdev, pdev); 3692 err_core_deinit: 3693 rtw89_core_deinit(rtwdev); 3694 err_release_hw: 3695 ieee80211_free_hw(hw); 3696 3697 return ret; 3698 } 3699 EXPORT_SYMBOL(rtw89_pci_probe); 3700 3701 void rtw89_pci_remove(struct pci_dev *pdev) 3702 { 3703 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 3704 struct rtw89_dev *rtwdev; 3705 3706 rtwdev = hw->priv; 3707 3708 rtw89_pci_free_irq(rtwdev, pdev); 3709 rtw89_core_napi_deinit(rtwdev); 3710 rtw89_core_unregister(rtwdev); 3711 rtw89_pci_clear_resource(rtwdev, pdev); 3712 rtw89_pci_declaim_device(rtwdev, pdev); 3713 rtw89_core_deinit(rtwdev); 3714 ieee80211_free_hw(hw); 3715 } 3716 EXPORT_SYMBOL(rtw89_pci_remove); 3717 3718 MODULE_AUTHOR("Realtek Corporation"); 3719 MODULE_DESCRIPTION("Realtek 802.11ax wireless PCI driver"); 3720 MODULE_LICENSE("Dual BSD/GPL"); 3721