1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2020 Realtek Corporation 3 */ 4 5 #include <linux/pci.h> 6 7 #include "mac.h" 8 #include "pci.h" 9 #include "reg.h" 10 #include "ser.h" 11 12 static bool rtw89_pci_disable_clkreq; 13 static bool rtw89_pci_disable_aspm_l1; 14 static bool rtw89_pci_disable_l1ss; 15 module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644); 16 module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644); 17 module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644); 18 MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support"); 19 MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support"); 20 MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support"); 21 22 static int rtw89_pci_rst_bdram_pcie(struct rtw89_dev *rtwdev) 23 { 24 u32 val; 25 int ret; 26 27 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, 28 rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | B_AX_RST_BDRAM); 29 30 ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM), 31 1, RTW89_PCI_POLL_BDRAM_RST_CNT, false, 32 rtwdev, R_AX_PCIE_INIT_CFG1); 33 34 if (ret) 35 return -EBUSY; 36 37 return 0; 38 } 39 40 static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev, 41 struct rtw89_pci_dma_ring *bd_ring, 42 u32 cur_idx, bool tx) 43 { 44 u32 cnt, cur_rp, wp, rp, len; 45 46 rp = bd_ring->rp; 47 wp = bd_ring->wp; 48 len = bd_ring->len; 49 50 cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx); 51 if (tx) 52 cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp); 53 else 54 cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp); 55 56 bd_ring->rp = cur_rp; 57 58 return cnt; 59 } 60 61 static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev, 62 struct rtw89_pci_tx_ring *tx_ring) 63 { 64 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 65 u32 addr_idx = bd_ring->addr_idx; 66 u32 cnt, idx; 67 68 idx = rtw89_read32(rtwdev, addr_idx); 69 cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true); 70 71 return cnt; 72 } 73 74 static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev, 75 struct rtw89_pci *rtwpci, 76 u32 cnt, bool release_all) 77 { 78 struct rtw89_pci_tx_data *tx_data; 79 struct sk_buff *skb; 80 u32 qlen; 81 82 while (cnt--) { 83 skb = skb_dequeue(&rtwpci->h2c_queue); 84 if (!skb) { 85 rtw89_err(rtwdev, "failed to pre-release fwcmd\n"); 86 return; 87 } 88 skb_queue_tail(&rtwpci->h2c_release_queue, skb); 89 } 90 91 qlen = skb_queue_len(&rtwpci->h2c_release_queue); 92 if (!release_all) 93 qlen = qlen > RTW89_PCI_MULTITAG ? 
qlen - RTW89_PCI_MULTITAG : 0; 94 95 while (qlen--) { 96 skb = skb_dequeue(&rtwpci->h2c_release_queue); 97 if (!skb) { 98 rtw89_err(rtwdev, "failed to release fwcmd\n"); 99 return; 100 } 101 tx_data = RTW89_PCI_TX_SKB_CB(skb); 102 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, 103 DMA_TO_DEVICE); 104 dev_kfree_skb_any(skb); 105 } 106 } 107 108 static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev, 109 struct rtw89_pci *rtwpci) 110 { 111 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12]; 112 u32 cnt; 113 114 cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring); 115 if (!cnt) 116 return; 117 rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false); 118 } 119 120 static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev, 121 struct rtw89_pci_rx_ring *rx_ring) 122 { 123 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 124 u32 addr_idx = bd_ring->addr_idx; 125 u32 cnt, idx; 126 127 idx = rtw89_read32(rtwdev, addr_idx); 128 cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false); 129 130 return cnt; 131 } 132 133 static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev, 134 struct sk_buff *skb) 135 { 136 struct rtw89_pci_rx_info *rx_info; 137 dma_addr_t dma; 138 139 rx_info = RTW89_PCI_RX_SKB_CB(skb); 140 dma = rx_info->dma; 141 dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE, 142 DMA_FROM_DEVICE); 143 } 144 145 static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev, 146 struct sk_buff *skb) 147 { 148 struct rtw89_pci_rx_info *rx_info; 149 dma_addr_t dma; 150 151 rx_info = RTW89_PCI_RX_SKB_CB(skb); 152 dma = rx_info->dma; 153 dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE, 154 DMA_FROM_DEVICE); 155 } 156 157 static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev, 158 struct sk_buff *skb) 159 { 160 struct rtw89_pci_rxbd_info *rxbd_info; 161 struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); 162 163 rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data; 164 rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS); 165 rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS); 166 rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE); 167 rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG); 168 169 return 0; 170 } 171 172 static bool 173 rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls, 174 struct sk_buff *new, 175 const struct sk_buff *skb, u32 offset, 176 const struct rtw89_pci_rx_info *rx_info, 177 const struct rtw89_rx_desc_info *desc_info) 178 { 179 u32 copy_len = rx_info->len - offset; 180 181 if (unlikely(skb_tailroom(new) < copy_len)) { 182 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 183 "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n", 184 rx_info->len, desc_info->pkt_size, offset, fs, ls); 185 rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ", 186 skb->data, rx_info->len); 187 /* length of a single segment skb is desc_info->pkt_size */ 188 if (fs && ls) { 189 copy_len = desc_info->pkt_size; 190 } else { 191 rtw89_info(rtwdev, "drop rx data due to invalid length\n"); 192 return false; 193 } 194 } 195 196 skb_put_data(new, skb->data + offset, copy_len); 197 198 return true; 199 } 200 201 static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev, 202 struct rtw89_pci_rx_ring *rx_ring) 203 { 204 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 205 struct rtw89_pci_rx_info *rx_info; 206 struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc; 207 struct sk_buff *new = 
rx_ring->diliver_skb; 208 struct sk_buff *skb; 209 u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info); 210 u32 offset; 211 u32 cnt = 1; 212 bool fs, ls; 213 int ret; 214 215 skb = rx_ring->buf[bd_ring->wp]; 216 rtw89_pci_sync_skb_for_cpu(rtwdev, skb); 217 218 ret = rtw89_pci_rxbd_info_update(rtwdev, skb); 219 if (ret) { 220 rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n", 221 bd_ring->wp, ret); 222 goto err_sync_device; 223 } 224 225 rx_info = RTW89_PCI_RX_SKB_CB(skb); 226 fs = rx_info->fs; 227 ls = rx_info->ls; 228 229 if (fs) { 230 if (new) { 231 rtw89_err(rtwdev, "skb should not be ready before first segment start\n"); 232 goto err_sync_device; 233 } 234 if (desc_info->ready) { 235 rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n"); 236 goto err_sync_device; 237 } 238 239 rtw89_core_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size); 240 241 new = dev_alloc_skb(desc_info->pkt_size); 242 if (!new) 243 goto err_sync_device; 244 245 rx_ring->diliver_skb = new; 246 247 /* first segment has RX desc */ 248 offset = desc_info->offset; 249 offset += desc_info->long_rxdesc ? sizeof(struct rtw89_rxdesc_long) : 250 sizeof(struct rtw89_rxdesc_short); 251 } else { 252 offset = sizeof(struct rtw89_pci_rxbd_info); 253 if (!new) { 254 rtw89_warn(rtwdev, "no last skb\n"); 255 goto err_sync_device; 256 } 257 } 258 if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info)) 259 goto err_sync_device; 260 rtw89_pci_sync_skb_for_device(rtwdev, skb); 261 rtw89_pci_rxbd_increase(rx_ring, 1); 262 263 if (!desc_info->ready) { 264 rtw89_warn(rtwdev, "no rx desc information\n"); 265 goto err_free_resource; 266 } 267 if (ls) { 268 rtw89_core_rx(rtwdev, desc_info, new); 269 rx_ring->diliver_skb = NULL; 270 desc_info->ready = false; 271 } 272 273 return cnt; 274 275 err_sync_device: 276 rtw89_pci_sync_skb_for_device(rtwdev, skb); 277 rtw89_pci_rxbd_increase(rx_ring, 1); 278 err_free_resource: 279 if (new) 280 dev_kfree_skb_any(new); 281 rx_ring->diliver_skb = NULL; 282 desc_info->ready = false; 283 284 return cnt; 285 } 286 287 static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev, 288 struct rtw89_pci_rx_ring *rx_ring, 289 u32 cnt) 290 { 291 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 292 u32 rx_cnt; 293 294 while (cnt && rtwdev->napi_budget_countdown > 0) { 295 rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring); 296 if (!rx_cnt) { 297 rtw89_err(rtwdev, "failed to deliver RXBD skb\n"); 298 299 /* skip the rest RXBD bufs */ 300 rtw89_pci_rxbd_increase(rx_ring, cnt); 301 break; 302 } 303 304 cnt -= rx_cnt; 305 } 306 307 rtw89_write16(rtwdev, bd_ring->addr_idx, bd_ring->wp); 308 } 309 310 static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev, 311 struct rtw89_pci *rtwpci, int budget) 312 { 313 struct rtw89_pci_rx_ring *rx_ring; 314 int countdown = rtwdev->napi_budget_countdown; 315 u32 cnt; 316 317 rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ]; 318 319 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); 320 if (!cnt) 321 return 0; 322 323 cnt = min_t(u32, budget, cnt); 324 325 rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt); 326 327 /* In case of flushing pending SKBs, the countdown may exceed. 
*/ 328 if (rtwdev->napi_budget_countdown <= 0) 329 return budget; 330 331 return budget - countdown; 332 } 333 334 static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev, 335 struct rtw89_pci_tx_ring *tx_ring, 336 struct sk_buff *skb, u8 tx_status) 337 { 338 struct ieee80211_tx_info *info; 339 340 info = IEEE80211_SKB_CB(skb); 341 ieee80211_tx_info_clear_status(info); 342 343 if (info->flags & IEEE80211_TX_CTL_NO_ACK) 344 info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; 345 if (tx_status == RTW89_TX_DONE) { 346 info->flags |= IEEE80211_TX_STAT_ACK; 347 tx_ring->tx_acked++; 348 } else { 349 if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) 350 rtw89_debug(rtwdev, RTW89_DBG_FW, 351 "failed to TX of status %x\n", tx_status); 352 switch (tx_status) { 353 case RTW89_TX_RETRY_LIMIT: 354 tx_ring->tx_retry_lmt++; 355 break; 356 case RTW89_TX_LIFE_TIME: 357 tx_ring->tx_life_time++; 358 break; 359 case RTW89_TX_MACID_DROP: 360 tx_ring->tx_mac_id_drop++; 361 break; 362 default: 363 rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status); 364 break; 365 } 366 } 367 368 ieee80211_tx_status_ni(rtwdev->hw, skb); 369 } 370 371 static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring) 372 { 373 struct rtw89_pci_tx_wd *txwd; 374 u32 cnt; 375 376 cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring); 377 while (cnt--) { 378 txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list); 379 if (!txwd) { 380 rtw89_warn(rtwdev, "No busy txwd pages available\n"); 381 break; 382 } 383 384 list_del_init(&txwd->list); 385 } 386 } 387 388 static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev, 389 struct rtw89_pci_tx_ring *tx_ring) 390 { 391 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 392 struct rtw89_pci_tx_wd *txwd; 393 int i; 394 395 for (i = 0; i < wd_ring->page_num; i++) { 396 txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list); 397 if (!txwd) 398 break; 399 400 list_del_init(&txwd->list); 401 } 402 } 403 404 static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev, 405 struct rtw89_pci_tx_ring *tx_ring, 406 struct rtw89_pci_tx_wd *txwd, u16 seq, 407 u8 tx_status) 408 { 409 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 410 struct rtw89_pci_tx_data *tx_data; 411 struct sk_buff *skb, *tmp; 412 u8 txch = tx_ring->txch; 413 414 if (!list_empty(&txwd->list)) { 415 rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n", 416 txch, seq); 417 return; 418 } 419 420 /* currently, support for only one frame */ 421 if (skb_queue_len(&txwd->queue) != 1) { 422 rtw89_warn(rtwdev, "empty pending queue %d page %d\n", 423 txch, seq); 424 return; 425 } 426 427 skb_queue_walk_safe(&txwd->queue, skb, tmp) { 428 skb_unlink(skb, &txwd->queue); 429 430 tx_data = RTW89_PCI_TX_SKB_CB(skb); 431 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, 432 DMA_TO_DEVICE); 433 434 rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status); 435 } 436 437 rtw89_pci_enqueue_txwd(tx_ring, txwd); 438 } 439 440 static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev, 441 struct rtw89_pci_rpp_fmt *rpp) 442 { 443 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 444 struct rtw89_pci_tx_ring *tx_ring; 445 struct rtw89_pci_tx_wd_ring *wd_ring; 446 struct rtw89_pci_tx_wd *txwd; 447 u16 seq; 448 u8 qsel, tx_status, txch; 449 450 seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ); 451 qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL); 452 tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS); 453 txch 
= rtw89_core_get_ch_dma(rtwdev, qsel);

	if (txch == RTW89_TXCH_CH12) {
		rtw89_warn(rtwdev, "fwcmd queue should not get a release report\n");
		return;
	}

	tx_ring = &rtwpci->tx_rings[txch];
	rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
	wd_ring = &tx_ring->wd_ring;
	txwd = &wd_ring->pages[seq];

	rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status);
}

static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
					       struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	int i;

	for (i = 0; i < wd_ring->page_num; i++) {
		txwd = &wd_ring->pages[i];

		if (!list_empty(&txwd->list))
			continue;

		rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP);
	}
}

static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
				     struct rtw89_pci_rx_ring *rx_ring,
				     u32 max_cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	struct rtw89_pci_rx_info *rx_info;
	struct rtw89_pci_rpp_fmt *rpp;
	struct rtw89_rx_desc_info desc_info = {};
	struct sk_buff *skb;
	u32 cnt = 0;
	u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt);
	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
	u32 offset;
	int ret;

	skb = rx_ring->buf[bd_ring->wp];
	rtw89_pci_sync_skb_for_cpu(rtwdev, skb);

	ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
	if (ret) {
		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
			  bd_ring->wp, ret);
		goto err_sync_device;
	}

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	if (!rx_info->fs || !rx_info->ls) {
		rtw89_err(rtwdev, "cannot process RP frame without FS/LS set\n");
		return cnt;
	}

	rtw89_core_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size);

	/* first segment has RX desc */
	offset = desc_info.offset;
	offset += desc_info.long_rxdesc ?
sizeof(struct rtw89_rxdesc_long) : 521 sizeof(struct rtw89_rxdesc_short); 522 for (; offset + rpp_size <= rx_info->len; offset += rpp_size) { 523 rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset); 524 rtw89_pci_release_rpp(rtwdev, rpp); 525 } 526 527 rtw89_pci_sync_skb_for_device(rtwdev, skb); 528 rtw89_pci_rxbd_increase(rx_ring, 1); 529 cnt++; 530 531 return cnt; 532 533 err_sync_device: 534 rtw89_pci_sync_skb_for_device(rtwdev, skb); 535 return 0; 536 } 537 538 static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev, 539 struct rtw89_pci_rx_ring *rx_ring, 540 u32 cnt) 541 { 542 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 543 u32 release_cnt; 544 545 while (cnt) { 546 release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt); 547 if (!release_cnt) { 548 rtw89_err(rtwdev, "failed to release TX skbs\n"); 549 550 /* skip the rest RXBD bufs */ 551 rtw89_pci_rxbd_increase(rx_ring, cnt); 552 break; 553 } 554 555 cnt -= release_cnt; 556 } 557 558 rtw89_write16(rtwdev, bd_ring->addr_idx, bd_ring->wp); 559 } 560 561 static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev, 562 struct rtw89_pci *rtwpci, int budget) 563 { 564 struct rtw89_pci_rx_ring *rx_ring; 565 u32 cnt; 566 int work_done; 567 568 rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ]; 569 570 spin_lock_bh(&rtwpci->trx_lock); 571 572 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); 573 if (cnt == 0) 574 goto out_unlock; 575 576 rtw89_pci_release_tx(rtwdev, rx_ring, cnt); 577 578 out_unlock: 579 spin_unlock_bh(&rtwpci->trx_lock); 580 581 /* always release all RPQ */ 582 work_done = min_t(int, cnt, budget); 583 rtwdev->napi_budget_countdown -= work_done; 584 585 return work_done; 586 } 587 588 static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev, 589 struct rtw89_pci *rtwpci) 590 { 591 struct rtw89_pci_rx_ring *rx_ring; 592 struct rtw89_pci_dma_ring *bd_ring; 593 u32 reg_idx; 594 u16 hw_idx, hw_idx_next, host_idx; 595 int i; 596 597 for (i = 0; i < RTW89_RXCH_NUM; i++) { 598 rx_ring = &rtwpci->rx_rings[i]; 599 bd_ring = &rx_ring->bd_ring; 600 601 reg_idx = rtw89_read32(rtwdev, bd_ring->addr_idx); 602 hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx); 603 host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx); 604 hw_idx_next = (hw_idx + 1) % bd_ring->len; 605 606 if (hw_idx_next == host_idx) 607 rtw89_warn(rtwdev, "%d RXD unavailable\n", i); 608 609 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 610 "%d RXD unavailable, idx=0x%08x, len=%d\n", 611 i, reg_idx, bd_ring->len); 612 } 613 } 614 615 static void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev, 616 struct rtw89_pci *rtwpci, 617 struct rtw89_pci_isrs *isrs) 618 { 619 isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs; 620 isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0]; 621 isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1]; 622 623 rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs); 624 rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]); 625 rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]); 626 } 627 628 static void rtw89_pci_clear_isr0(struct rtw89_dev *rtwdev, u32 isr00) 629 { 630 /* write 1 clear */ 631 rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isr00); 632 } 633 634 static void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, 635 struct rtw89_pci *rtwpci) 636 { 637 rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs); 638 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]); 639 rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]); 640 } 641 642 static void 
rtw89_pci_disable_intr(struct rtw89_dev *rtwdev,
				   struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
}

static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_isrs isrs;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_pci_recognize_intrs(rtwdev, rtwpci, &isrs);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	if (unlikely(isrs.isrs[0] & B_AX_RDU_INT))
		rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);

	if (unlikely(isrs.halt_c2h_isrs & B_AX_HALT_C2H_INT_EN))
		rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));

	if (likely(rtwpci->running)) {
		local_bh_disable();
		napi_schedule(&rtwdev->napi);
		local_bh_enable();
	}

	return IRQ_HANDLED;
}

static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;
	irqreturn_t irqret = IRQ_WAKE_THREAD;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);

	/* An interrupt that is already in flight can still reach this
	 * handler even after pci_stop() has turned off the IMR, so bail
	 * out if the interface is no longer running.
	 */
	if (unlikely(!rtwpci->running)) {
		irqret = IRQ_HANDLED;
		goto exit;
	}

	rtw89_pci_disable_intr(rtwdev, rtwpci);
exit:
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	return irqret;
}

#define case_TXCHADDRS(txch) \
	case RTW89_TXCH_##txch: \
		*addr_num = R_AX_##txch##_TXBD_NUM; \
		*addr_idx = R_AX_##txch##_TXBD_IDX; \
		*addr_bdram = R_AX_##txch##_BDRAM_CTRL; \
		*addr_desa_l = R_AX_##txch##_TXBD_DESA_L; \
		*addr_desa_h = R_AX_##txch##_TXBD_DESA_H; \
		break

static int rtw89_pci_get_txch_addrs(enum rtw89_tx_channel txch,
				    u32 *addr_num,
				    u32 *addr_idx,
				    u32 *addr_bdram,
				    u32 *addr_desa_l,
				    u32 *addr_desa_h)
{
	switch (txch) {
	case_TXCHADDRS(ACH0);
	case_TXCHADDRS(ACH1);
	case_TXCHADDRS(ACH2);
	case_TXCHADDRS(ACH3);
	case_TXCHADDRS(ACH4);
	case_TXCHADDRS(ACH5);
	case_TXCHADDRS(ACH6);
	case_TXCHADDRS(ACH7);
	case_TXCHADDRS(CH8);
	case_TXCHADDRS(CH9);
	case_TXCHADDRS(CH10);
	case_TXCHADDRS(CH11);
	case_TXCHADDRS(CH12);
	default:
		return -EINVAL;
	}

	return 0;
}

#undef case_TXCHADDRS

#define case_RXCHADDRS(rxch) \
	case RTW89_RXCH_##rxch: \
		*addr_num = R_AX_##rxch##_RXBD_NUM; \
		*addr_idx = R_AX_##rxch##_RXBD_IDX; \
		*addr_desa_l = R_AX_##rxch##_RXBD_DESA_L; \
		*addr_desa_h = R_AX_##rxch##_RXBD_DESA_H; \
		break

static int rtw89_pci_get_rxch_addrs(enum rtw89_rx_channel rxch,
				    u32 *addr_num,
				    u32 *addr_idx,
				    u32 *addr_desa_l,
				    u32 *addr_desa_h)
{
	switch (rxch) {
	case_RXCHADDRS(RXQ);
	case_RXCHADDRS(RPQ);
	default:
		return -EINVAL;
	}

	return 0;
}

#undef case_RXCHADDRS

static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;

	/* one descriptor is reserved so that a full ring can be told apart
	 * from an empty one
	 */
	if (bd_ring->rp > bd_ring->wp)
		return bd_ring->rp - bd_ring->wp - 1;

	return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1;
}

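/* Worked example for rtw89_pci_get_avail_txbd_num() above, with
 * illustrative values only: for len = 256, wp = 10 and rp = 5 the ring has
 * 256 - (10 - 5) - 1 = 250 free TXBDs; for rp = 11 it has 11 - 10 - 1 = 0,
 * i.e. the reserved descriptor keeps the write pointer from ever catching
 * up with the read pointer on a full ring.
 */
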
static
u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
	u32 cnt;

	spin_lock_bh(&rtwpci->trx_lock);
	rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	spin_unlock_bh(&rtwpci->trx_lock);

	return cnt;
}

static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
						     u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	u32 bd_cnt, wd_cnt, min_cnt = 0;
	struct rtw89_pci_rx_ring *rx_ring;
	u32 cnt;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

	spin_lock_bh(&rtwpci->trx_lock);
	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	wd_cnt = wd_ring->curr_num;

	if (wd_cnt == 0 || bd_cnt == 0) {
		cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
		if (!cnt)
			goto out_unlock;
		rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
	}

	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	wd_cnt = wd_ring->curr_num;
	min_cnt = min(bd_cnt, wd_cnt);
	if (min_cnt == 0)
		rtw89_warn(rtwdev, "still no tx resource after reclaim\n");

out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);

	return min_cnt;
}

static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
						   u8 txch)
{
	if (txch == RTW89_TXCH_CH12)
		return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev);

	return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch);
}

static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 host_idx, addr;

	addr = bd_ring->addr_idx;
	host_idx = bd_ring->wp;
	rtw89_write16(rtwdev, addr, host_idx);
}

static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring,
					int n_txbd)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 host_idx, len;

	len = bd_ring->len;
	host_idx = bd_ring->wp + n_txbd;
	host_idx = host_idx < len ? host_idx : host_idx - len;

	bd_ring->wp = host_idx;
}

static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];

	spin_lock_bh(&rtwpci->trx_lock);
	__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
	spin_unlock_bh(&rtwpci->trx_lock);
}

static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 cur_idx, cur_rp;
	u8 i;

	/* Because the time taken by the I/O varies, it is hard to define a
	 * reasonable fixed total timeout for the read_poll_timeout*
	 * helpers. Instead, bound the number of polling iterations and
	 * just use a for loop with udelay here.
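	 *
	 * Editorial note: with the loop bounds below this busy-waits for at
	 * most 60 iterations of udelay(1), i.e. roughly 60us plus the
	 * register read latency per pass, before giving up on the flush.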
880 */ 881 for (i = 0; i < 60; i++) { 882 cur_idx = rtw89_read32(rtwdev, bd_ring->addr_idx); 883 cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx); 884 if (cur_rp == bd_ring->wp) 885 return; 886 887 udelay(1); 888 } 889 890 if (!drop) 891 rtw89_info(rtwdev, "timed out to flush pci txch: %d\n", txch); 892 } 893 894 static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs, 895 bool drop) 896 { 897 u8 i; 898 899 for (i = 0; i < RTW89_TXCH_NUM; i++) { 900 /* It may be unnecessary to flush FWCMD queue. */ 901 if (i == RTW89_TXCH_CH12) 902 continue; 903 904 if (txchs & BIT(i)) 905 __pci_flush_txch(rtwdev, i, drop); 906 } 907 } 908 909 static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues, 910 bool drop) 911 { 912 __rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop); 913 } 914 915 static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, 916 struct rtw89_pci_tx_ring *tx_ring, 917 struct rtw89_pci_tx_wd *txwd, 918 struct rtw89_core_tx_request *tx_req) 919 { 920 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 921 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 922 struct rtw89_txwd_body *txwd_body; 923 struct rtw89_txwd_info *txwd_info; 924 struct rtw89_pci_tx_wp_info *txwp_info; 925 struct rtw89_pci_tx_addr_info_32 *txaddr_info; 926 struct pci_dev *pdev = rtwpci->pdev; 927 struct sk_buff *skb = tx_req->skb; 928 struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); 929 bool en_wd_info = desc_info->en_wd_info; 930 u32 txwd_len; 931 u32 txwp_len; 932 u32 txaddr_info_len; 933 dma_addr_t dma; 934 int ret; 935 936 rtw89_core_fill_txdesc(rtwdev, desc_info, txwd->vaddr); 937 938 dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); 939 if (dma_mapping_error(&pdev->dev, dma)) { 940 rtw89_err(rtwdev, "failed to map skb dma data\n"); 941 ret = -EBUSY; 942 goto err; 943 } 944 945 tx_data->dma = dma; 946 947 txaddr_info_len = sizeof(*txaddr_info); 948 txwp_len = sizeof(*txwp_info); 949 txwd_len = sizeof(*txwd_body); 950 txwd_len += en_wd_info ? 
sizeof(*txwd_info) : 0; 951 952 txwp_info = txwd->vaddr + txwd_len; 953 txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID); 954 txwp_info->seq1 = 0; 955 txwp_info->seq2 = 0; 956 txwp_info->seq3 = 0; 957 958 tx_ring->tx_cnt++; 959 txaddr_info = txwd->vaddr + txwd_len + txwp_len; 960 txaddr_info->length = cpu_to_le16(skb->len); 961 txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | 962 RTW89_PCI_ADDR_NUM(1)); 963 txaddr_info->dma = cpu_to_le32(dma); 964 965 txwd->len = txwd_len + txwp_len + txaddr_info_len; 966 967 skb_queue_tail(&txwd->queue, skb); 968 969 return 0; 970 971 err: 972 return ret; 973 } 974 975 static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev, 976 struct rtw89_pci_tx_ring *tx_ring, 977 struct rtw89_pci_tx_bd_32 *txbd, 978 struct rtw89_core_tx_request *tx_req) 979 { 980 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 981 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 982 struct rtw89_txwd_body *txwd_body; 983 struct pci_dev *pdev = rtwpci->pdev; 984 struct sk_buff *skb = tx_req->skb; 985 struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); 986 dma_addr_t dma; 987 988 txwd_body = (struct rtw89_txwd_body *)skb_push(skb, sizeof(*txwd_body)); 989 memset(txwd_body, 0, sizeof(*txwd_body)); 990 rtw89_core_fill_txdesc(rtwdev, desc_info, txwd_body); 991 992 dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); 993 if (dma_mapping_error(&pdev->dev, dma)) { 994 rtw89_err(rtwdev, "failed to map fwcmd dma data\n"); 995 return -EBUSY; 996 } 997 998 tx_data->dma = dma; 999 txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS); 1000 txbd->length = cpu_to_le16(skb->len); 1001 txbd->dma = cpu_to_le32(tx_data->dma); 1002 skb_queue_tail(&rtwpci->h2c_queue, skb); 1003 1004 rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1); 1005 1006 return 0; 1007 } 1008 1009 static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev, 1010 struct rtw89_pci_tx_ring *tx_ring, 1011 struct rtw89_pci_tx_bd_32 *txbd, 1012 struct rtw89_core_tx_request *tx_req) 1013 { 1014 struct rtw89_pci_tx_wd *txwd; 1015 int ret; 1016 1017 /* FWCMD queue doesn't have wd pages. Instead, it submits the CMD 1018 * buffer with WD BODY only. So here we don't need to check the free 1019 * pages of the wd ring. 
1020 */ 1021 if (tx_ring->txch == RTW89_TXCH_CH12) 1022 return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req); 1023 1024 txwd = rtw89_pci_dequeue_txwd(tx_ring); 1025 if (!txwd) { 1026 rtw89_err(rtwdev, "no available TXWD\n"); 1027 ret = -ENOSPC; 1028 goto err; 1029 } 1030 1031 ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req); 1032 if (ret) { 1033 rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq); 1034 goto err_enqueue_wd; 1035 } 1036 1037 list_add_tail(&txwd->list, &tx_ring->busy_pages); 1038 1039 txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS); 1040 txbd->length = cpu_to_le16(txwd->len); 1041 txbd->dma = cpu_to_le32(txwd->paddr); 1042 1043 rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1); 1044 1045 return 0; 1046 1047 err_enqueue_wd: 1048 rtw89_pci_enqueue_txwd(tx_ring, txwd); 1049 err: 1050 return ret; 1051 } 1052 1053 static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req, 1054 u8 txch) 1055 { 1056 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1057 struct rtw89_pci_tx_ring *tx_ring; 1058 struct rtw89_pci_tx_bd_32 *txbd; 1059 u32 n_avail_txbd; 1060 int ret = 0; 1061 1062 /* check the tx type and dma channel for fw cmd queue */ 1063 if ((txch == RTW89_TXCH_CH12 || 1064 tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) && 1065 (txch != RTW89_TXCH_CH12 || 1066 tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) { 1067 rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n"); 1068 return -EINVAL; 1069 } 1070 1071 tx_ring = &rtwpci->tx_rings[txch]; 1072 spin_lock_bh(&rtwpci->trx_lock); 1073 1074 n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring); 1075 if (n_avail_txbd == 0) { 1076 rtw89_err(rtwdev, "no available TXBD\n"); 1077 ret = -ENOSPC; 1078 goto err_unlock; 1079 } 1080 1081 txbd = rtw89_pci_get_next_txbd(tx_ring); 1082 ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req); 1083 if (ret) { 1084 rtw89_err(rtwdev, "failed to submit TXBD\n"); 1085 goto err_unlock; 1086 } 1087 1088 spin_unlock_bh(&rtwpci->trx_lock); 1089 return 0; 1090 1091 err_unlock: 1092 spin_unlock_bh(&rtwpci->trx_lock); 1093 return ret; 1094 } 1095 1096 static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) 1097 { 1098 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 1099 int ret; 1100 1101 ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma); 1102 if (ret) { 1103 rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma); 1104 return ret; 1105 } 1106 1107 return 0; 1108 } 1109 1110 static const struct rtw89_pci_bd_ram bd_ram_table[RTW89_TXCH_NUM] = { 1111 [RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2}, 1112 [RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2}, 1113 [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2}, 1114 [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2}, 1115 [RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2}, 1116 [RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2}, 1117 [RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2}, 1118 [RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2}, 1119 [RTW89_TXCH_CH8] = {.start_idx = 40, .max_num = 5, .min_num = 1}, 1120 [RTW89_TXCH_CH9] = {.start_idx = 45, .max_num = 5, .min_num = 1}, 1121 [RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1}, 1122 [RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1}, 1123 [RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1}, 1124 }; 1125 
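/* Editorial note on bd_ram_table above: each TX channel appears to get a
 * window of the shared TX BDRAM starting at start_idx = 5 * channel and at
 * most max_num entries long (5 per channel, 4 for the fwcmd channel CH12),
 * so the windows tile entries 0..63 without overlapping; min_num is
 * presumably the number of entries guaranteed to the channel (2 for the AC
 * channels, 1 for CH8-CH12).
 */
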
1126 static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev) 1127 { 1128 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1129 struct rtw89_pci_tx_ring *tx_ring; 1130 struct rtw89_pci_rx_ring *rx_ring; 1131 struct rtw89_pci_dma_ring *bd_ring; 1132 const struct rtw89_pci_bd_ram *bd_ram; 1133 u32 addr_num; 1134 u32 addr_bdram; 1135 u32 addr_desa_l; 1136 u32 val32; 1137 int i; 1138 1139 for (i = 0; i < RTW89_TXCH_NUM; i++) { 1140 tx_ring = &rtwpci->tx_rings[i]; 1141 bd_ring = &tx_ring->bd_ring; 1142 bd_ram = &bd_ram_table[i]; 1143 addr_num = bd_ring->addr_num; 1144 addr_bdram = bd_ring->addr_bdram; 1145 addr_desa_l = bd_ring->addr_desa_l; 1146 bd_ring->wp = 0; 1147 bd_ring->rp = 0; 1148 1149 val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) | 1150 FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) | 1151 FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num); 1152 1153 rtw89_write16(rtwdev, addr_num, bd_ring->len); 1154 rtw89_write32(rtwdev, addr_bdram, val32); 1155 rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); 1156 } 1157 1158 for (i = 0; i < RTW89_RXCH_NUM; i++) { 1159 rx_ring = &rtwpci->rx_rings[i]; 1160 bd_ring = &rx_ring->bd_ring; 1161 addr_num = bd_ring->addr_num; 1162 addr_desa_l = bd_ring->addr_desa_l; 1163 bd_ring->wp = 0; 1164 bd_ring->rp = 0; 1165 rx_ring->diliver_skb = NULL; 1166 rx_ring->diliver_desc.ready = false; 1167 1168 rtw89_write16(rtwdev, addr_num, bd_ring->len); 1169 rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); 1170 } 1171 } 1172 1173 static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev, 1174 struct rtw89_pci_tx_ring *tx_ring) 1175 { 1176 rtw89_pci_release_busy_txwd(rtwdev, tx_ring); 1177 rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring); 1178 } 1179 1180 static void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev) 1181 { 1182 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1183 int txch; 1184 1185 rtw89_pci_reset_trx_rings(rtwdev); 1186 1187 spin_lock_bh(&rtwpci->trx_lock); 1188 for (txch = 0; txch < RTW89_TXCH_NUM; txch++) { 1189 if (txch == RTW89_TXCH_CH12) { 1190 rtw89_pci_release_fwcmd(rtwdev, rtwpci, 1191 skb_queue_len(&rtwpci->h2c_queue), true); 1192 continue; 1193 } 1194 rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]); 1195 } 1196 spin_unlock_bh(&rtwpci->trx_lock); 1197 } 1198 1199 static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev) 1200 { 1201 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1202 unsigned long flags; 1203 1204 rtw89_core_napi_start(rtwdev); 1205 1206 spin_lock_irqsave(&rtwpci->irq_lock, flags); 1207 rtwpci->running = true; 1208 rtw89_pci_enable_intr(rtwdev, rtwpci); 1209 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 1210 1211 return 0; 1212 } 1213 1214 static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev) 1215 { 1216 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1217 struct pci_dev *pdev = rtwpci->pdev; 1218 unsigned long flags; 1219 1220 spin_lock_irqsave(&rtwpci->irq_lock, flags); 1221 rtwpci->running = false; 1222 rtw89_pci_disable_intr(rtwdev, rtwpci); 1223 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 1224 1225 synchronize_irq(pdev->irq); 1226 rtw89_core_napi_stop(rtwdev); 1227 } 1228 1229 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data); 1230 1231 static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr) 1232 { 1233 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1234 u32 val = readl(rtwpci->mmap + addr); 1235 int count; 1236 1237 for (count = 0; ; count++) { 1238 if (val != RTW89_R32_DEAD) 
1239 return val; 1240 if (count >= MAC_REG_POOL_COUNT) { 1241 rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val); 1242 return RTW89_R32_DEAD; 1243 } 1244 rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN); 1245 val = readl(rtwpci->mmap + addr); 1246 } 1247 1248 return val; 1249 } 1250 1251 static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr) 1252 { 1253 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1254 u32 addr32, val32, shift; 1255 1256 if (!ACCESS_CMAC(addr)) 1257 return readb(rtwpci->mmap + addr); 1258 1259 addr32 = addr & ~0x3; 1260 shift = (addr & 0x3) * 8; 1261 val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32); 1262 return val32 >> shift; 1263 } 1264 1265 static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr) 1266 { 1267 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1268 u32 addr32, val32, shift; 1269 1270 if (!ACCESS_CMAC(addr)) 1271 return readw(rtwpci->mmap + addr); 1272 1273 addr32 = addr & ~0x3; 1274 shift = (addr & 0x3) * 8; 1275 val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32); 1276 return val32 >> shift; 1277 } 1278 1279 static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr) 1280 { 1281 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1282 1283 if (!ACCESS_CMAC(addr)) 1284 return readl(rtwpci->mmap + addr); 1285 1286 return rtw89_pci_ops_read32_cmac(rtwdev, addr); 1287 } 1288 1289 static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data) 1290 { 1291 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1292 1293 writeb(data, rtwpci->mmap + addr); 1294 } 1295 1296 static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data) 1297 { 1298 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1299 1300 writew(data, rtwpci->mmap + addr); 1301 } 1302 1303 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data) 1304 { 1305 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1306 1307 writel(data, rtwpci->mmap + addr); 1308 } 1309 1310 static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable) 1311 { 1312 if (enable) { 1313 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 1314 B_AX_TXHCI_EN | B_AX_RXHCI_EN); 1315 rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, 1316 B_AX_STOP_PCIEIO); 1317 } else { 1318 rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1, 1319 B_AX_STOP_PCIEIO); 1320 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, 1321 B_AX_TXHCI_EN | B_AX_RXHCI_EN); 1322 } 1323 } 1324 1325 static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit) 1326 { 1327 u16 val; 1328 1329 rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F); 1330 1331 val = rtw89_read16(rtwdev, R_AX_MDIO_CFG); 1332 switch (speed) { 1333 case PCIE_PHY_GEN1: 1334 if (addr < 0x20) 1335 val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK); 1336 else 1337 val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK); 1338 break; 1339 case PCIE_PHY_GEN2: 1340 if (addr < 0x20) 1341 val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK); 1342 else 1343 val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK); 1344 break; 1345 default: 1346 rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed); 1347 return -EINVAL; 1348 } 1349 rtw89_write16(rtwdev, R_AX_MDIO_CFG, val); 1350 rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit); 1351 1352 return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000, 1353 false, rtwdev, R_AX_MDIO_CFG); 1354 } 1355 1356 static int 1357 
rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val) 1358 { 1359 int ret; 1360 1361 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG); 1362 if (ret) { 1363 rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret); 1364 return ret; 1365 } 1366 *val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA); 1367 1368 return 0; 1369 } 1370 1371 static int 1372 rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed) 1373 { 1374 int ret; 1375 1376 rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data); 1377 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG); 1378 if (ret) { 1379 rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret); 1380 return ret; 1381 } 1382 1383 return 0; 1384 } 1385 1386 static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) 1387 { 1388 int ret; 1389 u16 val; 1390 1391 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 1392 if (ret) 1393 return ret; 1394 ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed); 1395 if (ret) 1396 return ret; 1397 1398 return 0; 1399 } 1400 1401 static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) 1402 { 1403 int ret; 1404 u16 val; 1405 1406 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 1407 if (ret) 1408 return ret; 1409 ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed); 1410 if (ret) 1411 return ret; 1412 1413 return 0; 1414 } 1415 1416 static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data) 1417 { 1418 u16 write_addr; 1419 u16 remainder = addr & ~(B_AX_DBI_ADDR_MSK | B_AX_DBI_WREN_MSK); 1420 u8 flag; 1421 int ret; 1422 1423 write_addr = addr & B_AX_DBI_ADDR_MSK; 1424 write_addr |= u16_encode_bits(BIT(remainder), B_AX_DBI_WREN_MSK); 1425 rtw89_write8(rtwdev, R_AX_DBI_WDATA + remainder, data); 1426 rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr); 1427 rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16); 1428 1429 ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10, 1430 10 * RTW89_PCI_WR_RETRY_CNT, false, 1431 rtwdev, R_AX_DBI_FLAG + 2); 1432 if (ret) 1433 WARN(flag, "failed to write to DBI register, addr=0x%04x\n", 1434 addr); 1435 1436 return ret; 1437 } 1438 1439 static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value) 1440 { 1441 u16 read_addr = addr & B_AX_DBI_ADDR_MSK; 1442 u8 flag; 1443 int ret; 1444 1445 rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr); 1446 rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16); 1447 1448 ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10, 1449 10 * RTW89_PCI_WR_RETRY_CNT, false, 1450 rtwdev, R_AX_DBI_FLAG + 2); 1451 1452 if (!ret) { 1453 read_addr = R_AX_DBI_RDATA + (addr & 3); 1454 *value = rtw89_read8(rtwdev, read_addr); 1455 } else { 1456 WARN(1, "failed to read DBI register, addr=0x%04x\n", addr); 1457 ret = -EIO; 1458 } 1459 1460 return ret; 1461 } 1462 1463 static int rtw89_dbi_write8_set(struct rtw89_dev *rtwdev, u16 addr, u8 bit) 1464 { 1465 u8 value; 1466 int ret; 1467 1468 ret = rtw89_dbi_read8(rtwdev, addr, &value); 1469 if (ret) 1470 return ret; 1471 1472 value |= bit; 1473 ret = rtw89_dbi_write8(rtwdev, addr, value); 1474 1475 return ret; 1476 } 1477 1478 static int rtw89_dbi_write8_clr(struct rtw89_dev *rtwdev, u16 addr, u8 bit) 1479 { 1480 u8 value; 1481 int ret; 1482 1483 ret = rtw89_dbi_read8(rtwdev, addr, &value); 1484 if (ret) 1485 return ret; 1486 1487 value &= ~bit; 1488 ret = rtw89_dbi_write8(rtwdev, addr, value); 1489 1490 return ret; 1491 } 1492 1493 
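/* Editorial note on __get_target() below: it toggles B_AX_CLK_CALIB_EN in
 * RAC_CTRL_PPR_V1 to restart the PHY clock calibration counter, waits
 * 300us while the counter runs, then reads back the 12-bit result; values
 * of 0 and 0xFFF are rejected, presumably because they mean the counter
 * never ran or saturated.
 */
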
static int 1494 __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate) 1495 { 1496 u16 val, tar; 1497 int ret; 1498 1499 /* Enable counter */ 1500 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val); 1501 if (ret) 1502 return ret; 1503 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, 1504 phy_rate); 1505 if (ret) 1506 return ret; 1507 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN, 1508 phy_rate); 1509 if (ret) 1510 return ret; 1511 1512 fsleep(300); 1513 1514 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar); 1515 if (ret) 1516 return ret; 1517 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, 1518 phy_rate); 1519 if (ret) 1520 return ret; 1521 1522 tar = tar & 0x0FFF; 1523 if (tar == 0 || tar == 0x0FFF) { 1524 rtw89_err(rtwdev, "[ERR]Get target failed.\n"); 1525 return -EINVAL; 1526 } 1527 1528 *target = tar; 1529 1530 return 0; 1531 } 1532 1533 static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en) 1534 { 1535 enum rtw89_pcie_phy phy_rate; 1536 u16 val16, mgn_set, div_set, tar; 1537 u8 val8, bdr_ori; 1538 bool l1_flag = false; 1539 int ret = 0; 1540 1541 if ((rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV) || 1542 rtwdev->chip->chip_id == RTL8852C) 1543 return 0; 1544 1545 ret = rtw89_dbi_read8(rtwdev, RTW89_PCIE_PHY_RATE, &val8); 1546 if (ret) { 1547 rtw89_err(rtwdev, "[ERR]dbi_r8_pcie %X\n", RTW89_PCIE_PHY_RATE); 1548 return ret; 1549 } 1550 1551 if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) { 1552 phy_rate = PCIE_PHY_GEN1; 1553 } else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) { 1554 phy_rate = PCIE_PHY_GEN2; 1555 } else { 1556 rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not support\n", val8); 1557 return -EOPNOTSUPP; 1558 } 1559 /* Disable L1BD */ 1560 ret = rtw89_dbi_read8(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori); 1561 if (ret) { 1562 rtw89_err(rtwdev, "[ERR]dbi_r8_pcie %X\n", RTW89_PCIE_L1_CTRL); 1563 return ret; 1564 } 1565 1566 if (bdr_ori & RTW89_PCIE_BIT_L1) { 1567 ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_L1_CTRL, 1568 bdr_ori & ~RTW89_PCIE_BIT_L1); 1569 if (ret) { 1570 rtw89_err(rtwdev, "[ERR]dbi_w8_pcie %X\n", RTW89_PCIE_L1_CTRL); 1571 return ret; 1572 } 1573 l1_flag = true; 1574 } 1575 1576 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); 1577 if (ret) { 1578 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 1579 goto end; 1580 } 1581 1582 if (val16 & B_AX_CALIB_EN) { 1583 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, 1584 val16 & ~B_AX_CALIB_EN, phy_rate); 1585 if (ret) { 1586 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 1587 goto end; 1588 } 1589 } 1590 1591 if (!autook_en) 1592 goto end; 1593 /* Set div */ 1594 ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate); 1595 if (ret) { 1596 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 1597 goto end; 1598 } 1599 1600 /* Obtain div and margin */ 1601 ret = __get_target(rtwdev, &tar, phy_rate); 1602 if (ret) { 1603 rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret); 1604 goto end; 1605 } 1606 1607 mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar; 1608 1609 if (mgn_set >= 128) { 1610 div_set = 0x0003; 1611 mgn_set = 0x000F; 1612 } else if (mgn_set >= 64) { 1613 div_set = 0x0003; 1614 mgn_set >>= 3; 1615 } else if (mgn_set >= 32) { 1616 div_set = 0x0002; 1617 mgn_set >>= 2; 1618 } else if (mgn_set >= 16) { 1619 div_set = 0x0001; 
1620 mgn_set >>= 1; 1621 } else if (mgn_set == 0) { 1622 rtw89_err(rtwdev, "[ERR]cal mgn is 0,tar = %d\n", tar); 1623 goto end; 1624 } else { 1625 div_set = 0x0000; 1626 } 1627 1628 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); 1629 if (ret) { 1630 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 1631 goto end; 1632 } 1633 1634 val16 |= u16_encode_bits(div_set, B_AX_DIV); 1635 1636 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate); 1637 if (ret) { 1638 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 1639 goto end; 1640 } 1641 1642 ret = __get_target(rtwdev, &tar, phy_rate); 1643 if (ret) { 1644 rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret); 1645 goto end; 1646 } 1647 1648 rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n", 1649 tar, div_set, mgn_set); 1650 ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1, 1651 (tar & 0x0FFF) | (mgn_set << 12), phy_rate); 1652 if (ret) { 1653 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1); 1654 goto end; 1655 } 1656 1657 /* Enable function */ 1658 ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate); 1659 if (ret) { 1660 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 1661 goto end; 1662 } 1663 1664 /* CLK delay = 0 */ 1665 ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_CLK_CTRL, PCIE_CLKDLY_HW_0); 1666 1667 end: 1668 /* Set L1BD to ori */ 1669 if (l1_flag) { 1670 ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_L1_CTRL, bdr_ori); 1671 if (ret) { 1672 rtw89_err(rtwdev, "[ERR]dbi_w8_pcie %X\n", RTW89_PCIE_L1_CTRL); 1673 return ret; 1674 } 1675 } 1676 1677 return ret; 1678 } 1679 1680 static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev) 1681 { 1682 int ret; 1683 1684 if (rtwdev->chip->chip_id != RTL8852A) 1685 return 0; 1686 1687 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 1688 PCIE_PHY_GEN1); 1689 if (ret) 1690 return ret; 1691 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 1692 PCIE_PHY_GEN2); 1693 if (ret) 1694 return ret; 1695 1696 return 0; 1697 } 1698 1699 static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev) 1700 { 1701 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE); 1702 } 1703 1704 static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev) 1705 { 1706 if (rtwdev->chip->chip_id == RTL8852C) 1707 return; 1708 1709 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN); 1710 } 1711 1712 static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev) 1713 { 1714 int ret; 1715 1716 if (rtwdev->chip->chip_id == RTL8852C) 1717 return 0; 1718 1719 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 1720 PCIE_PHY_GEN1); 1721 if (ret) 1722 return ret; 1723 1724 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 1725 PCIE_PHY_GEN2); 1726 if (ret) 1727 return ret; 1728 1729 return 0; 1730 } 1731 1732 static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev) 1733 { 1734 if (rtwdev->chip->chip_id != RTL8852A) 1735 return; 1736 1737 rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN); 1738 } 1739 1740 static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev) 1741 { 1742 if (rtwdev->chip->chip_id != RTL8852A) 1743 return; 1744 1745 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 1746 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 1747 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 1748 B_AX_PCIE_DIS_WLSUS_AFT_PDN); 1749 } 1750 1751 static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev) 1752 { 1753 if 
(rtwdev->chip->chip_id == RTL8852C) 1754 return; 1755 1756 rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL, 1757 B_AX_SIC_EN_FORCE_CLKREQ); 1758 } 1759 1760 static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev) 1761 { 1762 if (rtwdev->chip->chip_id == RTL8852C) 1763 return; 1764 1765 rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL, 1766 B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG); 1767 1768 if (rtwdev->chip->chip_id == RTL8852A) 1769 rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL, 1770 B_AX_EN_CHKDSC_NO_RX_STUCK); 1771 } 1772 1773 static void rtw89_pci_clr_idx_all(struct rtw89_dev *rtwdev) 1774 { 1775 u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX | 1776 B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX | 1777 B_AX_CLR_CH12_IDX; 1778 1779 if (rtwdev->chip->chip_id == RTL8852A) 1780 val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX | 1781 B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX; 1782 /* clear DMA indexes */ 1783 rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val); 1784 if (rtwdev->chip->chip_id == RTL8852A) 1785 rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR2, 1786 B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX); 1787 rtw89_write32_set(rtwdev, R_AX_RXBD_RWPTR_CLR, 1788 B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX); 1789 } 1790 1791 static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev) 1792 { 1793 if (rtwdev->chip->chip_id == RTL8852A) { 1794 /* ltr sw trigger */ 1795 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE); 1796 } 1797 rtw89_pci_ctrl_dma_all(rtwdev, false); 1798 rtw89_pci_clr_idx_all(rtwdev); 1799 1800 return 0; 1801 } 1802 1803 static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev) 1804 { 1805 u32 dma_busy; 1806 u32 check; 1807 u32 lbc; 1808 int ret; 1809 1810 rtw89_pci_rxdma_prefth(rtwdev); 1811 rtw89_pci_l1off_pwroff(rtwdev); 1812 rtw89_pci_deglitch_setting(rtwdev); 1813 ret = rtw89_pci_l2_rxen_lat(rtwdev); 1814 if (ret) { 1815 rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret); 1816 return ret; 1817 } 1818 1819 rtw89_pci_aphy_pwrcut(rtwdev); 1820 rtw89_pci_hci_ldo(rtwdev); 1821 1822 ret = rtw89_pci_auto_refclk_cal(rtwdev, false); 1823 if (ret) { 1824 rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret); 1825 return ret; 1826 } 1827 1828 rtw89_pci_set_sic(rtwdev); 1829 rtw89_pci_set_dbg(rtwdev); 1830 1831 if (rtwdev->chip->chip_id == RTL8852A) 1832 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 1833 B_AX_PCIE_AUXCLK_GATE); 1834 1835 lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG); 1836 lbc = u32_replace_bits(lbc, RTW89_MAC_LBC_TMR_128US, B_AX_LBC_TIMER); 1837 lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN; 1838 rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc); 1839 1840 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 1841 B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG); 1842 rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_STOP_WPDMA); 1843 1844 /* stop DMA activities */ 1845 rtw89_pci_ctrl_dma_all(rtwdev, false); 1846 1847 /* check PCI at idle state */ 1848 check = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY; 1849 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 1850 100, 3000, false, rtwdev, R_AX_PCIE_DMA_BUSY1); 1851 if (ret) { 1852 rtw89_err(rtwdev, "failed to poll io busy\n"); 1853 return ret; 1854 } 1855 1856 rtw89_pci_clr_idx_all(rtwdev); 1857 1858 /* configure TX/RX op modes */ 1859 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE | 1860 B_AX_RX_TRUNC_MODE); 1861 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RXBD_MODE); 1862 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, 
B_AX_PCIE_MAX_TXDMA_MASK, 7); 1863 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, 3); 1864 /* multi-tag mode */ 1865 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_LATENCY_CONTROL); 1866 rtw89_write32_mask(rtwdev, R_AX_PCIE_EXP_CTRL, B_AX_MAX_TAG_NUM, 1867 RTW89_MAC_TAG_NUM_8); 1868 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE, 1869 RTW89_MAC_WD_DMA_INTVL_256NS); 1870 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT, 1871 RTW89_MAC_WD_DMA_INTVL_256NS); 1872 1873 /* fill TRX BD indexes */ 1874 rtw89_pci_ops_reset(rtwdev); 1875 1876 ret = rtw89_pci_rst_bdram_pcie(rtwdev); 1877 if (ret) { 1878 rtw89_warn(rtwdev, "reset bdram busy\n"); 1879 return ret; 1880 } 1881 1882 /* enable FW CMD queue to download firmware */ 1883 rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_ALL); 1884 rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_STOP_CH12); 1885 rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP2, B_AX_TX_STOP2_ALL); 1886 1887 /* start DMA activities */ 1888 rtw89_pci_ctrl_dma_all(rtwdev, true); 1889 1890 return 0; 1891 } 1892 1893 static int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev) 1894 { 1895 u32 val; 1896 1897 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 1898 if (rtw89_pci_ltr_is_err_reg_val(val)) 1899 return -EINVAL; 1900 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 1901 if (rtw89_pci_ltr_is_err_reg_val(val)) 1902 return -EINVAL; 1903 val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY); 1904 if (rtw89_pci_ltr_is_err_reg_val(val)) 1905 return -EINVAL; 1906 val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY); 1907 if (rtw89_pci_ltr_is_err_reg_val(val)) 1908 return -EINVAL; 1909 1910 rtw89_write32_clr(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN); 1911 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_EN); 1912 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK, 1913 PCI_LTR_SPC_500US); 1914 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 1915 PCI_LTR_IDLE_TIMER_800US); 1916 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 1917 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 1918 rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x88e088e0); 1919 rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b); 1920 1921 return 0; 1922 } 1923 1924 static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev) 1925 { 1926 int ret; 1927 1928 ret = rtw89_pci_ltr_set(rtwdev); 1929 if (ret) { 1930 rtw89_err(rtwdev, "pci ltr set fail\n"); 1931 return ret; 1932 } 1933 if (rtwdev->chip->chip_id == RTL8852A) { 1934 /* ltr sw trigger */ 1935 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT); 1936 } 1937 /* ADDR info 8-byte mode */ 1938 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 1939 B_AX_HOST_ADDR_INFO_8B_SEL); 1940 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 1941 1942 /* enable DMA for all queues */ 1943 rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_ALL); 1944 rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP2, B_AX_TX_STOP2_ALL); 1945 1946 /* Release PCI IO */ 1947 rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, 1948 B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO); 1949 1950 return 0; 1951 } 1952 1953 static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev, 1954 struct pci_dev *pdev) 1955 { 1956 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1957 int ret; 1958 1959 ret = pci_enable_device(pdev); 1960 if (ret) { 1961 rtw89_err(rtwdev, "failed to enable pci device\n"); 1962 
return ret; 1963 } 1964 1965 pci_set_master(pdev); 1966 pci_set_drvdata(pdev, rtwdev->hw); 1967 1968 rtwpci->pdev = pdev; 1969 1970 return 0; 1971 } 1972 1973 static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev, 1974 struct pci_dev *pdev) 1975 { 1976 pci_clear_master(pdev); 1977 pci_disable_device(pdev); 1978 } 1979 1980 static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, 1981 struct pci_dev *pdev) 1982 { 1983 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1984 unsigned long resource_len; 1985 u8 bar_id = 2; 1986 int ret; 1987 1988 ret = pci_request_regions(pdev, KBUILD_MODNAME); 1989 if (ret) { 1990 rtw89_err(rtwdev, "failed to request pci regions\n"); 1991 goto err; 1992 } 1993 1994 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 1995 if (ret) { 1996 rtw89_err(rtwdev, "failed to set dma mask to 32-bit\n"); 1997 goto err_release_regions; 1998 } 1999 2000 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 2001 if (ret) { 2002 rtw89_err(rtwdev, "failed to set consistent dma mask to 32-bit\n"); 2003 goto err_release_regions; 2004 } 2005 2006 resource_len = pci_resource_len(pdev, bar_id); 2007 rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len); 2008 if (!rtwpci->mmap) { 2009 rtw89_err(rtwdev, "failed to map pci io\n"); 2010 ret = -EIO; 2011 goto err_release_regions; 2012 } 2013 2014 return 0; 2015 2016 err_release_regions: 2017 pci_release_regions(pdev); 2018 err: 2019 return ret; 2020 } 2021 2022 static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev, 2023 struct pci_dev *pdev) 2024 { 2025 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2026 2027 if (rtwpci->mmap) { 2028 pci_iounmap(pdev, rtwpci->mmap); 2029 pci_release_regions(pdev); 2030 } 2031 } 2032 2033 static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev, 2034 struct pci_dev *pdev, 2035 struct rtw89_pci_tx_ring *tx_ring) 2036 { 2037 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 2038 u8 *head = wd_ring->head; 2039 dma_addr_t dma = wd_ring->dma; 2040 u32 page_size = wd_ring->page_size; 2041 u32 page_num = wd_ring->page_num; 2042 u32 ring_sz = page_size * page_num; 2043 2044 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2045 wd_ring->head = NULL; 2046 } 2047 2048 static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev, 2049 struct pci_dev *pdev, 2050 struct rtw89_pci_tx_ring *tx_ring) 2051 { 2052 int ring_sz; 2053 u8 *head; 2054 dma_addr_t dma; 2055 2056 head = tx_ring->bd_ring.head; 2057 dma = tx_ring->bd_ring.dma; 2058 ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len; 2059 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2060 2061 tx_ring->bd_ring.head = NULL; 2062 } 2063 2064 static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev, 2065 struct pci_dev *pdev) 2066 { 2067 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2068 struct rtw89_pci_tx_ring *tx_ring; 2069 int i; 2070 2071 for (i = 0; i < RTW89_TXCH_NUM; i++) { 2072 tx_ring = &rtwpci->tx_rings[i]; 2073 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 2074 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 2075 } 2076 } 2077 2078 static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev, 2079 struct pci_dev *pdev, 2080 struct rtw89_pci_rx_ring *rx_ring) 2081 { 2082 struct rtw89_pci_rx_info *rx_info; 2083 struct sk_buff *skb; 2084 dma_addr_t dma; 2085 u32 buf_sz; 2086 u8 *head; 2087 int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len; 2088 int i; 2089 2090 buf_sz = rx_ring->buf_sz; 2091 for (i = 0; i < rx_ring->bd_ring.len; i++) { 2092 skb = 
rx_ring->buf[i]; 2093 if (!skb) 2094 continue; 2095 2096 rx_info = RTW89_PCI_RX_SKB_CB(skb); 2097 dma = rx_info->dma; 2098 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 2099 dev_kfree_skb(skb); 2100 rx_ring->buf[i] = NULL; 2101 } 2102 2103 head = rx_ring->bd_ring.head; 2104 dma = rx_ring->bd_ring.dma; 2105 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2106 2107 rx_ring->bd_ring.head = NULL; 2108 } 2109 2110 static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev, 2111 struct pci_dev *pdev) 2112 { 2113 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2114 struct rtw89_pci_rx_ring *rx_ring; 2115 int i; 2116 2117 for (i = 0; i < RTW89_RXCH_NUM; i++) { 2118 rx_ring = &rtwpci->rx_rings[i]; 2119 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 2120 } 2121 } 2122 2123 static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev, 2124 struct pci_dev *pdev) 2125 { 2126 rtw89_pci_free_rx_rings(rtwdev, pdev); 2127 rtw89_pci_free_tx_rings(rtwdev, pdev); 2128 } 2129 2130 static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev, 2131 struct rtw89_pci_rx_ring *rx_ring, 2132 struct sk_buff *skb, int buf_sz, u32 idx) 2133 { 2134 struct rtw89_pci_rx_info *rx_info; 2135 struct rtw89_pci_rx_bd_32 *rx_bd; 2136 dma_addr_t dma; 2137 2138 if (!skb) 2139 return -EINVAL; 2140 2141 dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE); 2142 if (dma_mapping_error(&pdev->dev, dma)) 2143 return -EBUSY; 2144 2145 rx_info = RTW89_PCI_RX_SKB_CB(skb); 2146 rx_bd = RTW89_PCI_RX_BD(rx_ring, idx); 2147 2148 memset(rx_bd, 0, sizeof(*rx_bd)); 2149 rx_bd->buf_size = cpu_to_le16(buf_sz); 2150 rx_bd->dma = cpu_to_le32(dma); 2151 rx_info->dma = dma; 2152 2153 return 0; 2154 } 2155 2156 static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev, 2157 struct pci_dev *pdev, 2158 struct rtw89_pci_tx_ring *tx_ring, 2159 enum rtw89_tx_channel txch) 2160 { 2161 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 2162 struct rtw89_pci_tx_wd *txwd; 2163 dma_addr_t dma; 2164 dma_addr_t cur_paddr; 2165 u8 *head; 2166 u8 *cur_vaddr; 2167 u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE; 2168 u32 page_num = RTW89_PCI_TXWD_NUM_MAX; 2169 u32 ring_sz = page_size * page_num; 2170 u32 page_offset; 2171 int i; 2172 2173 /* FWCMD queue doesn't use txwd as pages */ 2174 if (txch == RTW89_TXCH_CH12) 2175 return 0; 2176 2177 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 2178 if (!head) 2179 return -ENOMEM; 2180 2181 INIT_LIST_HEAD(&wd_ring->free_pages); 2182 wd_ring->head = head; 2183 wd_ring->dma = dma; 2184 wd_ring->page_size = page_size; 2185 wd_ring->page_num = page_num; 2186 2187 page_offset = 0; 2188 for (i = 0; i < page_num; i++) { 2189 txwd = &wd_ring->pages[i]; 2190 cur_paddr = dma + page_offset; 2191 cur_vaddr = head + page_offset; 2192 2193 skb_queue_head_init(&txwd->queue); 2194 INIT_LIST_HEAD(&txwd->list); 2195 txwd->paddr = cur_paddr; 2196 txwd->vaddr = cur_vaddr; 2197 txwd->len = page_size; 2198 txwd->seq = i; 2199 rtw89_pci_enqueue_txwd(tx_ring, txwd); 2200 2201 page_offset += page_size; 2202 } 2203 2204 return 0; 2205 } 2206 2207 static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev, 2208 struct pci_dev *pdev, 2209 struct rtw89_pci_tx_ring *tx_ring, 2210 u32 desc_size, u32 len, 2211 enum rtw89_tx_channel txch) 2212 { 2213 int ring_sz = desc_size * len; 2214 u8 *head; 2215 dma_addr_t dma; 2216 u32 addr_num; 2217 u32 addr_idx; 2218 u32 addr_bdram; 2219 u32 addr_desa_l; 2220 u32 addr_desa_h; 2221 int ret; 2222 2223 ret = 
rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch); 2224 if (ret) { 2225 rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch); 2226 goto err; 2227 } 2228 2229 ret = rtw89_pci_get_txch_addrs(txch, &addr_num, &addr_idx, &addr_bdram, 2230 &addr_desa_l, &addr_desa_h); 2231 if (ret) { 2232 rtw89_err(rtwdev, "failed to get address of txch %d", txch); 2233 goto err_free_wd_ring; 2234 } 2235 2236 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 2237 if (!head) { 2238 ret = -ENOMEM; 2239 goto err_free_wd_ring; 2240 } 2241 2242 INIT_LIST_HEAD(&tx_ring->busy_pages); 2243 tx_ring->bd_ring.head = head; 2244 tx_ring->bd_ring.dma = dma; 2245 tx_ring->bd_ring.len = len; 2246 tx_ring->bd_ring.desc_size = desc_size; 2247 tx_ring->bd_ring.addr_num = addr_num; 2248 tx_ring->bd_ring.addr_idx = addr_idx; 2249 tx_ring->bd_ring.addr_bdram = addr_bdram; 2250 tx_ring->bd_ring.addr_desa_l = addr_desa_l; 2251 tx_ring->bd_ring.addr_desa_h = addr_desa_h; 2252 tx_ring->bd_ring.wp = 0; 2253 tx_ring->bd_ring.rp = 0; 2254 tx_ring->txch = txch; 2255 2256 return 0; 2257 2258 err_free_wd_ring: 2259 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 2260 err: 2261 return ret; 2262 } 2263 2264 static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev, 2265 struct pci_dev *pdev) 2266 { 2267 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2268 struct rtw89_pci_tx_ring *tx_ring; 2269 u32 desc_size; 2270 u32 len; 2271 u32 i, tx_allocated; 2272 int ret; 2273 2274 for (i = 0; i < RTW89_TXCH_NUM; i++) { 2275 tx_ring = &rtwpci->tx_rings[i]; 2276 desc_size = sizeof(struct rtw89_pci_tx_bd_32); 2277 len = RTW89_PCI_TXBD_NUM_MAX; 2278 ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring, 2279 desc_size, len, i); 2280 if (ret) { 2281 rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i); 2282 goto err_free; 2283 } 2284 } 2285 2286 return 0; 2287 2288 err_free: 2289 tx_allocated = i; 2290 for (i = 0; i < tx_allocated; i++) { 2291 tx_ring = &rtwpci->tx_rings[i]; 2292 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 2293 } 2294 2295 return ret; 2296 } 2297 2298 static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev, 2299 struct pci_dev *pdev, 2300 struct rtw89_pci_rx_ring *rx_ring, 2301 u32 desc_size, u32 len, u32 rxch) 2302 { 2303 struct sk_buff *skb; 2304 u8 *head; 2305 dma_addr_t dma; 2306 u32 addr_num; 2307 u32 addr_idx; 2308 u32 addr_desa_l; 2309 u32 addr_desa_h; 2310 int ring_sz = desc_size * len; 2311 int buf_sz = RTW89_PCI_RX_BUF_SIZE; 2312 int i, allocated; 2313 int ret; 2314 2315 ret = rtw89_pci_get_rxch_addrs(rxch, &addr_num, &addr_idx, 2316 &addr_desa_l, &addr_desa_h); 2317 if (ret) { 2318 rtw89_err(rtwdev, "failed to get address of rxch %d", rxch); 2319 return ret; 2320 } 2321 2322 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 2323 if (!head) { 2324 ret = -ENOMEM; 2325 goto err; 2326 } 2327 2328 rx_ring->bd_ring.head = head; 2329 rx_ring->bd_ring.dma = dma; 2330 rx_ring->bd_ring.len = len; 2331 rx_ring->bd_ring.desc_size = desc_size; 2332 rx_ring->bd_ring.addr_num = addr_num; 2333 rx_ring->bd_ring.addr_idx = addr_idx; 2334 rx_ring->bd_ring.addr_desa_l = addr_desa_l; 2335 rx_ring->bd_ring.addr_desa_h = addr_desa_h; 2336 rx_ring->bd_ring.wp = 0; 2337 rx_ring->bd_ring.rp = 0; 2338 rx_ring->buf_sz = buf_sz; 2339 rx_ring->diliver_skb = NULL; 2340 rx_ring->diliver_desc.ready = false; 2341 2342 for (i = 0; i < len; i++) { 2343 skb = dev_alloc_skb(buf_sz); 2344 if (!skb) { 2345 ret = -ENOMEM; 2346 goto err_free; 2347 } 2348 2349 memset(skb->data, 0, buf_sz); 2350 
rx_ring->buf[i] = skb; 2351 ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb, 2352 buf_sz, i); 2353 if (ret) { 2354 rtw89_err(rtwdev, "failed to init rx buf %d\n", i); 2355 dev_kfree_skb_any(skb); 2356 rx_ring->buf[i] = NULL; 2357 goto err_free; 2358 } 2359 } 2360 2361 return 0; 2362 2363 err_free: 2364 allocated = i; 2365 for (i = 0; i < allocated; i++) { 2366 skb = rx_ring->buf[i]; 2367 if (!skb) 2368 continue; 2369 dma = *((dma_addr_t *)skb->cb); 2370 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 2371 dev_kfree_skb(skb); 2372 rx_ring->buf[i] = NULL; 2373 } 2374 2375 head = rx_ring->bd_ring.head; 2376 dma = rx_ring->bd_ring.dma; 2377 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2378 2379 rx_ring->bd_ring.head = NULL; 2380 err: 2381 return ret; 2382 } 2383 2384 static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev, 2385 struct pci_dev *pdev) 2386 { 2387 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2388 struct rtw89_pci_rx_ring *rx_ring; 2389 u32 desc_size; 2390 u32 len; 2391 int i, rx_allocated; 2392 int ret; 2393 2394 for (i = 0; i < RTW89_RXCH_NUM; i++) { 2395 rx_ring = &rtwpci->rx_rings[i]; 2396 desc_size = sizeof(struct rtw89_pci_rx_bd_32); 2397 len = RTW89_PCI_RXBD_NUM_MAX; 2398 ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring, 2399 desc_size, len, i); 2400 if (ret) { 2401 rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i); 2402 goto err_free; 2403 } 2404 } 2405 2406 return 0; 2407 2408 err_free: 2409 rx_allocated = i; 2410 for (i = 0; i < rx_allocated; i++) { 2411 rx_ring = &rtwpci->rx_rings[i]; 2412 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 2413 } 2414 2415 return ret; 2416 } 2417 2418 static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev, 2419 struct pci_dev *pdev) 2420 { 2421 int ret; 2422 2423 ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev); 2424 if (ret) { 2425 rtw89_err(rtwdev, "failed to alloc dma tx rings\n"); 2426 goto err; 2427 } 2428 2429 ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev); 2430 if (ret) { 2431 rtw89_err(rtwdev, "failed to alloc dma rx rings\n"); 2432 goto err_free_tx_rings; 2433 } 2434 2435 return 0; 2436 2437 err_free_tx_rings: 2438 rtw89_pci_free_tx_rings(rtwdev, pdev); 2439 err: 2440 return ret; 2441 } 2442 2443 static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev, 2444 struct rtw89_pci *rtwpci) 2445 { 2446 skb_queue_head_init(&rtwpci->h2c_queue); 2447 skb_queue_head_init(&rtwpci->h2c_release_queue); 2448 } 2449 2450 static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev, 2451 struct pci_dev *pdev) 2452 { 2453 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2454 int ret; 2455 2456 ret = rtw89_pci_setup_mapping(rtwdev, pdev); 2457 if (ret) { 2458 rtw89_err(rtwdev, "failed to setup pci mapping\n"); 2459 goto err; 2460 } 2461 2462 ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev); 2463 if (ret) { 2464 rtw89_err(rtwdev, "failed to alloc pci trx rings\n"); 2465 goto err_pci_unmap; 2466 } 2467 2468 rtw89_pci_h2c_init(rtwdev, rtwpci); 2469 2470 spin_lock_init(&rtwpci->irq_lock); 2471 spin_lock_init(&rtwpci->trx_lock); 2472 2473 return 0; 2474 2475 err_pci_unmap: 2476 rtw89_pci_clear_mapping(rtwdev, pdev); 2477 err: 2478 return ret; 2479 } 2480 2481 static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev, 2482 struct pci_dev *pdev) 2483 { 2484 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2485 2486 rtw89_pci_free_trx_rings(rtwdev, pdev); 2487 rtw89_pci_clear_mapping(rtwdev, pdev); 2488 rtw89_pci_release_fwcmd(rtwdev, rtwpci, 2489 skb_queue_len(&rtwpci->h2c_queue), 
true); 2490 } 2491 2492 static void rtw89_pci_default_intr_mask(struct rtw89_dev *rtwdev) 2493 { 2494 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2495 2496 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0; 2497 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 2498 B_AX_RXDMA_INT_EN | 2499 B_AX_RXP1DMA_INT_EN | 2500 B_AX_RPQDMA_INT_EN | 2501 B_AX_RXDMA_STUCK_INT_EN | 2502 B_AX_RDU_INT_EN | 2503 B_AX_RPQBD_FULL_INT_EN | 2504 B_AX_HS0ISR_IND_INT_EN; 2505 2506 rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN; 2507 } 2508 2509 static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev, 2510 struct pci_dev *pdev) 2511 { 2512 unsigned long flags = 0; 2513 int ret; 2514 2515 flags |= PCI_IRQ_LEGACY | PCI_IRQ_MSI; 2516 ret = pci_alloc_irq_vectors(pdev, 1, 1, flags); 2517 if (ret < 0) { 2518 rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret); 2519 goto err; 2520 } 2521 2522 ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq, 2523 rtw89_pci_interrupt_handler, 2524 rtw89_pci_interrupt_threadfn, 2525 IRQF_SHARED, KBUILD_MODNAME, rtwdev); 2526 if (ret) { 2527 rtw89_err(rtwdev, "failed to request threaded irq\n"); 2528 goto err_free_vector; 2529 } 2530 2531 rtw89_pci_default_intr_mask(rtwdev); 2532 2533 return 0; 2534 2535 err_free_vector: 2536 pci_free_irq_vectors(pdev); 2537 err: 2538 return ret; 2539 } 2540 2541 static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev, 2542 struct pci_dev *pdev) 2543 { 2544 devm_free_irq(rtwdev->dev, pdev->irq, rtwdev); 2545 pci_free_irq_vectors(pdev); 2546 } 2547 2548 static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable) 2549 { 2550 int ret; 2551 2552 if (rtw89_pci_disable_clkreq) 2553 return; 2554 2555 ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_CLK_CTRL, 2556 PCIE_CLKDLY_HW_30US); 2557 if (ret) 2558 rtw89_err(rtwdev, "failed to set CLKREQ Delay\n"); 2559 2560 if (enable) 2561 ret = rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_L1_CTRL, 2562 RTW89_PCIE_BIT_CLK); 2563 else 2564 ret = rtw89_dbi_write8_clr(rtwdev, RTW89_PCIE_L1_CTRL, 2565 RTW89_PCIE_BIT_CLK); 2566 if (ret) 2567 rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d\n", 2568 enable ? "set" : "unset", ret); 2569 } 2570 2571 static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable) 2572 { 2573 u8 value = 0; 2574 int ret; 2575 2576 if (rtw89_pci_disable_aspm_l1) 2577 return; 2578 2579 ret = rtw89_dbi_read8(rtwdev, RTW89_PCIE_ASPM_CTRL, &value); 2580 if (ret) 2581 rtw89_err(rtwdev, "failed to read ASPM Delay\n"); 2582 2583 value &= ~(RTW89_L1DLY_MASK | RTW89_L0DLY_MASK); 2584 value |= FIELD_PREP(RTW89_L1DLY_MASK, PCIE_L1DLY_16US) | 2585 FIELD_PREP(RTW89_L0DLY_MASK, PCIE_L0SDLY_4US); 2586 2587 ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_ASPM_CTRL, value); 2588 if (ret) 2589 rtw89_err(rtwdev, "failed to set ASPM Delay\n"); 2590 2591 if (enable) 2592 ret = rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_L1_CTRL, 2593 RTW89_PCIE_BIT_L1); 2594 else 2595 ret = rtw89_dbi_write8_clr(rtwdev, RTW89_PCIE_L1_CTRL, 2596 RTW89_PCIE_BIT_L1); 2597 if (ret) 2598 rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d\n", 2599 enable ? "set" : "unset", ret); 2600 } 2601 2602 static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev) 2603 { 2604 struct rtw89_traffic_stats *stats = &rtwdev->stats; 2605 enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv; 2606 enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv; 2607 u32 val = 0; 2608 2609 if (!rtwdev->scanning && 2610 (tx_tfc_lv >= RTW89_TFC_HIGH || rx_tfc_lv >= RTW89_TFC_HIGH)) 2611 val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL | 2612 FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) | 2613 FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) | 2614 FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64); 2615 2616 rtw89_write32(rtwdev, R_AX_INT_MIT_RX, val); 2617 } 2618 2619 static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev) 2620 { 2621 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2622 struct pci_dev *pdev = rtwpci->pdev; 2623 u16 link_ctrl; 2624 int ret; 2625 2626 /* Although the standard PCIE configuration space provides the link 2627 * control register, by Realtek's design the driver should also check 2628 * whether the host supports CLKREQ/ASPM before enabling the HW module. 2629 * 2630 * These features are implemented by two associated HW modules: one is 2631 * responsible for accessing the PCIE configuration space to follow 2632 * the host settings, and the other is in charge of the CLKREQ/ASPM 2633 * mechanisms themselves. The latter is disabled by default, because 2634 * the host may not support it, and wrong settings (e.g. CLKREQ# not 2635 * bi-directional) could make the HW misbehave on the link and lead 2636 * to device loss. 2637 * 2638 * Hence the driver should first check that the PCIE configuration 2639 * space is synced and enabled, and only then turn on the other module 2640 * that actually works on the mechanism. 2641 */ 2642 ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl); 2643 if (ret) { 2644 rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret); 2645 return; 2646 } 2647 2648 if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN) 2649 rtw89_pci_clkreq_set(rtwdev, true); 2650 2651 if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1) 2652 rtw89_pci_aspm_set(rtwdev, true); 2653 } 2654 2655 static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable) 2656 { 2657 int ret; 2658 2659 if (enable) 2660 ret = rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_TIMER_CTRL, 2661 RTW89_PCIE_BIT_L1SUB); 2662 else 2663 ret = rtw89_dbi_write8_clr(rtwdev, RTW89_PCIE_TIMER_CTRL, 2664 RTW89_PCIE_BIT_L1SUB); 2665 if (ret) 2666 rtw89_err(rtwdev, "failed to %s L1SS, ret=%d\n", 2667 enable ?
"set" : "unset", ret); 2668 } 2669 2670 static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev) 2671 { 2672 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2673 struct pci_dev *pdev = rtwpci->pdev; 2674 u32 l1ss_cap_ptr, l1ss_ctrl; 2675 2676 if (rtw89_pci_disable_l1ss) 2677 return; 2678 2679 l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); 2680 if (!l1ss_cap_ptr) 2681 return; 2682 2683 pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl); 2684 2685 if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK) 2686 rtw89_pci_l1ss_set(rtwdev, true); 2687 } 2688 2689 static void rtw89_pci_ctrl_dma_all_pcie(struct rtw89_dev *rtwdev, u8 en) 2690 { 2691 u32 val32; 2692 2693 if (en == MAC_AX_FUNC_EN) { 2694 val32 = B_AX_STOP_PCIEIO; 2695 rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, val32); 2696 2697 val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN; 2698 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2699 } else { 2700 val32 = B_AX_STOP_PCIEIO; 2701 rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1, val32); 2702 2703 val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN; 2704 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2705 } 2706 } 2707 2708 static int rtw89_pci_poll_io_idle(struct rtw89_dev *rtwdev) 2709 { 2710 int ret = 0; 2711 u32 sts; 2712 u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY; 2713 2714 ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0, 2715 10, 1000, false, rtwdev, 2716 R_AX_PCIE_DMA_BUSY1); 2717 if (ret) { 2718 rtw89_err(rtwdev, "pci dmach busy1 0x%X\n", 2719 rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1)); 2720 return -EINVAL; 2721 } 2722 return ret; 2723 } 2724 2725 static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev) 2726 { 2727 u32 val, dma_rst = 0; 2728 int ret; 2729 2730 rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_DIS); 2731 ret = rtw89_pci_poll_io_idle(rtwdev); 2732 if (ret) { 2733 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 2734 rtw89_debug(rtwdev, RTW89_DBG_HCI, 2735 "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n", 2736 R_AX_DBG_ERR_FLAG, val); 2737 if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0) 2738 dma_rst |= B_AX_HCI_TXDMA_EN; 2739 if (val & B_AX_RX_STUCK) 2740 dma_rst |= B_AX_HCI_RXDMA_EN; 2741 val = rtw89_read32(rtwdev, R_AX_HCI_FUNC_EN); 2742 rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val & ~dma_rst); 2743 rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val | dma_rst); 2744 ret = rtw89_pci_poll_io_idle(rtwdev); 2745 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 2746 rtw89_debug(rtwdev, RTW89_DBG_HCI, 2747 "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n", 2748 R_AX_DBG_ERR_FLAG, val); 2749 } 2750 2751 return ret; 2752 } 2753 2754 static void rtw89_pci_ctrl_hci_dma_en(struct rtw89_dev *rtwdev, u8 en) 2755 { 2756 u32 val32; 2757 2758 if (en == MAC_AX_FUNC_EN) { 2759 val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN; 2760 rtw89_write32_set(rtwdev, R_AX_HCI_FUNC_EN, val32); 2761 } else { 2762 val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN; 2763 rtw89_write32_clr(rtwdev, R_AX_HCI_FUNC_EN, val32); 2764 } 2765 } 2766 2767 static int rtw89_pci_rst_bdram(struct rtw89_dev *rtwdev) 2768 { 2769 int ret = 0; 2770 u32 val32, sts; 2771 2772 val32 = B_AX_RST_BDRAM; 2773 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2774 2775 ret = read_poll_timeout_atomic(rtw89_read32, sts, 2776 (sts & B_AX_RST_BDRAM) == 0x0, 1, 100, 2777 true, rtwdev, R_AX_PCIE_INIT_CFG1); 2778 return ret; 2779 } 2780 2781 static int rtw89_pci_lv1rst_start_dma(struct rtw89_dev *rtwdev) 2782 { 2783 u32 ret; 2784 2785 
rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_DIS); 2786 rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_EN); 2787 rtw89_pci_clr_idx_all(rtwdev); 2788 2789 ret = rtw89_pci_rst_bdram(rtwdev); 2790 if (ret) 2791 return ret; 2792 2793 rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_EN); 2794 return ret; 2795 } 2796 2797 static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev, 2798 enum rtw89_lv1_rcvy_step step) 2799 { 2800 int ret; 2801 2802 switch (step) { 2803 case RTW89_LV1_RCVY_STEP_1: 2804 ret = rtw89_pci_lv1rst_stop_dma(rtwdev); 2805 if (ret) 2806 rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n"); 2807 2808 break; 2809 2810 case RTW89_LV1_RCVY_STEP_2: 2811 ret = rtw89_pci_lv1rst_start_dma(rtwdev); 2812 if (ret) 2813 rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n"); 2814 break; 2815 2816 default: 2817 return -EINVAL; 2818 } 2819 2820 return ret; 2821 } 2822 2823 static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev) 2824 { 2825 rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX=0x%08x\n", 2826 rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX)); 2827 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n", 2828 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG)); 2829 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n", 2830 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG)); 2831 } 2832 2833 static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget) 2834 { 2835 struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi); 2836 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2837 unsigned long flags; 2838 int work_done; 2839 2840 rtwdev->napi_budget_countdown = budget; 2841 2842 rtw89_pci_clear_isr0(rtwdev, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT); 2843 work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 2844 if (work_done == budget) 2845 return budget; 2846 2847 rtw89_pci_clear_isr0(rtwdev, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT | B_AX_RDU_INT); 2848 work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 2849 if (work_done < budget && napi_complete_done(napi, work_done)) { 2850 spin_lock_irqsave(&rtwpci->irq_lock, flags); 2851 if (likely(rtwpci->running)) 2852 rtw89_pci_enable_intr(rtwdev, rtwpci); 2853 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 2854 } 2855 2856 return work_done; 2857 } 2858 2859 static int __maybe_unused rtw89_pci_suspend(struct device *dev) 2860 { 2861 struct ieee80211_hw *hw = dev_get_drvdata(dev); 2862 struct rtw89_dev *rtwdev = hw->priv; 2863 2864 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2865 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2866 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 2867 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); 2868 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 2869 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 2870 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); 2871 2872 return 0; 2873 } 2874 2875 static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev) 2876 { 2877 if (rtwdev->chip->chip_id == RTL8852C) 2878 return; 2879 2880 /* Hardware needs the register written twice to ensure the setting takes effect */ 2881 rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_RST_MSTATE, 2882 RTW89_PCIE_BIT_CFG_RST_MSTATE); 2883 rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_RST_MSTATE, 2884 RTW89_PCIE_BIT_CFG_RST_MSTATE); 2885 } 2886 2887 static int __maybe_unused rtw89_pci_resume(struct device *dev) 2888 { 2889 struct ieee80211_hw *hw = dev_get_drvdata(dev); 2890 struct rtw89_dev *rtwdev = hw->priv; 2891 2892 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 2893 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2894 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 2895 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); 2896 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 2897 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, 2898 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); 2899 rtw89_pci_l2_hci_ldo(rtwdev); 2900 rtw89_pci_link_cfg(rtwdev); 2901 rtw89_pci_l1ss_cfg(rtwdev); 2902 2903 return 0; 2904 } 2905 2906 SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume); 2907 EXPORT_SYMBOL(rtw89_pm_ops); 2908 2909 static const struct rtw89_hci_ops rtw89_pci_ops = { 2910 .tx_write = rtw89_pci_ops_tx_write, 2911 .tx_kick_off = rtw89_pci_ops_tx_kick_off, 2912 .flush_queues = rtw89_pci_ops_flush_queues, 2913 .reset = rtw89_pci_ops_reset, 2914 .start = rtw89_pci_ops_start, 2915 .stop = rtw89_pci_ops_stop, 2916 .recalc_int_mit = rtw89_pci_recalc_int_mit, 2917 2918 .read8 = rtw89_pci_ops_read8, 2919 .read16 = rtw89_pci_ops_read16, 2920 .read32 = rtw89_pci_ops_read32, 2921 .write8 = rtw89_pci_ops_write8, 2922 .write16 = rtw89_pci_ops_write16, 2923 .write32 = rtw89_pci_ops_write32, 2924 2925 .mac_pre_init = rtw89_pci_ops_mac_pre_init, 2926 .mac_post_init = rtw89_pci_ops_mac_post_init, 2927 .deinit = rtw89_pci_ops_deinit, 2928 2929 .check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource, 2930 .mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery, 2931 .dump_err_status = rtw89_pci_ops_dump_err_status, 2932 .napi_poll = rtw89_pci_napi_poll, 2933 }; 2934 2935 static int rtw89_pci_probe(struct pci_dev *pdev, 2936 const struct pci_device_id *id) 2937 { 2938 struct ieee80211_hw *hw; 2939 struct rtw89_dev *rtwdev; 2940 int driver_data_size; 2941 int ret; 2942 2943 driver_data_size = sizeof(struct rtw89_dev) + sizeof(struct rtw89_pci); 2944 hw = ieee80211_alloc_hw(driver_data_size, &rtw89_ops); 2945 if (!hw) { 2946 dev_err(&pdev->dev, "failed to allocate hw\n"); 2947 return -ENOMEM; 2948 } 2949 2950 rtwdev = hw->priv; 2951 rtwdev->hw = hw; 2952 rtwdev->dev = &pdev->dev; 2953 rtwdev->hci.ops = &rtw89_pci_ops; 2954 rtwdev->hci.type = RTW89_HCI_TYPE_PCIE; 2955 rtwdev->hci.rpwm_addr = R_AX_PCIE_HRPWM; 2956 rtwdev->hci.cpwm_addr = R_AX_CPWM; 2957 2958 SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev); 2959 2960 switch (id->driver_data) { 2961 case RTL8852A: 2962 rtwdev->chip = &rtw8852a_chip_info; 2963 break; 2964 default: 2965 ret = -ENOENT; 2966 goto err_release_hw; 2967 } 2968 ret = rtw89_core_init(rtwdev); 2969 if (ret) { 2970 rtw89_err(rtwdev, "failed to initialise core\n"); 2971 goto err_release_hw; 2972 } 2973 2974 ret = rtw89_pci_claim_device(rtwdev, pdev); 2975 if (ret) { 2976 rtw89_err(rtwdev, "failed to claim pci device\n"); 2977 goto err_core_deinit; 2978 } 2979 2980 ret = rtw89_pci_setup_resource(rtwdev, pdev); 2981 if (ret) { 2982 rtw89_err(rtwdev, "failed to setup pci resource\n"); 2983 goto err_declaim_pci; 2984 } 2985 2986 ret = rtw89_chip_info_setup(rtwdev); 2987 if (ret) { 2988 rtw89_err(rtwdev, "failed to setup chip information\n"); 2989 goto err_clear_resource; 2990 } 2991 2992 rtw89_pci_link_cfg(rtwdev); 2993 rtw89_pci_l1ss_cfg(rtwdev); 2994 2995 ret = rtw89_core_register(rtwdev); 2996 if (ret) { 2997 rtw89_err(rtwdev, "failed to register core\n"); 2998 goto err_clear_resource; 2999 } 3000 3001 rtw89_core_napi_init(rtwdev); 3002 3003 ret = rtw89_pci_request_irq(rtwdev, pdev); 3004 if (ret) { 3005 rtw89_err(rtwdev, "failed to request pci irq\n"); 3006 goto err_unregister; 3007 } 3008 3009 return 0; 3010 3011 err_unregister: 3012
rtw89_core_napi_deinit(rtwdev); 3013 rtw89_core_unregister(rtwdev); 3014 err_clear_resource: 3015 rtw89_pci_clear_resource(rtwdev, pdev); 3016 err_declaim_pci: 3017 rtw89_pci_declaim_device(rtwdev, pdev); 3018 err_core_deinit: 3019 rtw89_core_deinit(rtwdev); 3020 err_release_hw: 3021 ieee80211_free_hw(hw); 3022 3023 return ret; 3024 } 3025 3026 static void rtw89_pci_remove(struct pci_dev *pdev) 3027 { 3028 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 3029 struct rtw89_dev *rtwdev; 3030 3031 rtwdev = hw->priv; 3032 3033 rtw89_pci_free_irq(rtwdev, pdev); 3034 rtw89_core_napi_deinit(rtwdev); 3035 rtw89_core_unregister(rtwdev); 3036 rtw89_pci_clear_resource(rtwdev, pdev); 3037 rtw89_pci_declaim_device(rtwdev, pdev); 3038 rtw89_core_deinit(rtwdev); 3039 ieee80211_free_hw(hw); 3040 } 3041 3042 static const struct pci_device_id rtw89_pci_id_table[] = { 3043 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8852), .driver_data = RTL8852A }, 3044 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xa85a), .driver_data = RTL8852A }, 3045 {}, 3046 }; 3047 MODULE_DEVICE_TABLE(pci, rtw89_pci_id_table); 3048 3049 static struct pci_driver rtw89_pci_driver = { 3050 .name = "rtw89_pci", 3051 .id_table = rtw89_pci_id_table, 3052 .probe = rtw89_pci_probe, 3053 .remove = rtw89_pci_remove, 3054 .driver.pm = &rtw89_pm_ops, 3055 }; 3056 module_pci_driver(rtw89_pci_driver); 3057 3058 MODULE_AUTHOR("Realtek Corporation"); 3059 MODULE_DESCRIPTION("Realtek 802.11ax wireless PCI driver"); 3060 MODULE_LICENSE("Dual BSD/GPL"); 3061