// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2020  Realtek Corporation
 */

#include <linux/pci.h>

#include "mac.h"
#include "pci.h"
#include "reg.h"
#include "ser.h"

static bool rtw89_pci_disable_clkreq;
static bool rtw89_pci_disable_aspm_l1;
static bool rtw89_pci_disable_l1ss;
module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644);
module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644);
module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644);
MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");

static int rtw89_pci_rst_bdram_pcie(struct rtw89_dev *rtwdev)
{
	u32 val;
	int ret;

	rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1,
		      rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | B_AX_RST_BDRAM);

	ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM),
				       1, RTW89_PCI_POLL_BDRAM_RST_CNT, false,
				       rtwdev, R_AX_PCIE_INIT_CFG1);

	if (ret)
		return -EBUSY;

	return 0;
}

static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev,
				struct rtw89_pci_dma_ring *bd_ring,
				u32 cur_idx, bool tx)
{
	u32 cnt, cur_rp, wp, rp, len;

	rp = bd_ring->rp;
	wp = bd_ring->wp;
	len = bd_ring->len;

	cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
	if (tx)
		cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp);
	else
		cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp);

	bd_ring->rp = cur_rp;

	return cnt;
}

static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 addr_idx = bd_ring->addr.idx;
	u32 cnt, idx;

	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true);

	return cnt;
}

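/* Descriptive note added for clarity (not from the original sources): H2C
 * firmware-command skbs queued on h2c_queue are moved to h2c_release_queue
 * once the TX BD index shows the hardware has consumed them (see
 * rtw89_pci_release_fwcmd() below). Unless release_all is set, the newest
 * RTW89_PCI_MULTITAG entries stay on the release queue; older entries are
 * DMA-unmapped and freed.
 */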
static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
				    struct rtw89_pci *rtwpci,
				    u32 cnt, bool release_all)
{
	struct rtw89_pci_tx_data *tx_data;
	struct sk_buff *skb;
	u32 qlen;

	while (cnt--) {
		skb = skb_dequeue(&rtwpci->h2c_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to pre-release fwcmd\n");
			return;
		}
		skb_queue_tail(&rtwpci->h2c_release_queue, skb);
	}

	qlen = skb_queue_len(&rtwpci->h2c_release_queue);
	if (!release_all)
		qlen = qlen > RTW89_PCI_MULTITAG ? qlen - RTW89_PCI_MULTITAG : 0;

	while (qlen--) {
		skb = skb_dequeue(&rtwpci->h2c_release_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to release fwcmd\n");
			return;
		}
		tx_data = RTW89_PCI_TX_SKB_CB(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev,
				       struct rtw89_pci *rtwpci)
{
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
	u32 cnt;

	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
	if (!cnt)
		return;
	rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false);
}

static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_rx_ring *rx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 addr_idx = bd_ring->addr.idx;
	u32 cnt, idx;

	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false);

	return cnt;
}

static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info;
	dma_addr_t dma;

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	dma = rx_info->dma;
	dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
}

static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
					  struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info;
	dma_addr_t dma;

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	dma = rx_info->dma;
	dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
}

static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
				      struct sk_buff *skb)
{
	struct rtw89_pci_rxbd_info *rxbd_info;
	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);

	rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data;
	rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS);
	rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS);
	rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE);
	rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG);

	return 0;
}

static void rtw89_pci_ctrl_txdma_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
	const struct rtw89_reg_def *dma_stop2 = &info->dma_stop2;

	if (enable) {
		rtw89_write32_clr(rtwdev, dma_stop1->addr, dma_stop1->mask);
		if (dma_stop2->addr)
			rtw89_write32_clr(rtwdev, dma_stop2->addr, dma_stop2->mask);
	} else {
		rtw89_write32_set(rtwdev, dma_stop1->addr, dma_stop1->mask);
		if (dma_stop2->addr)
			rtw89_write32_set(rtwdev, dma_stop2->addr, dma_stop2->mask);
	}
}

static bool
rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
		      struct sk_buff *new,
		      const struct sk_buff *skb, u32 offset,
		      const struct rtw89_pci_rx_info *rx_info,
		      const struct rtw89_rx_desc_info *desc_info)
{
	u32 copy_len = rx_info->len - offset;

	if (unlikely(skb_tailroom(new) < copy_len)) {
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n",
			    rx_info->len, desc_info->pkt_size, offset, fs, ls);
		rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ",
			       skb->data, rx_info->len);
		/* The length of a single-segment skb is desc_info->pkt_size. */
		if (fs && ls) {
			copy_len = desc_info->pkt_size;
		} else {
			rtw89_info(rtwdev, "drop rx data due to invalid length\n");
			return false;
		}
	}

	skb_put_data(new, skb->data + offset, copy_len);

	return true;
}

static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
				       struct rtw89_pci_rx_ring *rx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	struct rtw89_pci_rx_info *rx_info;
	struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc;
	struct sk_buff *new = rx_ring->diliver_skb;
	struct sk_buff *skb;
	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
	u32 offset;
	u32 cnt = 1;
	bool fs, ls;
	int ret;

	skb = rx_ring->buf[bd_ring->wp];
	rtw89_pci_sync_skb_for_cpu(rtwdev, skb);

	ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
	if (ret) {
		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
			  bd_ring->wp, ret);
		goto err_sync_device;
	}

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	fs = rx_info->fs;
	ls = rx_info->ls;

	if (fs) {
		if (new) {
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
				    "skb should not be ready before first segment start\n");
			goto err_sync_device;
		}
		if (desc_info->ready) {
			rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n");
			goto err_sync_device;
		}

		rtw89_core_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size);

		new = dev_alloc_skb(desc_info->pkt_size);
		if (!new)
			goto err_sync_device;

		rx_ring->diliver_skb = new;

		/* The first segment carries the RX descriptor. */
		offset = desc_info->offset;
		offset += desc_info->long_rxdesc ? sizeof(struct rtw89_rxdesc_long) :
						   sizeof(struct rtw89_rxdesc_short);
	} else {
		offset = sizeof(struct rtw89_pci_rxbd_info);
		if (!new) {
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n");
			goto err_sync_device;
		}
	}
	if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info))
		goto err_sync_device;
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);

	if (!desc_info->ready) {
		rtw89_warn(rtwdev, "no rx desc information\n");
		goto err_free_resource;
	}
	if (ls) {
		rtw89_core_rx(rtwdev, desc_info, new);
		rx_ring->diliver_skb = NULL;
		desc_info->ready = false;
	}

	return cnt;

err_sync_device:
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
err_free_resource:
	if (new)
		dev_kfree_skb_any(new);
	rx_ring->diliver_skb = NULL;
	desc_info->ready = false;

	return cnt;
}

static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
				   struct rtw89_pci_rx_ring *rx_ring,
				   u32 cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 rx_cnt;

	while (cnt && rtwdev->napi_budget_countdown > 0) {
		rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring);
		if (!rx_cnt) {
			rtw89_err(rtwdev, "failed to deliver RXBD skb\n");

			/* skip the remaining RXBD buffers */
			rtw89_pci_rxbd_increase(rx_ring, cnt);
			break;
		}

		cnt -= rx_cnt;
	}

	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}

static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci, int budget)
{
	struct rtw89_pci_rx_ring *rx_ring;
	int countdown = rtwdev->napi_budget_countdown;
	u32 cnt;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];

	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
	if (!cnt)
		return 0;

	cnt = min_t(u32, budget, cnt);

	rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt);

	/* The countdown may go below zero when flushing pending SKBs. */
	if (rtwdev->napi_budget_countdown <= 0)
		return budget;

	return budget - countdown;
}

static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
				struct rtw89_pci_tx_ring *tx_ring,
				struct sk_buff *skb, u8 tx_status)
{
	struct ieee80211_tx_info *info;

	info = IEEE80211_SKB_CB(skb);
	ieee80211_tx_info_clear_status(info);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
	if (tx_status == RTW89_TX_DONE) {
		info->flags |= IEEE80211_TX_STAT_ACK;
		tx_ring->tx_acked++;
	} else {
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
			rtw89_debug(rtwdev, RTW89_DBG_FW,
				    "failed to TX of status %x\n", tx_status);
		switch (tx_status) {
		case RTW89_TX_RETRY_LIMIT:
			tx_ring->tx_retry_lmt++;
			break;
		case RTW89_TX_LIFE_TIME:
			tx_ring->tx_life_time++;
			break;
		case RTW89_TX_MACID_DROP:
			tx_ring->tx_mac_id_drop++;
			break;
		default:
			rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status);
			break;
		}
	}

	ieee80211_tx_status_ni(rtwdev->hw, skb);
}

static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd *txwd;
	u32 cnt;

	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
	while (cnt--) {
		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
		if (!txwd) {
			rtw89_warn(rtwdev, "No busy txwd pages available\n");
			break;
		}

		list_del_init(&txwd->list);

		/* this skb has been freed by RPP */
		if (skb_queue_len(&txwd->queue) == 0)
			rtw89_pci_enqueue_txwd(tx_ring, txwd);
	}
}

static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev,
					struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	int i;

	for (i = 0; i < wd_ring->page_num; i++) {
		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
		if (!txwd)
			break;

		list_del_init(&txwd->list);
	}
}

static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
				       struct rtw89_pci_tx_ring *tx_ring,
				       struct rtw89_pci_tx_wd *txwd, u16 seq,
				       u8 tx_status)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_data *tx_data;
	struct sk_buff *skb, *tmp;
	u8 txch = tx_ring->txch;

	if (!list_empty(&txwd->list)) {
		rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
		/* In low power mode, the RPP can arrive before the TX BD is
		 * updated. In normal mode this should not happen, so warn
		 * about it.
		 */
		if (!rtwpci->low_power && !list_empty(&txwd->list))
			rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
				   txch, seq);
	}

	skb_queue_walk_safe(&txwd->queue, skb, tmp) {
		skb_unlink(skb, &txwd->queue);

		tx_data = RTW89_PCI_TX_SKB_CB(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);

		rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status);
	}

	if (list_empty(&txwd->list))
		rtw89_pci_enqueue_txwd(tx_ring, txwd);
}

static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
				  struct rtw89_pci_rpp_fmt *rpp)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_tx_wd_ring *wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	u16 seq;
	u8 qsel, tx_status, txch;

	seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
	qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
	tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
	txch = rtw89_core_get_ch_dma(rtwdev, qsel);

	if (txch == RTW89_TXCH_CH12) {
		rtw89_warn(rtwdev, "should no fwcmd release report\n");
		return;
	}

	tx_ring = &rtwpci->tx_rings[txch];
	wd_ring = &tx_ring->wd_ring;
	txwd = &wd_ring->pages[seq];

	rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status);
}

static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
					       struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	int i;

	for (i = 0; i < wd_ring->page_num; i++) {
		txwd = &wd_ring->pages[i];

		if (!list_empty(&txwd->list))
			continue;

		rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP);
	}
}

static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
				     struct rtw89_pci_rx_ring *rx_ring,
				     u32 max_cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	struct rtw89_pci_rx_info *rx_info;
	struct rtw89_pci_rpp_fmt *rpp;
	struct rtw89_rx_desc_info desc_info = {};
	struct sk_buff *skb;
	u32 cnt = 0;
	u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt);
	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
	u32 offset;
	int ret;

	skb = rx_ring->buf[bd_ring->wp];
	rtw89_pci_sync_skb_for_cpu(rtwdev, skb);

	ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
	if (ret) {
		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
			  bd_ring->wp, ret);
		goto err_sync_device;
	}

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	if (!rx_info->fs || !rx_info->ls) {
		rtw89_err(rtwdev, "cannot process RP frame not set FS/LS\n");
		return cnt;
	}

	rtw89_core_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size);

	/* The first segment carries the RX descriptor. */
	offset = desc_info.offset;
	offset += desc_info.long_rxdesc ? sizeof(struct rtw89_rxdesc_long) :
					  sizeof(struct rtw89_rxdesc_short);
	for (; offset + rpp_size <= rx_info->len; offset += rpp_size) {
		rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset);
		rtw89_pci_release_rpp(rtwdev, rpp);
	}

	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
	cnt++;

	return cnt;

err_sync_device:
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	return 0;
}

static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_rx_ring *rx_ring,
				 u32 cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 release_cnt;

	while (cnt) {
		release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt);
		if (!release_cnt) {
			rtw89_err(rtwdev, "failed to release TX skbs\n");

			/* skip the remaining RXBD buffers */
			rtw89_pci_rxbd_increase(rx_ring, cnt);
			break;
		}

		cnt -= release_cnt;
	}

	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}

static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci, int budget)
{
	struct rtw89_pci_rx_ring *rx_ring;
	u32 cnt;
	int work_done;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

	spin_lock_bh(&rtwpci->trx_lock);

	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
	if (cnt == 0)
		goto out_unlock;

	rtw89_pci_release_tx(rtwdev, rx_ring, cnt);

out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);

	/* always release all RPQ */
	work_done = min_t(int, cnt, budget);
	rtwdev->napi_budget_countdown -= work_done;

	return work_done;
}

static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
				      struct rtw89_pci *rtwpci)
{
	struct rtw89_pci_rx_ring *rx_ring;
	struct rtw89_pci_dma_ring *bd_ring;
	u32 reg_idx;
	u16 hw_idx, hw_idx_next, host_idx;
	int i;

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		bd_ring = &rx_ring->bd_ring;

		reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
		hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
		host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
		hw_idx_next = (hw_idx + 1) % bd_ring->len;

		if (hw_idx_next == host_idx)
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i);

		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "%d RXD unavailable, idx=0x%08x, len=%d\n",
			    i, reg_idx, bd_ring->len);
	}
}

void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
			       struct rtw89_pci *rtwpci,
			       struct rtw89_pci_isrs *isrs)
{
	isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs;
	isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0];
	isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1];

	rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs);

void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci,
				  struct rtw89_pci_isrs *isrs)
{
	isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs;
	isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ?
			      rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0;
	isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ?
			rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0;
	isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ?
			rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0;

	if (isrs->halt_c2h_isrs)
		rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	if (isrs->isrs[0])
		rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]);
	if (isrs->isrs[1])
		rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1);

static void rtw89_pci_clear_isr0(struct rtw89_dev *rtwdev, u32 isr00)
{
	/* write 1 to clear */
	rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isr00);
}

void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr);

void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr);

void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs);
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr_v1);

void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v1);

static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	int budget = NAPI_POLL_WEIGHT;

	/* Reset the budget so the RXQ cannot get stuck by running out of it. */
	rtwdev->napi_budget_countdown = budget;

	rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget);
	rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget);
}

static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_isrs isrs;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	if (unlikely(isrs.isrs[0] & B_AX_RDU_INT))
		rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);

	if (unlikely(isrs.halt_c2h_isrs & B_AX_HALT_C2H_INT_EN))
		rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));

	if (unlikely(isrs.halt_c2h_isrs & B_AX_WDT_TIMEOUT_INT_EN))
		rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);

	if (unlikely(rtwpci->under_recovery))
		goto enable_intr;

	if (unlikely(rtwpci->low_power)) {
		rtw89_pci_low_power_interrupt_handler(rtwdev);
		goto enable_intr;
	}

	if (likely(rtwpci->running)) {
		local_bh_disable();
		napi_schedule(&rtwdev->napi);
		local_bh_enable();
	}

	return IRQ_HANDLED;

enable_intr:
	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	if (likely(rtwpci->running))
		rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;
	irqreturn_t irqret = IRQ_WAKE_THREAD;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);

	/* An interrupt event that is already in flight can still fire even
	 * after pci_stop() has turned off the IMR.
	 */
	if (unlikely(!rtwpci->running)) {
		irqret = IRQ_HANDLED;
		goto exit;
	}

	rtw89_chip_disable_intr(rtwdev, rtwpci);
exit:
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	return irqret;
}

#define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
	[RTW89_TXCH_##txch] = { \
		.num = R_AX_##txch##_TXBD_NUM ##v, \
		.idx = R_AX_##txch##_TXBD_IDX ##v, \
		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_TXCHADDRS(info, txch, v...) \
	[RTW89_TXCH_##txch] = { \
		.num = R_AX_##txch##_TXBD_NUM, \
		.idx = R_AX_##txch##_TXBD_IDX, \
		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
	}

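/* Descriptive note added for clarity (not from the original sources): the
 * DEF_TXCHADDRS, DEF_TXCHADDRS_TYPE1 and DEF_RXCHADDRS macros build the
 * per-channel DMA register address tables. The variadic `v` argument is
 * token-pasted onto the register names so the same table definition can
 * emit either the default registers or the _V1 variants used by
 * rtw89_pci_ch_dma_addr_set_v1. DEF_TXCHADDRS_TYPE1 differs from
 * DEF_TXCHADDRS only in also applying the suffix to the TXBD_NUM and
 * TXBD_IDX registers.
 */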
#define DEF_RXCHADDRS(info, rxch, v...) \
	[RTW89_RXCH_##rxch] = { \
		.num = R_AX_##rxch##_RXBD_NUM ##v, \
		.idx = R_AX_##rxch##_RXBD_IDX ##v, \
		.desa_l = R_AX_##rxch##_RXBD_DESA_L ##v, \
		.desa_h = R_AX_##rxch##_RXBD_DESA_H ##v, \
	}

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
	.tx = {
		DEF_TXCHADDRS(info, ACH0),
		DEF_TXCHADDRS(info, ACH1),
		DEF_TXCHADDRS(info, ACH2),
		DEF_TXCHADDRS(info, ACH3),
		DEF_TXCHADDRS(info, ACH4),
		DEF_TXCHADDRS(info, ACH5),
		DEF_TXCHADDRS(info, ACH6),
		DEF_TXCHADDRS(info, ACH7),
		DEF_TXCHADDRS(info, CH8),
		DEF_TXCHADDRS(info, CH9),
		DEF_TXCHADDRS_TYPE1(info, CH10),
		DEF_TXCHADDRS_TYPE1(info, CH11),
		DEF_TXCHADDRS(info, CH12),
	},
	.rx = {
		DEF_RXCHADDRS(info, RXQ),
		DEF_RXCHADDRS(info, RPQ),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
	.tx = {
		DEF_TXCHADDRS(info, ACH0, _V1),
		DEF_TXCHADDRS(info, ACH1, _V1),
		DEF_TXCHADDRS(info, ACH2, _V1),
		DEF_TXCHADDRS(info, ACH3, _V1),
		DEF_TXCHADDRS(info, ACH4, _V1),
		DEF_TXCHADDRS(info, ACH5, _V1),
		DEF_TXCHADDRS(info, ACH6, _V1),
		DEF_TXCHADDRS(info, ACH7, _V1),
		DEF_TXCHADDRS(info, CH8, _V1),
		DEF_TXCHADDRS(info, CH9, _V1),
		DEF_TXCHADDRS_TYPE1(info, CH10, _V1),
		DEF_TXCHADDRS_TYPE1(info, CH11, _V1),
		DEF_TXCHADDRS(info, CH12, _V1),
	},
	.rx = {
		DEF_RXCHADDRS(info, RXQ, _V1),
		DEF_RXCHADDRS(info, RPQ, _V1),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);

#undef DEF_TXCHADDRS_TYPE1
#undef DEF_TXCHADDRS
#undef DEF_RXCHADDRS

static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
				    enum rtw89_tx_channel txch,
				    const struct rtw89_pci_ch_dma_addr **addr)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (txch >= RTW89_TXCH_NUM)
		return -EINVAL;

	*addr = &info->dma_addr_set->tx[txch];

	return 0;
}

static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev,
				    enum rtw89_rx_channel rxch,
				    const struct rtw89_pci_ch_dma_addr **addr)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (rxch >= RTW89_RXCH_NUM)
		return -EINVAL;

	*addr = &info->dma_addr_set->rx[rxch];

	return 0;
}

static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;

	/* One descriptor is reserved to tell a full ring from an empty one. */
	if (bd_ring->rp > bd_ring->wp)
		return bd_ring->rp - bd_ring->wp - 1;

	return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1;
}

static
u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
	u32 cnt;

	spin_lock_bh(&rtwpci->trx_lock);
	rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	spin_unlock_bh(&rtwpci->trx_lock);

	return cnt;
}

static
u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
						   u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	u32 cnt;

	spin_lock_bh(&rtwpci->trx_lock);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	cnt = min(cnt, wd_ring->curr_num);
	spin_unlock_bh(&rtwpci->trx_lock);

	return cnt;
}

static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
						     u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	u32 bd_cnt, wd_cnt, min_cnt = 0;
	struct rtw89_pci_rx_ring *rx_ring;
	u32 cnt;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

	spin_lock_bh(&rtwpci->trx_lock);
	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	wd_cnt = wd_ring->curr_num;

	if (wd_cnt == 0 || bd_cnt == 0) {
		cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
		if (cnt)
			rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
		else if (wd_cnt == 0)
			goto out_unlock;

		bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
		if (bd_cnt == 0)
			rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
	}

	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	wd_cnt = wd_ring->curr_num;
	min_cnt = min(bd_cnt, wd_cnt);
	if (min_cnt == 0)
		rtw89_debug(rtwdev, rtwpci->low_power ? RTW89_DBG_TXRX : RTW89_DBG_UNEXP,
			    "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n",
			    wd_cnt, bd_cnt);

out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);

	return min_cnt;
}

static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
						   u8 txch)
{
	if (rtwdev->hci.paused)
		return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch);

	if (txch == RTW89_TXCH_CH12)
		return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev);

	return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch);
}

static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 host_idx, addr;

	spin_lock_bh(&rtwpci->trx_lock);

	addr = bd_ring->addr.idx;
	host_idx = bd_ring->wp;
	rtw89_write16(rtwdev, addr, host_idx);

	spin_unlock_bh(&rtwpci->trx_lock);
}

static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring,
					int n_txbd)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 host_idx, len;

	len = bd_ring->len;
	host_idx = bd_ring->wp + n_txbd;
	host_idx = host_idx < len ? host_idx : host_idx - len;

	bd_ring->wp = host_idx;
}

static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];

	if (rtwdev->hci.paused) {
		set_bit(txch, rtwpci->kick_map);
		return;
	}

	__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
}

static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	int txch;

	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
		if (!test_and_clear_bit(txch, rtwpci->kick_map))
			continue;

		tx_ring = &rtwpci->tx_rings[txch];
		__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
	}
}

static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 cur_idx, cur_rp;
	u8 i;

	/* Because the time taken by the I/O is somewhat dynamic, it is hard
	 * to define a reasonable fixed total timeout for the
	 * read_poll_timeout* helpers. Instead, ensure a reasonable number of
	 * polls by using a plain for loop with udelay here.
	 */
	for (i = 0; i < 60; i++) {
		cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
		cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
		if (cur_rp == bd_ring->wp)
			return;

		udelay(1);
	}

	if (!drop)
		rtw89_info(rtwdev, "timed out to flush pci txch: %d\n", txch);
}

static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs,
					bool drop)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u8 i;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		/* It may be unnecessary to flush FWCMD queue. */
		if (i == RTW89_TXCH_CH12)
			continue;
		if (info->tx_dma_ch_mask & BIT(i))
			continue;

		if (txchs & BIT(i))
			__pci_flush_txch(rtwdev, i, drop);
	}
}

static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
				       bool drop)
{
	__rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop);
}

u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
			       void *txaddr_info_addr, u32 total_len,
			       dma_addr_t dma, u8 *add_info_nr)
{
	struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr;

	txaddr_info->length = cpu_to_le16(total_len);
	txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS |
					  RTW89_PCI_ADDR_NUM(1));
	txaddr_info->dma = cpu_to_le32(dma);

	*add_info_nr = 1;

	return sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info);

u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
				  void *txaddr_info_addr, u32 total_len,
				  dma_addr_t dma, u8 *add_info_nr)
{
	struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr;
	u32 remain = total_len;
	u32 len;
	u16 length_option;
	int n;

	for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) {
		len = remain >= TXADDR_INFO_LENTHG_V1_MAX ?
		      TXADDR_INFO_LENTHG_V1_MAX : remain;
		remain -= len;

		length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) |
				FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) |
				FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0);
		txaddr_info->length_opt = cpu_to_le16(length_option);
		txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma));
		txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma));

		dma += len;
		txaddr_info++;
	}

	WARN_ONCE(remain, "length overflow remain=%u total_len=%u",
		  remain, total_len);

	*add_info_nr = n;

	return n * sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1);

static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring,
				 struct rtw89_pci_tx_wd *txwd,
				 struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	struct rtw89_txwd_info *txwd_info;
	struct rtw89_pci_tx_wp_info *txwp_info;
	void *txaddr_info_addr;
	struct pci_dev *pdev = rtwpci->pdev;
	struct sk_buff *skb = tx_req->skb;
	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
	bool en_wd_info = desc_info->en_wd_info;
	u32 txwd_len;
	u32 txwp_len;
	u32 txaddr_info_len;
	dma_addr_t dma;
	int ret;

	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		rtw89_err(rtwdev, "failed to map skb dma data\n");
		ret = -EBUSY;
		goto err;
	}

	tx_data->dma = dma;

	txwp_len = sizeof(*txwp_info);
	txwd_len = chip->txwd_body_size;
	txwd_len += en_wd_info ? sizeof(*txwd_info) : 0;

	txwp_info = txwd->vaddr + txwd_len;
	txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID);
	txwp_info->seq1 = 0;
	txwp_info->seq2 = 0;
	txwp_info->seq3 = 0;

	tx_ring->tx_cnt++;
	txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len;
	txaddr_info_len =
		rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len,
					    dma, &desc_info->addr_info_nr);

	txwd->len = txwd_len + txwp_len + txaddr_info_len;

	rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr);

	skb_queue_tail(&txwd->queue, skb);

	return 0;

err:
	return ret;
}

static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
				  struct rtw89_pci_tx_ring *tx_ring,
				  struct rtw89_pci_tx_bd_32 *txbd,
				  struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	void *txdesc;
	int txdesc_size = chip->h2c_desc_size;
	struct pci_dev *pdev = rtwpci->pdev;
	struct sk_buff *skb = tx_req->skb;
	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
	dma_addr_t dma;

	txdesc = skb_push(skb, txdesc_size);
	memset(txdesc, 0, txdesc_size);
	rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);

	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		rtw89_err(rtwdev, "failed to map fwcmd dma data\n");
		return -EBUSY;
	}

	tx_data->dma = dma;
	txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
	txbd->length = cpu_to_le16(skb->len);
	txbd->dma = cpu_to_le32(tx_data->dma);
	skb_queue_tail(&rtwpci->h2c_queue, skb);

	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);

	return 0;
}

static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring,
				 struct rtw89_pci_tx_bd_32 *txbd,
				 struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci_tx_wd *txwd;
	int ret;

	/* The FWCMD queue doesn't have WD pages. Instead, it submits the CMD
	 * buffer with the WD BODY only, so there is no need to check the
	 * free pages of the WD ring here.
	 */
	if (tx_ring->txch == RTW89_TXCH_CH12)
		return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req);

	txwd = rtw89_pci_dequeue_txwd(tx_ring);
	if (!txwd) {
		rtw89_err(rtwdev, "no available TXWD\n");
		ret = -ENOSPC;
		goto err;
	}

	ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req);
	if (ret) {
		rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq);
		goto err_enqueue_wd;
	}

	list_add_tail(&txwd->list, &tx_ring->busy_pages);

	txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
	txbd->length = cpu_to_le16(txwd->len);
	txbd->dma = cpu_to_le32(txwd->paddr);

	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);

	return 0;

err_enqueue_wd:
	rtw89_pci_enqueue_txwd(tx_ring, txwd);
err:
	return ret;
}

static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req,
			      u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_tx_bd_32 *txbd;
	u32 n_avail_txbd;
	int ret = 0;

	/* check the tx type and dma channel for fw cmd queue */
	if ((txch == RTW89_TXCH_CH12 ||
	     tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) &&
	    (txch != RTW89_TXCH_CH12 ||
	     tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) {
		rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n");
		return -EINVAL;
	}

	tx_ring = &rtwpci->tx_rings[txch];
	spin_lock_bh(&rtwpci->trx_lock);

	n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
	if (n_avail_txbd == 0) {
		rtw89_err(rtwdev, "no available TXBD\n");
		ret = -ENOSPC;
		goto err_unlock;
	}

	txbd = rtw89_pci_get_next_txbd(tx_ring);
	ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req);
	if (ret) {
		rtw89_err(rtwdev, "failed to submit TXBD\n");
		goto err_unlock;
	}

	spin_unlock_bh(&rtwpci->trx_lock);
	return 0;

err_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);
	return ret;
}

static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	int ret;

	ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma);
	if (ret) {
		rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma);
		return ret;
	}

	return 0;
}

static const struct rtw89_pci_bd_ram bd_ram_table[RTW89_TXCH_NUM] = {
	[RTW89_TXCH_ACH0] = {.start_idx = 0,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH1] = {.start_idx = 5,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_CH8]  = {.start_idx = 40, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH9]  = {.start_idx = 45, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1},
};

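/* Descriptive note added for clarity (not from the original sources): the
 * table above carves the shared TX BD RAM into per-channel slices. Each
 * channel starts at start_idx, may use at most max_num entries, and is
 * guaranteed min_num entries; consecutive channels are spaced five entries
 * apart, so the table covers indices 0 through 63.
 * rtw89_pci_reset_trx_rings() below programs these values into each
 * channel's BDRAM control register.
 */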
static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_rx_ring *rx_ring;
	struct rtw89_pci_dma_ring *bd_ring;
	const struct rtw89_pci_bd_ram *bd_ram;
	u32 addr_num;
	u32 addr_bdram;
	u32 addr_desa_l;
	u32 val32;
	int i;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		if (info->tx_dma_ch_mask & BIT(i))
			continue;

		tx_ring = &rtwpci->tx_rings[i];
		bd_ring = &tx_ring->bd_ring;
		bd_ram = &bd_ram_table[i];
		addr_num = bd_ring->addr.num;
		addr_bdram = bd_ring->addr.bdram;
		addr_desa_l = bd_ring->addr.desa_l;
		bd_ring->wp = 0;
		bd_ring->rp = 0;

		val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
			FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
			FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num);

		rtw89_write16(rtwdev, addr_num, bd_ring->len);
		rtw89_write32(rtwdev, addr_bdram, val32);
		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
	}

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		bd_ring = &rx_ring->bd_ring;
		addr_num = bd_ring->addr.num;
		addr_desa_l = bd_ring->addr.desa_l;
		bd_ring->wp = 0;
		bd_ring->rp = 0;
		rx_ring->diliver_skb = NULL;
		rx_ring->diliver_desc.ready = false;

		rtw89_write16(rtwdev, addr_num, bd_ring->len);
		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
	}
}

static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
				      struct rtw89_pci_tx_ring *tx_ring)
{
	rtw89_pci_release_busy_txwd(rtwdev, tx_ring);
	rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring);
}

static void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	int txch;

	rtw89_pci_reset_trx_rings(rtwdev);

	spin_lock_bh(&rtwpci->trx_lock);
	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
		if (info->tx_dma_ch_mask & BIT(txch))
			continue;
		if (txch == RTW89_TXCH_CH12) {
			rtw89_pci_release_fwcmd(rtwdev, rtwpci,
						skb_queue_len(&rtwpci->h2c_queue), true);
			continue;
		}
		rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]);
	}
	spin_unlock_bh(&rtwpci->trx_lock);
}

static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtwpci->running = true;
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtwpci->running = false;
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
{
	rtw89_core_napi_start(rtwdev);
	rtw89_pci_enable_intr_lock(rtwdev);

	return 0;
}

static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	rtw89_pci_disable_intr_lock(rtwdev);
	synchronize_irq(pdev->irq);
	rtw89_core_napi_stop(rtwdev);
}

static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	if (pause) {
		rtw89_pci_disable_intr_lock(rtwdev);
		synchronize_irq(pdev->irq);
		if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
			napi_synchronize(&rtwdev->napi);
	} else {
		rtw89_pci_enable_intr_lock(rtwdev);
		rtw89_pci_tx_kick_off_pending(rtwdev);
	}
}

static
void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power;
	const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_rx_ring *rx_ring;
	int i;

	if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n"))
		return;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		tx_ring->bd_ring.addr.idx = low_power ?
					    bd_idx_addr->tx_bd_addrs[i] :
					    dma_addr_set->tx[i].idx;
	}

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		rx_ring->bd_ring.addr.idx = low_power ?
					    bd_idx_addr->rx_bd_addrs[i] :
					    dma_addr_set->rx[i].idx;
	}
}

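/* Descriptive note added for clarity (not from the original sources):
 * switching between normal and low power mode is a two-step operation.
 * rtw89_pci_ops_switch_mode() below first reprograms the interrupt mask for
 * the target mode and then swaps the TX/RX BD index register addresses via
 * rtw89_pci_switch_bd_idx_addr(); the WARN documents that the HCI must be
 * paused while this happens.
 */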
static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
{
	enum rtw89_pci_intr_mask_cfg cfg;

	WARN(!rtwdev->hci.paused, "HCI isn't paused\n");

	cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL;
	rtw89_chip_config_intr_mask(rtwdev, cfg);
	rtw89_pci_switch_bd_idx_addr(rtwdev, low_power);
}

static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data);

static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	u32 val = readl(rtwpci->mmap + addr);
	int count;

	for (count = 0; ; count++) {
		if (val != RTW89_R32_DEAD)
			return val;
		if (count >= MAC_REG_POOL_COUNT) {
			rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val);
			return RTW89_R32_DEAD;
		}
		rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN);
		val = readl(rtwpci->mmap + addr);
	}

	return val;
}

static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	u32 addr32, val32, shift;

	if (!ACCESS_CMAC(addr))
		return readb(rtwpci->mmap + addr);

	addr32 = addr & ~0x3;
	shift = (addr & 0x3) * 8;
	val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
	return val32 >> shift;
}

static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	u32 addr32, val32, shift;

	if (!ACCESS_CMAC(addr))
		return readw(rtwpci->mmap + addr);

	addr32 = addr & ~0x3;
	shift = (addr & 0x3) * 8;
	val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
	return val32 >> shift;
}

static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	if (!ACCESS_CMAC(addr))
		return readl(rtwpci->mmap + addr);

	return rtw89_pci_ops_read32_cmac(rtwdev, addr);
}

static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	writeb(data, rtwpci->mmap + addr);
}

static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	writew(data, rtwpci->mmap + addr);
}

static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	writel(data, rtwpci->mmap + addr);
}

static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (enable)
		rtw89_write32_set(rtwdev, info->init_cfg_reg,
				  info->rxhci_en_bit | info->txhci_en_bit);
	else
		rtw89_write32_clr(rtwdev, info->init_cfg_reg,
				  info->rxhci_en_bit | info->txhci_en_bit);
}

static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	u32 reg, mask;

	if (chip_id == RTL8852C) {
		reg = R_AX_HAXI_INIT_CFG1;
		mask = B_AX_STOP_AXI_MST;
	} else {
		reg = R_AX_PCIE_DMA_STOP1;
		mask = B_AX_STOP_PCIEIO;
	}

	if (enable)
		rtw89_write32_clr(rtwdev, reg, mask);
	else
		rtw89_write32_set(rtwdev, reg, mask);
}

static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
{
	rtw89_pci_ctrl_dma_io(rtwdev, enable);
	rtw89_pci_ctrl_dma_trx(rtwdev, enable);
}

static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit)
{
	u16 val;

	rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F);

	val = rtw89_read16(rtwdev, R_AX_MDIO_CFG);
	switch (speed) {
	case PCIE_PHY_GEN1:
		if (addr < 0x20)
			val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK);
		else
			val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK);
		break;
	case PCIE_PHY_GEN2:
		if (addr < 0x20)
			val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK);
		else
			val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK);
		break;
	default:
		rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed);
		return -EINVAL;
	}
	rtw89_write16(rtwdev, R_AX_MDIO_CFG, val);
	rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit);

	return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000,
				 false, rtwdev, R_AX_MDIO_CFG);
}

static int
rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val)
{
	int ret;

	ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret);
		return ret;
	}
	*val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA);

	return 0;
}

static int
rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed)
{
	int ret;

	rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data);
	ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret);
		return ret;
	}

	return 0;
}

static int
rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed)
{
	u32 shift;
	int ret;
	u16 val;

	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
	if (ret)
		return ret;

	shift = __ffs(mask);
	val &= ~mask;
	val |= ((data << shift) & mask);

	ret = rtw89_write16_mdio(rtwdev, addr, val, speed);
	if (ret)
		return ret;

	return 0;
}

static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
{
	int ret;
	u16 val;

	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed);
	if (ret)
		return ret;

	return 0;
}

static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
{
	int ret;
	u16 val;

	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed);
	if (ret)
		return ret;

	return 0;
}

static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
					u8 data)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	return pci_write_config_byte(pdev, addr, data);
}

static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
				       u8 *value)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	return pci_read_config_byte(pdev, addr, value);
}

static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr,
				     u8 bit)
{
	u8 value;
	int ret;

	ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
	if (ret)
		return ret;

	value |= bit;
	ret = rtw89_pci_write_config_byte(rtwdev, addr, value);

	return ret;
}

static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr,
				     u8 bit)
{
	u8 value;
	int ret;

	ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
	if (ret)
		return ret;

	value &= ~bit;
	ret = rtw89_pci_write_config_byte(rtwdev, addr, value);

	return ret;
}

static int
__get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate)
{
	u16 val, tar;
	int ret;

	/* Enable counter */
	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
				 phy_rate);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN,
				 phy_rate);
	if (ret)
		return ret;

	fsleep(300);

	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
				 phy_rate);
	if (ret)
		return ret;

	tar = tar & 0x0FFF;
	if (tar == 0 || tar == 0x0FFF) {
		rtw89_err(rtwdev, "[ERR]Get target failed.\n");
		return -EINVAL;
	}

	*target = tar;

	return 0;
}

static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev)
{
	int ret;

	if (rtwdev->chip->chip_id != RTL8852B)
		return 0;

	ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK,
				      PCIE_AUTOK_4, PCIE_PHY_GEN1);
	return ret;
}

static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
{
	enum rtw89_pcie_phy phy_rate;
	u16 val16, mgn_set, div_set, tar;
	u8 val8, bdr_ori;
	bool l1_flag = false;
	int ret = 0;

	if (rtwdev->chip->chip_id != RTL8852B)
		return 0;

	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]pci config read %X\n",
			  RTW89_PCIE_PHY_RATE);
		return ret;
	}

	if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) {
		phy_rate = PCIE_PHY_GEN1;
	} else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) {
		phy_rate = PCIE_PHY_GEN2;
	} else {
		rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not support\n", val8);
		return -EOPNOTSUPP;
	}
	/* Disable L1BD */
	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL);
		return ret;
	}

	if (bdr_ori & RTW89_PCIE_BIT_L1) {
		ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
						  bdr_ori & ~RTW89_PCIE_BIT_L1);
		if (ret) {
			rtw89_err(rtwdev, "[ERR]pci config write %X\n",
				  RTW89_PCIE_L1_CTRL);
			return ret;
		}
		l1_flag = true;
	}

	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
		goto end;
	}

B_AX_CALIB_EN) { 1940 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, 1941 val16 & ~B_AX_CALIB_EN, phy_rate); 1942 if (ret) { 1943 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 1944 goto end; 1945 } 1946 } 1947 1948 if (!autook_en) 1949 goto end; 1950 /* Set div */ 1951 ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate); 1952 if (ret) { 1953 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 1954 goto end; 1955 } 1956 1957 /* Obtain div and margin */ 1958 ret = __get_target(rtwdev, &tar, phy_rate); 1959 if (ret) { 1960 rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret); 1961 goto end; 1962 } 1963 1964 mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar; 1965 1966 if (mgn_set >= 128) { 1967 div_set = 0x0003; 1968 mgn_set = 0x000F; 1969 } else if (mgn_set >= 64) { 1970 div_set = 0x0003; 1971 mgn_set >>= 3; 1972 } else if (mgn_set >= 32) { 1973 div_set = 0x0002; 1974 mgn_set >>= 2; 1975 } else if (mgn_set >= 16) { 1976 div_set = 0x0001; 1977 mgn_set >>= 1; 1978 } else if (mgn_set == 0) { 1979 rtw89_err(rtwdev, "[ERR]cal mgn is 0,tar = %d\n", tar); 1980 goto end; 1981 } else { 1982 div_set = 0x0000; 1983 } 1984 1985 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); 1986 if (ret) { 1987 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 1988 goto end; 1989 } 1990 1991 val16 |= u16_encode_bits(div_set, B_AX_DIV); 1992 1993 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate); 1994 if (ret) { 1995 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 1996 goto end; 1997 } 1998 1999 ret = __get_target(rtwdev, &tar, phy_rate); 2000 if (ret) { 2001 rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret); 2002 goto end; 2003 } 2004 2005 rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n", 2006 tar, div_set, mgn_set); 2007 ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1, 2008 (tar & 0x0FFF) | (mgn_set << 12), phy_rate); 2009 if (ret) { 2010 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1); 2011 goto end; 2012 } 2013 2014 /* Enable function */ 2015 ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate); 2016 if (ret) { 2017 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2018 goto end; 2019 } 2020 2021 /* CLK delay = 0 */ 2022 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 2023 PCIE_CLKDLY_HW_0); 2024 2025 end: 2026 /* Set L1BD to ori */ 2027 if (l1_flag) { 2028 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 2029 bdr_ori); 2030 if (ret) { 2031 rtw89_err(rtwdev, "[ERR]pci config write %X\n", 2032 RTW89_PCIE_L1_CTRL); 2033 return ret; 2034 } 2035 } 2036 2037 return ret; 2038 } 2039 2040 static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev) 2041 { 2042 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2043 int ret; 2044 2045 if (chip_id == RTL8852A) { 2046 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2047 PCIE_PHY_GEN1); 2048 if (ret) 2049 return ret; 2050 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2051 PCIE_PHY_GEN2); 2052 if (ret) 2053 return ret; 2054 } else if (chip_id == RTL8852C) { 2055 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2, 2056 B_AX_DEGLITCH); 2057 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2, 2058 B_AX_DEGLITCH); 2059 } 2060 2061 return 0; 2062 } 2063 2064 static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev) 2065 { 2066 if (rtwdev->chip->chip_id 
!= RTL8852A) 2067 return; 2068 2069 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE); 2070 } 2071 2072 static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev) 2073 { 2074 if (rtwdev->chip->chip_id != RTL8852A && rtwdev->chip->chip_id != RTL8852B) 2075 return; 2076 2077 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN); 2078 } 2079 2080 static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev) 2081 { 2082 int ret; 2083 2084 if (rtwdev->chip->chip_id != RTL8852A) 2085 return 0; 2086 2087 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2088 PCIE_PHY_GEN1); 2089 if (ret) 2090 return ret; 2091 2092 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2093 PCIE_PHY_GEN2); 2094 if (ret) 2095 return ret; 2096 2097 return 0; 2098 } 2099 2100 static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev) 2101 { 2102 if (rtwdev->chip->chip_id != RTL8852A && rtwdev->chip->chip_id != RTL8852B) 2103 return; 2104 2105 rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN); 2106 } 2107 2108 static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev) 2109 { 2110 if (rtwdev->chip->chip_id == RTL8852A || 2111 rtwdev->chip->chip_id == RTL8852B) { 2112 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 2113 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2114 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2115 B_AX_PCIE_DIS_WLSUS_AFT_PDN); 2116 } else if (rtwdev->chip->chip_id == RTL8852C) { 2117 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2118 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2119 } 2120 } 2121 2122 static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev) 2123 { 2124 if (rtwdev->chip->chip_id != RTL8852B) 2125 return 0; 2126 2127 return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK, 2128 PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1); 2129 } 2130 2131 static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up) 2132 { 2133 if (pwr_up) 2134 rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2135 else 2136 rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2137 } 2138 2139 static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev) 2140 { 2141 if (rtwdev->chip->chip_id != RTL8852C) 2142 return; 2143 2144 rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2145 rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2146 } 2147 2148 static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev) 2149 { 2150 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2151 return; 2152 2153 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT); 2154 } 2155 2156 static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev) 2157 { 2158 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2159 return; 2160 2161 rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, 2162 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2163 rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3); 2164 rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, 2165 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2166 } 2167 2168 static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev) 2169 { 2170 if (rtwdev->chip->chip_id != RTL8852C) 2171 return; 2172 2173 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1); 2174 } 2175 2176 static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev) 2177 { 2178 if (rtwdev->chip->chip_id != RTL8852C) 2179 return; 2180 2181 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN); 2182 } 2183 2184 static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev) 2185 { 2186 
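	/* Presumably (inferred from the bit name, not documented here):
	 * clearing B_AX_SIC_EN_FORCE_CLKREQ stops the SIC block from forcing
	 * CLKREQ#, leaving CLKREQ under the host-negotiated control applied
	 * later in rtw89_pci_clkreq_set(). 8852C skips this.
	 */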
if (rtwdev->chip->chip_id == RTL8852C) 2187 return; 2188 2189 rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL, 2190 B_AX_SIC_EN_FORCE_CLKREQ); 2191 } 2192 2193 static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev) 2194 { 2195 const struct rtw89_pci_info *info = rtwdev->pci_info; 2196 u32 lbc; 2197 2198 if (rtwdev->chip->chip_id == RTL8852C) 2199 return; 2200 2201 lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG); 2202 if (info->lbc_en == MAC_AX_PCIE_ENABLE) { 2203 lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER); 2204 lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN; 2205 rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2206 } else { 2207 lbc &= ~B_AX_LBC_EN; 2208 } 2209 rtw89_write32_set(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2210 } 2211 2212 static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev) 2213 { 2214 const struct rtw89_pci_info *info = rtwdev->pci_info; 2215 u32 val32; 2216 2217 if (rtwdev->chip->chip_id != RTL8852C) 2218 return; 2219 2220 if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) { 2221 val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK, 2222 info->io_rcy_tmr); 2223 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32); 2224 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32); 2225 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32); 2226 2227 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2228 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2229 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2230 } else { 2231 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2232 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2233 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2234 } 2235 2236 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1); 2237 } 2238 2239 static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev) 2240 { 2241 if (rtwdev->chip->chip_id == RTL8852C) 2242 return; 2243 2244 rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL, 2245 B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG); 2246 2247 if (rtwdev->chip->chip_id == RTL8852A) 2248 rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL, 2249 B_AX_EN_CHKDSC_NO_RX_STUCK); 2250 } 2251 2252 static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev) 2253 { 2254 if (rtwdev->chip->chip_id == RTL8852C) 2255 return; 2256 2257 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 2258 B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG); 2259 } 2260 2261 static void rtw89_pci_clr_idx_all(struct rtw89_dev *rtwdev) 2262 { 2263 const struct rtw89_pci_info *info = rtwdev->pci_info; 2264 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2265 u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX | 2266 B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX | 2267 B_AX_CLR_CH12_IDX; 2268 u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg; 2269 u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg; 2270 2271 if (chip_id == RTL8852A || chip_id == RTL8852C) 2272 val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX | 2273 B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX; 2274 /* clear DMA indexes */ 2275 rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val); 2276 if (chip_id == RTL8852A || chip_id == RTL8852C) 2277 rtw89_write32_set(rtwdev, txbd_rwptr_clr2, 2278 B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX); 2279 rtw89_write32_set(rtwdev, rxbd_rwptr_clr, 2280 B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX); 2281 } 2282 2283 static int rtw89_poll_txdma_ch_idle_pcie(struct rtw89_dev *rtwdev) 2284 { 2285 const struct 
rtw89_pci_info *info = rtwdev->pci_info; 2286 u32 ret, check, dma_busy; 2287 u32 dma_busy1 = info->dma_busy1.addr; 2288 u32 dma_busy2 = info->dma_busy2_reg; 2289 2290 check = info->dma_busy1.mask; 2291 2292 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2293 10, 100, false, rtwdev, dma_busy1); 2294 if (ret) 2295 return ret; 2296 2297 if (!dma_busy2) 2298 return 0; 2299 2300 check = B_AX_CH10_BUSY | B_AX_CH11_BUSY; 2301 2302 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2303 10, 100, false, rtwdev, dma_busy2); 2304 if (ret) 2305 return ret; 2306 2307 return 0; 2308 } 2309 2310 static int rtw89_poll_rxdma_ch_idle_pcie(struct rtw89_dev *rtwdev) 2311 { 2312 const struct rtw89_pci_info *info = rtwdev->pci_info; 2313 u32 ret, check, dma_busy; 2314 u32 dma_busy3 = info->dma_busy3_reg; 2315 2316 check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY; 2317 2318 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2319 10, 100, false, rtwdev, dma_busy3); 2320 if (ret) 2321 return ret; 2322 2323 return 0; 2324 } 2325 2326 static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev) 2327 { 2328 u32 ret; 2329 2330 ret = rtw89_poll_txdma_ch_idle_pcie(rtwdev); 2331 if (ret) { 2332 rtw89_err(rtwdev, "txdma ch busy\n"); 2333 return ret; 2334 } 2335 2336 ret = rtw89_poll_rxdma_ch_idle_pcie(rtwdev); 2337 if (ret) { 2338 rtw89_err(rtwdev, "rxdma ch busy\n"); 2339 return ret; 2340 } 2341 2342 return 0; 2343 } 2344 2345 static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) 2346 { 2347 const struct rtw89_pci_info *info = rtwdev->pci_info; 2348 enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode; 2349 enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode; 2350 enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode; 2351 enum mac_ax_tag_mode tag_mode = info->tag_mode; 2352 enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl; 2353 enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl; 2354 enum mac_ax_tx_burst tx_burst = info->tx_burst; 2355 enum mac_ax_rx_burst rx_burst = info->rx_burst; 2356 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2357 u8 cv = rtwdev->hal.cv; 2358 u32 val32; 2359 2360 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2361 if (chip_id == RTL8852A && cv == CHIP_CBV) 2362 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2363 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2364 if (chip_id == RTL8852A || chip_id == RTL8852B) 2365 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2366 } 2367 2368 if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) { 2369 if (chip_id == RTL8852A && cv == CHIP_CBV) 2370 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2371 } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) { 2372 if (chip_id == RTL8852A || chip_id == RTL8852B) 2373 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2374 } 2375 2376 if (rxbd_mode == MAC_AX_RXBD_PKT) { 2377 rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2378 } else if (rxbd_mode == MAC_AX_RXBD_SEP) { 2379 rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2380 2381 if (chip_id == RTL8852A || chip_id == RTL8852B) 2382 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, 2383 B_AX_PCIE_RX_APPLEN_MASK, 0); 2384 } 2385 2386 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2387 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst); 2388 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, 
B_AX_PCIE_MAX_RXDMA_MASK, rx_burst); 2389 } else if (chip_id == RTL8852C) { 2390 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst); 2391 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst); 2392 } 2393 2394 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2395 if (tag_mode == MAC_AX_TAG_SGL) { 2396 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) & 2397 ~B_AX_LATENCY_CONTROL; 2398 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2399 } else if (tag_mode == MAC_AX_TAG_MULTI) { 2400 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | 2401 B_AX_LATENCY_CONTROL; 2402 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2403 } 2404 } 2405 2406 rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask, 2407 info->multi_tag_num); 2408 2409 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2410 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE, 2411 wd_dma_idle_intvl); 2412 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT, 2413 wd_dma_act_intvl); 2414 } else if (chip_id == RTL8852C) { 2415 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK, 2416 wd_dma_idle_intvl); 2417 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK, 2418 wd_dma_act_intvl); 2419 } 2420 2421 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2422 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2423 B_AX_HOST_ADDR_INFO_8B_SEL); 2424 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2425 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2426 rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2427 B_AX_HOST_ADDR_INFO_8B_SEL); 2428 rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2429 } 2430 2431 return 0; 2432 } 2433 2434 static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev) 2435 { 2436 const struct rtw89_pci_info *info = rtwdev->pci_info; 2437 2438 if (rtwdev->chip->chip_id == RTL8852A) { 2439 /* ltr sw trigger */ 2440 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE); 2441 } 2442 info->ltr_set(rtwdev, false); 2443 rtw89_pci_ctrl_dma_all(rtwdev, false); 2444 rtw89_pci_clr_idx_all(rtwdev); 2445 2446 return 0; 2447 } 2448 2449 static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev) 2450 { 2451 const struct rtw89_pci_info *info = rtwdev->pci_info; 2452 int ret; 2453 2454 rtw89_pci_rxdma_prefth(rtwdev); 2455 rtw89_pci_l1off_pwroff(rtwdev); 2456 rtw89_pci_deglitch_setting(rtwdev); 2457 ret = rtw89_pci_l2_rxen_lat(rtwdev); 2458 if (ret) { 2459 rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret); 2460 return ret; 2461 } 2462 2463 rtw89_pci_aphy_pwrcut(rtwdev); 2464 rtw89_pci_hci_ldo(rtwdev); 2465 rtw89_pci_dphy_delay(rtwdev); 2466 2467 ret = rtw89_pci_autok_x(rtwdev); 2468 if (ret) { 2469 rtw89_err(rtwdev, "[ERR] pcie autok_x fail %d\n", ret); 2470 return ret; 2471 } 2472 2473 ret = rtw89_pci_auto_refclk_cal(rtwdev, false); 2474 if (ret) { 2475 rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret); 2476 return ret; 2477 } 2478 2479 rtw89_pci_power_wake(rtwdev, true); 2480 rtw89_pci_autoload_hang(rtwdev); 2481 rtw89_pci_l12_vmain(rtwdev); 2482 rtw89_pci_gen2_force_ib(rtwdev); 2483 rtw89_pci_l1_ent_lat(rtwdev); 2484 rtw89_pci_wd_exit_l1(rtwdev); 2485 rtw89_pci_set_sic(rtwdev); 2486 rtw89_pci_set_lbc(rtwdev); 2487 rtw89_pci_set_io_rcy(rtwdev); 2488 rtw89_pci_set_dbg(rtwdev); 2489 rtw89_pci_set_keep_reg(rtwdev); 2490 2491 rtw89_write32_set(rtwdev, info->dma_stop1.addr, B_AX_STOP_WPDMA); 2492 2493 /* stop 
DMA activities */ 2494 rtw89_pci_ctrl_dma_all(rtwdev, false); 2495 2496 ret = rtw89_pci_poll_dma_all_idle(rtwdev); 2497 if (ret) { 2498 rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n"); 2499 return ret; 2500 } 2501 2502 rtw89_pci_clr_idx_all(rtwdev); 2503 rtw89_pci_mode_op(rtwdev); 2504 2505 /* fill TRX BD indexes */ 2506 rtw89_pci_ops_reset(rtwdev); 2507 2508 ret = rtw89_pci_rst_bdram_pcie(rtwdev); 2509 if (ret) { 2510 rtw89_warn(rtwdev, "reset bdram busy\n"); 2511 return ret; 2512 } 2513 2514 /* disable all channels except to FW CMD channel to download firmware */ 2515 rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, false); 2516 rtw89_write32_clr(rtwdev, info->dma_stop1.addr, B_AX_STOP_CH12); 2517 2518 /* start DMA activities */ 2519 rtw89_pci_ctrl_dma_all(rtwdev, true); 2520 2521 return 0; 2522 } 2523 2524 int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en) 2525 { 2526 u32 val; 2527 2528 if (!en) 2529 return 0; 2530 2531 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 2532 if (rtw89_pci_ltr_is_err_reg_val(val)) 2533 return -EINVAL; 2534 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 2535 if (rtw89_pci_ltr_is_err_reg_val(val)) 2536 return -EINVAL; 2537 val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY); 2538 if (rtw89_pci_ltr_is_err_reg_val(val)) 2539 return -EINVAL; 2540 val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY); 2541 if (rtw89_pci_ltr_is_err_reg_val(val)) 2542 return -EINVAL; 2543 2544 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN | B_AX_LTR_EN | 2545 B_AX_LTR_WD_NOEMP_CHK); 2546 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK, 2547 PCI_LTR_SPC_500US); 2548 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 2549 PCI_LTR_IDLE_TIMER_3_2MS); 2550 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 2551 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 2552 rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x90039003); 2553 rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b); 2554 2555 return 0; 2556 } 2557 EXPORT_SYMBOL(rtw89_pci_ltr_set); 2558 2559 int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en) 2560 { 2561 u32 dec_ctrl; 2562 u32 val32; 2563 2564 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 2565 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2566 return -EINVAL; 2567 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 2568 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2569 return -EINVAL; 2570 dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL); 2571 if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl)) 2572 return -EINVAL; 2573 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3); 2574 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2575 return -EINVAL; 2576 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0); 2577 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2578 return -EINVAL; 2579 2580 if (!en) { 2581 dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN); 2582 dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) | 2583 B_AX_LTR_REQ_DRV; 2584 } else { 2585 dec_ctrl |= B_AX_LTR_HW_DEC_EN; 2586 } 2587 2588 dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK; 2589 dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US); 2590 2591 if (en) 2592 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, 2593 B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN); 2594 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 2595 PCI_LTR_IDLE_TIMER_3_2MS); 2596 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 2597 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, 
B_AX_LTR_RX1_TH_MASK, 0x28); 2598 rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl); 2599 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003); 2600 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b); 2601 2602 return 0; 2603 } 2604 EXPORT_SYMBOL(rtw89_pci_ltr_set_v1); 2605 2606 static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev) 2607 { 2608 const struct rtw89_pci_info *info = rtwdev->pci_info; 2609 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2610 int ret; 2611 2612 ret = info->ltr_set(rtwdev, true); 2613 if (ret) { 2614 rtw89_err(rtwdev, "pci ltr set fail\n"); 2615 return ret; 2616 } 2617 if (chip_id == RTL8852A) { 2618 /* ltr sw trigger */ 2619 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT); 2620 } 2621 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2622 /* ADDR info 8-byte mode */ 2623 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2624 B_AX_HOST_ADDR_INFO_8B_SEL); 2625 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2626 } 2627 2628 /* enable DMA for all queues */ 2629 rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, true); 2630 2631 /* Release PCI IO */ 2632 rtw89_write32_clr(rtwdev, info->dma_stop1.addr, 2633 B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO); 2634 2635 return 0; 2636 } 2637 2638 static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev, 2639 struct pci_dev *pdev) 2640 { 2641 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2642 int ret; 2643 2644 ret = pci_enable_device(pdev); 2645 if (ret) { 2646 rtw89_err(rtwdev, "failed to enable pci device\n"); 2647 return ret; 2648 } 2649 2650 pci_set_master(pdev); 2651 pci_set_drvdata(pdev, rtwdev->hw); 2652 2653 rtwpci->pdev = pdev; 2654 2655 return 0; 2656 } 2657 2658 static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev, 2659 struct pci_dev *pdev) 2660 { 2661 pci_clear_master(pdev); 2662 pci_disable_device(pdev); 2663 } 2664 2665 static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, 2666 struct pci_dev *pdev) 2667 { 2668 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2669 unsigned long resource_len; 2670 u8 bar_id = 2; 2671 int ret; 2672 2673 ret = pci_request_regions(pdev, KBUILD_MODNAME); 2674 if (ret) { 2675 rtw89_err(rtwdev, "failed to request pci regions\n"); 2676 goto err; 2677 } 2678 2679 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 2680 if (ret) { 2681 rtw89_err(rtwdev, "failed to set dma mask to 32-bit\n"); 2682 goto err_release_regions; 2683 } 2684 2685 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 2686 if (ret) { 2687 rtw89_err(rtwdev, "failed to set consistent dma mask to 32-bit\n"); 2688 goto err_release_regions; 2689 } 2690 2691 resource_len = pci_resource_len(pdev, bar_id); 2692 rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len); 2693 if (!rtwpci->mmap) { 2694 rtw89_err(rtwdev, "failed to map pci io\n"); 2695 ret = -EIO; 2696 goto err_release_regions; 2697 } 2698 2699 return 0; 2700 2701 err_release_regions: 2702 pci_release_regions(pdev); 2703 err: 2704 return ret; 2705 } 2706 2707 static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev, 2708 struct pci_dev *pdev) 2709 { 2710 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2711 2712 if (rtwpci->mmap) { 2713 pci_iounmap(pdev, rtwpci->mmap); 2714 pci_release_regions(pdev); 2715 } 2716 } 2717 2718 static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev, 2719 struct pci_dev *pdev, 2720 struct rtw89_pci_tx_ring *tx_ring) 2721 { 2722 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 2723 u8 *head = 
wd_ring->head; 2724 dma_addr_t dma = wd_ring->dma; 2725 u32 page_size = wd_ring->page_size; 2726 u32 page_num = wd_ring->page_num; 2727 u32 ring_sz = page_size * page_num; 2728 2729 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2730 wd_ring->head = NULL; 2731 } 2732 2733 static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev, 2734 struct pci_dev *pdev, 2735 struct rtw89_pci_tx_ring *tx_ring) 2736 { 2737 int ring_sz; 2738 u8 *head; 2739 dma_addr_t dma; 2740 2741 head = tx_ring->bd_ring.head; 2742 dma = tx_ring->bd_ring.dma; 2743 ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len; 2744 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2745 2746 tx_ring->bd_ring.head = NULL; 2747 } 2748 2749 static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev, 2750 struct pci_dev *pdev) 2751 { 2752 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2753 const struct rtw89_pci_info *info = rtwdev->pci_info; 2754 struct rtw89_pci_tx_ring *tx_ring; 2755 int i; 2756 2757 for (i = 0; i < RTW89_TXCH_NUM; i++) { 2758 if (info->tx_dma_ch_mask & BIT(i)) 2759 continue; 2760 tx_ring = &rtwpci->tx_rings[i]; 2761 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 2762 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 2763 } 2764 } 2765 2766 static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev, 2767 struct pci_dev *pdev, 2768 struct rtw89_pci_rx_ring *rx_ring) 2769 { 2770 struct rtw89_pci_rx_info *rx_info; 2771 struct sk_buff *skb; 2772 dma_addr_t dma; 2773 u32 buf_sz; 2774 u8 *head; 2775 int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len; 2776 int i; 2777 2778 buf_sz = rx_ring->buf_sz; 2779 for (i = 0; i < rx_ring->bd_ring.len; i++) { 2780 skb = rx_ring->buf[i]; 2781 if (!skb) 2782 continue; 2783 2784 rx_info = RTW89_PCI_RX_SKB_CB(skb); 2785 dma = rx_info->dma; 2786 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 2787 dev_kfree_skb(skb); 2788 rx_ring->buf[i] = NULL; 2789 } 2790 2791 head = rx_ring->bd_ring.head; 2792 dma = rx_ring->bd_ring.dma; 2793 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2794 2795 rx_ring->bd_ring.head = NULL; 2796 } 2797 2798 static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev, 2799 struct pci_dev *pdev) 2800 { 2801 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2802 struct rtw89_pci_rx_ring *rx_ring; 2803 int i; 2804 2805 for (i = 0; i < RTW89_RXCH_NUM; i++) { 2806 rx_ring = &rtwpci->rx_rings[i]; 2807 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 2808 } 2809 } 2810 2811 static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev, 2812 struct pci_dev *pdev) 2813 { 2814 rtw89_pci_free_rx_rings(rtwdev, pdev); 2815 rtw89_pci_free_tx_rings(rtwdev, pdev); 2816 } 2817 2818 static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev, 2819 struct rtw89_pci_rx_ring *rx_ring, 2820 struct sk_buff *skb, int buf_sz, u32 idx) 2821 { 2822 struct rtw89_pci_rx_info *rx_info; 2823 struct rtw89_pci_rx_bd_32 *rx_bd; 2824 dma_addr_t dma; 2825 2826 if (!skb) 2827 return -EINVAL; 2828 2829 dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE); 2830 if (dma_mapping_error(&pdev->dev, dma)) 2831 return -EBUSY; 2832 2833 rx_info = RTW89_PCI_RX_SKB_CB(skb); 2834 rx_bd = RTW89_PCI_RX_BD(rx_ring, idx); 2835 2836 memset(rx_bd, 0, sizeof(*rx_bd)); 2837 rx_bd->buf_size = cpu_to_le16(buf_sz); 2838 rx_bd->dma = cpu_to_le32(dma); 2839 rx_info->dma = dma; 2840 2841 return 0; 2842 } 2843 2844 static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev, 2845 struct pci_dev *pdev, 2846 struct 
rtw89_pci_tx_ring *tx_ring, 2847 enum rtw89_tx_channel txch) 2848 { 2849 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 2850 struct rtw89_pci_tx_wd *txwd; 2851 dma_addr_t dma; 2852 dma_addr_t cur_paddr; 2853 u8 *head; 2854 u8 *cur_vaddr; 2855 u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE; 2856 u32 page_num = RTW89_PCI_TXWD_NUM_MAX; 2857 u32 ring_sz = page_size * page_num; 2858 u32 page_offset; 2859 int i; 2860 2861 /* FWCMD queue doesn't use txwd as pages */ 2862 if (txch == RTW89_TXCH_CH12) 2863 return 0; 2864 2865 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 2866 if (!head) 2867 return -ENOMEM; 2868 2869 INIT_LIST_HEAD(&wd_ring->free_pages); 2870 wd_ring->head = head; 2871 wd_ring->dma = dma; 2872 wd_ring->page_size = page_size; 2873 wd_ring->page_num = page_num; 2874 2875 page_offset = 0; 2876 for (i = 0; i < page_num; i++) { 2877 txwd = &wd_ring->pages[i]; 2878 cur_paddr = dma + page_offset; 2879 cur_vaddr = head + page_offset; 2880 2881 skb_queue_head_init(&txwd->queue); 2882 INIT_LIST_HEAD(&txwd->list); 2883 txwd->paddr = cur_paddr; 2884 txwd->vaddr = cur_vaddr; 2885 txwd->len = page_size; 2886 txwd->seq = i; 2887 rtw89_pci_enqueue_txwd(tx_ring, txwd); 2888 2889 page_offset += page_size; 2890 } 2891 2892 return 0; 2893 } 2894 2895 static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev, 2896 struct pci_dev *pdev, 2897 struct rtw89_pci_tx_ring *tx_ring, 2898 u32 desc_size, u32 len, 2899 enum rtw89_tx_channel txch) 2900 { 2901 const struct rtw89_pci_ch_dma_addr *txch_addr; 2902 int ring_sz = desc_size * len; 2903 u8 *head; 2904 dma_addr_t dma; 2905 int ret; 2906 2907 ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch); 2908 if (ret) { 2909 rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch); 2910 goto err; 2911 } 2912 2913 ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr); 2914 if (ret) { 2915 rtw89_err(rtwdev, "failed to get address of txch %d", txch); 2916 goto err_free_wd_ring; 2917 } 2918 2919 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 2920 if (!head) { 2921 ret = -ENOMEM; 2922 goto err_free_wd_ring; 2923 } 2924 2925 INIT_LIST_HEAD(&tx_ring->busy_pages); 2926 tx_ring->bd_ring.head = head; 2927 tx_ring->bd_ring.dma = dma; 2928 tx_ring->bd_ring.len = len; 2929 tx_ring->bd_ring.desc_size = desc_size; 2930 tx_ring->bd_ring.addr = *txch_addr; 2931 tx_ring->bd_ring.wp = 0; 2932 tx_ring->bd_ring.rp = 0; 2933 tx_ring->txch = txch; 2934 2935 return 0; 2936 2937 err_free_wd_ring: 2938 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 2939 err: 2940 return ret; 2941 } 2942 2943 static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev, 2944 struct pci_dev *pdev) 2945 { 2946 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2947 const struct rtw89_pci_info *info = rtwdev->pci_info; 2948 struct rtw89_pci_tx_ring *tx_ring; 2949 u32 desc_size; 2950 u32 len; 2951 u32 i, tx_allocated; 2952 int ret; 2953 2954 for (i = 0; i < RTW89_TXCH_NUM; i++) { 2955 if (info->tx_dma_ch_mask & BIT(i)) 2956 continue; 2957 tx_ring = &rtwpci->tx_rings[i]; 2958 desc_size = sizeof(struct rtw89_pci_tx_bd_32); 2959 len = RTW89_PCI_TXBD_NUM_MAX; 2960 ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring, 2961 desc_size, len, i); 2962 if (ret) { 2963 rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i); 2964 goto err_free; 2965 } 2966 } 2967 2968 return 0; 2969 2970 err_free: 2971 tx_allocated = i; 2972 for (i = 0; i < tx_allocated; i++) { 2973 tx_ring = &rtwpci->tx_rings[i]; 2974 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 
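		/* The WD ring of the channel that just failed was already
		 * released inside rtw89_pci_alloc_tx_ring(); this loop only
		 * rolls back the BD rings of the channels set up before it.
		 */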
2975 } 2976 2977 return ret; 2978 } 2979 2980 static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev, 2981 struct pci_dev *pdev, 2982 struct rtw89_pci_rx_ring *rx_ring, 2983 u32 desc_size, u32 len, u32 rxch) 2984 { 2985 const struct rtw89_pci_ch_dma_addr *rxch_addr; 2986 struct sk_buff *skb; 2987 u8 *head; 2988 dma_addr_t dma; 2989 int ring_sz = desc_size * len; 2990 int buf_sz = RTW89_PCI_RX_BUF_SIZE; 2991 int i, allocated; 2992 int ret; 2993 2994 ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr); 2995 if (ret) { 2996 rtw89_err(rtwdev, "failed to get address of rxch %d", rxch); 2997 return ret; 2998 } 2999 3000 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3001 if (!head) { 3002 ret = -ENOMEM; 3003 goto err; 3004 } 3005 3006 rx_ring->bd_ring.head = head; 3007 rx_ring->bd_ring.dma = dma; 3008 rx_ring->bd_ring.len = len; 3009 rx_ring->bd_ring.desc_size = desc_size; 3010 rx_ring->bd_ring.addr = *rxch_addr; 3011 rx_ring->bd_ring.wp = 0; 3012 rx_ring->bd_ring.rp = 0; 3013 rx_ring->buf_sz = buf_sz; 3014 rx_ring->diliver_skb = NULL; 3015 rx_ring->diliver_desc.ready = false; 3016 3017 for (i = 0; i < len; i++) { 3018 skb = dev_alloc_skb(buf_sz); 3019 if (!skb) { 3020 ret = -ENOMEM; 3021 goto err_free; 3022 } 3023 3024 memset(skb->data, 0, buf_sz); 3025 rx_ring->buf[i] = skb; 3026 ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb, 3027 buf_sz, i); 3028 if (ret) { 3029 rtw89_err(rtwdev, "failed to init rx buf %d\n", i); 3030 dev_kfree_skb_any(skb); 3031 rx_ring->buf[i] = NULL; 3032 goto err_free; 3033 } 3034 } 3035 3036 return 0; 3037 3038 err_free: 3039 allocated = i; 3040 for (i = 0; i < allocated; i++) { 3041 skb = rx_ring->buf[i]; 3042 if (!skb) 3043 continue; 3044 dma = *((dma_addr_t *)skb->cb); 3045 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 3046 dev_kfree_skb(skb); 3047 rx_ring->buf[i] = NULL; 3048 } 3049 3050 head = rx_ring->bd_ring.head; 3051 dma = rx_ring->bd_ring.dma; 3052 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3053 3054 rx_ring->bd_ring.head = NULL; 3055 err: 3056 return ret; 3057 } 3058 3059 static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev, 3060 struct pci_dev *pdev) 3061 { 3062 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3063 struct rtw89_pci_rx_ring *rx_ring; 3064 u32 desc_size; 3065 u32 len; 3066 int i, rx_allocated; 3067 int ret; 3068 3069 for (i = 0; i < RTW89_RXCH_NUM; i++) { 3070 rx_ring = &rtwpci->rx_rings[i]; 3071 desc_size = sizeof(struct rtw89_pci_rx_bd_32); 3072 len = RTW89_PCI_RXBD_NUM_MAX; 3073 ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring, 3074 desc_size, len, i); 3075 if (ret) { 3076 rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i); 3077 goto err_free; 3078 } 3079 } 3080 3081 return 0; 3082 3083 err_free: 3084 rx_allocated = i; 3085 for (i = 0; i < rx_allocated; i++) { 3086 rx_ring = &rtwpci->rx_rings[i]; 3087 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 3088 } 3089 3090 return ret; 3091 } 3092 3093 static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev, 3094 struct pci_dev *pdev) 3095 { 3096 int ret; 3097 3098 ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev); 3099 if (ret) { 3100 rtw89_err(rtwdev, "failed to alloc dma tx rings\n"); 3101 goto err; 3102 } 3103 3104 ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev); 3105 if (ret) { 3106 rtw89_err(rtwdev, "failed to alloc dma rx rings\n"); 3107 goto err_free_tx_rings; 3108 } 3109 3110 return 0; 3111 3112 err_free_tx_rings: 3113 rtw89_pci_free_tx_rings(rtwdev, pdev); 3114 err: 3115 return ret; 3116 } 3117 3118 static void 
rtw89_pci_h2c_init(struct rtw89_dev *rtwdev, 3119 struct rtw89_pci *rtwpci) 3120 { 3121 skb_queue_head_init(&rtwpci->h2c_queue); 3122 skb_queue_head_init(&rtwpci->h2c_release_queue); 3123 } 3124 3125 static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev, 3126 struct pci_dev *pdev) 3127 { 3128 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3129 int ret; 3130 3131 ret = rtw89_pci_setup_mapping(rtwdev, pdev); 3132 if (ret) { 3133 rtw89_err(rtwdev, "failed to setup pci mapping\n"); 3134 goto err; 3135 } 3136 3137 ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev); 3138 if (ret) { 3139 rtw89_err(rtwdev, "failed to alloc pci trx rings\n"); 3140 goto err_pci_unmap; 3141 } 3142 3143 rtw89_pci_h2c_init(rtwdev, rtwpci); 3144 3145 spin_lock_init(&rtwpci->irq_lock); 3146 spin_lock_init(&rtwpci->trx_lock); 3147 3148 return 0; 3149 3150 err_pci_unmap: 3151 rtw89_pci_clear_mapping(rtwdev, pdev); 3152 err: 3153 return ret; 3154 } 3155 3156 static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev, 3157 struct pci_dev *pdev) 3158 { 3159 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3160 3161 rtw89_pci_free_trx_rings(rtwdev, pdev); 3162 rtw89_pci_clear_mapping(rtwdev, pdev); 3163 rtw89_pci_release_fwcmd(rtwdev, rtwpci, 3164 skb_queue_len(&rtwpci->h2c_queue), true); 3165 } 3166 3167 void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev) 3168 { 3169 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3170 3171 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0; 3172 3173 if (rtwpci->under_recovery) { 3174 rtwpci->intrs[0] = B_AX_HS0ISR_IND_INT_EN; 3175 rtwpci->intrs[1] = 0; 3176 } else { 3177 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3178 B_AX_RXDMA_INT_EN | 3179 B_AX_RXP1DMA_INT_EN | 3180 B_AX_RPQDMA_INT_EN | 3181 B_AX_RXDMA_STUCK_INT_EN | 3182 B_AX_RDU_INT_EN | 3183 B_AX_RPQBD_FULL_INT_EN | 3184 B_AX_HS0ISR_IND_INT_EN; 3185 3186 rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN; 3187 } 3188 } 3189 EXPORT_SYMBOL(rtw89_pci_config_intr_mask); 3190 3191 static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev) 3192 { 3193 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3194 3195 rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN; 3196 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3197 rtwpci->intrs[0] = 0; 3198 rtwpci->intrs[1] = 0; 3199 } 3200 3201 static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev) 3202 { 3203 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3204 3205 rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN | 3206 B_AX_HS1ISR_IND_INT_EN | 3207 B_AX_HS0ISR_IND_INT_EN; 3208 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3209 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3210 B_AX_RXDMA_INT_EN | 3211 B_AX_RXP1DMA_INT_EN | 3212 B_AX_RPQDMA_INT_EN | 3213 B_AX_RXDMA_STUCK_INT_EN | 3214 B_AX_RDU_INT_EN | 3215 B_AX_RPQBD_FULL_INT_EN; 3216 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3217 } 3218 3219 static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev) 3220 { 3221 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3222 3223 rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN | 3224 B_AX_HS0ISR_IND_INT_EN; 3225 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3226 rtwpci->intrs[0] = 0; 3227 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3228 } 3229 3230 void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev) 3231 { 3232 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3233 3234 if (rtwpci->under_recovery) 3235 
rtw89_pci_recovery_intr_mask_v1(rtwdev); 3236 else if (rtwpci->low_power) 3237 rtw89_pci_low_power_intr_mask_v1(rtwdev); 3238 else 3239 rtw89_pci_default_intr_mask_v1(rtwdev); 3240 } 3241 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1); 3242 3243 static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev, 3244 struct pci_dev *pdev) 3245 { 3246 unsigned long flags = 0; 3247 int ret; 3248 3249 flags |= PCI_IRQ_LEGACY | PCI_IRQ_MSI; 3250 ret = pci_alloc_irq_vectors(pdev, 1, 1, flags); 3251 if (ret < 0) { 3252 rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret); 3253 goto err; 3254 } 3255 3256 ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq, 3257 rtw89_pci_interrupt_handler, 3258 rtw89_pci_interrupt_threadfn, 3259 IRQF_SHARED, KBUILD_MODNAME, rtwdev); 3260 if (ret) { 3261 rtw89_err(rtwdev, "failed to request threaded irq\n"); 3262 goto err_free_vector; 3263 } 3264 3265 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET); 3266 3267 return 0; 3268 3269 err_free_vector: 3270 pci_free_irq_vectors(pdev); 3271 err: 3272 return ret; 3273 } 3274 3275 static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev, 3276 struct pci_dev *pdev) 3277 { 3278 devm_free_irq(rtwdev->dev, pdev->irq, rtwdev); 3279 pci_free_irq_vectors(pdev); 3280 } 3281 3282 static u16 gray_code_to_bin(u16 gray_code, u32 bit_num) 3283 { 3284 u16 bin = 0, gray_bit; 3285 u32 bit_idx; 3286 3287 for (bit_idx = 0; bit_idx < bit_num; bit_idx++) { 3288 gray_bit = (gray_code >> bit_idx) & 0x1; 3289 if (bit_num - bit_idx > 1) 3290 gray_bit ^= (gray_code >> (bit_idx + 1)) & 0x1; 3291 bin |= (gray_bit << bit_idx); 3292 } 3293 3294 return bin; 3295 } 3296 3297 static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev) 3298 { 3299 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3300 struct pci_dev *pdev = rtwpci->pdev; 3301 u16 val16, filter_out_val; 3302 u32 val, phy_offset; 3303 int ret; 3304 3305 if (rtwdev->chip->chip_id != RTL8852C) 3306 return 0; 3307 3308 val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK); 3309 if (val == B_AX_ASPM_CTRL_L1) 3310 return 0; 3311 3312 ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val); 3313 if (ret) 3314 return ret; 3315 3316 val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val); 3317 if (val == RTW89_PCIE_GEN1_SPEED) { 3318 phy_offset = R_RAC_DIRECT_OFFSET_G1; 3319 } else if (val == RTW89_PCIE_GEN2_SPEED) { 3320 phy_offset = R_RAC_DIRECT_OFFSET_G2; 3321 val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT); 3322 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, 3323 val16 | B_PCIE_BIT_PINOUT_DIS); 3324 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, 3325 val16 & ~B_PCIE_BIT_RD_SEL); 3326 3327 val16 = rtw89_read16_mask(rtwdev, 3328 phy_offset + RAC_ANA1F * RAC_MULT, 3329 FILTER_OUT_EQ_MASK); 3330 val16 = gray_code_to_bin(val16, hweight16(val16)); 3331 filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 * 3332 RAC_MULT); 3333 filter_out_val &= ~REG_FILTER_OUT_MASK; 3334 filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16); 3335 3336 rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT, 3337 filter_out_val); 3338 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0A * RAC_MULT, 3339 B_BAC_EQ_SEL); 3340 rtw89_write16_set(rtwdev, 3341 R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT, 3342 B_PCIE_BIT_PSAVE); 3343 } else { 3344 return -EOPNOTSUPP; 3345 } 3346 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT, 3347 B_PCIE_BIT_PSAVE); 3348 3349 return 0; 3350 } 3351 3352 static void 
rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable) 3353 { 3354 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3355 int ret; 3356 3357 if (rtw89_pci_disable_clkreq) 3358 return; 3359 3360 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 3361 PCIE_CLKDLY_HW_30US); 3362 if (ret) 3363 rtw89_err(rtwdev, "failed to set CLKREQ Delay\n"); 3364 3365 if (chip_id == RTL8852A) { 3366 if (enable) 3367 ret = rtw89_pci_config_byte_set(rtwdev, 3368 RTW89_PCIE_L1_CTRL, 3369 RTW89_PCIE_BIT_CLK); 3370 else 3371 ret = rtw89_pci_config_byte_clr(rtwdev, 3372 RTW89_PCIE_L1_CTRL, 3373 RTW89_PCIE_BIT_CLK); 3374 if (ret) 3375 rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d", 3376 enable ? "set" : "unset", ret); 3377 } else if (chip_id == RTL8852C) { 3378 rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL, 3379 B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL); 3380 if (enable) 3381 rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL, 3382 B_AX_CLK_REQ_N); 3383 else 3384 rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL, 3385 B_AX_CLK_REQ_N); 3386 } 3387 } 3388 3389 static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable) 3390 { 3391 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3392 u8 value = 0; 3393 int ret; 3394 3395 if (rtw89_pci_disable_aspm_l1) 3396 return; 3397 3398 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value); 3399 if (ret) 3400 rtw89_err(rtwdev, "failed to read ASPM Delay\n"); 3401 3402 value &= ~(RTW89_L1DLY_MASK | RTW89_L0DLY_MASK); 3403 value |= FIELD_PREP(RTW89_L1DLY_MASK, PCIE_L1DLY_16US) | 3404 FIELD_PREP(RTW89_L0DLY_MASK, PCIE_L0SDLY_4US); 3405 3406 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value); 3407 if (ret) 3408 rtw89_err(rtwdev, "failed to read ASPM Delay\n"); 3409 3410 if (chip_id == RTL8852A || chip_id == RTL8852B) { 3411 if (enable) 3412 ret = rtw89_pci_config_byte_set(rtwdev, 3413 RTW89_PCIE_L1_CTRL, 3414 RTW89_PCIE_BIT_L1); 3415 else 3416 ret = rtw89_pci_config_byte_clr(rtwdev, 3417 RTW89_PCIE_L1_CTRL, 3418 RTW89_PCIE_BIT_L1); 3419 } else if (chip_id == RTL8852C) { 3420 if (enable) 3421 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3422 B_AX_ASPM_CTRL_L1); 3423 else 3424 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3425 B_AX_ASPM_CTRL_L1); 3426 } 3427 if (ret) 3428 rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d", 3429 enable ? "set" : "unset", ret); 3430 } 3431 3432 static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev) 3433 { 3434 struct rtw89_traffic_stats *stats = &rtwdev->stats; 3435 enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv; 3436 enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv; 3437 u32 val = 0; 3438 3439 if (!rtwdev->scanning && 3440 (tx_tfc_lv >= RTW89_TFC_HIGH || rx_tfc_lv >= RTW89_TFC_HIGH)) 3441 val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL | 3442 FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) | 3443 FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) | 3444 FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64); 3445 3446 rtw89_write32(rtwdev, R_AX_INT_MIT_RX, val); 3447 } 3448 3449 static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev) 3450 { 3451 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3452 struct pci_dev *pdev = rtwpci->pdev; 3453 u16 link_ctrl; 3454 int ret; 3455 3456 /* Though there is standard PCIE configuration space to set the 3457 * link control register, but by Realtek's design, driver should 3458 * check if host supports CLKREQ/ASPM to enable the HW module. 
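	 * (Concretely: pcie_capability_read_word(PCI_EXP_LNKCTL) below, then
	 * rtw89_pci_clkreq_set() / rtw89_pci_aspm_set() only for the bits the
	 * host side has actually enabled.)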
3459 * 3460 * These functions are implemented by two HW modules associated, 3461 * one is responsible to access PCIE configuration space to 3462 * follow the host settings, and another is in charge of doing 3463 * CLKREQ/ASPM mechanisms, it is default disabled. Because sometimes 3464 * the host does not support it, and due to some reasons or wrong 3465 * settings (ex. CLKREQ# not Bi-Direction), it could lead to device 3466 * loss if HW misbehaves on the link. 3467 * 3468 * Hence it's designed that driver should first check the PCIE 3469 * configuration space is sync'ed and enabled, then driver can turn 3470 * on the other module that is actually working on the mechanism. 3471 */ 3472 ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl); 3473 if (ret) { 3474 rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret); 3475 return; 3476 } 3477 3478 if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN) 3479 rtw89_pci_clkreq_set(rtwdev, true); 3480 3481 if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1) 3482 rtw89_pci_aspm_set(rtwdev, true); 3483 } 3484 3485 static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable) 3486 { 3487 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3488 int ret; 3489 3490 if (chip_id == RTL8852A || chip_id == RTL8852B) { 3491 if (enable) 3492 ret = rtw89_pci_config_byte_set(rtwdev, 3493 RTW89_PCIE_TIMER_CTRL, 3494 RTW89_PCIE_BIT_L1SUB); 3495 else 3496 ret = rtw89_pci_config_byte_clr(rtwdev, 3497 RTW89_PCIE_TIMER_CTRL, 3498 RTW89_PCIE_BIT_L1SUB); 3499 if (ret) 3500 rtw89_err(rtwdev, "failed to %s L1SS, ret=%d", 3501 enable ? "set" : "unset", ret); 3502 } else if (chip_id == RTL8852C) { 3503 ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1, 3504 RTW89_PCIE_BIT_ASPM_L11 | 3505 RTW89_PCIE_BIT_PCI_L11); 3506 if (ret) 3507 rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d", ret); 3508 if (enable) 3509 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3510 B_AX_L1SUB_DISABLE); 3511 else 3512 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3513 B_AX_L1SUB_DISABLE); 3514 } 3515 } 3516 3517 static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev) 3518 { 3519 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3520 struct pci_dev *pdev = rtwpci->pdev; 3521 u32 l1ss_cap_ptr, l1ss_ctrl; 3522 3523 if (rtw89_pci_disable_l1ss) 3524 return; 3525 3526 l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); 3527 if (!l1ss_cap_ptr) 3528 return; 3529 3530 pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl); 3531 3532 if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK) 3533 rtw89_pci_l1ss_set(rtwdev, true); 3534 } 3535 3536 static int rtw89_pci_poll_io_idle(struct rtw89_dev *rtwdev) 3537 { 3538 int ret = 0; 3539 u32 sts; 3540 u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY; 3541 3542 ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0, 3543 10, 1000, false, rtwdev, 3544 R_AX_PCIE_DMA_BUSY1); 3545 if (ret) { 3546 rtw89_err(rtwdev, "pci dmach busy1 0x%X\n", 3547 rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1)); 3548 return -EINVAL; 3549 } 3550 return ret; 3551 } 3552 3553 static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev) 3554 { 3555 u32 val; 3556 int ret; 3557 3558 if (rtwdev->chip->chip_id == RTL8852C) 3559 return 0; 3560 3561 rtw89_pci_ctrl_dma_all(rtwdev, false); 3562 ret = rtw89_pci_poll_io_idle(rtwdev); 3563 if (ret) { 3564 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 3565 rtw89_debug(rtwdev, RTW89_DBG_HCI, 3566 "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n", 3567 
R_AX_DBG_ERR_FLAG, val); 3568 if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0) 3569 rtw89_mac_ctrl_hci_dma_tx(rtwdev, false); 3570 if (val & B_AX_RX_STUCK) 3571 rtw89_mac_ctrl_hci_dma_rx(rtwdev, false); 3572 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true); 3573 ret = rtw89_pci_poll_io_idle(rtwdev); 3574 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 3575 rtw89_debug(rtwdev, RTW89_DBG_HCI, 3576 "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n", 3577 R_AX_DBG_ERR_FLAG, val); 3578 } 3579 3580 return ret; 3581 } 3582 3583 3584 3585 static int rtw89_pci_rst_bdram(struct rtw89_dev *rtwdev) 3586 { 3587 int ret = 0; 3588 u32 val32, sts; 3589 3590 val32 = B_AX_RST_BDRAM; 3591 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 3592 3593 ret = read_poll_timeout_atomic(rtw89_read32, sts, 3594 (sts & B_AX_RST_BDRAM) == 0x0, 1, 100, 3595 true, rtwdev, R_AX_PCIE_INIT_CFG1); 3596 return ret; 3597 } 3598 3599 static int rtw89_pci_lv1rst_start_dma(struct rtw89_dev *rtwdev) 3600 { 3601 u32 ret; 3602 3603 if (rtwdev->chip->chip_id == RTL8852C) 3604 return 0; 3605 3606 rtw89_mac_ctrl_hci_dma_trx(rtwdev, false); 3607 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true); 3608 rtw89_pci_clr_idx_all(rtwdev); 3609 3610 ret = rtw89_pci_rst_bdram(rtwdev); 3611 if (ret) 3612 return ret; 3613 3614 rtw89_pci_ctrl_dma_all(rtwdev, true); 3615 return ret; 3616 } 3617 3618 static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev, 3619 enum rtw89_lv1_rcvy_step step) 3620 { 3621 int ret; 3622 3623 switch (step) { 3624 case RTW89_LV1_RCVY_STEP_1: 3625 ret = rtw89_pci_lv1rst_stop_dma(rtwdev); 3626 if (ret) 3627 rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n"); 3628 3629 break; 3630 3631 case RTW89_LV1_RCVY_STEP_2: 3632 ret = rtw89_pci_lv1rst_start_dma(rtwdev); 3633 if (ret) 3634 rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n"); 3635 break; 3636 3637 default: 3638 return -EINVAL; 3639 } 3640 3641 return ret; 3642 } 3643 3644 static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev) 3645 { 3646 rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX =0x%08x\n", 3647 rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX)); 3648 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n", 3649 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG)); 3650 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n", 3651 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG)); 3652 } 3653 3654 static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget) 3655 { 3656 struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi); 3657 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3658 unsigned long flags; 3659 int work_done; 3660 3661 rtwdev->napi_budget_countdown = budget; 3662 3663 rtw89_pci_clear_isr0(rtwdev, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT); 3664 work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 3665 if (work_done == budget) 3666 return budget; 3667 3668 rtw89_pci_clear_isr0(rtwdev, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT | B_AX_RDU_INT); 3669 work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 3670 if (work_done < budget && napi_complete_done(napi, work_done)) { 3671 spin_lock_irqsave(&rtwpci->irq_lock, flags); 3672 if (likely(rtwpci->running)) 3673 rtw89_chip_enable_intr(rtwdev, rtwpci); 3674 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 3675 } 3676 3677 return work_done; 3678 } 3679 3680 static int __maybe_unused rtw89_pci_suspend(struct device *dev) 3681 { 3682 struct ieee80211_hw *hw = dev_get_drvdata(dev); 3683 struct rtw89_dev *rtwdev = hw->priv; 3684 enum rtw89_core_chip_id chip_id = 
rtwdev->chip->chip_id; 3685 3686 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3687 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); 3688 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3689 if (chip_id == RTL8852A || chip_id == RTL8852B) { 3690 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 3691 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 3692 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 3693 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); 3694 } else { 3695 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, 3696 B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN); 3697 } 3698 3699 return 0; 3700 } 3701 3702 static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev) 3703 { 3704 if (rtwdev->chip->chip_id == RTL8852C) 3705 return; 3706 3707 /* Hardware need write the reg twice to ensure the setting work */ 3708 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE, 3709 RTW89_PCIE_BIT_CFG_RST_MSTATE); 3710 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE, 3711 RTW89_PCIE_BIT_CFG_RST_MSTATE); 3712 } 3713 3714 static int __maybe_unused rtw89_pci_resume(struct device *dev) 3715 { 3716 struct ieee80211_hw *hw = dev_get_drvdata(dev); 3717 struct rtw89_dev *rtwdev = hw->priv; 3718 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3719 3720 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3721 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); 3722 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3723 if (chip_id == RTL8852A || chip_id == RTL8852B) { 3724 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 3725 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 3726 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, 3727 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); 3728 } else { 3729 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, 3730 B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN); 3731 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, 3732 B_AX_SEL_REQ_ENTR_L1); 3733 } 3734 rtw89_pci_l2_hci_ldo(rtwdev); 3735 rtw89_pci_filter_out(rtwdev); 3736 rtw89_pci_link_cfg(rtwdev); 3737 rtw89_pci_l1ss_cfg(rtwdev); 3738 3739 return 0; 3740 } 3741 3742 SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume); 3743 EXPORT_SYMBOL(rtw89_pm_ops); 3744 3745 static const struct rtw89_hci_ops rtw89_pci_ops = { 3746 .tx_write = rtw89_pci_ops_tx_write, 3747 .tx_kick_off = rtw89_pci_ops_tx_kick_off, 3748 .flush_queues = rtw89_pci_ops_flush_queues, 3749 .reset = rtw89_pci_ops_reset, 3750 .start = rtw89_pci_ops_start, 3751 .stop = rtw89_pci_ops_stop, 3752 .pause = rtw89_pci_ops_pause, 3753 .switch_mode = rtw89_pci_ops_switch_mode, 3754 .recalc_int_mit = rtw89_pci_recalc_int_mit, 3755 3756 .read8 = rtw89_pci_ops_read8, 3757 .read16 = rtw89_pci_ops_read16, 3758 .read32 = rtw89_pci_ops_read32, 3759 .write8 = rtw89_pci_ops_write8, 3760 .write16 = rtw89_pci_ops_write16, 3761 .write32 = rtw89_pci_ops_write32, 3762 3763 .mac_pre_init = rtw89_pci_ops_mac_pre_init, 3764 .mac_post_init = rtw89_pci_ops_mac_post_init, 3765 .deinit = rtw89_pci_ops_deinit, 3766 3767 .check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource, 3768 .mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery, 3769 .dump_err_status = rtw89_pci_ops_dump_err_status, 3770 .napi_poll = rtw89_pci_napi_poll, 3771 3772 .recovery_start = rtw89_pci_ops_recovery_start, 3773 .recovery_complete = rtw89_pci_ops_recovery_complete, 3774 }; 3775 3776 int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 3777 { 3778 struct rtw89_dev *rtwdev; 3779 const struct rtw89_driver_info *info; 
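	/* Both info pointers are taken from the matching pci_device_id below:
	 * id->driver_data carries the per-chip rtw89_driver_info, and its
	 * bus.pci member supplies the PCI-specific rtw89_pci_info.
	 */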
3780 const struct rtw89_pci_info *pci_info; 3781 int ret; 3782 3783 info = (const struct rtw89_driver_info *)id->driver_data; 3784 3785 rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev, 3786 sizeof(struct rtw89_pci), 3787 info->chip); 3788 if (!rtwdev) { 3789 dev_err(&pdev->dev, "failed to allocate hw\n"); 3790 return -ENOMEM; 3791 } 3792 3793 pci_info = info->bus.pci; 3794 3795 rtwdev->pci_info = info->bus.pci; 3796 rtwdev->hci.ops = &rtw89_pci_ops; 3797 rtwdev->hci.type = RTW89_HCI_TYPE_PCIE; 3798 rtwdev->hci.rpwm_addr = pci_info->rpwm_addr; 3799 rtwdev->hci.cpwm_addr = pci_info->cpwm_addr; 3800 3801 SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev); 3802 3803 ret = rtw89_core_init(rtwdev); 3804 if (ret) { 3805 rtw89_err(rtwdev, "failed to initialise core\n"); 3806 goto err_release_hw; 3807 } 3808 3809 ret = rtw89_pci_claim_device(rtwdev, pdev); 3810 if (ret) { 3811 rtw89_err(rtwdev, "failed to claim pci device\n"); 3812 goto err_core_deinit; 3813 } 3814 3815 ret = rtw89_pci_setup_resource(rtwdev, pdev); 3816 if (ret) { 3817 rtw89_err(rtwdev, "failed to setup pci resource\n"); 3818 goto err_declaim_pci; 3819 } 3820 3821 ret = rtw89_chip_info_setup(rtwdev); 3822 if (ret) { 3823 rtw89_err(rtwdev, "failed to setup chip information\n"); 3824 goto err_clear_resource; 3825 } 3826 3827 rtw89_pci_filter_out(rtwdev); 3828 rtw89_pci_link_cfg(rtwdev); 3829 rtw89_pci_l1ss_cfg(rtwdev); 3830 3831 ret = rtw89_core_register(rtwdev); 3832 if (ret) { 3833 rtw89_err(rtwdev, "failed to register core\n"); 3834 goto err_clear_resource; 3835 } 3836 3837 rtw89_core_napi_init(rtwdev); 3838 3839 ret = rtw89_pci_request_irq(rtwdev, pdev); 3840 if (ret) { 3841 rtw89_err(rtwdev, "failed to request pci irq\n"); 3842 goto err_unregister; 3843 } 3844 3845 return 0; 3846 3847 err_unregister: 3848 rtw89_core_napi_deinit(rtwdev); 3849 rtw89_core_unregister(rtwdev); 3850 err_clear_resource: 3851 rtw89_pci_clear_resource(rtwdev, pdev); 3852 err_declaim_pci: 3853 rtw89_pci_declaim_device(rtwdev, pdev); 3854 err_core_deinit: 3855 rtw89_core_deinit(rtwdev); 3856 err_release_hw: 3857 rtw89_free_ieee80211_hw(rtwdev); 3858 3859 return ret; 3860 } 3861 EXPORT_SYMBOL(rtw89_pci_probe); 3862 3863 void rtw89_pci_remove(struct pci_dev *pdev) 3864 { 3865 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 3866 struct rtw89_dev *rtwdev; 3867 3868 rtwdev = hw->priv; 3869 3870 rtw89_pci_free_irq(rtwdev, pdev); 3871 rtw89_core_napi_deinit(rtwdev); 3872 rtw89_core_unregister(rtwdev); 3873 rtw89_pci_clear_resource(rtwdev, pdev); 3874 rtw89_pci_declaim_device(rtwdev, pdev); 3875 rtw89_core_deinit(rtwdev); 3876 rtw89_free_ieee80211_hw(rtwdev); 3877 } 3878 EXPORT_SYMBOL(rtw89_pci_remove); 3879 3880 MODULE_AUTHOR("Realtek Corporation"); 3881 MODULE_DESCRIPTION("Realtek 802.11ax wireless PCI driver"); 3882 MODULE_LICENSE("Dual BSD/GPL"); 3883
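
/*
 * Usage sketch (an assumption for illustration, not part of this file):
 * rtw89_pci_probe(), rtw89_pci_remove() and rtw89_pm_ops are exported so a
 * thin, chip-specific module can plug them into its own struct pci_driver.
 * The identifiers rtw89_8852xe_id_table, rtw89_8852xe_info and the 0xXXXX
 * device ID below are hypothetical placeholders.
 *
 *	static const struct pci_device_id rtw89_8852xe_id_table[] = {
 *		{
 *			PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xXXXX),
 *			.driver_data = (kernel_ulong_t)&rtw89_8852xe_info,
 *		},
 *		{},
 *	};
 *	MODULE_DEVICE_TABLE(pci, rtw89_8852xe_id_table);
 *
 *	static struct pci_driver rtw89_8852xe_driver = {
 *		.name		= "rtw89_8852xe",
 *		.id_table	= rtw89_8852xe_id_table,
 *		.probe		= rtw89_pci_probe,
 *		.remove		= rtw89_pci_remove,
 *		.driver.pm	= &rtw89_pm_ops,
 *	};
 *	module_pci_driver(rtw89_8852xe_driver);
 */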