// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2020 Realtek Corporation
 */

#include <linux/pci.h>

#include "mac.h"
#include "pci.h"
#include "reg.h"
#include "ser.h"

static bool rtw89_pci_disable_clkreq;
static bool rtw89_pci_disable_aspm_l1;
static bool rtw89_pci_disable_l1ss;
module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644);
module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644);
module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644);
MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");

static int rtw89_pci_rst_bdram_pcie(struct rtw89_dev *rtwdev)
{
	u32 val;
	int ret;

	rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1,
		      rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | B_AX_RST_BDRAM);

	ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM),
				       1, RTW89_PCI_POLL_BDRAM_RST_CNT, false,
				       rtwdev, R_AX_PCIE_INIT_CFG1);

	if (ret)
		return -EBUSY;

	return 0;
}

static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev,
				struct rtw89_pci_dma_ring *bd_ring,
				u32 cur_idx, bool tx)
{
	u32 cnt, cur_rp, wp, rp, len;

	rp = bd_ring->rp;
	wp = bd_ring->wp;
	len = bd_ring->len;

	cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
	if (tx)
		cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp);
	else
		cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp);

	bd_ring->rp = cur_rp;

	return cnt;
}

static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 addr_idx = bd_ring->addr.idx;
	u32 cnt, idx;

	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true);

	return cnt;
}

static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
				    struct rtw89_pci *rtwpci,
				    u32 cnt, bool release_all)
{
	struct rtw89_pci_tx_data *tx_data;
	struct sk_buff *skb;
	u32 qlen;

	while (cnt--) {
		skb = skb_dequeue(&rtwpci->h2c_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to pre-release fwcmd\n");
			return;
		}
		skb_queue_tail(&rtwpci->h2c_release_queue, skb);
	}

	qlen = skb_queue_len(&rtwpci->h2c_release_queue);
	if (!release_all)
		qlen = qlen > RTW89_PCI_MULTITAG ?
		       qlen - RTW89_PCI_MULTITAG : 0;

	while (qlen--) {
		skb = skb_dequeue(&rtwpci->h2c_release_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to release fwcmd\n");
			return;
		}
		tx_data = RTW89_PCI_TX_SKB_CB(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev,
				       struct rtw89_pci *rtwpci)
{
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
	u32 cnt;

	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
	if (!cnt)
		return;
	rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false);
}

static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_rx_ring *rx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 addr_idx = bd_ring->addr.idx;
	u32 cnt, idx;

	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false);

	return cnt;
}

static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info;
	dma_addr_t dma;

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	dma = rx_info->dma;
	dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
}

static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
					  struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info;
	dma_addr_t dma;

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	dma = rx_info->dma;
	dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
}

static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
				      struct sk_buff *skb)
{
	struct rtw89_pci_rxbd_info *rxbd_info;
	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);

	rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data;
	rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS);
	rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS);
	rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE);
	rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG);

	return 0;
}

static void rtw89_pci_ctrl_txdma_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
	const struct rtw89_reg_def *dma_stop2 = &info->dma_stop2;

	if (enable) {
		rtw89_write32_clr(rtwdev, dma_stop1->addr, dma_stop1->mask);
		if (dma_stop2->addr)
			rtw89_write32_clr(rtwdev, dma_stop2->addr, dma_stop2->mask);
	} else {
		rtw89_write32_set(rtwdev, dma_stop1->addr, dma_stop1->mask);
		if (dma_stop2->addr)
			rtw89_write32_set(rtwdev, dma_stop2->addr, dma_stop2->mask);
	}
}

static void rtw89_pci_ctrl_txdma_fw_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;

	if (enable)
		rtw89_write32_clr(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
	else
		rtw89_write32_set(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
}

static bool
rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
		      struct sk_buff *new,
		      const struct sk_buff *skb, u32 offset,
		      const struct rtw89_pci_rx_info *rx_info,
		      const struct
		      rtw89_rx_desc_info *desc_info)
{
	u32 copy_len = rx_info->len - offset;

	if (unlikely(skb_tailroom(new) < copy_len)) {
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n",
			    rx_info->len, desc_info->pkt_size, offset, fs, ls);
		rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ",
			       skb->data, rx_info->len);
		/* length of a single segment skb is desc_info->pkt_size */
		if (fs && ls) {
			copy_len = desc_info->pkt_size;
		} else {
			rtw89_info(rtwdev, "drop rx data due to invalid length\n");
			return false;
		}
	}

	skb_put_data(new, skb->data + offset, copy_len);

	return true;
}

static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
				       struct rtw89_pci_rx_ring *rx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	struct rtw89_pci_rx_info *rx_info;
	struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc;
	struct sk_buff *new = rx_ring->diliver_skb;
	struct sk_buff *skb;
	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
	u32 offset;
	u32 cnt = 1;
	bool fs, ls;
	int ret;

	skb = rx_ring->buf[bd_ring->wp];
	rtw89_pci_sync_skb_for_cpu(rtwdev, skb);

	ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
	if (ret) {
		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
			  bd_ring->wp, ret);
		goto err_sync_device;
	}

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	fs = rx_info->fs;
	ls = rx_info->ls;

	if (fs) {
		if (new) {
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
				    "skb should not be ready before first segment start\n");
			goto err_sync_device;
		}
		if (desc_info->ready) {
			rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n");
			goto err_sync_device;
		}

		rtw89_core_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size);

		new = rtw89_alloc_skb_for_rx(rtwdev, desc_info->pkt_size);
		if (!new)
			goto err_sync_device;

		rx_ring->diliver_skb = new;

		/* first segment has RX desc */
		offset = desc_info->offset;
		offset += desc_info->long_rxdesc ?
			  sizeof(struct rtw89_rxdesc_long) :
			  sizeof(struct rtw89_rxdesc_short);
	} else {
		offset = sizeof(struct rtw89_pci_rxbd_info);
		if (!new) {
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n");
			goto err_sync_device;
		}
	}
	if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info))
		goto err_sync_device;
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);

	if (!desc_info->ready) {
		rtw89_warn(rtwdev, "no rx desc information\n");
		goto err_free_resource;
	}
	if (ls) {
		rtw89_core_rx(rtwdev, desc_info, new);
		rx_ring->diliver_skb = NULL;
		desc_info->ready = false;
	}

	return cnt;

err_sync_device:
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
err_free_resource:
	if (new)
		dev_kfree_skb_any(new);
	rx_ring->diliver_skb = NULL;
	desc_info->ready = false;

	return cnt;
}

static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
				   struct rtw89_pci_rx_ring *rx_ring,
				   u32 cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 rx_cnt;

	while (cnt && rtwdev->napi_budget_countdown > 0) {
		rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring);
		if (!rx_cnt) {
			rtw89_err(rtwdev, "failed to deliver RXBD skb\n");

			/* skip the remaining RXBD bufs */
			rtw89_pci_rxbd_increase(rx_ring, cnt);
			break;
		}

		cnt -= rx_cnt;
	}

	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}

static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci, int budget)
{
	struct rtw89_pci_rx_ring *rx_ring;
	int countdown = rtwdev->napi_budget_countdown;
	u32 cnt;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];

	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
	if (!cnt)
		return 0;

	cnt = min_t(u32, budget, cnt);

	rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt);

	/* In case of flushing pending SKBs, the countdown may exceed the budget. */
	if (rtwdev->napi_budget_countdown <= 0)
		return budget;

	return budget - countdown;
}

static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
				struct rtw89_pci_tx_ring *tx_ring,
				struct sk_buff *skb, u8 tx_status)
{
	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
	struct ieee80211_tx_info *info;

	rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE);

	info = IEEE80211_SKB_CB(skb);
	ieee80211_tx_info_clear_status(info);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
	if (tx_status == RTW89_TX_DONE) {
		info->flags |= IEEE80211_TX_STAT_ACK;
		tx_ring->tx_acked++;
	} else {
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
			rtw89_debug(rtwdev, RTW89_DBG_FW,
				    "failed to TX of status %x\n", tx_status);
		switch (tx_status) {
		case RTW89_TX_RETRY_LIMIT:
			tx_ring->tx_retry_lmt++;
			break;
		case RTW89_TX_LIFE_TIME:
			tx_ring->tx_life_time++;
			break;
		case RTW89_TX_MACID_DROP:
			tx_ring->tx_mac_id_drop++;
			break;
		default:
			rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status);
			break;
		}
	}

	ieee80211_tx_status_ni(rtwdev->hw, skb);
}

static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd *txwd;
	u32 cnt;

	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
	while (cnt--) {
		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
		if (!txwd) {
			rtw89_warn(rtwdev, "No busy txwd pages available\n");
			break;
		}

		list_del_init(&txwd->list);

		/* this skb has been freed by RPP */
		if (skb_queue_len(&txwd->queue) == 0)
			rtw89_pci_enqueue_txwd(tx_ring, txwd);
	}
}

static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev,
					struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	int i;

	for (i = 0; i < wd_ring->page_num; i++) {
		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
		if (!txwd)
			break;

		list_del_init(&txwd->list);
	}
}

static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
				       struct rtw89_pci_tx_ring *tx_ring,
				       struct rtw89_pci_tx_wd *txwd, u16 seq,
				       u8 tx_status)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_data *tx_data;
	struct sk_buff *skb, *tmp;
	u8 txch = tx_ring->txch;

	if (!list_empty(&txwd->list)) {
		rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
		/* In low power mode, RPP can be received before the TX BD is
		 * updated. In normal mode, this should not happen, so give it
		 * a warning.
		 */
		if (!rtwpci->low_power && !list_empty(&txwd->list))
			rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
				   txch, seq);
	}

	skb_queue_walk_safe(&txwd->queue, skb, tmp) {
		skb_unlink(skb, &txwd->queue);

		tx_data = RTW89_PCI_TX_SKB_CB(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);

		rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status);
	}

	if (list_empty(&txwd->list))
		rtw89_pci_enqueue_txwd(tx_ring, txwd);
}

static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
				  struct rtw89_pci_rpp_fmt *rpp)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_tx_wd_ring *wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	u16 seq;
	u8 qsel, tx_status, txch;

	seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
	qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
	tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
	txch = rtw89_core_get_ch_dma(rtwdev, qsel);

	if (txch == RTW89_TXCH_CH12) {
		rtw89_warn(rtwdev, "should no fwcmd release report\n");
		return;
	}

	tx_ring = &rtwpci->tx_rings[txch];
	wd_ring = &tx_ring->wd_ring;
	txwd = &wd_ring->pages[seq];

	rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status);
}

static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
						struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	int i;

	for (i = 0; i < wd_ring->page_num; i++) {
		txwd = &wd_ring->pages[i];

		if (!list_empty(&txwd->list))
			continue;

		rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP);
	}
}

static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
				     struct rtw89_pci_rx_ring *rx_ring,
				     u32 max_cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	struct rtw89_pci_rx_info *rx_info;
	struct rtw89_pci_rpp_fmt *rpp;
	struct rtw89_rx_desc_info desc_info = {};
	struct sk_buff *skb;
	u32 cnt = 0;
	u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt);
	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
	u32 offset;
	int ret;

	skb = rx_ring->buf[bd_ring->wp];
	rtw89_pci_sync_skb_for_cpu(rtwdev, skb);

	ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
	if (ret) {
		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
			  bd_ring->wp, ret);
		goto err_sync_device;
	}

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	if (!rx_info->fs || !rx_info->ls) {
		rtw89_err(rtwdev, "cannot process RP frame not set FS/LS\n");
		return cnt;
	}

	rtw89_core_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size);

	/* first segment has RX desc */
	offset = desc_info.offset;
	offset += desc_info.long_rxdesc ?
		  sizeof(struct rtw89_rxdesc_long) :
		  sizeof(struct rtw89_rxdesc_short);
	for (; offset + rpp_size <= rx_info->len; offset += rpp_size) {
		rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset);
		rtw89_pci_release_rpp(rtwdev, rpp);
	}

	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
	cnt++;

	return cnt;

err_sync_device:
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	return 0;
}

static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_rx_ring *rx_ring,
				 u32 cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 release_cnt;

	while (cnt) {
		release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt);
		if (!release_cnt) {
			rtw89_err(rtwdev, "failed to release TX skbs\n");

			/* skip the remaining RXBD bufs */
			rtw89_pci_rxbd_increase(rx_ring, cnt);
			break;
		}

		cnt -= release_cnt;
	}

	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}

static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci, int budget)
{
	struct rtw89_pci_rx_ring *rx_ring;
	u32 cnt;
	int work_done;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

	spin_lock_bh(&rtwpci->trx_lock);

	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
	if (cnt == 0)
		goto out_unlock;

	rtw89_pci_release_tx(rtwdev, rx_ring, cnt);

out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);

	/* always release all RPQ */
	work_done = min_t(int, cnt, budget);
	rtwdev->napi_budget_countdown -= work_done;

	return work_done;
}

static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
				      struct rtw89_pci *rtwpci)
{
	struct rtw89_pci_rx_ring *rx_ring;
	struct rtw89_pci_dma_ring *bd_ring;
	u32 reg_idx;
	u16 hw_idx, hw_idx_next, host_idx;
	int i;

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		bd_ring = &rx_ring->bd_ring;

		reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
		hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
		host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
		hw_idx_next = (hw_idx + 1) % bd_ring->len;

		if (hw_idx_next == host_idx)
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i);

		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "%d RXD unavailable, idx=0x%08x, len=%d\n",
			    i, reg_idx, bd_ring->len);
	}
}

void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
			       struct rtw89_pci *rtwpci,
			       struct rtw89_pci_isrs *isrs)
{
	isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs;
	isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0];
	isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1];

	rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs);

void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci,
				  struct rtw89_pci_isrs *isrs)
{
	isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs;
	isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ?
			      rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0;
	isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ?
			rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0;
	isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ?
			rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0;

	if (isrs->halt_c2h_isrs)
		rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	if (isrs->isrs[0])
		rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]);
	if (isrs->isrs[1])
		rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1);

static void rtw89_pci_clear_isr0(struct rtw89_dev *rtwdev, u32 isr00)
{
	/* write 1 clear */
	rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isr00);
}

void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr);

void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr);

void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs);
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr_v1);

void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v1);

static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	int budget = NAPI_POLL_WEIGHT;

	/* Prevent RXQ from getting stuck due to running out of budget. */
	rtwdev->napi_budget_countdown = budget;

	rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget);
	rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget);
}

static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_isrs isrs;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	if (unlikely(isrs.isrs[0] & B_AX_RDU_INT))
		rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);

	if (unlikely(isrs.halt_c2h_isrs & B_AX_HALT_C2H_INT_EN))
		rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));

	if (unlikely(isrs.halt_c2h_isrs & B_AX_WDT_TIMEOUT_INT_EN))
		rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);

	if (unlikely(rtwpci->under_recovery))
		goto enable_intr;

	if (unlikely(rtwpci->low_power)) {
		rtw89_pci_low_power_interrupt_handler(rtwdev);
		goto enable_intr;
	}

	if (likely(rtwpci->running)) {
		local_bh_disable();
		napi_schedule(&rtwdev->napi);
		local_bh_enable();
	}

	return IRQ_HANDLED;

enable_intr:
	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	if (likely(rtwpci->running))
		rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;
	irqreturn_t irqret = IRQ_WAKE_THREAD;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);

	/* If an interrupt event is already in flight, it can still trigger an
	 * interrupt even after pci_stop() has turned off the IMR.
	 */
	if (unlikely(!rtwpci->running)) {
		irqret = IRQ_HANDLED;
		goto exit;
	}

	rtw89_chip_disable_intr(rtwdev, rtwpci);
exit:
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	return irqret;
}

#define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
	[RTW89_TXCH_##txch] = { \
		.num = R_AX_##txch##_TXBD_NUM ##v, \
		.idx = R_AX_##txch##_TXBD_IDX ##v, \
		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_TXCHADDRS(info, txch, v...) \
	[RTW89_TXCH_##txch] = { \
		.num = R_AX_##txch##_TXBD_NUM, \
		.idx = R_AX_##txch##_TXBD_IDX, \
		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_RXCHADDRS(info, rxch, v...) \
	[RTW89_RXCH_##rxch] = { \
		.num = R_AX_##rxch##_RXBD_NUM ##v, \
		.idx = R_AX_##rxch##_RXBD_IDX ##v, \
		.desa_l = R_AX_##rxch##_RXBD_DESA_L ##v, \
		.desa_h = R_AX_##rxch##_RXBD_DESA_H ##v, \
	}

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
	.tx = {
		DEF_TXCHADDRS(info, ACH0),
		DEF_TXCHADDRS(info, ACH1),
		DEF_TXCHADDRS(info, ACH2),
		DEF_TXCHADDRS(info, ACH3),
		DEF_TXCHADDRS(info, ACH4),
		DEF_TXCHADDRS(info, ACH5),
		DEF_TXCHADDRS(info, ACH6),
		DEF_TXCHADDRS(info, ACH7),
		DEF_TXCHADDRS(info, CH8),
		DEF_TXCHADDRS(info, CH9),
		DEF_TXCHADDRS_TYPE1(info, CH10),
		DEF_TXCHADDRS_TYPE1(info, CH11),
		DEF_TXCHADDRS(info, CH12),
	},
	.rx = {
		DEF_RXCHADDRS(info, RXQ),
		DEF_RXCHADDRS(info, RPQ),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
	.tx = {
		DEF_TXCHADDRS(info, ACH0, _V1),
		DEF_TXCHADDRS(info, ACH1, _V1),
		DEF_TXCHADDRS(info, ACH2, _V1),
		DEF_TXCHADDRS(info, ACH3, _V1),
		DEF_TXCHADDRS(info, ACH4, _V1),
		DEF_TXCHADDRS(info, ACH5, _V1),
		DEF_TXCHADDRS(info, ACH6, _V1),
		DEF_TXCHADDRS(info, ACH7, _V1),
		DEF_TXCHADDRS(info, CH8, _V1),
		DEF_TXCHADDRS(info, CH9, _V1),
		DEF_TXCHADDRS_TYPE1(info, CH10, _V1),
		DEF_TXCHADDRS_TYPE1(info, CH11, _V1),
		DEF_TXCHADDRS(info, CH12, _V1),
	},
	.rx = {
		DEF_RXCHADDRS(info, RXQ, _V1),
		DEF_RXCHADDRS(info, RPQ, _V1),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);

#undef DEF_TXCHADDRS_TYPE1
#undef DEF_TXCHADDRS
#undef DEF_RXCHADDRS

static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
				    enum rtw89_tx_channel txch,
				    const struct rtw89_pci_ch_dma_addr **addr)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (txch >= RTW89_TXCH_NUM)
		return -EINVAL;

	*addr = &info->dma_addr_set->tx[txch];

	return 0;
}

static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev,
				    enum rtw89_rx_channel rxch,
				    const struct rtw89_pci_ch_dma_addr **addr)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (rxch >= RTW89_RXCH_NUM)
		return -EINVAL;

	*addr = &info->dma_addr_set->rx[rxch];

	return 0;
}

static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;

	/* one descriptor is reserved to tell a full ring from an empty one */
	if (bd_ring->rp > bd_ring->wp)
		return bd_ring->rp - bd_ring->wp - 1;

	return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1;
}

static
u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
	u32 cnt;

	spin_lock_bh(&rtwpci->trx_lock);
	rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	spin_unlock_bh(&rtwpci->trx_lock);

	return cnt;
}

static
u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
						   u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	u32 cnt;

	spin_lock_bh(&rtwpci->trx_lock);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	cnt = min(cnt, wd_ring->curr_num);
	spin_unlock_bh(&rtwpci->trx_lock);

	return cnt;
}

static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
						     u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 bd_cnt, wd_cnt, min_cnt = 0;
	struct rtw89_pci_rx_ring *rx_ring;
	enum rtw89_debug_mask debug_mask;
	u32 cnt;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

	spin_lock_bh(&rtwpci->trx_lock);
	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	wd_cnt = wd_ring->curr_num;

	if (wd_cnt == 0 || bd_cnt == 0) {
		cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
		if (cnt)
			rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
		else if (wd_cnt == 0)
			goto out_unlock;

		bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
		if (bd_cnt == 0)
			rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
	}

	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	wd_cnt = wd_ring->curr_num;
	min_cnt = min(bd_cnt, wd_cnt);
	if (min_cnt == 0) {
		/* This message can show up frequently in low power mode or
		 * under high traffic with 8852B, and we have recognized it as
		 * normal behavior, so print it with mask RTW89_DBG_TXRX in
		 * these situations.
		 */
		if (rtwpci->low_power || chip->chip_id == RTL8852B)
			debug_mask = RTW89_DBG_TXRX;
		else
			debug_mask = RTW89_DBG_UNEXP;

		rtw89_debug(rtwdev, debug_mask,
			    "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n",
			    wd_cnt, bd_cnt);
	}

out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);

	return min_cnt;
}

static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
						   u8 txch)
{
	if (rtwdev->hci.paused)
		return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch);

	if (txch == RTW89_TXCH_CH12)
		return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev);

	return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch);
}

static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 host_idx, addr;

	spin_lock_bh(&rtwpci->trx_lock);

	addr = bd_ring->addr.idx;
	host_idx = bd_ring->wp;
	rtw89_write16(rtwdev, addr, host_idx);

	spin_unlock_bh(&rtwpci->trx_lock);
}

static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring,
					int n_txbd)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 host_idx, len;

	len = bd_ring->len;
	host_idx = bd_ring->wp + n_txbd;
	host_idx = host_idx < len ?
		   host_idx : host_idx - len;

	bd_ring->wp = host_idx;
}

static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];

	if (rtwdev->hci.paused) {
		set_bit(txch, rtwpci->kick_map);
		return;
	}

	__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
}

static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	int txch;

	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
		if (!test_and_clear_bit(txch, rtwpci->kick_map))
			continue;

		tx_ring = &rtwpci->tx_rings[txch];
		__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
	}
}

static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 cur_idx, cur_rp;
	u8 i;

	/* Because the time taken by the I/O is somewhat dynamic, it's hard to
	 * define a reasonable fixed total timeout for the read_poll_timeout*
	 * helpers. Instead, ensure a reasonable number of polling iterations
	 * by using a plain for loop with udelay here.
	 */
	for (i = 0; i < 60; i++) {
		cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
		cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
		if (cur_rp == bd_ring->wp)
			return;

		udelay(1);
	}

	if (!drop)
		rtw89_info(rtwdev, "timed out to flush pci txch: %d\n", txch);
}

static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs,
					bool drop)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u8 i;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		/* It may be unnecessary to flush FWCMD queue. */
		if (i == RTW89_TXCH_CH12)
			continue;
		if (info->tx_dma_ch_mask & BIT(i))
			continue;

		if (txchs & BIT(i))
			__pci_flush_txch(rtwdev, i, drop);
	}
}

static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
				       bool drop)
{
	__rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop);
}

u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
			       void *txaddr_info_addr, u32 total_len,
			       dma_addr_t dma, u8 *add_info_nr)
{
	struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr;

	txaddr_info->length = cpu_to_le16(total_len);
	txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS |
					  RTW89_PCI_ADDR_NUM(1));
	txaddr_info->dma = cpu_to_le32(dma);

	*add_info_nr = 1;

	return sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info);

u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
				  void *txaddr_info_addr, u32 total_len,
				  dma_addr_t dma, u8 *add_info_nr)
{
	struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr;
	u32 remain = total_len;
	u32 len;
	u16 length_option;
	int n;

	for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) {
		len = remain >= TXADDR_INFO_LENTHG_V1_MAX ?
		      TXADDR_INFO_LENTHG_V1_MAX : remain;
		remain -= len;

		length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) |
				FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) |
				FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0);
		txaddr_info->length_opt = cpu_to_le16(length_option);
		txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma));
		txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma));

		dma += len;
		txaddr_info++;
	}

	WARN_ONCE(remain, "length overflow remain=%u total_len=%u",
		  remain, total_len);

	*add_info_nr = n;

	return n * sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1);

static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring,
				 struct rtw89_pci_tx_wd *txwd,
				 struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	struct rtw89_txwd_info *txwd_info;
	struct rtw89_pci_tx_wp_info *txwp_info;
	void *txaddr_info_addr;
	struct pci_dev *pdev = rtwpci->pdev;
	struct sk_buff *skb = tx_req->skb;
	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
	bool en_wd_info = desc_info->en_wd_info;
	u32 txwd_len;
	u32 txwp_len;
	u32 txaddr_info_len;
	dma_addr_t dma;
	int ret;

	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		rtw89_err(rtwdev, "failed to map skb dma data\n");
		ret = -EBUSY;
		goto err;
	}

	tx_data->dma = dma;
	rcu_assign_pointer(skb_data->wait, NULL);

	txwp_len = sizeof(*txwp_info);
	txwd_len = chip->txwd_body_size;
	txwd_len += en_wd_info ?
		    sizeof(*txwd_info) : 0;

	txwp_info = txwd->vaddr + txwd_len;
	txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID);
	txwp_info->seq1 = 0;
	txwp_info->seq2 = 0;
	txwp_info->seq3 = 0;

	tx_ring->tx_cnt++;
	txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len;
	txaddr_info_len =
		rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len,
					    dma, &desc_info->addr_info_nr);

	txwd->len = txwd_len + txwp_len + txaddr_info_len;

	rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr);

	skb_queue_tail(&txwd->queue, skb);

	return 0;

err:
	return ret;
}

static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
				  struct rtw89_pci_tx_ring *tx_ring,
				  struct rtw89_pci_tx_bd_32 *txbd,
				  struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	void *txdesc;
	int txdesc_size = chip->h2c_desc_size;
	struct pci_dev *pdev = rtwpci->pdev;
	struct sk_buff *skb = tx_req->skb;
	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
	dma_addr_t dma;

	txdesc = skb_push(skb, txdesc_size);
	memset(txdesc, 0, txdesc_size);
	rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);

	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		rtw89_err(rtwdev, "failed to map fwcmd dma data\n");
		return -EBUSY;
	}

	tx_data->dma = dma;
	txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
	txbd->length = cpu_to_le16(skb->len);
	txbd->dma = cpu_to_le32(tx_data->dma);
	skb_queue_tail(&rtwpci->h2c_queue, skb);

	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);

	return 0;
}

static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring,
				 struct rtw89_pci_tx_bd_32 *txbd,
				 struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci_tx_wd *txwd;
	int ret;

	/* FWCMD queue doesn't have wd pages. Instead, it submits the CMD
	 * buffer with WD BODY only. So here we don't need to check the free
	 * pages of the wd ring.
1302 */ 1303 if (tx_ring->txch == RTW89_TXCH_CH12) 1304 return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req); 1305 1306 txwd = rtw89_pci_dequeue_txwd(tx_ring); 1307 if (!txwd) { 1308 rtw89_err(rtwdev, "no available TXWD\n"); 1309 ret = -ENOSPC; 1310 goto err; 1311 } 1312 1313 ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req); 1314 if (ret) { 1315 rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq); 1316 goto err_enqueue_wd; 1317 } 1318 1319 list_add_tail(&txwd->list, &tx_ring->busy_pages); 1320 1321 txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS); 1322 txbd->length = cpu_to_le16(txwd->len); 1323 txbd->dma = cpu_to_le32(txwd->paddr); 1324 1325 rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1); 1326 1327 return 0; 1328 1329 err_enqueue_wd: 1330 rtw89_pci_enqueue_txwd(tx_ring, txwd); 1331 err: 1332 return ret; 1333 } 1334 1335 static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req, 1336 u8 txch) 1337 { 1338 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1339 struct rtw89_pci_tx_ring *tx_ring; 1340 struct rtw89_pci_tx_bd_32 *txbd; 1341 u32 n_avail_txbd; 1342 int ret = 0; 1343 1344 /* check the tx type and dma channel for fw cmd queue */ 1345 if ((txch == RTW89_TXCH_CH12 || 1346 tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) && 1347 (txch != RTW89_TXCH_CH12 || 1348 tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) { 1349 rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n"); 1350 return -EINVAL; 1351 } 1352 1353 tx_ring = &rtwpci->tx_rings[txch]; 1354 spin_lock_bh(&rtwpci->trx_lock); 1355 1356 n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring); 1357 if (n_avail_txbd == 0) { 1358 rtw89_err(rtwdev, "no available TXBD\n"); 1359 ret = -ENOSPC; 1360 goto err_unlock; 1361 } 1362 1363 txbd = rtw89_pci_get_next_txbd(tx_ring); 1364 ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req); 1365 if (ret) { 1366 rtw89_err(rtwdev, "failed to submit TXBD\n"); 1367 goto err_unlock; 1368 } 1369 1370 spin_unlock_bh(&rtwpci->trx_lock); 1371 return 0; 1372 1373 err_unlock: 1374 spin_unlock_bh(&rtwpci->trx_lock); 1375 return ret; 1376 } 1377 1378 static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) 1379 { 1380 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 1381 int ret; 1382 1383 ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma); 1384 if (ret) { 1385 rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma); 1386 return ret; 1387 } 1388 1389 return 0; 1390 } 1391 1392 const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM] = { 1393 [RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2}, 1394 [RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2}, 1395 [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2}, 1396 [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2}, 1397 [RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2}, 1398 [RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2}, 1399 [RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2}, 1400 [RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2}, 1401 [RTW89_TXCH_CH8] = {.start_idx = 40, .max_num = 5, .min_num = 1}, 1402 [RTW89_TXCH_CH9] = {.start_idx = 45, .max_num = 5, .min_num = 1}, 1403 [RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1}, 1404 [RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1}, 1405 [RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1}, 1406 }; 
EXPORT_SYMBOL(rtw89_bd_ram_table_dual);

const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = {
	[RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_CH8] = {.start_idx = 20, .max_num = 4, .min_num = 1},
	[RTW89_TXCH_CH9] = {.start_idx = 24, .max_num = 4, .min_num = 1},
	[RTW89_TXCH_CH12] = {.start_idx = 28, .max_num = 4, .min_num = 1},
};
EXPORT_SYMBOL(rtw89_bd_ram_table_single);

static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_pci_bd_ram *bd_ram_table = *info->bd_ram_table;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_rx_ring *rx_ring;
	struct rtw89_pci_dma_ring *bd_ring;
	const struct rtw89_pci_bd_ram *bd_ram;
	u32 addr_num;
	u32 addr_bdram;
	u32 addr_desa_l;
	u32 val32;
	int i;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		if (info->tx_dma_ch_mask & BIT(i))
			continue;

		tx_ring = &rtwpci->tx_rings[i];
		bd_ring = &tx_ring->bd_ring;
		bd_ram = &bd_ram_table[i];
		addr_num = bd_ring->addr.num;
		addr_bdram = bd_ring->addr.bdram;
		addr_desa_l = bd_ring->addr.desa_l;
		bd_ring->wp = 0;
		bd_ring->rp = 0;

		val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
			FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
			FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num);

		rtw89_write16(rtwdev, addr_num, bd_ring->len);
		rtw89_write32(rtwdev, addr_bdram, val32);
		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
	}

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		bd_ring = &rx_ring->bd_ring;
		addr_num = bd_ring->addr.num;
		addr_desa_l = bd_ring->addr.desa_l;
		bd_ring->wp = 0;
		bd_ring->rp = 0;
		rx_ring->diliver_skb = NULL;
		rx_ring->diliver_desc.ready = false;

		rtw89_write16(rtwdev, addr_num, bd_ring->len);
		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
	}
}

static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
				      struct rtw89_pci_tx_ring *tx_ring)
{
	rtw89_pci_release_busy_txwd(rtwdev, tx_ring);
	rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring);
}

static void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	int txch;

	rtw89_pci_reset_trx_rings(rtwdev);

	spin_lock_bh(&rtwpci->trx_lock);
	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
		if (info->tx_dma_ch_mask & BIT(txch))
			continue;
		if (txch == RTW89_TXCH_CH12) {
			rtw89_pci_release_fwcmd(rtwdev, rtwpci,
						skb_queue_len(&rtwpci->h2c_queue), true);
			continue;
		}
		rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]);
	}
	spin_unlock_bh(&rtwpci->trx_lock);
}

static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtwpci->running = true;
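	/* Editorial comment (not in the original source): the IRQ handler and
	 * its threaded handler test rtwpci->running under irq_lock, so the
	 * flag is flipped before (re-)enabling interrupts while still holding
	 * the lock, keeping the two paths consistent.
	 */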
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtwpci->running = false;
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
{
	rtw89_core_napi_start(rtwdev);
	rtw89_pci_enable_intr_lock(rtwdev);

	return 0;
}

static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	rtw89_pci_disable_intr_lock(rtwdev);
	synchronize_irq(pdev->irq);
	rtw89_core_napi_stop(rtwdev);
}

static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	if (pause) {
		rtw89_pci_disable_intr_lock(rtwdev);
		synchronize_irq(pdev->irq);
		if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
			napi_synchronize(&rtwdev->napi);
	} else {
		rtw89_pci_enable_intr_lock(rtwdev);
		rtw89_pci_tx_kick_off_pending(rtwdev);
	}
}

static
void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power;
	const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_rx_ring *rx_ring;
	int i;

	if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n"))
		return;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		tx_ring->bd_ring.addr.idx = low_power ?
					    bd_idx_addr->tx_bd_addrs[i] :
					    dma_addr_set->tx[i].idx;
	}

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		rx_ring->bd_ring.addr.idx = low_power ?
					    bd_idx_addr->rx_bd_addrs[i] :
					    dma_addr_set->rx[i].idx;
	}
}

static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
{
	enum rtw89_pci_intr_mask_cfg cfg;

	WARN(!rtwdev->hci.paused, "HCI isn't paused\n");

	cfg = low_power ?
	      RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL;
	rtw89_chip_config_intr_mask(rtwdev, cfg);
	rtw89_pci_switch_bd_idx_addr(rtwdev, low_power);
}

static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data);

static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	u32 val = readl(rtwpci->mmap + addr);
	int count;

	for (count = 0; ; count++) {
		if (val != RTW89_R32_DEAD)
			return val;
		if (count >= MAC_REG_POOL_COUNT) {
			rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val);
			return RTW89_R32_DEAD;
		}
		rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN);
		val = readl(rtwpci->mmap + addr);
	}

	return val;
}

static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	u32 addr32, val32, shift;

	if (!ACCESS_CMAC(addr))
		return readb(rtwpci->mmap + addr);

	addr32 = addr & ~0x3;
	shift = (addr & 0x3) * 8;
	val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
	return val32 >> shift;
}

static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	u32 addr32, val32, shift;

	if (!ACCESS_CMAC(addr))
		return readw(rtwpci->mmap + addr);

	addr32 = addr & ~0x3;
	shift = (addr & 0x3) * 8;
	val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
	return val32 >> shift;
}

static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	if (!ACCESS_CMAC(addr))
		return readl(rtwpci->mmap + addr);

	return rtw89_pci_ops_read32_cmac(rtwdev, addr);
}

static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	writeb(data, rtwpci->mmap + addr);
}

static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	writew(data, rtwpci->mmap + addr);
}

static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	writel(data, rtwpci->mmap + addr);
}

static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (enable)
		rtw89_write32_set(rtwdev, info->init_cfg_reg,
				  info->rxhci_en_bit | info->txhci_en_bit);
	else
		rtw89_write32_clr(rtwdev, info->init_cfg_reg,
				  info->rxhci_en_bit | info->txhci_en_bit);
}

static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	u32 reg, mask;

	if (chip_id == RTL8852C) {
		reg = R_AX_HAXI_INIT_CFG1;
		mask = B_AX_STOP_AXI_MST;
	} else {
		reg = R_AX_PCIE_DMA_STOP1;
		mask = B_AX_STOP_PCIEIO;
	}

	if (enable)
		rtw89_write32_clr(rtwdev, reg, mask);
	else
		rtw89_write32_set(rtwdev, reg, mask);
}

static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
{
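	/* Editorial comment (not in the original source): toggle the PCIe I/O
	 * DMA stop bit and the TX/RX HCI DMA enables together via the two
	 * helpers above.
	 */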
	rtw89_pci_ctrl_dma_io(rtwdev, enable);
	rtw89_pci_ctrl_dma_trx(rtwdev, enable);
}

static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit)
{
	u16 val;

	rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F);

	val = rtw89_read16(rtwdev, R_AX_MDIO_CFG);
	switch (speed) {
	case PCIE_PHY_GEN1:
		if (addr < 0x20)
			val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK);
		else
			val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK);
		break;
	case PCIE_PHY_GEN2:
		if (addr < 0x20)
			val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK);
		else
			val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK);
		break;
	default:
		rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed);
		return -EINVAL;
	}
	rtw89_write16(rtwdev, R_AX_MDIO_CFG, val);
	rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit);

	return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000,
				 false, rtwdev, R_AX_MDIO_CFG);
}

static int
rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val)
{
	int ret;

	ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret);
		return ret;
	}
	*val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA);

	return 0;
}

static int
rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed)
{
	int ret;

	rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data);
	ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret);
		return ret;
	}

	return 0;
}

static int
rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed)
{
	u32 shift;
	int ret;
	u16 val;

	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
	if (ret)
		return ret;

	shift = __ffs(mask);
	val &= ~mask;
	val |= ((data << shift) & mask);

	ret = rtw89_write16_mdio(rtwdev, addr, val, speed);
	if (ret)
		return ret;

	return 0;
}

static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
{
	int ret;
	u16 val;

	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed);
	if (ret)
		return ret;

	return 0;
}

static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
{
	int ret;
	u16 val;

	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed);
	if (ret)
		return ret;

	return 0;
}

static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
					u8 data)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	return pci_write_config_byte(pdev, addr, data);
}

static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
				       u8 *value)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	return pci_read_config_byte(pdev, addr, value);
}

static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr,
				     u8 bit)
{
	u8 value;
	int ret;

	ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
	if (ret)
		return ret;

	value |= bit;
	ret = rtw89_pci_write_config_byte(rtwdev, addr, value);

	return ret;
}

static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr,
				     u8 bit)
{
	u8 value;
	int ret;

	ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
	if (ret)
		return ret;

	value &= ~bit;
	ret = rtw89_pci_write_config_byte(rtwdev, addr, value);

	return ret;
}

static int
__get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate)
{
	u16 val, tar;
	int ret;

	/* Enable counter */
	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
				 phy_rate);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN,
				 phy_rate);
	if (ret)
		return ret;

	fsleep(300);

	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
				 phy_rate);
	if (ret)
		return ret;

	tar = tar & 0x0FFF;
	if (tar == 0 || tar == 0x0FFF) {
		rtw89_err(rtwdev, "[ERR]Get target failed.\n");
		return -EINVAL;
	}

	*target = tar;

	return 0;
}

static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	int ret;

	if (chip_id != RTL8852B && chip_id != RTL8851B)
		return 0;

	ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK,
				      PCIE_AUTOK_4, PCIE_PHY_GEN1);
	return ret;
}

static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	enum rtw89_pcie_phy phy_rate;
	u16 val16, mgn_set, div_set, tar;
	u8 val8, bdr_ori;
	bool l1_flag = false;
	int ret = 0;

	if (chip_id != RTL8852B && chip_id != RTL8851B)
		return 0;

	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]pci config read %X\n",
			  RTW89_PCIE_PHY_RATE);
		return ret;
	}

	if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) {
		phy_rate = PCIE_PHY_GEN1;
	} else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) {
		phy_rate = PCIE_PHY_GEN2;
	} else {
		rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not support\n", val8);
		return -EOPNOTSUPP;
	}
	/* Disable L1BD */
	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL);
		return ret;
	}

	if (bdr_ori & RTW89_PCIE_BIT_L1) {
		ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
						  bdr_ori & ~RTW89_PCIE_BIT_L1);
		if (ret) {
			rtw89_err(rtwdev, "[ERR]pci config write %X\n",
				  RTW89_PCIE_L1_CTRL);
			return ret;
		}
		l1_flag = true;
	}

	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1,
phy_rate, &val16); 1977 if (ret) { 1978 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 1979 goto end; 1980 } 1981 1982 if (val16 & B_AX_CALIB_EN) { 1983 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, 1984 val16 & ~B_AX_CALIB_EN, phy_rate); 1985 if (ret) { 1986 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 1987 goto end; 1988 } 1989 } 1990 1991 if (!autook_en) 1992 goto end; 1993 /* Set div */ 1994 ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate); 1995 if (ret) { 1996 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 1997 goto end; 1998 } 1999 2000 /* Obtain div and margin */ 2001 ret = __get_target(rtwdev, &tar, phy_rate); 2002 if (ret) { 2003 rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret); 2004 goto end; 2005 } 2006 2007 mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar; 2008 2009 if (mgn_set >= 128) { 2010 div_set = 0x0003; 2011 mgn_set = 0x000F; 2012 } else if (mgn_set >= 64) { 2013 div_set = 0x0003; 2014 mgn_set >>= 3; 2015 } else if (mgn_set >= 32) { 2016 div_set = 0x0002; 2017 mgn_set >>= 2; 2018 } else if (mgn_set >= 16) { 2019 div_set = 0x0001; 2020 mgn_set >>= 1; 2021 } else if (mgn_set == 0) { 2022 rtw89_err(rtwdev, "[ERR]cal mgn is 0,tar = %d\n", tar); 2023 goto end; 2024 } else { 2025 div_set = 0x0000; 2026 } 2027 2028 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); 2029 if (ret) { 2030 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 2031 goto end; 2032 } 2033 2034 val16 |= u16_encode_bits(div_set, B_AX_DIV); 2035 2036 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate); 2037 if (ret) { 2038 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2039 goto end; 2040 } 2041 2042 ret = __get_target(rtwdev, &tar, phy_rate); 2043 if (ret) { 2044 rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret); 2045 goto end; 2046 } 2047 2048 rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n", 2049 tar, div_set, mgn_set); 2050 ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1, 2051 (tar & 0x0FFF) | (mgn_set << 12), phy_rate); 2052 if (ret) { 2053 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1); 2054 goto end; 2055 } 2056 2057 /* Enable function */ 2058 ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate); 2059 if (ret) { 2060 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2061 goto end; 2062 } 2063 2064 /* CLK delay = 0 */ 2065 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 2066 PCIE_CLKDLY_HW_0); 2067 2068 end: 2069 /* Set L1BD to ori */ 2070 if (l1_flag) { 2071 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 2072 bdr_ori); 2073 if (ret) { 2074 rtw89_err(rtwdev, "[ERR]pci config write %X\n", 2075 RTW89_PCIE_L1_CTRL); 2076 return ret; 2077 } 2078 } 2079 2080 return ret; 2081 } 2082 2083 static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev) 2084 { 2085 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2086 int ret; 2087 2088 if (chip_id == RTL8852A) { 2089 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2090 PCIE_PHY_GEN1); 2091 if (ret) 2092 return ret; 2093 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2094 PCIE_PHY_GEN2); 2095 if (ret) 2096 return ret; 2097 } else if (chip_id == RTL8852C) { 2098 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2, 2099 B_AX_DEGLITCH); 2100 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2, 2101 
B_AX_DEGLITCH); 2102 } 2103 2104 return 0; 2105 } 2106 2107 static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev) 2108 { 2109 if (rtwdev->chip->chip_id != RTL8852A) 2110 return; 2111 2112 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE); 2113 } 2114 2115 static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev) 2116 { 2117 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2118 2119 if (chip_id != RTL8852A && chip_id != RTL8852B && chip_id != RTL8851B) 2120 return; 2121 2122 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN); 2123 } 2124 2125 static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev) 2126 { 2127 int ret; 2128 2129 if (rtwdev->chip->chip_id != RTL8852A) 2130 return 0; 2131 2132 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2133 PCIE_PHY_GEN1); 2134 if (ret) 2135 return ret; 2136 2137 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2138 PCIE_PHY_GEN2); 2139 if (ret) 2140 return ret; 2141 2142 return 0; 2143 } 2144 2145 static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev) 2146 { 2147 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2148 2149 if (chip_id != RTL8852A && chip_id != RTL8852B && chip_id != RTL8851B) 2150 return; 2151 2152 rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN); 2153 } 2154 2155 static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev) 2156 { 2157 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2158 2159 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { 2160 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 2161 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2162 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2163 B_AX_PCIE_DIS_WLSUS_AFT_PDN); 2164 } else if (rtwdev->chip->chip_id == RTL8852C) { 2165 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2166 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2167 } 2168 } 2169 2170 static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev) 2171 { 2172 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2173 2174 if (chip_id != RTL8852B && chip_id != RTL8851B) 2175 return 0; 2176 2177 return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK, 2178 PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1); 2179 } 2180 2181 static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up) 2182 { 2183 if (pwr_up) 2184 rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2185 else 2186 rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2187 } 2188 2189 static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev) 2190 { 2191 if (rtwdev->chip->chip_id != RTL8852C) 2192 return; 2193 2194 rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2195 rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2196 } 2197 2198 static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev) 2199 { 2200 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2201 return; 2202 2203 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT); 2204 } 2205 2206 static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev) 2207 { 2208 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2209 return; 2210 2211 rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, 2212 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2213 rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3); 2214 rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, 2215 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2216 } 2217 2218 static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev) 2219 { 2220 if 
(rtwdev->chip->chip_id != RTL8852C) 2221 return; 2222 2223 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1); 2224 } 2225 2226 static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev) 2227 { 2228 if (rtwdev->chip->chip_id != RTL8852C) 2229 return; 2230 2231 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN); 2232 } 2233 2234 static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev) 2235 { 2236 if (rtwdev->chip->chip_id == RTL8852C) 2237 return; 2238 2239 rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL, 2240 B_AX_SIC_EN_FORCE_CLKREQ); 2241 } 2242 2243 static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev) 2244 { 2245 const struct rtw89_pci_info *info = rtwdev->pci_info; 2246 u32 lbc; 2247 2248 if (rtwdev->chip->chip_id == RTL8852C) 2249 return; 2250 2251 lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG); 2252 if (info->lbc_en == MAC_AX_PCIE_ENABLE) { 2253 lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER); 2254 lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN; 2255 rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2256 } else { 2257 lbc &= ~B_AX_LBC_EN; 2258 } 2259 rtw89_write32_set(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2260 } 2261 2262 static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev) 2263 { 2264 const struct rtw89_pci_info *info = rtwdev->pci_info; 2265 u32 val32; 2266 2267 if (rtwdev->chip->chip_id != RTL8852C) 2268 return; 2269 2270 if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) { 2271 val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK, 2272 info->io_rcy_tmr); 2273 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32); 2274 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32); 2275 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32); 2276 2277 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2278 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2279 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2280 } else { 2281 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2282 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2283 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2284 } 2285 2286 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1); 2287 } 2288 2289 static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev) 2290 { 2291 if (rtwdev->chip->chip_id == RTL8852C) 2292 return; 2293 2294 rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL, 2295 B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG); 2296 2297 if (rtwdev->chip->chip_id == RTL8852A) 2298 rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL, 2299 B_AX_EN_CHKDSC_NO_RX_STUCK); 2300 } 2301 2302 static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev) 2303 { 2304 if (rtwdev->chip->chip_id == RTL8852C) 2305 return; 2306 2307 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 2308 B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG); 2309 } 2310 2311 static void rtw89_pci_clr_idx_all(struct rtw89_dev *rtwdev) 2312 { 2313 const struct rtw89_pci_info *info = rtwdev->pci_info; 2314 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2315 u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX | 2316 B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX | 2317 B_AX_CLR_CH12_IDX; 2318 u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg; 2319 u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg; 2320 2321 if (chip_id == RTL8852A || chip_id == RTL8852C) 2322 val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX | 2323 B_AX_CLR_ACH6_IDX | 
B_AX_CLR_ACH7_IDX; 2324 /* clear DMA indexes */ 2325 rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val); 2326 if (chip_id == RTL8852A || chip_id == RTL8852C) 2327 rtw89_write32_set(rtwdev, txbd_rwptr_clr2, 2328 B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX); 2329 rtw89_write32_set(rtwdev, rxbd_rwptr_clr, 2330 B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX); 2331 } 2332 2333 static int rtw89_poll_txdma_ch_idle_pcie(struct rtw89_dev *rtwdev) 2334 { 2335 const struct rtw89_pci_info *info = rtwdev->pci_info; 2336 u32 ret, check, dma_busy; 2337 u32 dma_busy1 = info->dma_busy1.addr; 2338 u32 dma_busy2 = info->dma_busy2_reg; 2339 2340 check = info->dma_busy1.mask; 2341 2342 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2343 10, 100, false, rtwdev, dma_busy1); 2344 if (ret) 2345 return ret; 2346 2347 if (!dma_busy2) 2348 return 0; 2349 2350 check = B_AX_CH10_BUSY | B_AX_CH11_BUSY; 2351 2352 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2353 10, 100, false, rtwdev, dma_busy2); 2354 if (ret) 2355 return ret; 2356 2357 return 0; 2358 } 2359 2360 static int rtw89_poll_rxdma_ch_idle_pcie(struct rtw89_dev *rtwdev) 2361 { 2362 const struct rtw89_pci_info *info = rtwdev->pci_info; 2363 u32 ret, check, dma_busy; 2364 u32 dma_busy3 = info->dma_busy3_reg; 2365 2366 check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY; 2367 2368 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2369 10, 100, false, rtwdev, dma_busy3); 2370 if (ret) 2371 return ret; 2372 2373 return 0; 2374 } 2375 2376 static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev) 2377 { 2378 u32 ret; 2379 2380 ret = rtw89_poll_txdma_ch_idle_pcie(rtwdev); 2381 if (ret) { 2382 rtw89_err(rtwdev, "txdma ch busy\n"); 2383 return ret; 2384 } 2385 2386 ret = rtw89_poll_rxdma_ch_idle_pcie(rtwdev); 2387 if (ret) { 2388 rtw89_err(rtwdev, "rxdma ch busy\n"); 2389 return ret; 2390 } 2391 2392 return 0; 2393 } 2394 2395 static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) 2396 { 2397 const struct rtw89_pci_info *info = rtwdev->pci_info; 2398 enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode; 2399 enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode; 2400 enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode; 2401 enum mac_ax_tag_mode tag_mode = info->tag_mode; 2402 enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl; 2403 enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl; 2404 enum mac_ax_tx_burst tx_burst = info->tx_burst; 2405 enum mac_ax_rx_burst rx_burst = info->rx_burst; 2406 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2407 u8 cv = rtwdev->hal.cv; 2408 u32 val32; 2409 2410 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2411 if (chip_id == RTL8852A && cv == CHIP_CBV) 2412 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2413 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2414 if (chip_id == RTL8852A || chip_id == RTL8852B) 2415 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2416 } 2417 2418 if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) { 2419 if (chip_id == RTL8852A && cv == CHIP_CBV) 2420 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2421 } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) { 2422 if (chip_id == RTL8852A || chip_id == RTL8852B) 2423 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2424 } 2425 2426 if (rxbd_mode == MAC_AX_RXBD_PKT) { 2427 rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2428 } else if (rxbd_mode == MAC_AX_RXBD_SEP) { 
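		/* Separate RX BD mode: set the mode bit in the init config
		 * register and, on 8852A/8852B, also clear the RX append
		 * length field.
		 */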
2429 rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2430 2431 if (chip_id == RTL8852A || chip_id == RTL8852B) 2432 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, 2433 B_AX_PCIE_RX_APPLEN_MASK, 0); 2434 } 2435 2436 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2437 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst); 2438 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst); 2439 } else if (chip_id == RTL8852C) { 2440 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst); 2441 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst); 2442 } 2443 2444 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2445 if (tag_mode == MAC_AX_TAG_SGL) { 2446 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) & 2447 ~B_AX_LATENCY_CONTROL; 2448 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2449 } else if (tag_mode == MAC_AX_TAG_MULTI) { 2450 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | 2451 B_AX_LATENCY_CONTROL; 2452 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2453 } 2454 } 2455 2456 rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask, 2457 info->multi_tag_num); 2458 2459 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2460 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE, 2461 wd_dma_idle_intvl); 2462 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT, 2463 wd_dma_act_intvl); 2464 } else if (chip_id == RTL8852C) { 2465 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK, 2466 wd_dma_idle_intvl); 2467 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK, 2468 wd_dma_act_intvl); 2469 } 2470 2471 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2472 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2473 B_AX_HOST_ADDR_INFO_8B_SEL); 2474 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2475 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2476 rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2477 B_AX_HOST_ADDR_INFO_8B_SEL); 2478 rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2479 } 2480 2481 return 0; 2482 } 2483 2484 static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev) 2485 { 2486 const struct rtw89_pci_info *info = rtwdev->pci_info; 2487 2488 if (rtwdev->chip->chip_id == RTL8852A) { 2489 /* ltr sw trigger */ 2490 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE); 2491 } 2492 info->ltr_set(rtwdev, false); 2493 rtw89_pci_ctrl_dma_all(rtwdev, false); 2494 rtw89_pci_clr_idx_all(rtwdev); 2495 2496 return 0; 2497 } 2498 2499 static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev) 2500 { 2501 const struct rtw89_pci_info *info = rtwdev->pci_info; 2502 int ret; 2503 2504 rtw89_pci_rxdma_prefth(rtwdev); 2505 rtw89_pci_l1off_pwroff(rtwdev); 2506 rtw89_pci_deglitch_setting(rtwdev); 2507 ret = rtw89_pci_l2_rxen_lat(rtwdev); 2508 if (ret) { 2509 rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret); 2510 return ret; 2511 } 2512 2513 rtw89_pci_aphy_pwrcut(rtwdev); 2514 rtw89_pci_hci_ldo(rtwdev); 2515 rtw89_pci_dphy_delay(rtwdev); 2516 2517 ret = rtw89_pci_autok_x(rtwdev); 2518 if (ret) { 2519 rtw89_err(rtwdev, "[ERR] pcie autok_x fail %d\n", ret); 2520 return ret; 2521 } 2522 2523 ret = rtw89_pci_auto_refclk_cal(rtwdev, false); 2524 if (ret) { 2525 rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret); 2526 return ret; 2527 } 2528 2529 rtw89_pci_power_wake(rtwdev, true); 2530 
rtw89_pci_autoload_hang(rtwdev); 2531 rtw89_pci_l12_vmain(rtwdev); 2532 rtw89_pci_gen2_force_ib(rtwdev); 2533 rtw89_pci_l1_ent_lat(rtwdev); 2534 rtw89_pci_wd_exit_l1(rtwdev); 2535 rtw89_pci_set_sic(rtwdev); 2536 rtw89_pci_set_lbc(rtwdev); 2537 rtw89_pci_set_io_rcy(rtwdev); 2538 rtw89_pci_set_dbg(rtwdev); 2539 rtw89_pci_set_keep_reg(rtwdev); 2540 2541 rtw89_write32_set(rtwdev, info->dma_stop1.addr, B_AX_STOP_WPDMA); 2542 2543 /* stop DMA activities */ 2544 rtw89_pci_ctrl_dma_all(rtwdev, false); 2545 2546 ret = rtw89_pci_poll_dma_all_idle(rtwdev); 2547 if (ret) { 2548 rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n"); 2549 return ret; 2550 } 2551 2552 rtw89_pci_clr_idx_all(rtwdev); 2553 rtw89_pci_mode_op(rtwdev); 2554 2555 /* fill TRX BD indexes */ 2556 rtw89_pci_ops_reset(rtwdev); 2557 2558 ret = rtw89_pci_rst_bdram_pcie(rtwdev); 2559 if (ret) { 2560 rtw89_warn(rtwdev, "reset bdram busy\n"); 2561 return ret; 2562 } 2563 2564 /* disable all channels except to FW CMD channel to download firmware */ 2565 rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, false); 2566 rtw89_pci_ctrl_txdma_fw_ch_pcie(rtwdev, true); 2567 2568 /* start DMA activities */ 2569 rtw89_pci_ctrl_dma_all(rtwdev, true); 2570 2571 return 0; 2572 } 2573 2574 int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en) 2575 { 2576 u32 val; 2577 2578 if (!en) 2579 return 0; 2580 2581 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 2582 if (rtw89_pci_ltr_is_err_reg_val(val)) 2583 return -EINVAL; 2584 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 2585 if (rtw89_pci_ltr_is_err_reg_val(val)) 2586 return -EINVAL; 2587 val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY); 2588 if (rtw89_pci_ltr_is_err_reg_val(val)) 2589 return -EINVAL; 2590 val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY); 2591 if (rtw89_pci_ltr_is_err_reg_val(val)) 2592 return -EINVAL; 2593 2594 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN | B_AX_LTR_EN | 2595 B_AX_LTR_WD_NOEMP_CHK); 2596 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK, 2597 PCI_LTR_SPC_500US); 2598 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 2599 PCI_LTR_IDLE_TIMER_3_2MS); 2600 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 2601 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 2602 rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x90039003); 2603 rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b); 2604 2605 return 0; 2606 } 2607 EXPORT_SYMBOL(rtw89_pci_ltr_set); 2608 2609 int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en) 2610 { 2611 u32 dec_ctrl; 2612 u32 val32; 2613 2614 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 2615 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2616 return -EINVAL; 2617 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 2618 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2619 return -EINVAL; 2620 dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL); 2621 if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl)) 2622 return -EINVAL; 2623 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3); 2624 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2625 return -EINVAL; 2626 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0); 2627 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2628 return -EINVAL; 2629 2630 if (!en) { 2631 dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN); 2632 dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) | 2633 B_AX_LTR_REQ_DRV; 2634 } else { 2635 dec_ctrl |= B_AX_LTR_HW_DEC_EN; 2636 } 2637 2638 dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK; 2639 dec_ctrl |= 
FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US); 2640 2641 if (en) 2642 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, 2643 B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN); 2644 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 2645 PCI_LTR_IDLE_TIMER_3_2MS); 2646 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 2647 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 2648 rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl); 2649 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003); 2650 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b); 2651 2652 return 0; 2653 } 2654 EXPORT_SYMBOL(rtw89_pci_ltr_set_v1); 2655 2656 static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev) 2657 { 2658 const struct rtw89_pci_info *info = rtwdev->pci_info; 2659 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2660 int ret; 2661 2662 ret = info->ltr_set(rtwdev, true); 2663 if (ret) { 2664 rtw89_err(rtwdev, "pci ltr set fail\n"); 2665 return ret; 2666 } 2667 if (chip_id == RTL8852A) { 2668 /* ltr sw trigger */ 2669 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT); 2670 } 2671 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2672 /* ADDR info 8-byte mode */ 2673 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2674 B_AX_HOST_ADDR_INFO_8B_SEL); 2675 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2676 } 2677 2678 /* enable DMA for all queues */ 2679 rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, true); 2680 2681 /* Release PCI IO */ 2682 rtw89_write32_clr(rtwdev, info->dma_stop1.addr, 2683 B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO); 2684 2685 return 0; 2686 } 2687 2688 static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev, 2689 struct pci_dev *pdev) 2690 { 2691 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2692 int ret; 2693 2694 ret = pci_enable_device(pdev); 2695 if (ret) { 2696 rtw89_err(rtwdev, "failed to enable pci device\n"); 2697 return ret; 2698 } 2699 2700 pci_set_master(pdev); 2701 pci_set_drvdata(pdev, rtwdev->hw); 2702 2703 rtwpci->pdev = pdev; 2704 2705 return 0; 2706 } 2707 2708 static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev, 2709 struct pci_dev *pdev) 2710 { 2711 pci_disable_device(pdev); 2712 } 2713 2714 static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, 2715 struct pci_dev *pdev) 2716 { 2717 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2718 unsigned long resource_len; 2719 u8 bar_id = 2; 2720 int ret; 2721 2722 ret = pci_request_regions(pdev, KBUILD_MODNAME); 2723 if (ret) { 2724 rtw89_err(rtwdev, "failed to request pci regions\n"); 2725 goto err; 2726 } 2727 2728 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 2729 if (ret) { 2730 rtw89_err(rtwdev, "failed to set dma mask to 32-bit\n"); 2731 goto err_release_regions; 2732 } 2733 2734 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 2735 if (ret) { 2736 rtw89_err(rtwdev, "failed to set consistent dma mask to 32-bit\n"); 2737 goto err_release_regions; 2738 } 2739 2740 resource_len = pci_resource_len(pdev, bar_id); 2741 rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len); 2742 if (!rtwpci->mmap) { 2743 rtw89_err(rtwdev, "failed to map pci io\n"); 2744 ret = -EIO; 2745 goto err_release_regions; 2746 } 2747 2748 return 0; 2749 2750 err_release_regions: 2751 pci_release_regions(pdev); 2752 err: 2753 return ret; 2754 } 2755 2756 static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev, 2757 struct pci_dev *pdev) 2758 { 2759 struct rtw89_pci *rtwpci = 
(struct rtw89_pci *)rtwdev->priv; 2760 2761 if (rtwpci->mmap) { 2762 pci_iounmap(pdev, rtwpci->mmap); 2763 pci_release_regions(pdev); 2764 } 2765 } 2766 2767 static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev, 2768 struct pci_dev *pdev, 2769 struct rtw89_pci_tx_ring *tx_ring) 2770 { 2771 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 2772 u8 *head = wd_ring->head; 2773 dma_addr_t dma = wd_ring->dma; 2774 u32 page_size = wd_ring->page_size; 2775 u32 page_num = wd_ring->page_num; 2776 u32 ring_sz = page_size * page_num; 2777 2778 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2779 wd_ring->head = NULL; 2780 } 2781 2782 static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev, 2783 struct pci_dev *pdev, 2784 struct rtw89_pci_tx_ring *tx_ring) 2785 { 2786 int ring_sz; 2787 u8 *head; 2788 dma_addr_t dma; 2789 2790 head = tx_ring->bd_ring.head; 2791 dma = tx_ring->bd_ring.dma; 2792 ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len; 2793 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2794 2795 tx_ring->bd_ring.head = NULL; 2796 } 2797 2798 static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev, 2799 struct pci_dev *pdev) 2800 { 2801 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2802 const struct rtw89_pci_info *info = rtwdev->pci_info; 2803 struct rtw89_pci_tx_ring *tx_ring; 2804 int i; 2805 2806 for (i = 0; i < RTW89_TXCH_NUM; i++) { 2807 if (info->tx_dma_ch_mask & BIT(i)) 2808 continue; 2809 tx_ring = &rtwpci->tx_rings[i]; 2810 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 2811 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 2812 } 2813 } 2814 2815 static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev, 2816 struct pci_dev *pdev, 2817 struct rtw89_pci_rx_ring *rx_ring) 2818 { 2819 struct rtw89_pci_rx_info *rx_info; 2820 struct sk_buff *skb; 2821 dma_addr_t dma; 2822 u32 buf_sz; 2823 u8 *head; 2824 int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len; 2825 int i; 2826 2827 buf_sz = rx_ring->buf_sz; 2828 for (i = 0; i < rx_ring->bd_ring.len; i++) { 2829 skb = rx_ring->buf[i]; 2830 if (!skb) 2831 continue; 2832 2833 rx_info = RTW89_PCI_RX_SKB_CB(skb); 2834 dma = rx_info->dma; 2835 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 2836 dev_kfree_skb(skb); 2837 rx_ring->buf[i] = NULL; 2838 } 2839 2840 head = rx_ring->bd_ring.head; 2841 dma = rx_ring->bd_ring.dma; 2842 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2843 2844 rx_ring->bd_ring.head = NULL; 2845 } 2846 2847 static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev, 2848 struct pci_dev *pdev) 2849 { 2850 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2851 struct rtw89_pci_rx_ring *rx_ring; 2852 int i; 2853 2854 for (i = 0; i < RTW89_RXCH_NUM; i++) { 2855 rx_ring = &rtwpci->rx_rings[i]; 2856 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 2857 } 2858 } 2859 2860 static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev, 2861 struct pci_dev *pdev) 2862 { 2863 rtw89_pci_free_rx_rings(rtwdev, pdev); 2864 rtw89_pci_free_tx_rings(rtwdev, pdev); 2865 } 2866 2867 static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev, 2868 struct rtw89_pci_rx_ring *rx_ring, 2869 struct sk_buff *skb, int buf_sz, u32 idx) 2870 { 2871 struct rtw89_pci_rx_info *rx_info; 2872 struct rtw89_pci_rx_bd_32 *rx_bd; 2873 dma_addr_t dma; 2874 2875 if (!skb) 2876 return -EINVAL; 2877 2878 dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE); 2879 if (dma_mapping_error(&pdev->dev, dma)) 2880 return -EBUSY; 2881 2882 
rx_info = RTW89_PCI_RX_SKB_CB(skb); 2883 rx_bd = RTW89_PCI_RX_BD(rx_ring, idx); 2884 2885 memset(rx_bd, 0, sizeof(*rx_bd)); 2886 rx_bd->buf_size = cpu_to_le16(buf_sz); 2887 rx_bd->dma = cpu_to_le32(dma); 2888 rx_info->dma = dma; 2889 2890 return 0; 2891 } 2892 2893 static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev, 2894 struct pci_dev *pdev, 2895 struct rtw89_pci_tx_ring *tx_ring, 2896 enum rtw89_tx_channel txch) 2897 { 2898 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 2899 struct rtw89_pci_tx_wd *txwd; 2900 dma_addr_t dma; 2901 dma_addr_t cur_paddr; 2902 u8 *head; 2903 u8 *cur_vaddr; 2904 u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE; 2905 u32 page_num = RTW89_PCI_TXWD_NUM_MAX; 2906 u32 ring_sz = page_size * page_num; 2907 u32 page_offset; 2908 int i; 2909 2910 /* FWCMD queue doesn't use txwd as pages */ 2911 if (txch == RTW89_TXCH_CH12) 2912 return 0; 2913 2914 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 2915 if (!head) 2916 return -ENOMEM; 2917 2918 INIT_LIST_HEAD(&wd_ring->free_pages); 2919 wd_ring->head = head; 2920 wd_ring->dma = dma; 2921 wd_ring->page_size = page_size; 2922 wd_ring->page_num = page_num; 2923 2924 page_offset = 0; 2925 for (i = 0; i < page_num; i++) { 2926 txwd = &wd_ring->pages[i]; 2927 cur_paddr = dma + page_offset; 2928 cur_vaddr = head + page_offset; 2929 2930 skb_queue_head_init(&txwd->queue); 2931 INIT_LIST_HEAD(&txwd->list); 2932 txwd->paddr = cur_paddr; 2933 txwd->vaddr = cur_vaddr; 2934 txwd->len = page_size; 2935 txwd->seq = i; 2936 rtw89_pci_enqueue_txwd(tx_ring, txwd); 2937 2938 page_offset += page_size; 2939 } 2940 2941 return 0; 2942 } 2943 2944 static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev, 2945 struct pci_dev *pdev, 2946 struct rtw89_pci_tx_ring *tx_ring, 2947 u32 desc_size, u32 len, 2948 enum rtw89_tx_channel txch) 2949 { 2950 const struct rtw89_pci_ch_dma_addr *txch_addr; 2951 int ring_sz = desc_size * len; 2952 u8 *head; 2953 dma_addr_t dma; 2954 int ret; 2955 2956 ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch); 2957 if (ret) { 2958 rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch); 2959 goto err; 2960 } 2961 2962 ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr); 2963 if (ret) { 2964 rtw89_err(rtwdev, "failed to get address of txch %d", txch); 2965 goto err_free_wd_ring; 2966 } 2967 2968 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 2969 if (!head) { 2970 ret = -ENOMEM; 2971 goto err_free_wd_ring; 2972 } 2973 2974 INIT_LIST_HEAD(&tx_ring->busy_pages); 2975 tx_ring->bd_ring.head = head; 2976 tx_ring->bd_ring.dma = dma; 2977 tx_ring->bd_ring.len = len; 2978 tx_ring->bd_ring.desc_size = desc_size; 2979 tx_ring->bd_ring.addr = *txch_addr; 2980 tx_ring->bd_ring.wp = 0; 2981 tx_ring->bd_ring.rp = 0; 2982 tx_ring->txch = txch; 2983 2984 return 0; 2985 2986 err_free_wd_ring: 2987 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 2988 err: 2989 return ret; 2990 } 2991 2992 static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev, 2993 struct pci_dev *pdev) 2994 { 2995 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2996 const struct rtw89_pci_info *info = rtwdev->pci_info; 2997 struct rtw89_pci_tx_ring *tx_ring; 2998 u32 desc_size; 2999 u32 len; 3000 u32 i, tx_allocated; 3001 int ret; 3002 3003 for (i = 0; i < RTW89_TXCH_NUM; i++) { 3004 if (info->tx_dma_ch_mask & BIT(i)) 3005 continue; 3006 tx_ring = &rtwpci->tx_rings[i]; 3007 desc_size = sizeof(struct rtw89_pci_tx_bd_32); 3008 len = RTW89_PCI_TXBD_NUM_MAX; 3009 ret = 
rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring, 3010 desc_size, len, i); 3011 if (ret) { 3012 rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i); 3013 goto err_free; 3014 } 3015 } 3016 3017 return 0; 3018 3019 err_free: 3020 tx_allocated = i; 3021 for (i = 0; i < tx_allocated; i++) { 3022 tx_ring = &rtwpci->tx_rings[i]; 3023 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 3024 } 3025 3026 return ret; 3027 } 3028 3029 static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev, 3030 struct pci_dev *pdev, 3031 struct rtw89_pci_rx_ring *rx_ring, 3032 u32 desc_size, u32 len, u32 rxch) 3033 { 3034 const struct rtw89_pci_ch_dma_addr *rxch_addr; 3035 struct sk_buff *skb; 3036 u8 *head; 3037 dma_addr_t dma; 3038 int ring_sz = desc_size * len; 3039 int buf_sz = RTW89_PCI_RX_BUF_SIZE; 3040 int i, allocated; 3041 int ret; 3042 3043 ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr); 3044 if (ret) { 3045 rtw89_err(rtwdev, "failed to get address of rxch %d", rxch); 3046 return ret; 3047 } 3048 3049 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3050 if (!head) { 3051 ret = -ENOMEM; 3052 goto err; 3053 } 3054 3055 rx_ring->bd_ring.head = head; 3056 rx_ring->bd_ring.dma = dma; 3057 rx_ring->bd_ring.len = len; 3058 rx_ring->bd_ring.desc_size = desc_size; 3059 rx_ring->bd_ring.addr = *rxch_addr; 3060 rx_ring->bd_ring.wp = 0; 3061 rx_ring->bd_ring.rp = 0; 3062 rx_ring->buf_sz = buf_sz; 3063 rx_ring->diliver_skb = NULL; 3064 rx_ring->diliver_desc.ready = false; 3065 3066 for (i = 0; i < len; i++) { 3067 skb = dev_alloc_skb(buf_sz); 3068 if (!skb) { 3069 ret = -ENOMEM; 3070 goto err_free; 3071 } 3072 3073 memset(skb->data, 0, buf_sz); 3074 rx_ring->buf[i] = skb; 3075 ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb, 3076 buf_sz, i); 3077 if (ret) { 3078 rtw89_err(rtwdev, "failed to init rx buf %d\n", i); 3079 dev_kfree_skb_any(skb); 3080 rx_ring->buf[i] = NULL; 3081 goto err_free; 3082 } 3083 } 3084 3085 return 0; 3086 3087 err_free: 3088 allocated = i; 3089 for (i = 0; i < allocated; i++) { 3090 skb = rx_ring->buf[i]; 3091 if (!skb) 3092 continue; 3093 dma = *((dma_addr_t *)skb->cb); 3094 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 3095 dev_kfree_skb(skb); 3096 rx_ring->buf[i] = NULL; 3097 } 3098 3099 head = rx_ring->bd_ring.head; 3100 dma = rx_ring->bd_ring.dma; 3101 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3102 3103 rx_ring->bd_ring.head = NULL; 3104 err: 3105 return ret; 3106 } 3107 3108 static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev, 3109 struct pci_dev *pdev) 3110 { 3111 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3112 struct rtw89_pci_rx_ring *rx_ring; 3113 u32 desc_size; 3114 u32 len; 3115 int i, rx_allocated; 3116 int ret; 3117 3118 for (i = 0; i < RTW89_RXCH_NUM; i++) { 3119 rx_ring = &rtwpci->rx_rings[i]; 3120 desc_size = sizeof(struct rtw89_pci_rx_bd_32); 3121 len = RTW89_PCI_RXBD_NUM_MAX; 3122 ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring, 3123 desc_size, len, i); 3124 if (ret) { 3125 rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i); 3126 goto err_free; 3127 } 3128 } 3129 3130 return 0; 3131 3132 err_free: 3133 rx_allocated = i; 3134 for (i = 0; i < rx_allocated; i++) { 3135 rx_ring = &rtwpci->rx_rings[i]; 3136 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 3137 } 3138 3139 return ret; 3140 } 3141 3142 static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev, 3143 struct pci_dev *pdev) 3144 { 3145 int ret; 3146 3147 ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev); 3148 if (ret) { 3149 
rtw89_err(rtwdev, "failed to alloc dma tx rings\n"); 3150 goto err; 3151 } 3152 3153 ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev); 3154 if (ret) { 3155 rtw89_err(rtwdev, "failed to alloc dma rx rings\n"); 3156 goto err_free_tx_rings; 3157 } 3158 3159 return 0; 3160 3161 err_free_tx_rings: 3162 rtw89_pci_free_tx_rings(rtwdev, pdev); 3163 err: 3164 return ret; 3165 } 3166 3167 static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev, 3168 struct rtw89_pci *rtwpci) 3169 { 3170 skb_queue_head_init(&rtwpci->h2c_queue); 3171 skb_queue_head_init(&rtwpci->h2c_release_queue); 3172 } 3173 3174 static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev, 3175 struct pci_dev *pdev) 3176 { 3177 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3178 int ret; 3179 3180 ret = rtw89_pci_setup_mapping(rtwdev, pdev); 3181 if (ret) { 3182 rtw89_err(rtwdev, "failed to setup pci mapping\n"); 3183 goto err; 3184 } 3185 3186 ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev); 3187 if (ret) { 3188 rtw89_err(rtwdev, "failed to alloc pci trx rings\n"); 3189 goto err_pci_unmap; 3190 } 3191 3192 rtw89_pci_h2c_init(rtwdev, rtwpci); 3193 3194 spin_lock_init(&rtwpci->irq_lock); 3195 spin_lock_init(&rtwpci->trx_lock); 3196 3197 return 0; 3198 3199 err_pci_unmap: 3200 rtw89_pci_clear_mapping(rtwdev, pdev); 3201 err: 3202 return ret; 3203 } 3204 3205 static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev, 3206 struct pci_dev *pdev) 3207 { 3208 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3209 3210 rtw89_pci_free_trx_rings(rtwdev, pdev); 3211 rtw89_pci_clear_mapping(rtwdev, pdev); 3212 rtw89_pci_release_fwcmd(rtwdev, rtwpci, 3213 skb_queue_len(&rtwpci->h2c_queue), true); 3214 } 3215 3216 void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev) 3217 { 3218 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3219 3220 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0; 3221 3222 if (rtwpci->under_recovery) { 3223 rtwpci->intrs[0] = B_AX_HS0ISR_IND_INT_EN; 3224 rtwpci->intrs[1] = 0; 3225 } else { 3226 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3227 B_AX_RXDMA_INT_EN | 3228 B_AX_RXP1DMA_INT_EN | 3229 B_AX_RPQDMA_INT_EN | 3230 B_AX_RXDMA_STUCK_INT_EN | 3231 B_AX_RDU_INT_EN | 3232 B_AX_RPQBD_FULL_INT_EN | 3233 B_AX_HS0ISR_IND_INT_EN; 3234 3235 rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN; 3236 } 3237 } 3238 EXPORT_SYMBOL(rtw89_pci_config_intr_mask); 3239 3240 static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev) 3241 { 3242 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3243 3244 rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN; 3245 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3246 rtwpci->intrs[0] = 0; 3247 rtwpci->intrs[1] = 0; 3248 } 3249 3250 static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev) 3251 { 3252 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3253 3254 rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN | 3255 B_AX_HS1ISR_IND_INT_EN | 3256 B_AX_HS0ISR_IND_INT_EN; 3257 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3258 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3259 B_AX_RXDMA_INT_EN | 3260 B_AX_RXP1DMA_INT_EN | 3261 B_AX_RPQDMA_INT_EN | 3262 B_AX_RXDMA_STUCK_INT_EN | 3263 B_AX_RDU_INT_EN | 3264 B_AX_RPQBD_FULL_INT_EN; 3265 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3266 } 3267 3268 static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev) 3269 { 3270 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3271 3272 rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN | 3273 
B_AX_HS0ISR_IND_INT_EN; 3274 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3275 rtwpci->intrs[0] = 0; 3276 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3277 } 3278 3279 void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev) 3280 { 3281 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3282 3283 if (rtwpci->under_recovery) 3284 rtw89_pci_recovery_intr_mask_v1(rtwdev); 3285 else if (rtwpci->low_power) 3286 rtw89_pci_low_power_intr_mask_v1(rtwdev); 3287 else 3288 rtw89_pci_default_intr_mask_v1(rtwdev); 3289 } 3290 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1); 3291 3292 static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev, 3293 struct pci_dev *pdev) 3294 { 3295 unsigned long flags = 0; 3296 int ret; 3297 3298 flags |= PCI_IRQ_LEGACY | PCI_IRQ_MSI; 3299 ret = pci_alloc_irq_vectors(pdev, 1, 1, flags); 3300 if (ret < 0) { 3301 rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret); 3302 goto err; 3303 } 3304 3305 ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq, 3306 rtw89_pci_interrupt_handler, 3307 rtw89_pci_interrupt_threadfn, 3308 IRQF_SHARED, KBUILD_MODNAME, rtwdev); 3309 if (ret) { 3310 rtw89_err(rtwdev, "failed to request threaded irq\n"); 3311 goto err_free_vector; 3312 } 3313 3314 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET); 3315 3316 return 0; 3317 3318 err_free_vector: 3319 pci_free_irq_vectors(pdev); 3320 err: 3321 return ret; 3322 } 3323 3324 static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev, 3325 struct pci_dev *pdev) 3326 { 3327 devm_free_irq(rtwdev->dev, pdev->irq, rtwdev); 3328 pci_free_irq_vectors(pdev); 3329 } 3330 3331 static u16 gray_code_to_bin(u16 gray_code, u32 bit_num) 3332 { 3333 u16 bin = 0, gray_bit; 3334 u32 bit_idx; 3335 3336 for (bit_idx = 0; bit_idx < bit_num; bit_idx++) { 3337 gray_bit = (gray_code >> bit_idx) & 0x1; 3338 if (bit_num - bit_idx > 1) 3339 gray_bit ^= (gray_code >> (bit_idx + 1)) & 0x1; 3340 bin |= (gray_bit << bit_idx); 3341 } 3342 3343 return bin; 3344 } 3345 3346 static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev) 3347 { 3348 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3349 struct pci_dev *pdev = rtwpci->pdev; 3350 u16 val16, filter_out_val; 3351 u32 val, phy_offset; 3352 int ret; 3353 3354 if (rtwdev->chip->chip_id != RTL8852C) 3355 return 0; 3356 3357 val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK); 3358 if (val == B_AX_ASPM_CTRL_L1) 3359 return 0; 3360 3361 ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val); 3362 if (ret) 3363 return ret; 3364 3365 val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val); 3366 if (val == RTW89_PCIE_GEN1_SPEED) { 3367 phy_offset = R_RAC_DIRECT_OFFSET_G1; 3368 } else if (val == RTW89_PCIE_GEN2_SPEED) { 3369 phy_offset = R_RAC_DIRECT_OFFSET_G2; 3370 val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT); 3371 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, 3372 val16 | B_PCIE_BIT_PINOUT_DIS); 3373 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, 3374 val16 & ~B_PCIE_BIT_RD_SEL); 3375 3376 val16 = rtw89_read16_mask(rtwdev, 3377 phy_offset + RAC_ANA1F * RAC_MULT, 3378 FILTER_OUT_EQ_MASK); 3379 val16 = gray_code_to_bin(val16, hweight16(val16)); 3380 filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 * 3381 RAC_MULT); 3382 filter_out_val &= ~REG_FILTER_OUT_MASK; 3383 filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16); 3384 3385 rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT, 3386 filter_out_val); 3387 rtw89_write16_set(rtwdev, 
phy_offset + RAC_ANA0A * RAC_MULT, 3388 B_BAC_EQ_SEL); 3389 rtw89_write16_set(rtwdev, 3390 R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT, 3391 B_PCIE_BIT_PSAVE); 3392 } else { 3393 return -EOPNOTSUPP; 3394 } 3395 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT, 3396 B_PCIE_BIT_PSAVE); 3397 3398 return 0; 3399 } 3400 3401 static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable) 3402 { 3403 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3404 int ret; 3405 3406 if (rtw89_pci_disable_clkreq) 3407 return; 3408 3409 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 3410 PCIE_CLKDLY_HW_30US); 3411 if (ret) 3412 rtw89_err(rtwdev, "failed to set CLKREQ Delay\n"); 3413 3414 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { 3415 if (enable) 3416 ret = rtw89_pci_config_byte_set(rtwdev, 3417 RTW89_PCIE_L1_CTRL, 3418 RTW89_PCIE_BIT_CLK); 3419 else 3420 ret = rtw89_pci_config_byte_clr(rtwdev, 3421 RTW89_PCIE_L1_CTRL, 3422 RTW89_PCIE_BIT_CLK); 3423 if (ret) 3424 rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d", 3425 enable ? "set" : "unset", ret); 3426 } else if (chip_id == RTL8852C) { 3427 rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL, 3428 B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL); 3429 if (enable) 3430 rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL, 3431 B_AX_CLK_REQ_N); 3432 else 3433 rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL, 3434 B_AX_CLK_REQ_N); 3435 } 3436 } 3437 3438 static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable) 3439 { 3440 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3441 u8 value = 0; 3442 int ret; 3443 3444 if (rtw89_pci_disable_aspm_l1) 3445 return; 3446 3447 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value); 3448 if (ret) 3449 rtw89_err(rtwdev, "failed to read ASPM Delay\n"); 3450 3451 value &= ~(RTW89_L1DLY_MASK | RTW89_L0DLY_MASK); 3452 value |= FIELD_PREP(RTW89_L1DLY_MASK, PCIE_L1DLY_16US) | 3453 FIELD_PREP(RTW89_L0DLY_MASK, PCIE_L0SDLY_4US); 3454 3455 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value); 3456 if (ret) 3457 rtw89_err(rtwdev, "failed to read ASPM Delay\n"); 3458 3459 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { 3460 if (enable) 3461 ret = rtw89_pci_config_byte_set(rtwdev, 3462 RTW89_PCIE_L1_CTRL, 3463 RTW89_PCIE_BIT_L1); 3464 else 3465 ret = rtw89_pci_config_byte_clr(rtwdev, 3466 RTW89_PCIE_L1_CTRL, 3467 RTW89_PCIE_BIT_L1); 3468 } else if (chip_id == RTL8852C) { 3469 if (enable) 3470 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3471 B_AX_ASPM_CTRL_L1); 3472 else 3473 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3474 B_AX_ASPM_CTRL_L1); 3475 } 3476 if (ret) 3477 rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d", 3478 enable ? 
"set" : "unset", ret); 3479 } 3480 3481 static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev) 3482 { 3483 struct rtw89_traffic_stats *stats = &rtwdev->stats; 3484 enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv; 3485 enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv; 3486 u32 val = 0; 3487 3488 if (!rtwdev->scanning && 3489 (tx_tfc_lv >= RTW89_TFC_HIGH || rx_tfc_lv >= RTW89_TFC_HIGH)) 3490 val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL | 3491 FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) | 3492 FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) | 3493 FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64); 3494 3495 rtw89_write32(rtwdev, R_AX_INT_MIT_RX, val); 3496 } 3497 3498 static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev) 3499 { 3500 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3501 struct pci_dev *pdev = rtwpci->pdev; 3502 u16 link_ctrl; 3503 int ret; 3504 3505 /* Though there is standard PCIE configuration space to set the 3506 * link control register, but by Realtek's design, driver should 3507 * check if host supports CLKREQ/ASPM to enable the HW module. 3508 * 3509 * These functions are implemented by two HW modules associated, 3510 * one is responsible to access PCIE configuration space to 3511 * follow the host settings, and another is in charge of doing 3512 * CLKREQ/ASPM mechanisms, it is default disabled. Because sometimes 3513 * the host does not support it, and due to some reasons or wrong 3514 * settings (ex. CLKREQ# not Bi-Direction), it could lead to device 3515 * loss if HW misbehaves on the link. 3516 * 3517 * Hence it's designed that driver should first check the PCIE 3518 * configuration space is sync'ed and enabled, then driver can turn 3519 * on the other module that is actually working on the mechanism. 3520 */ 3521 ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl); 3522 if (ret) { 3523 rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret); 3524 return; 3525 } 3526 3527 if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN) 3528 rtw89_pci_clkreq_set(rtwdev, true); 3529 3530 if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1) 3531 rtw89_pci_aspm_set(rtwdev, true); 3532 } 3533 3534 static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable) 3535 { 3536 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3537 int ret; 3538 3539 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { 3540 if (enable) 3541 ret = rtw89_pci_config_byte_set(rtwdev, 3542 RTW89_PCIE_TIMER_CTRL, 3543 RTW89_PCIE_BIT_L1SUB); 3544 else 3545 ret = rtw89_pci_config_byte_clr(rtwdev, 3546 RTW89_PCIE_TIMER_CTRL, 3547 RTW89_PCIE_BIT_L1SUB); 3548 if (ret) 3549 rtw89_err(rtwdev, "failed to %s L1SS, ret=%d", 3550 enable ? 
"set" : "unset", ret); 3551 } else if (chip_id == RTL8852C) { 3552 ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1, 3553 RTW89_PCIE_BIT_ASPM_L11 | 3554 RTW89_PCIE_BIT_PCI_L11); 3555 if (ret) 3556 rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d", ret); 3557 if (enable) 3558 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3559 B_AX_L1SUB_DISABLE); 3560 else 3561 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3562 B_AX_L1SUB_DISABLE); 3563 } 3564 } 3565 3566 static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev) 3567 { 3568 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3569 struct pci_dev *pdev = rtwpci->pdev; 3570 u32 l1ss_cap_ptr, l1ss_ctrl; 3571 3572 if (rtw89_pci_disable_l1ss) 3573 return; 3574 3575 l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); 3576 if (!l1ss_cap_ptr) 3577 return; 3578 3579 pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl); 3580 3581 if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK) 3582 rtw89_pci_l1ss_set(rtwdev, true); 3583 } 3584 3585 static int rtw89_pci_poll_io_idle(struct rtw89_dev *rtwdev) 3586 { 3587 int ret = 0; 3588 u32 sts; 3589 u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY; 3590 3591 ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0, 3592 10, 1000, false, rtwdev, 3593 R_AX_PCIE_DMA_BUSY1); 3594 if (ret) { 3595 rtw89_err(rtwdev, "pci dmach busy1 0x%X\n", 3596 rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1)); 3597 return -EINVAL; 3598 } 3599 return ret; 3600 } 3601 3602 static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev) 3603 { 3604 u32 val; 3605 int ret; 3606 3607 if (rtwdev->chip->chip_id == RTL8852C) 3608 return 0; 3609 3610 rtw89_pci_ctrl_dma_all(rtwdev, false); 3611 ret = rtw89_pci_poll_io_idle(rtwdev); 3612 if (ret) { 3613 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 3614 rtw89_debug(rtwdev, RTW89_DBG_HCI, 3615 "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n", 3616 R_AX_DBG_ERR_FLAG, val); 3617 if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0) 3618 rtw89_mac_ctrl_hci_dma_tx(rtwdev, false); 3619 if (val & B_AX_RX_STUCK) 3620 rtw89_mac_ctrl_hci_dma_rx(rtwdev, false); 3621 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true); 3622 ret = rtw89_pci_poll_io_idle(rtwdev); 3623 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 3624 rtw89_debug(rtwdev, RTW89_DBG_HCI, 3625 "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n", 3626 R_AX_DBG_ERR_FLAG, val); 3627 } 3628 3629 return ret; 3630 } 3631 3632 3633 3634 static int rtw89_pci_rst_bdram(struct rtw89_dev *rtwdev) 3635 { 3636 int ret = 0; 3637 u32 val32, sts; 3638 3639 val32 = B_AX_RST_BDRAM; 3640 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 3641 3642 ret = read_poll_timeout_atomic(rtw89_read32, sts, 3643 (sts & B_AX_RST_BDRAM) == 0x0, 1, 100, 3644 true, rtwdev, R_AX_PCIE_INIT_CFG1); 3645 return ret; 3646 } 3647 3648 static int rtw89_pci_lv1rst_start_dma(struct rtw89_dev *rtwdev) 3649 { 3650 u32 ret; 3651 3652 if (rtwdev->chip->chip_id == RTL8852C) 3653 return 0; 3654 3655 rtw89_mac_ctrl_hci_dma_trx(rtwdev, false); 3656 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true); 3657 rtw89_pci_clr_idx_all(rtwdev); 3658 3659 ret = rtw89_pci_rst_bdram(rtwdev); 3660 if (ret) 3661 return ret; 3662 3663 rtw89_pci_ctrl_dma_all(rtwdev, true); 3664 return ret; 3665 } 3666 3667 static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev, 3668 enum rtw89_lv1_rcvy_step step) 3669 { 3670 int ret; 3671 3672 switch (step) { 3673 case RTW89_LV1_RCVY_STEP_1: 3674 ret = rtw89_pci_lv1rst_stop_dma(rtwdev); 3675 if (ret) 
3676 rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n"); 3677 3678 break; 3679 3680 case RTW89_LV1_RCVY_STEP_2: 3681 ret = rtw89_pci_lv1rst_start_dma(rtwdev); 3682 if (ret) 3683 rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n"); 3684 break; 3685 3686 default: 3687 return -EINVAL; 3688 } 3689 3690 return ret; 3691 } 3692 3693 static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev) 3694 { 3695 rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX =0x%08x\n", 3696 rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX)); 3697 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n", 3698 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG)); 3699 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n", 3700 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG)); 3701 } 3702 3703 static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget) 3704 { 3705 struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi); 3706 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3707 unsigned long flags; 3708 int work_done; 3709 3710 rtwdev->napi_budget_countdown = budget; 3711 3712 rtw89_pci_clear_isr0(rtwdev, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT); 3713 work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 3714 if (work_done == budget) 3715 return budget; 3716 3717 rtw89_pci_clear_isr0(rtwdev, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT | B_AX_RDU_INT); 3718 work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 3719 if (work_done < budget && napi_complete_done(napi, work_done)) { 3720 spin_lock_irqsave(&rtwpci->irq_lock, flags); 3721 if (likely(rtwpci->running)) 3722 rtw89_chip_enable_intr(rtwdev, rtwpci); 3723 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 3724 } 3725 3726 return work_done; 3727 } 3728 3729 static int __maybe_unused rtw89_pci_suspend(struct device *dev) 3730 { 3731 struct ieee80211_hw *hw = dev_get_drvdata(dev); 3732 struct rtw89_dev *rtwdev = hw->priv; 3733 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3734 3735 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3736 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); 3737 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3738 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { 3739 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 3740 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 3741 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 3742 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); 3743 } else { 3744 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, 3745 B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN); 3746 } 3747 3748 return 0; 3749 } 3750 3751 static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev) 3752 { 3753 if (rtwdev->chip->chip_id == RTL8852C) 3754 return; 3755 3756 /* Hardware need write the reg twice to ensure the setting work */ 3757 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE, 3758 RTW89_PCIE_BIT_CFG_RST_MSTATE); 3759 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE, 3760 RTW89_PCIE_BIT_CFG_RST_MSTATE); 3761 } 3762 3763 static int __maybe_unused rtw89_pci_resume(struct device *dev) 3764 { 3765 struct ieee80211_hw *hw = dev_get_drvdata(dev); 3766 struct rtw89_dev *rtwdev = hw->priv; 3767 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3768 3769 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3770 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); 3771 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3772 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { 3773 
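		/* Revert the suspend-time settings: set the LDO control bit
		 * that was cleared on suspend and release the PERST/TRAIN
		 * keep bits.
		 */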
rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 3774 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 3775 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, 3776 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); 3777 } else { 3778 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, 3779 B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN); 3780 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, 3781 B_AX_SEL_REQ_ENTR_L1); 3782 } 3783 rtw89_pci_l2_hci_ldo(rtwdev); 3784 rtw89_pci_filter_out(rtwdev); 3785 rtw89_pci_link_cfg(rtwdev); 3786 rtw89_pci_l1ss_cfg(rtwdev); 3787 3788 return 0; 3789 } 3790 3791 SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume); 3792 EXPORT_SYMBOL(rtw89_pm_ops); 3793 3794 static const struct rtw89_hci_ops rtw89_pci_ops = { 3795 .tx_write = rtw89_pci_ops_tx_write, 3796 .tx_kick_off = rtw89_pci_ops_tx_kick_off, 3797 .flush_queues = rtw89_pci_ops_flush_queues, 3798 .reset = rtw89_pci_ops_reset, 3799 .start = rtw89_pci_ops_start, 3800 .stop = rtw89_pci_ops_stop, 3801 .pause = rtw89_pci_ops_pause, 3802 .switch_mode = rtw89_pci_ops_switch_mode, 3803 .recalc_int_mit = rtw89_pci_recalc_int_mit, 3804 3805 .read8 = rtw89_pci_ops_read8, 3806 .read16 = rtw89_pci_ops_read16, 3807 .read32 = rtw89_pci_ops_read32, 3808 .write8 = rtw89_pci_ops_write8, 3809 .write16 = rtw89_pci_ops_write16, 3810 .write32 = rtw89_pci_ops_write32, 3811 3812 .mac_pre_init = rtw89_pci_ops_mac_pre_init, 3813 .mac_post_init = rtw89_pci_ops_mac_post_init, 3814 .deinit = rtw89_pci_ops_deinit, 3815 3816 .check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource, 3817 .mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery, 3818 .dump_err_status = rtw89_pci_ops_dump_err_status, 3819 .napi_poll = rtw89_pci_napi_poll, 3820 3821 .recovery_start = rtw89_pci_ops_recovery_start, 3822 .recovery_complete = rtw89_pci_ops_recovery_complete, 3823 3824 .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_pcie, 3825 .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_pcie, 3826 .ctrl_trxhci = rtw89_pci_ctrl_dma_trx, 3827 .poll_txdma_ch = rtw89_poll_txdma_ch_idle_pcie, 3828 .clr_idx_all = rtw89_pci_clr_idx_all, 3829 .clear = rtw89_pci_clear_resource, 3830 .disable_intr = rtw89_pci_disable_intr_lock, 3831 .enable_intr = rtw89_pci_enable_intr_lock, 3832 .rst_bdram = rtw89_pci_rst_bdram_pcie, 3833 }; 3834 3835 int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 3836 { 3837 struct rtw89_dev *rtwdev; 3838 const struct rtw89_driver_info *info; 3839 const struct rtw89_pci_info *pci_info; 3840 int ret; 3841 3842 info = (const struct rtw89_driver_info *)id->driver_data; 3843 3844 rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev, 3845 sizeof(struct rtw89_pci), 3846 info->chip); 3847 if (!rtwdev) { 3848 dev_err(&pdev->dev, "failed to allocate hw\n"); 3849 return -ENOMEM; 3850 } 3851 3852 pci_info = info->bus.pci; 3853 3854 rtwdev->pci_info = info->bus.pci; 3855 rtwdev->hci.ops = &rtw89_pci_ops; 3856 rtwdev->hci.type = RTW89_HCI_TYPE_PCIE; 3857 rtwdev->hci.rpwm_addr = pci_info->rpwm_addr; 3858 rtwdev->hci.cpwm_addr = pci_info->cpwm_addr; 3859 3860 SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev); 3861 3862 ret = rtw89_core_init(rtwdev); 3863 if (ret) { 3864 rtw89_err(rtwdev, "failed to initialise core\n"); 3865 goto err_release_hw; 3866 } 3867 3868 ret = rtw89_pci_claim_device(rtwdev, pdev); 3869 if (ret) { 3870 rtw89_err(rtwdev, "failed to claim pci device\n"); 3871 goto err_core_deinit; 3872 } 3873 3874 ret = rtw89_pci_setup_resource(rtwdev, pdev); 3875 if (ret) { 3876 rtw89_err(rtwdev, "failed to setup pci resource\n"); 3877 goto err_declaim_pci; 
3878 } 3879 3880 ret = rtw89_chip_info_setup(rtwdev); 3881 if (ret) { 3882 rtw89_err(rtwdev, "failed to setup chip information\n"); 3883 goto err_clear_resource; 3884 } 3885 3886 rtw89_pci_filter_out(rtwdev); 3887 rtw89_pci_link_cfg(rtwdev); 3888 rtw89_pci_l1ss_cfg(rtwdev); 3889 3890 rtw89_core_napi_init(rtwdev); 3891 3892 ret = rtw89_pci_request_irq(rtwdev, pdev); 3893 if (ret) { 3894 rtw89_err(rtwdev, "failed to request pci irq\n"); 3895 goto err_deinit_napi; 3896 } 3897 3898 ret = rtw89_core_register(rtwdev); 3899 if (ret) { 3900 rtw89_err(rtwdev, "failed to register core\n"); 3901 goto err_free_irq; 3902 } 3903 3904 return 0; 3905 3906 err_free_irq: 3907 rtw89_pci_free_irq(rtwdev, pdev); 3908 err_deinit_napi: 3909 rtw89_core_napi_deinit(rtwdev); 3910 err_clear_resource: 3911 rtw89_pci_clear_resource(rtwdev, pdev); 3912 err_declaim_pci: 3913 rtw89_pci_declaim_device(rtwdev, pdev); 3914 err_core_deinit: 3915 rtw89_core_deinit(rtwdev); 3916 err_release_hw: 3917 rtw89_free_ieee80211_hw(rtwdev); 3918 3919 return ret; 3920 } 3921 EXPORT_SYMBOL(rtw89_pci_probe); 3922 3923 void rtw89_pci_remove(struct pci_dev *pdev) 3924 { 3925 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 3926 struct rtw89_dev *rtwdev; 3927 3928 rtwdev = hw->priv; 3929 3930 rtw89_pci_free_irq(rtwdev, pdev); 3931 rtw89_core_napi_deinit(rtwdev); 3932 rtw89_core_unregister(rtwdev); 3933 rtw89_pci_clear_resource(rtwdev, pdev); 3934 rtw89_pci_declaim_device(rtwdev, pdev); 3935 rtw89_core_deinit(rtwdev); 3936 rtw89_free_ieee80211_hw(rtwdev); 3937 } 3938 EXPORT_SYMBOL(rtw89_pci_remove); 3939 3940 MODULE_AUTHOR("Realtek Corporation"); 3941 MODULE_DESCRIPTION("Realtek 802.11ax wireless PCI driver"); 3942 MODULE_LICENSE("Dual BSD/GPL"); 3943