1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2020 Realtek Corporation 3 */ 4 5 #include <linux/pci.h> 6 7 #include "mac.h" 8 #include "pci.h" 9 #include "reg.h" 10 #include "ser.h" 11 12 static bool rtw89_pci_disable_clkreq; 13 static bool rtw89_pci_disable_aspm_l1; 14 static bool rtw89_pci_disable_l1ss; 15 module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644); 16 module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644); 17 module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644); 18 MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support"); 19 MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support"); 20 MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support"); 21 22 static int rtw89_pci_rst_bdram_pcie(struct rtw89_dev *rtwdev) 23 { 24 u32 val; 25 int ret; 26 27 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, 28 rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | B_AX_RST_BDRAM); 29 30 ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM), 31 1, RTW89_PCI_POLL_BDRAM_RST_CNT, false, 32 rtwdev, R_AX_PCIE_INIT_CFG1); 33 34 if (ret) 35 return -EBUSY; 36 37 return 0; 38 } 39 40 static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev, 41 struct rtw89_pci_dma_ring *bd_ring, 42 u32 cur_idx, bool tx) 43 { 44 u32 cnt, cur_rp, wp, rp, len; 45 46 rp = bd_ring->rp; 47 wp = bd_ring->wp; 48 len = bd_ring->len; 49 50 cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx); 51 if (tx) 52 cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp); 53 else 54 cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp); 55 56 bd_ring->rp = cur_rp; 57 58 return cnt; 59 } 60 61 static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev, 62 struct rtw89_pci_tx_ring *tx_ring) 63 { 64 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 65 u32 addr_idx = bd_ring->addr.idx; 66 u32 cnt, idx; 67 68 idx = rtw89_read32(rtwdev, addr_idx); 69 cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true); 70 71 return cnt; 72 } 73 74 static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev, 75 struct rtw89_pci *rtwpci, 76 u32 cnt, bool release_all) 77 { 78 struct rtw89_pci_tx_data *tx_data; 79 struct sk_buff *skb; 80 u32 qlen; 81 82 while (cnt--) { 83 skb = skb_dequeue(&rtwpci->h2c_queue); 84 if (!skb) { 85 rtw89_err(rtwdev, "failed to pre-release fwcmd\n"); 86 return; 87 } 88 skb_queue_tail(&rtwpci->h2c_release_queue, skb); 89 } 90 91 qlen = skb_queue_len(&rtwpci->h2c_release_queue); 92 if (!release_all) 93 qlen = qlen > RTW89_PCI_MULTITAG ? 
qlen - RTW89_PCI_MULTITAG : 0; 94 95 while (qlen--) { 96 skb = skb_dequeue(&rtwpci->h2c_release_queue); 97 if (!skb) { 98 rtw89_err(rtwdev, "failed to release fwcmd\n"); 99 return; 100 } 101 tx_data = RTW89_PCI_TX_SKB_CB(skb); 102 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, 103 DMA_TO_DEVICE); 104 dev_kfree_skb_any(skb); 105 } 106 } 107 108 static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev, 109 struct rtw89_pci *rtwpci) 110 { 111 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12]; 112 u32 cnt; 113 114 cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring); 115 if (!cnt) 116 return; 117 rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false); 118 } 119 120 static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev, 121 struct rtw89_pci_rx_ring *rx_ring) 122 { 123 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 124 u32 addr_idx = bd_ring->addr.idx; 125 u32 cnt, idx; 126 127 idx = rtw89_read32(rtwdev, addr_idx); 128 cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false); 129 130 return cnt; 131 } 132 133 static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev, 134 struct sk_buff *skb) 135 { 136 struct rtw89_pci_rx_info *rx_info; 137 dma_addr_t dma; 138 139 rx_info = RTW89_PCI_RX_SKB_CB(skb); 140 dma = rx_info->dma; 141 dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE, 142 DMA_FROM_DEVICE); 143 } 144 145 static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev, 146 struct sk_buff *skb) 147 { 148 struct rtw89_pci_rx_info *rx_info; 149 dma_addr_t dma; 150 151 rx_info = RTW89_PCI_RX_SKB_CB(skb); 152 dma = rx_info->dma; 153 dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE, 154 DMA_FROM_DEVICE); 155 } 156 157 static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev, 158 struct sk_buff *skb) 159 { 160 struct rtw89_pci_rxbd_info *rxbd_info; 161 struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); 162 163 rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data; 164 rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS); 165 rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS); 166 rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE); 167 rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG); 168 169 return 0; 170 } 171 172 static void rtw89_pci_ctrl_txdma_ch_pcie(struct rtw89_dev *rtwdev, bool enable) 173 { 174 const struct rtw89_pci_info *info = rtwdev->pci_info; 175 const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1; 176 const struct rtw89_reg_def *dma_stop2 = &info->dma_stop2; 177 178 if (enable) { 179 rtw89_write32_clr(rtwdev, dma_stop1->addr, dma_stop1->mask); 180 if (dma_stop2->addr) 181 rtw89_write32_clr(rtwdev, dma_stop2->addr, dma_stop2->mask); 182 } else { 183 rtw89_write32_set(rtwdev, dma_stop1->addr, dma_stop1->mask); 184 if (dma_stop2->addr) 185 rtw89_write32_set(rtwdev, dma_stop2->addr, dma_stop2->mask); 186 } 187 } 188 189 static void rtw89_pci_ctrl_txdma_fw_ch_pcie(struct rtw89_dev *rtwdev, bool enable) 190 { 191 const struct rtw89_pci_info *info = rtwdev->pci_info; 192 const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1; 193 194 if (enable) 195 rtw89_write32_clr(rtwdev, dma_stop1->addr, B_AX_STOP_CH12); 196 else 197 rtw89_write32_set(rtwdev, dma_stop1->addr, B_AX_STOP_CH12); 198 } 199 200 static bool 201 rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls, 202 struct sk_buff *new, 203 const struct sk_buff *skb, u32 offset, 204 const struct rtw89_pci_rx_info *rx_info, 205 const struct 
rtw89_rx_desc_info *desc_info) 206 { 207 u32 copy_len = rx_info->len - offset; 208 209 if (unlikely(skb_tailroom(new) < copy_len)) { 210 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 211 "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n", 212 rx_info->len, desc_info->pkt_size, offset, fs, ls); 213 rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ", 214 skb->data, rx_info->len); 215 /* length of a single segment skb is desc_info->pkt_size */ 216 if (fs && ls) { 217 copy_len = desc_info->pkt_size; 218 } else { 219 rtw89_info(rtwdev, "drop rx data due to invalid length\n"); 220 return false; 221 } 222 } 223 224 skb_put_data(new, skb->data + offset, copy_len); 225 226 return true; 227 } 228 229 static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev, 230 struct rtw89_pci_rx_ring *rx_ring) 231 { 232 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 233 struct rtw89_pci_rx_info *rx_info; 234 struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc; 235 struct sk_buff *new = rx_ring->diliver_skb; 236 struct sk_buff *skb; 237 u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info); 238 u32 offset; 239 u32 cnt = 1; 240 bool fs, ls; 241 int ret; 242 243 skb = rx_ring->buf[bd_ring->wp]; 244 rtw89_pci_sync_skb_for_cpu(rtwdev, skb); 245 246 ret = rtw89_pci_rxbd_info_update(rtwdev, skb); 247 if (ret) { 248 rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n", 249 bd_ring->wp, ret); 250 goto err_sync_device; 251 } 252 253 rx_info = RTW89_PCI_RX_SKB_CB(skb); 254 fs = rx_info->fs; 255 ls = rx_info->ls; 256 257 if (fs) { 258 if (new) { 259 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, 260 "skb should not be ready before first segment start\n"); 261 goto err_sync_device; 262 } 263 if (desc_info->ready) { 264 rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n"); 265 goto err_sync_device; 266 } 267 268 rtw89_core_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size); 269 270 new = rtw89_alloc_skb_for_rx(rtwdev, desc_info->pkt_size); 271 if (!new) 272 goto err_sync_device; 273 274 rx_ring->diliver_skb = new; 275 276 /* first segment has RX desc */ 277 offset = desc_info->offset; 278 offset += desc_info->long_rxdesc ? 
sizeof(struct rtw89_rxdesc_long) : 279 sizeof(struct rtw89_rxdesc_short); 280 } else { 281 offset = sizeof(struct rtw89_pci_rxbd_info); 282 if (!new) { 283 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n"); 284 goto err_sync_device; 285 } 286 } 287 if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info)) 288 goto err_sync_device; 289 rtw89_pci_sync_skb_for_device(rtwdev, skb); 290 rtw89_pci_rxbd_increase(rx_ring, 1); 291 292 if (!desc_info->ready) { 293 rtw89_warn(rtwdev, "no rx desc information\n"); 294 goto err_free_resource; 295 } 296 if (ls) { 297 rtw89_core_rx(rtwdev, desc_info, new); 298 rx_ring->diliver_skb = NULL; 299 desc_info->ready = false; 300 } 301 302 return cnt; 303 304 err_sync_device: 305 rtw89_pci_sync_skb_for_device(rtwdev, skb); 306 rtw89_pci_rxbd_increase(rx_ring, 1); 307 err_free_resource: 308 if (new) 309 dev_kfree_skb_any(new); 310 rx_ring->diliver_skb = NULL; 311 desc_info->ready = false; 312 313 return cnt; 314 } 315 316 static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev, 317 struct rtw89_pci_rx_ring *rx_ring, 318 u32 cnt) 319 { 320 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 321 u32 rx_cnt; 322 323 while (cnt && rtwdev->napi_budget_countdown > 0) { 324 rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring); 325 if (!rx_cnt) { 326 rtw89_err(rtwdev, "failed to deliver RXBD skb\n"); 327 328 /* skip the rest RXBD bufs */ 329 rtw89_pci_rxbd_increase(rx_ring, cnt); 330 break; 331 } 332 333 cnt -= rx_cnt; 334 } 335 336 rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp); 337 } 338 339 static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev, 340 struct rtw89_pci *rtwpci, int budget) 341 { 342 struct rtw89_pci_rx_ring *rx_ring; 343 int countdown = rtwdev->napi_budget_countdown; 344 u32 cnt; 345 346 rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ]; 347 348 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); 349 if (!cnt) 350 return 0; 351 352 cnt = min_t(u32, budget, cnt); 353 354 rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt); 355 356 /* In case of flushing pending SKBs, the countdown may exceed. 
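 * the remaining budget (drop to zero or below); in that case the full
 * budget is reported as consumed so the caller keeps polling.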
*/ 357 if (rtwdev->napi_budget_countdown <= 0) 358 return budget; 359 360 return budget - countdown; 361 } 362 363 static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev, 364 struct rtw89_pci_tx_ring *tx_ring, 365 struct sk_buff *skb, u8 tx_status) 366 { 367 struct ieee80211_tx_info *info; 368 369 info = IEEE80211_SKB_CB(skb); 370 ieee80211_tx_info_clear_status(info); 371 372 if (info->flags & IEEE80211_TX_CTL_NO_ACK) 373 info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; 374 if (tx_status == RTW89_TX_DONE) { 375 info->flags |= IEEE80211_TX_STAT_ACK; 376 tx_ring->tx_acked++; 377 } else { 378 if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) 379 rtw89_debug(rtwdev, RTW89_DBG_FW, 380 "failed to TX of status %x\n", tx_status); 381 switch (tx_status) { 382 case RTW89_TX_RETRY_LIMIT: 383 tx_ring->tx_retry_lmt++; 384 break; 385 case RTW89_TX_LIFE_TIME: 386 tx_ring->tx_life_time++; 387 break; 388 case RTW89_TX_MACID_DROP: 389 tx_ring->tx_mac_id_drop++; 390 break; 391 default: 392 rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status); 393 break; 394 } 395 } 396 397 ieee80211_tx_status_ni(rtwdev->hw, skb); 398 } 399 400 static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring) 401 { 402 struct rtw89_pci_tx_wd *txwd; 403 u32 cnt; 404 405 cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring); 406 while (cnt--) { 407 txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list); 408 if (!txwd) { 409 rtw89_warn(rtwdev, "No busy txwd pages available\n"); 410 break; 411 } 412 413 list_del_init(&txwd->list); 414 415 /* this skb has been freed by RPP */ 416 if (skb_queue_len(&txwd->queue) == 0) 417 rtw89_pci_enqueue_txwd(tx_ring, txwd); 418 } 419 } 420 421 static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev, 422 struct rtw89_pci_tx_ring *tx_ring) 423 { 424 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 425 struct rtw89_pci_tx_wd *txwd; 426 int i; 427 428 for (i = 0; i < wd_ring->page_num; i++) { 429 txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list); 430 if (!txwd) 431 break; 432 433 list_del_init(&txwd->list); 434 } 435 } 436 437 static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev, 438 struct rtw89_pci_tx_ring *tx_ring, 439 struct rtw89_pci_tx_wd *txwd, u16 seq, 440 u8 tx_status) 441 { 442 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 443 struct rtw89_pci_tx_data *tx_data; 444 struct sk_buff *skb, *tmp; 445 u8 txch = tx_ring->txch; 446 447 if (!list_empty(&txwd->list)) { 448 rtw89_pci_reclaim_txbd(rtwdev, tx_ring); 449 /* In low power mode, RPP can receive before updating of TX BD. 450 * In normal mode, it should not happen so give it a warning. 
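 * Here that means a release report arrived for a txwd that is still
 * sitting on the busy_pages list even after reclaiming TX BDs.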
451 */ 452 if (!rtwpci->low_power && !list_empty(&txwd->list)) 453 rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n", 454 txch, seq); 455 } 456 457 skb_queue_walk_safe(&txwd->queue, skb, tmp) { 458 skb_unlink(skb, &txwd->queue); 459 460 tx_data = RTW89_PCI_TX_SKB_CB(skb); 461 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, 462 DMA_TO_DEVICE); 463 464 rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status); 465 } 466 467 if (list_empty(&txwd->list)) 468 rtw89_pci_enqueue_txwd(tx_ring, txwd); 469 } 470 471 static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev, 472 struct rtw89_pci_rpp_fmt *rpp) 473 { 474 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 475 struct rtw89_pci_tx_ring *tx_ring; 476 struct rtw89_pci_tx_wd_ring *wd_ring; 477 struct rtw89_pci_tx_wd *txwd; 478 u16 seq; 479 u8 qsel, tx_status, txch; 480 481 seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ); 482 qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL); 483 tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS); 484 txch = rtw89_core_get_ch_dma(rtwdev, qsel); 485 486 if (txch == RTW89_TXCH_CH12) { 487 rtw89_warn(rtwdev, "should no fwcmd release report\n"); 488 return; 489 } 490 491 tx_ring = &rtwpci->tx_rings[txch]; 492 wd_ring = &tx_ring->wd_ring; 493 txwd = &wd_ring->pages[seq]; 494 495 rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status); 496 } 497 498 static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev, 499 struct rtw89_pci_tx_ring *tx_ring) 500 { 501 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 502 struct rtw89_pci_tx_wd *txwd; 503 int i; 504 505 for (i = 0; i < wd_ring->page_num; i++) { 506 txwd = &wd_ring->pages[i]; 507 508 if (!list_empty(&txwd->list)) 509 continue; 510 511 rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP); 512 } 513 } 514 515 static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev, 516 struct rtw89_pci_rx_ring *rx_ring, 517 u32 max_cnt) 518 { 519 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 520 struct rtw89_pci_rx_info *rx_info; 521 struct rtw89_pci_rpp_fmt *rpp; 522 struct rtw89_rx_desc_info desc_info = {}; 523 struct sk_buff *skb; 524 u32 cnt = 0; 525 u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt); 526 u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info); 527 u32 offset; 528 int ret; 529 530 skb = rx_ring->buf[bd_ring->wp]; 531 rtw89_pci_sync_skb_for_cpu(rtwdev, skb); 532 533 ret = rtw89_pci_rxbd_info_update(rtwdev, skb); 534 if (ret) { 535 rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n", 536 bd_ring->wp, ret); 537 goto err_sync_device; 538 } 539 540 rx_info = RTW89_PCI_RX_SKB_CB(skb); 541 if (!rx_info->fs || !rx_info->ls) { 542 rtw89_err(rtwdev, "cannot process RP frame not set FS/LS\n"); 543 return cnt; 544 } 545 546 rtw89_core_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size); 547 548 /* first segment has RX desc */ 549 offset = desc_info.offset; 550 offset += desc_info.long_rxdesc ? 
sizeof(struct rtw89_rxdesc_long) : 551 sizeof(struct rtw89_rxdesc_short); 552 for (; offset + rpp_size <= rx_info->len; offset += rpp_size) { 553 rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset); 554 rtw89_pci_release_rpp(rtwdev, rpp); 555 } 556 557 rtw89_pci_sync_skb_for_device(rtwdev, skb); 558 rtw89_pci_rxbd_increase(rx_ring, 1); 559 cnt++; 560 561 return cnt; 562 563 err_sync_device: 564 rtw89_pci_sync_skb_for_device(rtwdev, skb); 565 return 0; 566 } 567 568 static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev, 569 struct rtw89_pci_rx_ring *rx_ring, 570 u32 cnt) 571 { 572 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 573 u32 release_cnt; 574 575 while (cnt) { 576 release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt); 577 if (!release_cnt) { 578 rtw89_err(rtwdev, "failed to release TX skbs\n"); 579 580 /* skip the rest RXBD bufs */ 581 rtw89_pci_rxbd_increase(rx_ring, cnt); 582 break; 583 } 584 585 cnt -= release_cnt; 586 } 587 588 rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp); 589 } 590 591 static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev, 592 struct rtw89_pci *rtwpci, int budget) 593 { 594 struct rtw89_pci_rx_ring *rx_ring; 595 u32 cnt; 596 int work_done; 597 598 rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ]; 599 600 spin_lock_bh(&rtwpci->trx_lock); 601 602 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); 603 if (cnt == 0) 604 goto out_unlock; 605 606 rtw89_pci_release_tx(rtwdev, rx_ring, cnt); 607 608 out_unlock: 609 spin_unlock_bh(&rtwpci->trx_lock); 610 611 /* always release all RPQ */ 612 work_done = min_t(int, cnt, budget); 613 rtwdev->napi_budget_countdown -= work_done; 614 615 return work_done; 616 } 617 618 static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev, 619 struct rtw89_pci *rtwpci) 620 { 621 struct rtw89_pci_rx_ring *rx_ring; 622 struct rtw89_pci_dma_ring *bd_ring; 623 u32 reg_idx; 624 u16 hw_idx, hw_idx_next, host_idx; 625 int i; 626 627 for (i = 0; i < RTW89_RXCH_NUM; i++) { 628 rx_ring = &rtwpci->rx_rings[i]; 629 bd_ring = &rx_ring->bd_ring; 630 631 reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx); 632 hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx); 633 host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx); 634 hw_idx_next = (hw_idx + 1) % bd_ring->len; 635 636 if (hw_idx_next == host_idx) 637 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i); 638 639 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 640 "%d RXD unavailable, idx=0x%08x, len=%d\n", 641 i, reg_idx, bd_ring->len); 642 } 643 } 644 645 void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev, 646 struct rtw89_pci *rtwpci, 647 struct rtw89_pci_isrs *isrs) 648 { 649 isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs; 650 isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0]; 651 isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1]; 652 653 rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs); 654 rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]); 655 rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]); 656 } 657 EXPORT_SYMBOL(rtw89_pci_recognize_intrs); 658 659 void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev, 660 struct rtw89_pci *rtwpci, 661 struct rtw89_pci_isrs *isrs) 662 { 663 isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs; 664 isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ? 665 rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0; 666 isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ? 
667 rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0; 668 isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ? 669 rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0; 670 671 if (isrs->halt_c2h_isrs) 672 rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs); 673 if (isrs->isrs[0]) 674 rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]); 675 if (isrs->isrs[1]) 676 rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]); 677 } 678 EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1); 679 680 static void rtw89_pci_clear_isr0(struct rtw89_dev *rtwdev, u32 isr00) 681 { 682 /* write 1 clear */ 683 rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isr00); 684 } 685 686 void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 687 { 688 rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs); 689 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]); 690 rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]); 691 } 692 EXPORT_SYMBOL(rtw89_pci_enable_intr); 693 694 void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 695 { 696 rtw89_write32(rtwdev, R_AX_HIMR0, 0); 697 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0); 698 rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0); 699 } 700 EXPORT_SYMBOL(rtw89_pci_disable_intr); 701 702 void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 703 { 704 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs); 705 rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs); 706 rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]); 707 rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]); 708 } 709 EXPORT_SYMBOL(rtw89_pci_enable_intr_v1); 710 711 void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 712 { 713 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0); 714 } 715 EXPORT_SYMBOL(rtw89_pci_disable_intr_v1); 716 717 static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev) 718 { 719 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 720 unsigned long flags; 721 722 spin_lock_irqsave(&rtwpci->irq_lock, flags); 723 rtw89_chip_disable_intr(rtwdev, rtwpci); 724 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START); 725 rtw89_chip_enable_intr(rtwdev, rtwpci); 726 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 727 } 728 729 static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev) 730 { 731 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 732 unsigned long flags; 733 734 spin_lock_irqsave(&rtwpci->irq_lock, flags); 735 rtw89_chip_disable_intr(rtwdev, rtwpci); 736 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE); 737 rtw89_chip_enable_intr(rtwdev, rtwpci); 738 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 739 } 740 741 static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev) 742 { 743 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 744 int budget = NAPI_POLL_WEIGHT; 745 746 /* To prevent RXQ get stuck due to run out of budget. 
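 * give this out-of-NAPI path a fresh NAPI_POLL_WEIGHT worth of budget
 * before polling the RPQ and RXQ rings below.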
*/ 747 rtwdev->napi_budget_countdown = budget; 748 749 rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget); 750 rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget); 751 } 752 753 static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev) 754 { 755 struct rtw89_dev *rtwdev = dev; 756 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 757 struct rtw89_pci_isrs isrs; 758 unsigned long flags; 759 760 spin_lock_irqsave(&rtwpci->irq_lock, flags); 761 rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs); 762 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 763 764 if (unlikely(isrs.isrs[0] & B_AX_RDU_INT)) 765 rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci); 766 767 if (unlikely(isrs.halt_c2h_isrs & B_AX_HALT_C2H_INT_EN)) 768 rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev)); 769 770 if (unlikely(isrs.halt_c2h_isrs & B_AX_WDT_TIMEOUT_INT_EN)) 771 rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT); 772 773 if (unlikely(rtwpci->under_recovery)) 774 goto enable_intr; 775 776 if (unlikely(rtwpci->low_power)) { 777 rtw89_pci_low_power_interrupt_handler(rtwdev); 778 goto enable_intr; 779 } 780 781 if (likely(rtwpci->running)) { 782 local_bh_disable(); 783 napi_schedule(&rtwdev->napi); 784 local_bh_enable(); 785 } 786 787 return IRQ_HANDLED; 788 789 enable_intr: 790 spin_lock_irqsave(&rtwpci->irq_lock, flags); 791 if (likely(rtwpci->running)) 792 rtw89_chip_enable_intr(rtwdev, rtwpci); 793 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 794 return IRQ_HANDLED; 795 } 796 797 static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev) 798 { 799 struct rtw89_dev *rtwdev = dev; 800 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 801 unsigned long flags; 802 irqreturn_t irqret = IRQ_WAKE_THREAD; 803 804 spin_lock_irqsave(&rtwpci->irq_lock, flags); 805 806 /* If interrupt event is on the road, it is still trigger interrupt 807 * even we have done pci_stop() to turn off IMR. 808 */ 809 if (unlikely(!rtwpci->running)) { 810 irqret = IRQ_HANDLED; 811 goto exit; 812 } 813 814 rtw89_chip_disable_intr(rtwdev, rtwpci); 815 exit: 816 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 817 818 return irqret; 819 } 820 821 #define DEF_TXCHADDRS_TYPE1(info, txch, v...) \ 822 [RTW89_TXCH_##txch] = { \ 823 .num = R_AX_##txch##_TXBD_NUM ##v, \ 824 .idx = R_AX_##txch##_TXBD_IDX ##v, \ 825 .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \ 826 .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \ 827 .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \ 828 } 829 830 #define DEF_TXCHADDRS(info, txch, v...) \ 831 [RTW89_TXCH_##txch] = { \ 832 .num = R_AX_##txch##_TXBD_NUM, \ 833 .idx = R_AX_##txch##_TXBD_IDX, \ 834 .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \ 835 .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \ 836 .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \ 837 } 838 839 #define DEF_RXCHADDRS(info, rxch, v...) 
\ 840 [RTW89_RXCH_##rxch] = { \ 841 .num = R_AX_##rxch##_RXBD_NUM ##v, \ 842 .idx = R_AX_##rxch##_RXBD_IDX ##v, \ 843 .desa_l = R_AX_##rxch##_RXBD_DESA_L ##v, \ 844 .desa_h = R_AX_##rxch##_RXBD_DESA_H ##v, \ 845 } 846 847 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = { 848 .tx = { 849 DEF_TXCHADDRS(info, ACH0), 850 DEF_TXCHADDRS(info, ACH1), 851 DEF_TXCHADDRS(info, ACH2), 852 DEF_TXCHADDRS(info, ACH3), 853 DEF_TXCHADDRS(info, ACH4), 854 DEF_TXCHADDRS(info, ACH5), 855 DEF_TXCHADDRS(info, ACH6), 856 DEF_TXCHADDRS(info, ACH7), 857 DEF_TXCHADDRS(info, CH8), 858 DEF_TXCHADDRS(info, CH9), 859 DEF_TXCHADDRS_TYPE1(info, CH10), 860 DEF_TXCHADDRS_TYPE1(info, CH11), 861 DEF_TXCHADDRS(info, CH12), 862 }, 863 .rx = { 864 DEF_RXCHADDRS(info, RXQ), 865 DEF_RXCHADDRS(info, RPQ), 866 }, 867 }; 868 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set); 869 870 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = { 871 .tx = { 872 DEF_TXCHADDRS(info, ACH0, _V1), 873 DEF_TXCHADDRS(info, ACH1, _V1), 874 DEF_TXCHADDRS(info, ACH2, _V1), 875 DEF_TXCHADDRS(info, ACH3, _V1), 876 DEF_TXCHADDRS(info, ACH4, _V1), 877 DEF_TXCHADDRS(info, ACH5, _V1), 878 DEF_TXCHADDRS(info, ACH6, _V1), 879 DEF_TXCHADDRS(info, ACH7, _V1), 880 DEF_TXCHADDRS(info, CH8, _V1), 881 DEF_TXCHADDRS(info, CH9, _V1), 882 DEF_TXCHADDRS_TYPE1(info, CH10, _V1), 883 DEF_TXCHADDRS_TYPE1(info, CH11, _V1), 884 DEF_TXCHADDRS(info, CH12, _V1), 885 }, 886 .rx = { 887 DEF_RXCHADDRS(info, RXQ, _V1), 888 DEF_RXCHADDRS(info, RPQ, _V1), 889 }, 890 }; 891 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1); 892 893 #undef DEF_TXCHADDRS_TYPE1 894 #undef DEF_TXCHADDRS 895 #undef DEF_RXCHADDRS 896 897 static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev, 898 enum rtw89_tx_channel txch, 899 const struct rtw89_pci_ch_dma_addr **addr) 900 { 901 const struct rtw89_pci_info *info = rtwdev->pci_info; 902 903 if (txch >= RTW89_TXCH_NUM) 904 return -EINVAL; 905 906 *addr = &info->dma_addr_set->tx[txch]; 907 908 return 0; 909 } 910 911 static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev, 912 enum rtw89_rx_channel rxch, 913 const struct rtw89_pci_ch_dma_addr **addr) 914 { 915 const struct rtw89_pci_info *info = rtwdev->pci_info; 916 917 if (rxch >= RTW89_RXCH_NUM) 918 return -EINVAL; 919 920 *addr = &info->dma_addr_set->rx[rxch]; 921 922 return 0; 923 } 924 925 static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring) 926 { 927 struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring; 928 929 /* reserved 1 desc check ring is full or not */ 930 if (bd_ring->rp > bd_ring->wp) 931 return bd_ring->rp - bd_ring->wp - 1; 932 933 return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1; 934 } 935 936 static 937 u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev) 938 { 939 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 940 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12]; 941 u32 cnt; 942 943 spin_lock_bh(&rtwpci->trx_lock); 944 rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci); 945 cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 946 spin_unlock_bh(&rtwpci->trx_lock); 947 948 return cnt; 949 } 950 951 static 952 u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev, 953 u8 txch) 954 { 955 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 956 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 957 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 958 u32 cnt; 959 960 spin_lock_bh(&rtwpci->trx_lock); 961 cnt = 
rtw89_pci_get_avail_txbd_num(tx_ring); 962 cnt = min(cnt, wd_ring->curr_num); 963 spin_unlock_bh(&rtwpci->trx_lock); 964 965 return cnt; 966 } 967 968 static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, 969 u8 txch) 970 { 971 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 972 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 973 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 974 const struct rtw89_chip_info *chip = rtwdev->chip; 975 u32 bd_cnt, wd_cnt, min_cnt = 0; 976 struct rtw89_pci_rx_ring *rx_ring; 977 enum rtw89_debug_mask debug_mask; 978 u32 cnt; 979 980 rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ]; 981 982 spin_lock_bh(&rtwpci->trx_lock); 983 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 984 wd_cnt = wd_ring->curr_num; 985 986 if (wd_cnt == 0 || bd_cnt == 0) { 987 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); 988 if (cnt) 989 rtw89_pci_release_tx(rtwdev, rx_ring, cnt); 990 else if (wd_cnt == 0) 991 goto out_unlock; 992 993 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 994 if (bd_cnt == 0) 995 rtw89_pci_reclaim_txbd(rtwdev, tx_ring); 996 } 997 998 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 999 wd_cnt = wd_ring->curr_num; 1000 min_cnt = min(bd_cnt, wd_cnt); 1001 if (min_cnt == 0) { 1002 /* This message can be frequently shown in low power mode or 1003 * high traffic with 8852B, and we have recognized it as normal 1004 * behavior, so print with mask RTW89_DBG_TXRX in these situations. 1005 */ 1006 if (rtwpci->low_power || chip->chip_id == RTL8852B) 1007 debug_mask = RTW89_DBG_TXRX; 1008 else 1009 debug_mask = RTW89_DBG_UNEXP; 1010 1011 rtw89_debug(rtwdev, debug_mask, 1012 "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n", 1013 wd_cnt, bd_cnt); 1014 } 1015 1016 out_unlock: 1017 spin_unlock_bh(&rtwpci->trx_lock); 1018 1019 return min_cnt; 1020 } 1021 1022 static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, 1023 u8 txch) 1024 { 1025 if (rtwdev->hci.paused) 1026 return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch); 1027 1028 if (txch == RTW89_TXCH_CH12) 1029 return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev); 1030 1031 return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch); 1032 } 1033 1034 static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring) 1035 { 1036 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1037 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 1038 u32 host_idx, addr; 1039 1040 spin_lock_bh(&rtwpci->trx_lock); 1041 1042 addr = bd_ring->addr.idx; 1043 host_idx = bd_ring->wp; 1044 rtw89_write16(rtwdev, addr, host_idx); 1045 1046 spin_unlock_bh(&rtwpci->trx_lock); 1047 } 1048 1049 static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring, 1050 int n_txbd) 1051 { 1052 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 1053 u32 host_idx, len; 1054 1055 len = bd_ring->len; 1056 host_idx = bd_ring->wp + n_txbd; 1057 host_idx = host_idx < len ? 
host_idx : host_idx - len; 1058 1059 bd_ring->wp = host_idx; 1060 } 1061 1062 static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch) 1063 { 1064 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1065 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 1066 1067 if (rtwdev->hci.paused) { 1068 set_bit(txch, rtwpci->kick_map); 1069 return; 1070 } 1071 1072 __rtw89_pci_tx_kick_off(rtwdev, tx_ring); 1073 } 1074 1075 static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev) 1076 { 1077 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1078 struct rtw89_pci_tx_ring *tx_ring; 1079 int txch; 1080 1081 for (txch = 0; txch < RTW89_TXCH_NUM; txch++) { 1082 if (!test_and_clear_bit(txch, rtwpci->kick_map)) 1083 continue; 1084 1085 tx_ring = &rtwpci->tx_rings[txch]; 1086 __rtw89_pci_tx_kick_off(rtwdev, tx_ring); 1087 } 1088 } 1089 1090 static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop) 1091 { 1092 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1093 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 1094 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 1095 u32 cur_idx, cur_rp; 1096 u8 i; 1097 1098 /* Because the time taked by the I/O is a bit dynamic, it's hard to 1099 * define a reasonable fixed total timeout to use read_poll_timeout* 1100 * helper. Instead, we can ensure a reasonable polling times, so we 1101 * just use for loop with udelay here. 1102 */ 1103 for (i = 0; i < 60; i++) { 1104 cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx); 1105 cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx); 1106 if (cur_rp == bd_ring->wp) 1107 return; 1108 1109 udelay(1); 1110 } 1111 1112 if (!drop) 1113 rtw89_info(rtwdev, "timed out to flush pci txch: %d\n", txch); 1114 } 1115 1116 static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs, 1117 bool drop) 1118 { 1119 const struct rtw89_pci_info *info = rtwdev->pci_info; 1120 u8 i; 1121 1122 for (i = 0; i < RTW89_TXCH_NUM; i++) { 1123 /* It may be unnecessary to flush FWCMD queue. */ 1124 if (i == RTW89_TXCH_CH12) 1125 continue; 1126 if (info->tx_dma_ch_mask & BIT(i)) 1127 continue; 1128 1129 if (txchs & BIT(i)) 1130 __pci_flush_txch(rtwdev, i, drop); 1131 } 1132 } 1133 1134 static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues, 1135 bool drop) 1136 { 1137 __rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop); 1138 } 1139 1140 u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev, 1141 void *txaddr_info_addr, u32 total_len, 1142 dma_addr_t dma, u8 *add_info_nr) 1143 { 1144 struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr; 1145 1146 txaddr_info->length = cpu_to_le16(total_len); 1147 txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | 1148 RTW89_PCI_ADDR_NUM(1)); 1149 txaddr_info->dma = cpu_to_le32(dma); 1150 1151 *add_info_nr = 1; 1152 1153 return sizeof(*txaddr_info); 1154 } 1155 EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info); 1156 1157 u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev, 1158 void *txaddr_info_addr, u32 total_len, 1159 dma_addr_t dma, u8 *add_info_nr) 1160 { 1161 struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr; 1162 u32 remain = total_len; 1163 u32 len; 1164 u16 length_option; 1165 int n; 1166 1167 for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) { 1168 len = remain >= TXADDR_INFO_LENTHG_V1_MAX ? 
1169 TXADDR_INFO_LENTHG_V1_MAX : remain; 1170 remain -= len; 1171 1172 length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) | 1173 FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) | 1174 FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0); 1175 txaddr_info->length_opt = cpu_to_le16(length_option); 1176 txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma)); 1177 txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma)); 1178 1179 dma += len; 1180 txaddr_info++; 1181 } 1182 1183 WARN_ONCE(remain, "length overflow remain=%u total_len=%u", 1184 remain, total_len); 1185 1186 *add_info_nr = n; 1187 1188 return n * sizeof(*txaddr_info); 1189 } 1190 EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1); 1191 1192 static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, 1193 struct rtw89_pci_tx_ring *tx_ring, 1194 struct rtw89_pci_tx_wd *txwd, 1195 struct rtw89_core_tx_request *tx_req) 1196 { 1197 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1198 const struct rtw89_chip_info *chip = rtwdev->chip; 1199 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 1200 struct rtw89_txwd_info *txwd_info; 1201 struct rtw89_pci_tx_wp_info *txwp_info; 1202 void *txaddr_info_addr; 1203 struct pci_dev *pdev = rtwpci->pdev; 1204 struct sk_buff *skb = tx_req->skb; 1205 struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); 1206 bool en_wd_info = desc_info->en_wd_info; 1207 u32 txwd_len; 1208 u32 txwp_len; 1209 u32 txaddr_info_len; 1210 dma_addr_t dma; 1211 int ret; 1212 1213 dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); 1214 if (dma_mapping_error(&pdev->dev, dma)) { 1215 rtw89_err(rtwdev, "failed to map skb dma data\n"); 1216 ret = -EBUSY; 1217 goto err; 1218 } 1219 1220 tx_data->dma = dma; 1221 1222 txwp_len = sizeof(*txwp_info); 1223 txwd_len = chip->txwd_body_size; 1224 txwd_len += en_wd_info ? 
sizeof(*txwd_info) : 0; 1225 1226 txwp_info = txwd->vaddr + txwd_len; 1227 txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID); 1228 txwp_info->seq1 = 0; 1229 txwp_info->seq2 = 0; 1230 txwp_info->seq3 = 0; 1231 1232 tx_ring->tx_cnt++; 1233 txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len; 1234 txaddr_info_len = 1235 rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len, 1236 dma, &desc_info->addr_info_nr); 1237 1238 txwd->len = txwd_len + txwp_len + txaddr_info_len; 1239 1240 rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr); 1241 1242 skb_queue_tail(&txwd->queue, skb); 1243 1244 return 0; 1245 1246 err: 1247 return ret; 1248 } 1249 1250 static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev, 1251 struct rtw89_pci_tx_ring *tx_ring, 1252 struct rtw89_pci_tx_bd_32 *txbd, 1253 struct rtw89_core_tx_request *tx_req) 1254 { 1255 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1256 const struct rtw89_chip_info *chip = rtwdev->chip; 1257 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 1258 void *txdesc; 1259 int txdesc_size = chip->h2c_desc_size; 1260 struct pci_dev *pdev = rtwpci->pdev; 1261 struct sk_buff *skb = tx_req->skb; 1262 struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); 1263 dma_addr_t dma; 1264 1265 txdesc = skb_push(skb, txdesc_size); 1266 memset(txdesc, 0, txdesc_size); 1267 rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc); 1268 1269 dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); 1270 if (dma_mapping_error(&pdev->dev, dma)) { 1271 rtw89_err(rtwdev, "failed to map fwcmd dma data\n"); 1272 return -EBUSY; 1273 } 1274 1275 tx_data->dma = dma; 1276 txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS); 1277 txbd->length = cpu_to_le16(skb->len); 1278 txbd->dma = cpu_to_le32(tx_data->dma); 1279 skb_queue_tail(&rtwpci->h2c_queue, skb); 1280 1281 rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1); 1282 1283 return 0; 1284 } 1285 1286 static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev, 1287 struct rtw89_pci_tx_ring *tx_ring, 1288 struct rtw89_pci_tx_bd_32 *txbd, 1289 struct rtw89_core_tx_request *tx_req) 1290 { 1291 struct rtw89_pci_tx_wd *txwd; 1292 int ret; 1293 1294 /* FWCMD queue doesn't have wd pages. Instead, it submits the CMD 1295 * buffer with WD BODY only. So here we don't need to check the free 1296 * pages of the wd ring. 
1297 */ 1298 if (tx_ring->txch == RTW89_TXCH_CH12) 1299 return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req); 1300 1301 txwd = rtw89_pci_dequeue_txwd(tx_ring); 1302 if (!txwd) { 1303 rtw89_err(rtwdev, "no available TXWD\n"); 1304 ret = -ENOSPC; 1305 goto err; 1306 } 1307 1308 ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req); 1309 if (ret) { 1310 rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq); 1311 goto err_enqueue_wd; 1312 } 1313 1314 list_add_tail(&txwd->list, &tx_ring->busy_pages); 1315 1316 txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS); 1317 txbd->length = cpu_to_le16(txwd->len); 1318 txbd->dma = cpu_to_le32(txwd->paddr); 1319 1320 rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1); 1321 1322 return 0; 1323 1324 err_enqueue_wd: 1325 rtw89_pci_enqueue_txwd(tx_ring, txwd); 1326 err: 1327 return ret; 1328 } 1329 1330 static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req, 1331 u8 txch) 1332 { 1333 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1334 struct rtw89_pci_tx_ring *tx_ring; 1335 struct rtw89_pci_tx_bd_32 *txbd; 1336 u32 n_avail_txbd; 1337 int ret = 0; 1338 1339 /* check the tx type and dma channel for fw cmd queue */ 1340 if ((txch == RTW89_TXCH_CH12 || 1341 tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) && 1342 (txch != RTW89_TXCH_CH12 || 1343 tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) { 1344 rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n"); 1345 return -EINVAL; 1346 } 1347 1348 tx_ring = &rtwpci->tx_rings[txch]; 1349 spin_lock_bh(&rtwpci->trx_lock); 1350 1351 n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring); 1352 if (n_avail_txbd == 0) { 1353 rtw89_err(rtwdev, "no available TXBD\n"); 1354 ret = -ENOSPC; 1355 goto err_unlock; 1356 } 1357 1358 txbd = rtw89_pci_get_next_txbd(tx_ring); 1359 ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req); 1360 if (ret) { 1361 rtw89_err(rtwdev, "failed to submit TXBD\n"); 1362 goto err_unlock; 1363 } 1364 1365 spin_unlock_bh(&rtwpci->trx_lock); 1366 return 0; 1367 1368 err_unlock: 1369 spin_unlock_bh(&rtwpci->trx_lock); 1370 return ret; 1371 } 1372 1373 static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) 1374 { 1375 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 1376 int ret; 1377 1378 ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma); 1379 if (ret) { 1380 rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma); 1381 return ret; 1382 } 1383 1384 return 0; 1385 } 1386 1387 static const struct rtw89_pci_bd_ram bd_ram_table[RTW89_TXCH_NUM] = { 1388 [RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2}, 1389 [RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2}, 1390 [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2}, 1391 [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2}, 1392 [RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2}, 1393 [RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2}, 1394 [RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2}, 1395 [RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2}, 1396 [RTW89_TXCH_CH8] = {.start_idx = 40, .max_num = 5, .min_num = 1}, 1397 [RTW89_TXCH_CH9] = {.start_idx = 45, .max_num = 5, .min_num = 1}, 1398 [RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1}, 1399 [RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1}, 1400 [RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1}, 1401 }; 1402 
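/* Reprogram the host view of every DMA ring: each enabled TX channel gets
 * its ring length, its BD RAM settings (start index plus max/min entry
 * counts taken from bd_ram_table above, e.g. ACH0 starts at 0 with up to
 * 5 and at least 2 entries) and the DMA base address of its BD array,
 * while RX channels get length and base address and have any
 * half-delivered skb/desc state cleared. Software read/write pointers
 * are zeroed so driver and hardware start from the same position.
 */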
1403 static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev) 1404 { 1405 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1406 const struct rtw89_pci_info *info = rtwdev->pci_info; 1407 struct rtw89_pci_tx_ring *tx_ring; 1408 struct rtw89_pci_rx_ring *rx_ring; 1409 struct rtw89_pci_dma_ring *bd_ring; 1410 const struct rtw89_pci_bd_ram *bd_ram; 1411 u32 addr_num; 1412 u32 addr_bdram; 1413 u32 addr_desa_l; 1414 u32 val32; 1415 int i; 1416 1417 for (i = 0; i < RTW89_TXCH_NUM; i++) { 1418 if (info->tx_dma_ch_mask & BIT(i)) 1419 continue; 1420 1421 tx_ring = &rtwpci->tx_rings[i]; 1422 bd_ring = &tx_ring->bd_ring; 1423 bd_ram = &bd_ram_table[i]; 1424 addr_num = bd_ring->addr.num; 1425 addr_bdram = bd_ring->addr.bdram; 1426 addr_desa_l = bd_ring->addr.desa_l; 1427 bd_ring->wp = 0; 1428 bd_ring->rp = 0; 1429 1430 val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) | 1431 FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) | 1432 FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num); 1433 1434 rtw89_write16(rtwdev, addr_num, bd_ring->len); 1435 rtw89_write32(rtwdev, addr_bdram, val32); 1436 rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); 1437 } 1438 1439 for (i = 0; i < RTW89_RXCH_NUM; i++) { 1440 rx_ring = &rtwpci->rx_rings[i]; 1441 bd_ring = &rx_ring->bd_ring; 1442 addr_num = bd_ring->addr.num; 1443 addr_desa_l = bd_ring->addr.desa_l; 1444 bd_ring->wp = 0; 1445 bd_ring->rp = 0; 1446 rx_ring->diliver_skb = NULL; 1447 rx_ring->diliver_desc.ready = false; 1448 1449 rtw89_write16(rtwdev, addr_num, bd_ring->len); 1450 rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); 1451 } 1452 } 1453 1454 static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev, 1455 struct rtw89_pci_tx_ring *tx_ring) 1456 { 1457 rtw89_pci_release_busy_txwd(rtwdev, tx_ring); 1458 rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring); 1459 } 1460 1461 static void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev) 1462 { 1463 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1464 const struct rtw89_pci_info *info = rtwdev->pci_info; 1465 int txch; 1466 1467 rtw89_pci_reset_trx_rings(rtwdev); 1468 1469 spin_lock_bh(&rtwpci->trx_lock); 1470 for (txch = 0; txch < RTW89_TXCH_NUM; txch++) { 1471 if (info->tx_dma_ch_mask & BIT(txch)) 1472 continue; 1473 if (txch == RTW89_TXCH_CH12) { 1474 rtw89_pci_release_fwcmd(rtwdev, rtwpci, 1475 skb_queue_len(&rtwpci->h2c_queue), true); 1476 continue; 1477 } 1478 rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]); 1479 } 1480 spin_unlock_bh(&rtwpci->trx_lock); 1481 } 1482 1483 static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev) 1484 { 1485 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1486 unsigned long flags; 1487 1488 spin_lock_irqsave(&rtwpci->irq_lock, flags); 1489 rtwpci->running = true; 1490 rtw89_chip_enable_intr(rtwdev, rtwpci); 1491 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 1492 } 1493 1494 static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev) 1495 { 1496 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1497 unsigned long flags; 1498 1499 spin_lock_irqsave(&rtwpci->irq_lock, flags); 1500 rtwpci->running = false; 1501 rtw89_chip_disable_intr(rtwdev, rtwpci); 1502 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 1503 } 1504 1505 static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev) 1506 { 1507 rtw89_core_napi_start(rtwdev); 1508 rtw89_pci_enable_intr_lock(rtwdev); 1509 1510 return 0; 1511 } 1512 1513 static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev) 1514 { 1515 struct rtw89_pci *rtwpci = (struct 
rtw89_pci *)rtwdev->priv; 1516 struct pci_dev *pdev = rtwpci->pdev; 1517 1518 rtw89_pci_disable_intr_lock(rtwdev); 1519 synchronize_irq(pdev->irq); 1520 rtw89_core_napi_stop(rtwdev); 1521 } 1522 1523 static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause) 1524 { 1525 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1526 struct pci_dev *pdev = rtwpci->pdev; 1527 1528 if (pause) { 1529 rtw89_pci_disable_intr_lock(rtwdev); 1530 synchronize_irq(pdev->irq); 1531 if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags)) 1532 napi_synchronize(&rtwdev->napi); 1533 } else { 1534 rtw89_pci_enable_intr_lock(rtwdev); 1535 rtw89_pci_tx_kick_off_pending(rtwdev); 1536 } 1537 } 1538 1539 static 1540 void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power) 1541 { 1542 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1543 const struct rtw89_pci_info *info = rtwdev->pci_info; 1544 const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power; 1545 const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set; 1546 struct rtw89_pci_tx_ring *tx_ring; 1547 struct rtw89_pci_rx_ring *rx_ring; 1548 int i; 1549 1550 if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n")) 1551 return; 1552 1553 for (i = 0; i < RTW89_TXCH_NUM; i++) { 1554 tx_ring = &rtwpci->tx_rings[i]; 1555 tx_ring->bd_ring.addr.idx = low_power ? 1556 bd_idx_addr->tx_bd_addrs[i] : 1557 dma_addr_set->tx[i].idx; 1558 } 1559 1560 for (i = 0; i < RTW89_RXCH_NUM; i++) { 1561 rx_ring = &rtwpci->rx_rings[i]; 1562 rx_ring->bd_ring.addr.idx = low_power ? 1563 bd_idx_addr->rx_bd_addrs[i] : 1564 dma_addr_set->rx[i].idx; 1565 } 1566 } 1567 1568 static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power) 1569 { 1570 enum rtw89_pci_intr_mask_cfg cfg; 1571 1572 WARN(!rtwdev->hci.paused, "HCI isn't paused\n"); 1573 1574 cfg = low_power ? 
RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL; 1575 rtw89_chip_config_intr_mask(rtwdev, cfg); 1576 rtw89_pci_switch_bd_idx_addr(rtwdev, low_power); 1577 } 1578 1579 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data); 1580 1581 static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr) 1582 { 1583 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1584 u32 val = readl(rtwpci->mmap + addr); 1585 int count; 1586 1587 for (count = 0; ; count++) { 1588 if (val != RTW89_R32_DEAD) 1589 return val; 1590 if (count >= MAC_REG_POOL_COUNT) { 1591 rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val); 1592 return RTW89_R32_DEAD; 1593 } 1594 rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN); 1595 val = readl(rtwpci->mmap + addr); 1596 } 1597 1598 return val; 1599 } 1600 1601 static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr) 1602 { 1603 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1604 u32 addr32, val32, shift; 1605 1606 if (!ACCESS_CMAC(addr)) 1607 return readb(rtwpci->mmap + addr); 1608 1609 addr32 = addr & ~0x3; 1610 shift = (addr & 0x3) * 8; 1611 val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32); 1612 return val32 >> shift; 1613 } 1614 1615 static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr) 1616 { 1617 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1618 u32 addr32, val32, shift; 1619 1620 if (!ACCESS_CMAC(addr)) 1621 return readw(rtwpci->mmap + addr); 1622 1623 addr32 = addr & ~0x3; 1624 shift = (addr & 0x3) * 8; 1625 val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32); 1626 return val32 >> shift; 1627 } 1628 1629 static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr) 1630 { 1631 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1632 1633 if (!ACCESS_CMAC(addr)) 1634 return readl(rtwpci->mmap + addr); 1635 1636 return rtw89_pci_ops_read32_cmac(rtwdev, addr); 1637 } 1638 1639 static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data) 1640 { 1641 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1642 1643 writeb(data, rtwpci->mmap + addr); 1644 } 1645 1646 static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data) 1647 { 1648 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1649 1650 writew(data, rtwpci->mmap + addr); 1651 } 1652 1653 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data) 1654 { 1655 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1656 1657 writel(data, rtwpci->mmap + addr); 1658 } 1659 1660 static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable) 1661 { 1662 const struct rtw89_pci_info *info = rtwdev->pci_info; 1663 1664 if (enable) 1665 rtw89_write32_set(rtwdev, info->init_cfg_reg, 1666 info->rxhci_en_bit | info->txhci_en_bit); 1667 else 1668 rtw89_write32_clr(rtwdev, info->init_cfg_reg, 1669 info->rxhci_en_bit | info->txhci_en_bit); 1670 } 1671 1672 static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable) 1673 { 1674 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 1675 u32 reg, mask; 1676 1677 if (chip_id == RTL8852C) { 1678 reg = R_AX_HAXI_INIT_CFG1; 1679 mask = B_AX_STOP_AXI_MST; 1680 } else { 1681 reg = R_AX_PCIE_DMA_STOP1; 1682 mask = B_AX_STOP_PCIEIO; 1683 } 1684 1685 if (enable) 1686 rtw89_write32_clr(rtwdev, reg, mask); 1687 else 1688 rtw89_write32_set(rtwdev, reg, mask); 1689 } 1690 1691 static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable) 1692 { 1693 
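	/* Gate the whole DMA path in one go: rtw89_pci_ctrl_dma_io() flips
	 * the AXI/PCIe I/O stop bit (register depends on chip), and
	 * rtw89_pci_ctrl_dma_trx() the TX/RX HCI enable bits in the init
	 * config register.
	 */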
rtw89_pci_ctrl_dma_io(rtwdev, enable); 1694 rtw89_pci_ctrl_dma_trx(rtwdev, enable); 1695 } 1696 1697 static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit) 1698 { 1699 u16 val; 1700 1701 rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F); 1702 1703 val = rtw89_read16(rtwdev, R_AX_MDIO_CFG); 1704 switch (speed) { 1705 case PCIE_PHY_GEN1: 1706 if (addr < 0x20) 1707 val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK); 1708 else 1709 val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK); 1710 break; 1711 case PCIE_PHY_GEN2: 1712 if (addr < 0x20) 1713 val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK); 1714 else 1715 val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK); 1716 break; 1717 default: 1718 rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed); 1719 return -EINVAL; 1720 } 1721 rtw89_write16(rtwdev, R_AX_MDIO_CFG, val); 1722 rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit); 1723 1724 return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000, 1725 false, rtwdev, R_AX_MDIO_CFG); 1726 } 1727 1728 static int 1729 rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val) 1730 { 1731 int ret; 1732 1733 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG); 1734 if (ret) { 1735 rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret); 1736 return ret; 1737 } 1738 *val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA); 1739 1740 return 0; 1741 } 1742 1743 static int 1744 rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed) 1745 { 1746 int ret; 1747 1748 rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data); 1749 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG); 1750 if (ret) { 1751 rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret); 1752 return ret; 1753 } 1754 1755 return 0; 1756 } 1757 1758 static int 1759 rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed) 1760 { 1761 u32 shift; 1762 int ret; 1763 u16 val; 1764 1765 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 1766 if (ret) 1767 return ret; 1768 1769 shift = __ffs(mask); 1770 val &= ~mask; 1771 val |= ((data << shift) & mask); 1772 1773 ret = rtw89_write16_mdio(rtwdev, addr, val, speed); 1774 if (ret) 1775 return ret; 1776 1777 return 0; 1778 } 1779 1780 static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) 1781 { 1782 int ret; 1783 u16 val; 1784 1785 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 1786 if (ret) 1787 return ret; 1788 ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed); 1789 if (ret) 1790 return ret; 1791 1792 return 0; 1793 } 1794 1795 static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) 1796 { 1797 int ret; 1798 u16 val; 1799 1800 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 1801 if (ret) 1802 return ret; 1803 ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed); 1804 if (ret) 1805 return ret; 1806 1807 return 0; 1808 } 1809 1810 static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr, 1811 u8 data) 1812 { 1813 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1814 struct pci_dev *pdev = rtwpci->pdev; 1815 1816 return pci_write_config_byte(pdev, addr, data); 1817 } 1818 1819 static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr, 1820 u8 *value) 1821 { 1822 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1823 struct pci_dev *pdev = rtwpci->pdev; 1824 1825 
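	/* Thin wrapper over PCI config space; rtw89_pci_config_byte_set()
	 * and rtw89_pci_config_byte_clr() below build read-modify-write
	 * sequences on top of this and the write counterpart above.
	 */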
return pci_read_config_byte(pdev, addr, value); 1826 } 1827 1828 static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr, 1829 u8 bit) 1830 { 1831 u8 value; 1832 int ret; 1833 1834 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value); 1835 if (ret) 1836 return ret; 1837 1838 value |= bit; 1839 ret = rtw89_pci_write_config_byte(rtwdev, addr, value); 1840 1841 return ret; 1842 } 1843 1844 static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr, 1845 u8 bit) 1846 { 1847 u8 value; 1848 int ret; 1849 1850 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value); 1851 if (ret) 1852 return ret; 1853 1854 value &= ~bit; 1855 ret = rtw89_pci_write_config_byte(rtwdev, addr, value); 1856 1857 return ret; 1858 } 1859 1860 static int 1861 __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate) 1862 { 1863 u16 val, tar; 1864 int ret; 1865 1866 /* Enable counter */ 1867 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val); 1868 if (ret) 1869 return ret; 1870 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, 1871 phy_rate); 1872 if (ret) 1873 return ret; 1874 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN, 1875 phy_rate); 1876 if (ret) 1877 return ret; 1878 1879 fsleep(300); 1880 1881 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar); 1882 if (ret) 1883 return ret; 1884 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, 1885 phy_rate); 1886 if (ret) 1887 return ret; 1888 1889 tar = tar & 0x0FFF; 1890 if (tar == 0 || tar == 0x0FFF) { 1891 rtw89_err(rtwdev, "[ERR]Get target failed.\n"); 1892 return -EINVAL; 1893 } 1894 1895 *target = tar; 1896 1897 return 0; 1898 } 1899 1900 static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev) 1901 { 1902 int ret; 1903 1904 if (rtwdev->chip->chip_id != RTL8852B) 1905 return 0; 1906 1907 ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK, 1908 PCIE_AUTOK_4, PCIE_PHY_GEN1); 1909 return ret; 1910 } 1911 1912 static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en) 1913 { 1914 enum rtw89_pcie_phy phy_rate; 1915 u16 val16, mgn_set, div_set, tar; 1916 u8 val8, bdr_ori; 1917 bool l1_flag = false; 1918 int ret = 0; 1919 1920 if (rtwdev->chip->chip_id != RTL8852B) 1921 return 0; 1922 1923 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8); 1924 if (ret) { 1925 rtw89_err(rtwdev, "[ERR]pci config read %X\n", 1926 RTW89_PCIE_PHY_RATE); 1927 return ret; 1928 } 1929 1930 if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) { 1931 phy_rate = PCIE_PHY_GEN1; 1932 } else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) { 1933 phy_rate = PCIE_PHY_GEN2; 1934 } else { 1935 rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not support\n", val8); 1936 return -EOPNOTSUPP; 1937 } 1938 /* Disable L1BD */ 1939 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori); 1940 if (ret) { 1941 rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL); 1942 return ret; 1943 } 1944 1945 if (bdr_ori & RTW89_PCIE_BIT_L1) { 1946 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 1947 bdr_ori & ~RTW89_PCIE_BIT_L1); 1948 if (ret) { 1949 rtw89_err(rtwdev, "[ERR]pci config write %X\n", 1950 RTW89_PCIE_L1_CTRL); 1951 return ret; 1952 } 1953 l1_flag = true; 1954 } 1955 1956 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); 1957 if (ret) { 1958 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 1959 goto end; 1960 } 1961 1962 if (val16 & 
B_AX_CALIB_EN) { 1963 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, 1964 val16 & ~B_AX_CALIB_EN, phy_rate); 1965 if (ret) { 1966 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 1967 goto end; 1968 } 1969 } 1970 1971 if (!autook_en) 1972 goto end; 1973 /* Set div */ 1974 ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate); 1975 if (ret) { 1976 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 1977 goto end; 1978 } 1979 1980 /* Obtain div and margin */ 1981 ret = __get_target(rtwdev, &tar, phy_rate); 1982 if (ret) { 1983 rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret); 1984 goto end; 1985 } 1986 1987 mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar; 1988 1989 if (mgn_set >= 128) { 1990 div_set = 0x0003; 1991 mgn_set = 0x000F; 1992 } else if (mgn_set >= 64) { 1993 div_set = 0x0003; 1994 mgn_set >>= 3; 1995 } else if (mgn_set >= 32) { 1996 div_set = 0x0002; 1997 mgn_set >>= 2; 1998 } else if (mgn_set >= 16) { 1999 div_set = 0x0001; 2000 mgn_set >>= 1; 2001 } else if (mgn_set == 0) { 2002 rtw89_err(rtwdev, "[ERR]cal mgn is 0,tar = %d\n", tar); 2003 goto end; 2004 } else { 2005 div_set = 0x0000; 2006 } 2007 2008 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); 2009 if (ret) { 2010 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 2011 goto end; 2012 } 2013 2014 val16 |= u16_encode_bits(div_set, B_AX_DIV); 2015 2016 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate); 2017 if (ret) { 2018 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2019 goto end; 2020 } 2021 2022 ret = __get_target(rtwdev, &tar, phy_rate); 2023 if (ret) { 2024 rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret); 2025 goto end; 2026 } 2027 2028 rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n", 2029 tar, div_set, mgn_set); 2030 ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1, 2031 (tar & 0x0FFF) | (mgn_set << 12), phy_rate); 2032 if (ret) { 2033 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1); 2034 goto end; 2035 } 2036 2037 /* Enable function */ 2038 ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate); 2039 if (ret) { 2040 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2041 goto end; 2042 } 2043 2044 /* CLK delay = 0 */ 2045 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 2046 PCIE_CLKDLY_HW_0); 2047 2048 end: 2049 /* Set L1BD to ori */ 2050 if (l1_flag) { 2051 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 2052 bdr_ori); 2053 if (ret) { 2054 rtw89_err(rtwdev, "[ERR]pci config write %X\n", 2055 RTW89_PCIE_L1_CTRL); 2056 return ret; 2057 } 2058 } 2059 2060 return ret; 2061 } 2062 2063 static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev) 2064 { 2065 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2066 int ret; 2067 2068 if (chip_id == RTL8852A) { 2069 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2070 PCIE_PHY_GEN1); 2071 if (ret) 2072 return ret; 2073 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2074 PCIE_PHY_GEN2); 2075 if (ret) 2076 return ret; 2077 } else if (chip_id == RTL8852C) { 2078 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2, 2079 B_AX_DEGLITCH); 2080 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2, 2081 B_AX_DEGLITCH); 2082 } 2083 2084 return 0; 2085 } 2086 2087 static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev) 2088 { 2089 if (rtwdev->chip->chip_id 
!= RTL8852A) 2090 return; 2091 2092 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE); 2093 } 2094 2095 static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev) 2096 { 2097 if (rtwdev->chip->chip_id != RTL8852A && rtwdev->chip->chip_id != RTL8852B) 2098 return; 2099 2100 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN); 2101 } 2102 2103 static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev) 2104 { 2105 int ret; 2106 2107 if (rtwdev->chip->chip_id != RTL8852A) 2108 return 0; 2109 2110 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2111 PCIE_PHY_GEN1); 2112 if (ret) 2113 return ret; 2114 2115 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2116 PCIE_PHY_GEN2); 2117 if (ret) 2118 return ret; 2119 2120 return 0; 2121 } 2122 2123 static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev) 2124 { 2125 if (rtwdev->chip->chip_id != RTL8852A && rtwdev->chip->chip_id != RTL8852B) 2126 return; 2127 2128 rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN); 2129 } 2130 2131 static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev) 2132 { 2133 if (rtwdev->chip->chip_id == RTL8852A || 2134 rtwdev->chip->chip_id == RTL8852B) { 2135 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 2136 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2137 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2138 B_AX_PCIE_DIS_WLSUS_AFT_PDN); 2139 } else if (rtwdev->chip->chip_id == RTL8852C) { 2140 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2141 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2142 } 2143 } 2144 2145 static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev) 2146 { 2147 if (rtwdev->chip->chip_id != RTL8852B) 2148 return 0; 2149 2150 return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK, 2151 PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1); 2152 } 2153 2154 static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up) 2155 { 2156 if (pwr_up) 2157 rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2158 else 2159 rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2160 } 2161 2162 static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev) 2163 { 2164 if (rtwdev->chip->chip_id != RTL8852C) 2165 return; 2166 2167 rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2168 rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2169 } 2170 2171 static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev) 2172 { 2173 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2174 return; 2175 2176 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT); 2177 } 2178 2179 static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev) 2180 { 2181 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2182 return; 2183 2184 rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, 2185 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2186 rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3); 2187 rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, 2188 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2189 } 2190 2191 static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev) 2192 { 2193 if (rtwdev->chip->chip_id != RTL8852C) 2194 return; 2195 2196 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1); 2197 } 2198 2199 static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev) 2200 { 2201 if (rtwdev->chip->chip_id != RTL8852C) 2202 return; 2203 2204 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN); 2205 } 2206 2207 static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev) 2208 { 2209 
if (rtwdev->chip->chip_id == RTL8852C) 2210 return; 2211 2212 rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL, 2213 B_AX_SIC_EN_FORCE_CLKREQ); 2214 } 2215 2216 static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev) 2217 { 2218 const struct rtw89_pci_info *info = rtwdev->pci_info; 2219 u32 lbc; 2220 2221 if (rtwdev->chip->chip_id == RTL8852C) 2222 return; 2223 2224 lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG); 2225 if (info->lbc_en == MAC_AX_PCIE_ENABLE) { 2226 lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER); 2227 lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN; 2228 rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2229 } else { 2230 lbc &= ~B_AX_LBC_EN; 2231 } 2232 rtw89_write32_set(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2233 } 2234 2235 static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev) 2236 { 2237 const struct rtw89_pci_info *info = rtwdev->pci_info; 2238 u32 val32; 2239 2240 if (rtwdev->chip->chip_id != RTL8852C) 2241 return; 2242 2243 if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) { 2244 val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK, 2245 info->io_rcy_tmr); 2246 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32); 2247 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32); 2248 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32); 2249 2250 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2251 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2252 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2253 } else { 2254 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2255 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2256 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2257 } 2258 2259 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1); 2260 } 2261 2262 static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev) 2263 { 2264 if (rtwdev->chip->chip_id == RTL8852C) 2265 return; 2266 2267 rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL, 2268 B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG); 2269 2270 if (rtwdev->chip->chip_id == RTL8852A) 2271 rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL, 2272 B_AX_EN_CHKDSC_NO_RX_STUCK); 2273 } 2274 2275 static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev) 2276 { 2277 if (rtwdev->chip->chip_id == RTL8852C) 2278 return; 2279 2280 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 2281 B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG); 2282 } 2283 2284 static void rtw89_pci_clr_idx_all(struct rtw89_dev *rtwdev) 2285 { 2286 const struct rtw89_pci_info *info = rtwdev->pci_info; 2287 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2288 u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX | 2289 B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX | 2290 B_AX_CLR_CH12_IDX; 2291 u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg; 2292 u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg; 2293 2294 if (chip_id == RTL8852A || chip_id == RTL8852C) 2295 val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX | 2296 B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX; 2297 /* clear DMA indexes */ 2298 rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val); 2299 if (chip_id == RTL8852A || chip_id == RTL8852C) 2300 rtw89_write32_set(rtwdev, txbd_rwptr_clr2, 2301 B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX); 2302 rtw89_write32_set(rtwdev, rxbd_rwptr_clr, 2303 B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX); 2304 } 2305 2306 static int rtw89_poll_txdma_ch_idle_pcie(struct rtw89_dev *rtwdev) 2307 { 2308 const struct 
rtw89_pci_info *info = rtwdev->pci_info; 2309 u32 ret, check, dma_busy; 2310 u32 dma_busy1 = info->dma_busy1.addr; 2311 u32 dma_busy2 = info->dma_busy2_reg; 2312 2313 check = info->dma_busy1.mask; 2314 2315 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2316 10, 100, false, rtwdev, dma_busy1); 2317 if (ret) 2318 return ret; 2319 2320 if (!dma_busy2) 2321 return 0; 2322 2323 check = B_AX_CH10_BUSY | B_AX_CH11_BUSY; 2324 2325 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2326 10, 100, false, rtwdev, dma_busy2); 2327 if (ret) 2328 return ret; 2329 2330 return 0; 2331 } 2332 2333 static int rtw89_poll_rxdma_ch_idle_pcie(struct rtw89_dev *rtwdev) 2334 { 2335 const struct rtw89_pci_info *info = rtwdev->pci_info; 2336 u32 ret, check, dma_busy; 2337 u32 dma_busy3 = info->dma_busy3_reg; 2338 2339 check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY; 2340 2341 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2342 10, 100, false, rtwdev, dma_busy3); 2343 if (ret) 2344 return ret; 2345 2346 return 0; 2347 } 2348 2349 static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev) 2350 { 2351 u32 ret; 2352 2353 ret = rtw89_poll_txdma_ch_idle_pcie(rtwdev); 2354 if (ret) { 2355 rtw89_err(rtwdev, "txdma ch busy\n"); 2356 return ret; 2357 } 2358 2359 ret = rtw89_poll_rxdma_ch_idle_pcie(rtwdev); 2360 if (ret) { 2361 rtw89_err(rtwdev, "rxdma ch busy\n"); 2362 return ret; 2363 } 2364 2365 return 0; 2366 } 2367 2368 static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) 2369 { 2370 const struct rtw89_pci_info *info = rtwdev->pci_info; 2371 enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode; 2372 enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode; 2373 enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode; 2374 enum mac_ax_tag_mode tag_mode = info->tag_mode; 2375 enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl; 2376 enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl; 2377 enum mac_ax_tx_burst tx_burst = info->tx_burst; 2378 enum mac_ax_rx_burst rx_burst = info->rx_burst; 2379 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2380 u8 cv = rtwdev->hal.cv; 2381 u32 val32; 2382 2383 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2384 if (chip_id == RTL8852A && cv == CHIP_CBV) 2385 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2386 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2387 if (chip_id == RTL8852A || chip_id == RTL8852B) 2388 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2389 } 2390 2391 if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) { 2392 if (chip_id == RTL8852A && cv == CHIP_CBV) 2393 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2394 } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) { 2395 if (chip_id == RTL8852A || chip_id == RTL8852B) 2396 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2397 } 2398 2399 if (rxbd_mode == MAC_AX_RXBD_PKT) { 2400 rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2401 } else if (rxbd_mode == MAC_AX_RXBD_SEP) { 2402 rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2403 2404 if (chip_id == RTL8852A || chip_id == RTL8852B) 2405 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, 2406 B_AX_PCIE_RX_APPLEN_MASK, 0); 2407 } 2408 2409 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2410 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst); 2411 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, 
B_AX_PCIE_MAX_RXDMA_MASK, rx_burst); 2412 } else if (chip_id == RTL8852C) { 2413 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst); 2414 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst); 2415 } 2416 2417 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2418 if (tag_mode == MAC_AX_TAG_SGL) { 2419 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) & 2420 ~B_AX_LATENCY_CONTROL; 2421 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2422 } else if (tag_mode == MAC_AX_TAG_MULTI) { 2423 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | 2424 B_AX_LATENCY_CONTROL; 2425 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2426 } 2427 } 2428 2429 rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask, 2430 info->multi_tag_num); 2431 2432 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2433 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE, 2434 wd_dma_idle_intvl); 2435 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT, 2436 wd_dma_act_intvl); 2437 } else if (chip_id == RTL8852C) { 2438 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK, 2439 wd_dma_idle_intvl); 2440 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK, 2441 wd_dma_act_intvl); 2442 } 2443 2444 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2445 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2446 B_AX_HOST_ADDR_INFO_8B_SEL); 2447 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2448 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2449 rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2450 B_AX_HOST_ADDR_INFO_8B_SEL); 2451 rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2452 } 2453 2454 return 0; 2455 } 2456 2457 static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev) 2458 { 2459 const struct rtw89_pci_info *info = rtwdev->pci_info; 2460 2461 if (rtwdev->chip->chip_id == RTL8852A) { 2462 /* ltr sw trigger */ 2463 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE); 2464 } 2465 info->ltr_set(rtwdev, false); 2466 rtw89_pci_ctrl_dma_all(rtwdev, false); 2467 rtw89_pci_clr_idx_all(rtwdev); 2468 2469 return 0; 2470 } 2471 2472 static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev) 2473 { 2474 const struct rtw89_pci_info *info = rtwdev->pci_info; 2475 int ret; 2476 2477 rtw89_pci_rxdma_prefth(rtwdev); 2478 rtw89_pci_l1off_pwroff(rtwdev); 2479 rtw89_pci_deglitch_setting(rtwdev); 2480 ret = rtw89_pci_l2_rxen_lat(rtwdev); 2481 if (ret) { 2482 rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret); 2483 return ret; 2484 } 2485 2486 rtw89_pci_aphy_pwrcut(rtwdev); 2487 rtw89_pci_hci_ldo(rtwdev); 2488 rtw89_pci_dphy_delay(rtwdev); 2489 2490 ret = rtw89_pci_autok_x(rtwdev); 2491 if (ret) { 2492 rtw89_err(rtwdev, "[ERR] pcie autok_x fail %d\n", ret); 2493 return ret; 2494 } 2495 2496 ret = rtw89_pci_auto_refclk_cal(rtwdev, false); 2497 if (ret) { 2498 rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret); 2499 return ret; 2500 } 2501 2502 rtw89_pci_power_wake(rtwdev, true); 2503 rtw89_pci_autoload_hang(rtwdev); 2504 rtw89_pci_l12_vmain(rtwdev); 2505 rtw89_pci_gen2_force_ib(rtwdev); 2506 rtw89_pci_l1_ent_lat(rtwdev); 2507 rtw89_pci_wd_exit_l1(rtwdev); 2508 rtw89_pci_set_sic(rtwdev); 2509 rtw89_pci_set_lbc(rtwdev); 2510 rtw89_pci_set_io_rcy(rtwdev); 2511 rtw89_pci_set_dbg(rtwdev); 2512 rtw89_pci_set_keep_reg(rtwdev); 2513 2514 rtw89_write32_set(rtwdev, info->dma_stop1.addr, B_AX_STOP_WPDMA); 2515 2516 /* stop 
DMA activities */ 2517 rtw89_pci_ctrl_dma_all(rtwdev, false); 2518 2519 ret = rtw89_pci_poll_dma_all_idle(rtwdev); 2520 if (ret) { 2521 rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n"); 2522 return ret; 2523 } 2524 2525 rtw89_pci_clr_idx_all(rtwdev); 2526 rtw89_pci_mode_op(rtwdev); 2527 2528 /* fill TRX BD indexes */ 2529 rtw89_pci_ops_reset(rtwdev); 2530 2531 ret = rtw89_pci_rst_bdram_pcie(rtwdev); 2532 if (ret) { 2533 rtw89_warn(rtwdev, "reset bdram busy\n"); 2534 return ret; 2535 } 2536 2537 /* disable all channels except to FW CMD channel to download firmware */ 2538 rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, false); 2539 rtw89_pci_ctrl_txdma_fw_ch_pcie(rtwdev, true); 2540 2541 /* start DMA activities */ 2542 rtw89_pci_ctrl_dma_all(rtwdev, true); 2543 2544 return 0; 2545 } 2546 2547 int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en) 2548 { 2549 u32 val; 2550 2551 if (!en) 2552 return 0; 2553 2554 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 2555 if (rtw89_pci_ltr_is_err_reg_val(val)) 2556 return -EINVAL; 2557 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 2558 if (rtw89_pci_ltr_is_err_reg_val(val)) 2559 return -EINVAL; 2560 val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY); 2561 if (rtw89_pci_ltr_is_err_reg_val(val)) 2562 return -EINVAL; 2563 val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY); 2564 if (rtw89_pci_ltr_is_err_reg_val(val)) 2565 return -EINVAL; 2566 2567 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN | B_AX_LTR_EN | 2568 B_AX_LTR_WD_NOEMP_CHK); 2569 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK, 2570 PCI_LTR_SPC_500US); 2571 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 2572 PCI_LTR_IDLE_TIMER_3_2MS); 2573 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 2574 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 2575 rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x90039003); 2576 rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b); 2577 2578 return 0; 2579 } 2580 EXPORT_SYMBOL(rtw89_pci_ltr_set); 2581 2582 int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en) 2583 { 2584 u32 dec_ctrl; 2585 u32 val32; 2586 2587 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 2588 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2589 return -EINVAL; 2590 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 2591 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2592 return -EINVAL; 2593 dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL); 2594 if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl)) 2595 return -EINVAL; 2596 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3); 2597 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2598 return -EINVAL; 2599 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0); 2600 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2601 return -EINVAL; 2602 2603 if (!en) { 2604 dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN); 2605 dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) | 2606 B_AX_LTR_REQ_DRV; 2607 } else { 2608 dec_ctrl |= B_AX_LTR_HW_DEC_EN; 2609 } 2610 2611 dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK; 2612 dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US); 2613 2614 if (en) 2615 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, 2616 B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN); 2617 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 2618 PCI_LTR_IDLE_TIMER_3_2MS); 2619 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 2620 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 
2621 rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl); 2622 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003); 2623 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b); 2624 2625 return 0; 2626 } 2627 EXPORT_SYMBOL(rtw89_pci_ltr_set_v1); 2628 2629 static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev) 2630 { 2631 const struct rtw89_pci_info *info = rtwdev->pci_info; 2632 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2633 int ret; 2634 2635 ret = info->ltr_set(rtwdev, true); 2636 if (ret) { 2637 rtw89_err(rtwdev, "pci ltr set fail\n"); 2638 return ret; 2639 } 2640 if (chip_id == RTL8852A) { 2641 /* ltr sw trigger */ 2642 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT); 2643 } 2644 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2645 /* ADDR info 8-byte mode */ 2646 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2647 B_AX_HOST_ADDR_INFO_8B_SEL); 2648 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2649 } 2650 2651 /* enable DMA for all queues */ 2652 rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, true); 2653 2654 /* Release PCI IO */ 2655 rtw89_write32_clr(rtwdev, info->dma_stop1.addr, 2656 B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO); 2657 2658 return 0; 2659 } 2660 2661 static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev, 2662 struct pci_dev *pdev) 2663 { 2664 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2665 int ret; 2666 2667 ret = pci_enable_device(pdev); 2668 if (ret) { 2669 rtw89_err(rtwdev, "failed to enable pci device\n"); 2670 return ret; 2671 } 2672 2673 pci_set_master(pdev); 2674 pci_set_drvdata(pdev, rtwdev->hw); 2675 2676 rtwpci->pdev = pdev; 2677 2678 return 0; 2679 } 2680 2681 static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev, 2682 struct pci_dev *pdev) 2683 { 2684 pci_clear_master(pdev); 2685 pci_disable_device(pdev); 2686 } 2687 2688 static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, 2689 struct pci_dev *pdev) 2690 { 2691 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2692 unsigned long resource_len; 2693 u8 bar_id = 2; 2694 int ret; 2695 2696 ret = pci_request_regions(pdev, KBUILD_MODNAME); 2697 if (ret) { 2698 rtw89_err(rtwdev, "failed to request pci regions\n"); 2699 goto err; 2700 } 2701 2702 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 2703 if (ret) { 2704 rtw89_err(rtwdev, "failed to set dma mask to 32-bit\n"); 2705 goto err_release_regions; 2706 } 2707 2708 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 2709 if (ret) { 2710 rtw89_err(rtwdev, "failed to set consistent dma mask to 32-bit\n"); 2711 goto err_release_regions; 2712 } 2713 2714 resource_len = pci_resource_len(pdev, bar_id); 2715 rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len); 2716 if (!rtwpci->mmap) { 2717 rtw89_err(rtwdev, "failed to map pci io\n"); 2718 ret = -EIO; 2719 goto err_release_regions; 2720 } 2721 2722 return 0; 2723 2724 err_release_regions: 2725 pci_release_regions(pdev); 2726 err: 2727 return ret; 2728 } 2729 2730 static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev, 2731 struct pci_dev *pdev) 2732 { 2733 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2734 2735 if (rtwpci->mmap) { 2736 pci_iounmap(pdev, rtwpci->mmap); 2737 pci_release_regions(pdev); 2738 } 2739 } 2740 2741 static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev, 2742 struct pci_dev *pdev, 2743 struct rtw89_pci_tx_ring *tx_ring) 2744 { 2745 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 2746 u8 *head = wd_ring->head; 2747 dma_addr_t dma = 
wd_ring->dma; 2748 u32 page_size = wd_ring->page_size; 2749 u32 page_num = wd_ring->page_num; 2750 u32 ring_sz = page_size * page_num; 2751 2752 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2753 wd_ring->head = NULL; 2754 } 2755 2756 static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev, 2757 struct pci_dev *pdev, 2758 struct rtw89_pci_tx_ring *tx_ring) 2759 { 2760 int ring_sz; 2761 u8 *head; 2762 dma_addr_t dma; 2763 2764 head = tx_ring->bd_ring.head; 2765 dma = tx_ring->bd_ring.dma; 2766 ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len; 2767 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2768 2769 tx_ring->bd_ring.head = NULL; 2770 } 2771 2772 static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev, 2773 struct pci_dev *pdev) 2774 { 2775 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2776 const struct rtw89_pci_info *info = rtwdev->pci_info; 2777 struct rtw89_pci_tx_ring *tx_ring; 2778 int i; 2779 2780 for (i = 0; i < RTW89_TXCH_NUM; i++) { 2781 if (info->tx_dma_ch_mask & BIT(i)) 2782 continue; 2783 tx_ring = &rtwpci->tx_rings[i]; 2784 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 2785 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 2786 } 2787 } 2788 2789 static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev, 2790 struct pci_dev *pdev, 2791 struct rtw89_pci_rx_ring *rx_ring) 2792 { 2793 struct rtw89_pci_rx_info *rx_info; 2794 struct sk_buff *skb; 2795 dma_addr_t dma; 2796 u32 buf_sz; 2797 u8 *head; 2798 int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len; 2799 int i; 2800 2801 buf_sz = rx_ring->buf_sz; 2802 for (i = 0; i < rx_ring->bd_ring.len; i++) { 2803 skb = rx_ring->buf[i]; 2804 if (!skb) 2805 continue; 2806 2807 rx_info = RTW89_PCI_RX_SKB_CB(skb); 2808 dma = rx_info->dma; 2809 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 2810 dev_kfree_skb(skb); 2811 rx_ring->buf[i] = NULL; 2812 } 2813 2814 head = rx_ring->bd_ring.head; 2815 dma = rx_ring->bd_ring.dma; 2816 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2817 2818 rx_ring->bd_ring.head = NULL; 2819 } 2820 2821 static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev, 2822 struct pci_dev *pdev) 2823 { 2824 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2825 struct rtw89_pci_rx_ring *rx_ring; 2826 int i; 2827 2828 for (i = 0; i < RTW89_RXCH_NUM; i++) { 2829 rx_ring = &rtwpci->rx_rings[i]; 2830 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 2831 } 2832 } 2833 2834 static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev, 2835 struct pci_dev *pdev) 2836 { 2837 rtw89_pci_free_rx_rings(rtwdev, pdev); 2838 rtw89_pci_free_tx_rings(rtwdev, pdev); 2839 } 2840 2841 static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev, 2842 struct rtw89_pci_rx_ring *rx_ring, 2843 struct sk_buff *skb, int buf_sz, u32 idx) 2844 { 2845 struct rtw89_pci_rx_info *rx_info; 2846 struct rtw89_pci_rx_bd_32 *rx_bd; 2847 dma_addr_t dma; 2848 2849 if (!skb) 2850 return -EINVAL; 2851 2852 dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE); 2853 if (dma_mapping_error(&pdev->dev, dma)) 2854 return -EBUSY; 2855 2856 rx_info = RTW89_PCI_RX_SKB_CB(skb); 2857 rx_bd = RTW89_PCI_RX_BD(rx_ring, idx); 2858 2859 memset(rx_bd, 0, sizeof(*rx_bd)); 2860 rx_bd->buf_size = cpu_to_le16(buf_sz); 2861 rx_bd->dma = cpu_to_le32(dma); 2862 rx_info->dma = dma; 2863 2864 return 0; 2865 } 2866 2867 static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev, 2868 struct pci_dev *pdev, 2869 struct rtw89_pci_tx_ring *tx_ring, 2870 enum 
rtw89_tx_channel txch) 2871 { 2872 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 2873 struct rtw89_pci_tx_wd *txwd; 2874 dma_addr_t dma; 2875 dma_addr_t cur_paddr; 2876 u8 *head; 2877 u8 *cur_vaddr; 2878 u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE; 2879 u32 page_num = RTW89_PCI_TXWD_NUM_MAX; 2880 u32 ring_sz = page_size * page_num; 2881 u32 page_offset; 2882 int i; 2883 2884 /* FWCMD queue doesn't use txwd as pages */ 2885 if (txch == RTW89_TXCH_CH12) 2886 return 0; 2887 2888 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 2889 if (!head) 2890 return -ENOMEM; 2891 2892 INIT_LIST_HEAD(&wd_ring->free_pages); 2893 wd_ring->head = head; 2894 wd_ring->dma = dma; 2895 wd_ring->page_size = page_size; 2896 wd_ring->page_num = page_num; 2897 2898 page_offset = 0; 2899 for (i = 0; i < page_num; i++) { 2900 txwd = &wd_ring->pages[i]; 2901 cur_paddr = dma + page_offset; 2902 cur_vaddr = head + page_offset; 2903 2904 skb_queue_head_init(&txwd->queue); 2905 INIT_LIST_HEAD(&txwd->list); 2906 txwd->paddr = cur_paddr; 2907 txwd->vaddr = cur_vaddr; 2908 txwd->len = page_size; 2909 txwd->seq = i; 2910 rtw89_pci_enqueue_txwd(tx_ring, txwd); 2911 2912 page_offset += page_size; 2913 } 2914 2915 return 0; 2916 } 2917 2918 static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev, 2919 struct pci_dev *pdev, 2920 struct rtw89_pci_tx_ring *tx_ring, 2921 u32 desc_size, u32 len, 2922 enum rtw89_tx_channel txch) 2923 { 2924 const struct rtw89_pci_ch_dma_addr *txch_addr; 2925 int ring_sz = desc_size * len; 2926 u8 *head; 2927 dma_addr_t dma; 2928 int ret; 2929 2930 ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch); 2931 if (ret) { 2932 rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch); 2933 goto err; 2934 } 2935 2936 ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr); 2937 if (ret) { 2938 rtw89_err(rtwdev, "failed to get address of txch %d", txch); 2939 goto err_free_wd_ring; 2940 } 2941 2942 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 2943 if (!head) { 2944 ret = -ENOMEM; 2945 goto err_free_wd_ring; 2946 } 2947 2948 INIT_LIST_HEAD(&tx_ring->busy_pages); 2949 tx_ring->bd_ring.head = head; 2950 tx_ring->bd_ring.dma = dma; 2951 tx_ring->bd_ring.len = len; 2952 tx_ring->bd_ring.desc_size = desc_size; 2953 tx_ring->bd_ring.addr = *txch_addr; 2954 tx_ring->bd_ring.wp = 0; 2955 tx_ring->bd_ring.rp = 0; 2956 tx_ring->txch = txch; 2957 2958 return 0; 2959 2960 err_free_wd_ring: 2961 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 2962 err: 2963 return ret; 2964 } 2965 2966 static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev, 2967 struct pci_dev *pdev) 2968 { 2969 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2970 const struct rtw89_pci_info *info = rtwdev->pci_info; 2971 struct rtw89_pci_tx_ring *tx_ring; 2972 u32 desc_size; 2973 u32 len; 2974 u32 i, tx_allocated; 2975 int ret; 2976 2977 for (i = 0; i < RTW89_TXCH_NUM; i++) { 2978 if (info->tx_dma_ch_mask & BIT(i)) 2979 continue; 2980 tx_ring = &rtwpci->tx_rings[i]; 2981 desc_size = sizeof(struct rtw89_pci_tx_bd_32); 2982 len = RTW89_PCI_TXBD_NUM_MAX; 2983 ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring, 2984 desc_size, len, i); 2985 if (ret) { 2986 rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i); 2987 goto err_free; 2988 } 2989 } 2990 2991 return 0; 2992 2993 err_free: 2994 tx_allocated = i; 2995 for (i = 0; i < tx_allocated; i++) { 2996 tx_ring = &rtwpci->tx_rings[i]; 2997 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 2998 } 2999 3000 return ret; 3001 } 
3002 3003 static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev, 3004 struct pci_dev *pdev, 3005 struct rtw89_pci_rx_ring *rx_ring, 3006 u32 desc_size, u32 len, u32 rxch) 3007 { 3008 const struct rtw89_pci_ch_dma_addr *rxch_addr; 3009 struct sk_buff *skb; 3010 u8 *head; 3011 dma_addr_t dma; 3012 int ring_sz = desc_size * len; 3013 int buf_sz = RTW89_PCI_RX_BUF_SIZE; 3014 int i, allocated; 3015 int ret; 3016 3017 ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr); 3018 if (ret) { 3019 rtw89_err(rtwdev, "failed to get address of rxch %d", rxch); 3020 return ret; 3021 } 3022 3023 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3024 if (!head) { 3025 ret = -ENOMEM; 3026 goto err; 3027 } 3028 3029 rx_ring->bd_ring.head = head; 3030 rx_ring->bd_ring.dma = dma; 3031 rx_ring->bd_ring.len = len; 3032 rx_ring->bd_ring.desc_size = desc_size; 3033 rx_ring->bd_ring.addr = *rxch_addr; 3034 rx_ring->bd_ring.wp = 0; 3035 rx_ring->bd_ring.rp = 0; 3036 rx_ring->buf_sz = buf_sz; 3037 rx_ring->diliver_skb = NULL; 3038 rx_ring->diliver_desc.ready = false; 3039 3040 for (i = 0; i < len; i++) { 3041 skb = dev_alloc_skb(buf_sz); 3042 if (!skb) { 3043 ret = -ENOMEM; 3044 goto err_free; 3045 } 3046 3047 memset(skb->data, 0, buf_sz); 3048 rx_ring->buf[i] = skb; 3049 ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb, 3050 buf_sz, i); 3051 if (ret) { 3052 rtw89_err(rtwdev, "failed to init rx buf %d\n", i); 3053 dev_kfree_skb_any(skb); 3054 rx_ring->buf[i] = NULL; 3055 goto err_free; 3056 } 3057 } 3058 3059 return 0; 3060 3061 err_free: 3062 allocated = i; 3063 for (i = 0; i < allocated; i++) { 3064 skb = rx_ring->buf[i]; 3065 if (!skb) 3066 continue; 3067 dma = *((dma_addr_t *)skb->cb); 3068 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 3069 dev_kfree_skb(skb); 3070 rx_ring->buf[i] = NULL; 3071 } 3072 3073 head = rx_ring->bd_ring.head; 3074 dma = rx_ring->bd_ring.dma; 3075 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3076 3077 rx_ring->bd_ring.head = NULL; 3078 err: 3079 return ret; 3080 } 3081 3082 static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev, 3083 struct pci_dev *pdev) 3084 { 3085 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3086 struct rtw89_pci_rx_ring *rx_ring; 3087 u32 desc_size; 3088 u32 len; 3089 int i, rx_allocated; 3090 int ret; 3091 3092 for (i = 0; i < RTW89_RXCH_NUM; i++) { 3093 rx_ring = &rtwpci->rx_rings[i]; 3094 desc_size = sizeof(struct rtw89_pci_rx_bd_32); 3095 len = RTW89_PCI_RXBD_NUM_MAX; 3096 ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring, 3097 desc_size, len, i); 3098 if (ret) { 3099 rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i); 3100 goto err_free; 3101 } 3102 } 3103 3104 return 0; 3105 3106 err_free: 3107 rx_allocated = i; 3108 for (i = 0; i < rx_allocated; i++) { 3109 rx_ring = &rtwpci->rx_rings[i]; 3110 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 3111 } 3112 3113 return ret; 3114 } 3115 3116 static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev, 3117 struct pci_dev *pdev) 3118 { 3119 int ret; 3120 3121 ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev); 3122 if (ret) { 3123 rtw89_err(rtwdev, "failed to alloc dma tx rings\n"); 3124 goto err; 3125 } 3126 3127 ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev); 3128 if (ret) { 3129 rtw89_err(rtwdev, "failed to alloc dma rx rings\n"); 3130 goto err_free_tx_rings; 3131 } 3132 3133 return 0; 3134 3135 err_free_tx_rings: 3136 rtw89_pci_free_tx_rings(rtwdev, pdev); 3137 err: 3138 return ret; 3139 } 3140 3141 static void rtw89_pci_h2c_init(struct rtw89_dev 
*rtwdev, 3142 struct rtw89_pci *rtwpci) 3143 { 3144 skb_queue_head_init(&rtwpci->h2c_queue); 3145 skb_queue_head_init(&rtwpci->h2c_release_queue); 3146 } 3147 3148 static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev, 3149 struct pci_dev *pdev) 3150 { 3151 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3152 int ret; 3153 3154 ret = rtw89_pci_setup_mapping(rtwdev, pdev); 3155 if (ret) { 3156 rtw89_err(rtwdev, "failed to setup pci mapping\n"); 3157 goto err; 3158 } 3159 3160 ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev); 3161 if (ret) { 3162 rtw89_err(rtwdev, "failed to alloc pci trx rings\n"); 3163 goto err_pci_unmap; 3164 } 3165 3166 rtw89_pci_h2c_init(rtwdev, rtwpci); 3167 3168 spin_lock_init(&rtwpci->irq_lock); 3169 spin_lock_init(&rtwpci->trx_lock); 3170 3171 return 0; 3172 3173 err_pci_unmap: 3174 rtw89_pci_clear_mapping(rtwdev, pdev); 3175 err: 3176 return ret; 3177 } 3178 3179 static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev, 3180 struct pci_dev *pdev) 3181 { 3182 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3183 3184 rtw89_pci_free_trx_rings(rtwdev, pdev); 3185 rtw89_pci_clear_mapping(rtwdev, pdev); 3186 rtw89_pci_release_fwcmd(rtwdev, rtwpci, 3187 skb_queue_len(&rtwpci->h2c_queue), true); 3188 } 3189 3190 void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev) 3191 { 3192 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3193 3194 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0; 3195 3196 if (rtwpci->under_recovery) { 3197 rtwpci->intrs[0] = B_AX_HS0ISR_IND_INT_EN; 3198 rtwpci->intrs[1] = 0; 3199 } else { 3200 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3201 B_AX_RXDMA_INT_EN | 3202 B_AX_RXP1DMA_INT_EN | 3203 B_AX_RPQDMA_INT_EN | 3204 B_AX_RXDMA_STUCK_INT_EN | 3205 B_AX_RDU_INT_EN | 3206 B_AX_RPQBD_FULL_INT_EN | 3207 B_AX_HS0ISR_IND_INT_EN; 3208 3209 rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN; 3210 } 3211 } 3212 EXPORT_SYMBOL(rtw89_pci_config_intr_mask); 3213 3214 static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev) 3215 { 3216 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3217 3218 rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN; 3219 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3220 rtwpci->intrs[0] = 0; 3221 rtwpci->intrs[1] = 0; 3222 } 3223 3224 static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev) 3225 { 3226 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3227 3228 rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN | 3229 B_AX_HS1ISR_IND_INT_EN | 3230 B_AX_HS0ISR_IND_INT_EN; 3231 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3232 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3233 B_AX_RXDMA_INT_EN | 3234 B_AX_RXP1DMA_INT_EN | 3235 B_AX_RPQDMA_INT_EN | 3236 B_AX_RXDMA_STUCK_INT_EN | 3237 B_AX_RDU_INT_EN | 3238 B_AX_RPQBD_FULL_INT_EN; 3239 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3240 } 3241 3242 static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev) 3243 { 3244 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3245 3246 rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN | 3247 B_AX_HS0ISR_IND_INT_EN; 3248 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3249 rtwpci->intrs[0] = 0; 3250 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3251 } 3252 3253 void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev) 3254 { 3255 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3256 3257 if (rtwpci->under_recovery) 3258 rtw89_pci_recovery_intr_mask_v1(rtwdev); 3259 else if 
(rtwpci->low_power) 3260 rtw89_pci_low_power_intr_mask_v1(rtwdev); 3261 else 3262 rtw89_pci_default_intr_mask_v1(rtwdev); 3263 } 3264 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1); 3265 3266 static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev, 3267 struct pci_dev *pdev) 3268 { 3269 unsigned long flags = 0; 3270 int ret; 3271 3272 flags |= PCI_IRQ_LEGACY | PCI_IRQ_MSI; 3273 ret = pci_alloc_irq_vectors(pdev, 1, 1, flags); 3274 if (ret < 0) { 3275 rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret); 3276 goto err; 3277 } 3278 3279 ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq, 3280 rtw89_pci_interrupt_handler, 3281 rtw89_pci_interrupt_threadfn, 3282 IRQF_SHARED, KBUILD_MODNAME, rtwdev); 3283 if (ret) { 3284 rtw89_err(rtwdev, "failed to request threaded irq\n"); 3285 goto err_free_vector; 3286 } 3287 3288 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET); 3289 3290 return 0; 3291 3292 err_free_vector: 3293 pci_free_irq_vectors(pdev); 3294 err: 3295 return ret; 3296 } 3297 3298 static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev, 3299 struct pci_dev *pdev) 3300 { 3301 devm_free_irq(rtwdev->dev, pdev->irq, rtwdev); 3302 pci_free_irq_vectors(pdev); 3303 } 3304 3305 static u16 gray_code_to_bin(u16 gray_code, u32 bit_num) 3306 { 3307 u16 bin = 0, gray_bit; 3308 u32 bit_idx; 3309 3310 for (bit_idx = 0; bit_idx < bit_num; bit_idx++) { 3311 gray_bit = (gray_code >> bit_idx) & 0x1; 3312 if (bit_num - bit_idx > 1) 3313 gray_bit ^= (gray_code >> (bit_idx + 1)) & 0x1; 3314 bin |= (gray_bit << bit_idx); 3315 } 3316 3317 return bin; 3318 } 3319 3320 static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev) 3321 { 3322 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3323 struct pci_dev *pdev = rtwpci->pdev; 3324 u16 val16, filter_out_val; 3325 u32 val, phy_offset; 3326 int ret; 3327 3328 if (rtwdev->chip->chip_id != RTL8852C) 3329 return 0; 3330 3331 val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK); 3332 if (val == B_AX_ASPM_CTRL_L1) 3333 return 0; 3334 3335 ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val); 3336 if (ret) 3337 return ret; 3338 3339 val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val); 3340 if (val == RTW89_PCIE_GEN1_SPEED) { 3341 phy_offset = R_RAC_DIRECT_OFFSET_G1; 3342 } else if (val == RTW89_PCIE_GEN2_SPEED) { 3343 phy_offset = R_RAC_DIRECT_OFFSET_G2; 3344 val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT); 3345 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, 3346 val16 | B_PCIE_BIT_PINOUT_DIS); 3347 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, 3348 val16 & ~B_PCIE_BIT_RD_SEL); 3349 3350 val16 = rtw89_read16_mask(rtwdev, 3351 phy_offset + RAC_ANA1F * RAC_MULT, 3352 FILTER_OUT_EQ_MASK); 3353 val16 = gray_code_to_bin(val16, hweight16(val16)); 3354 filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 * 3355 RAC_MULT); 3356 filter_out_val &= ~REG_FILTER_OUT_MASK; 3357 filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16); 3358 3359 rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT, 3360 filter_out_val); 3361 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0A * RAC_MULT, 3362 B_BAC_EQ_SEL); 3363 rtw89_write16_set(rtwdev, 3364 R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT, 3365 B_PCIE_BIT_PSAVE); 3366 } else { 3367 return -EOPNOTSUPP; 3368 } 3369 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT, 3370 B_PCIE_BIT_PSAVE); 3371 3372 return 0; 3373 } 3374 3375 static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable) 3376 { 3377 
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3378 int ret; 3379 3380 if (rtw89_pci_disable_clkreq) 3381 return; 3382 3383 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 3384 PCIE_CLKDLY_HW_30US); 3385 if (ret) 3386 rtw89_err(rtwdev, "failed to set CLKREQ Delay\n"); 3387 3388 if (chip_id == RTL8852A) { 3389 if (enable) 3390 ret = rtw89_pci_config_byte_set(rtwdev, 3391 RTW89_PCIE_L1_CTRL, 3392 RTW89_PCIE_BIT_CLK); 3393 else 3394 ret = rtw89_pci_config_byte_clr(rtwdev, 3395 RTW89_PCIE_L1_CTRL, 3396 RTW89_PCIE_BIT_CLK); 3397 if (ret) 3398 rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d", 3399 enable ? "set" : "unset", ret); 3400 } else if (chip_id == RTL8852C) { 3401 rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL, 3402 B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL); 3403 if (enable) 3404 rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL, 3405 B_AX_CLK_REQ_N); 3406 else 3407 rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL, 3408 B_AX_CLK_REQ_N); 3409 } 3410 } 3411 3412 static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable) 3413 { 3414 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3415 u8 value = 0; 3416 int ret; 3417 3418 if (rtw89_pci_disable_aspm_l1) 3419 return; 3420 3421 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value); 3422 if (ret) 3423 rtw89_err(rtwdev, "failed to read ASPM Delay\n"); 3424 3425 value &= ~(RTW89_L1DLY_MASK | RTW89_L0DLY_MASK); 3426 value |= FIELD_PREP(RTW89_L1DLY_MASK, PCIE_L1DLY_16US) | 3427 FIELD_PREP(RTW89_L0DLY_MASK, PCIE_L0SDLY_4US); 3428 3429 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value); 3430 if (ret) 3431 rtw89_err(rtwdev, "failed to read ASPM Delay\n"); 3432 3433 if (chip_id == RTL8852A || chip_id == RTL8852B) { 3434 if (enable) 3435 ret = rtw89_pci_config_byte_set(rtwdev, 3436 RTW89_PCIE_L1_CTRL, 3437 RTW89_PCIE_BIT_L1); 3438 else 3439 ret = rtw89_pci_config_byte_clr(rtwdev, 3440 RTW89_PCIE_L1_CTRL, 3441 RTW89_PCIE_BIT_L1); 3442 } else if (chip_id == RTL8852C) { 3443 if (enable) 3444 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3445 B_AX_ASPM_CTRL_L1); 3446 else 3447 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3448 B_AX_ASPM_CTRL_L1); 3449 } 3450 if (ret) 3451 rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d", 3452 enable ? "set" : "unset", ret); 3453 } 3454 3455 static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev) 3456 { 3457 struct rtw89_traffic_stats *stats = &rtwdev->stats; 3458 enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv; 3459 enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv; 3460 u32 val = 0; 3461 3462 if (!rtwdev->scanning && 3463 (tx_tfc_lv >= RTW89_TFC_HIGH || rx_tfc_lv >= RTW89_TFC_HIGH)) 3464 val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL | 3465 FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) | 3466 FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) | 3467 FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64); 3468 3469 rtw89_write32(rtwdev, R_AX_INT_MIT_RX, val); 3470 } 3471 3472 static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev) 3473 { 3474 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3475 struct pci_dev *pdev = rtwpci->pdev; 3476 u16 link_ctrl; 3477 int ret; 3478 3479 /* Though there is standard PCIE configuration space to set the 3480 * link control register, but by Realtek's design, driver should 3481 * check if host supports CLKREQ/ASPM to enable the HW module. 
3482 * 3483 * These functions are implemented by two HW modules associated, 3484 * one is responsible to access PCIE configuration space to 3485 * follow the host settings, and another is in charge of doing 3486 * CLKREQ/ASPM mechanisms, it is default disabled. Because sometimes 3487 * the host does not support it, and due to some reasons or wrong 3488 * settings (ex. CLKREQ# not Bi-Direction), it could lead to device 3489 * loss if HW misbehaves on the link. 3490 * 3491 * Hence it's designed that driver should first check the PCIE 3492 * configuration space is sync'ed and enabled, then driver can turn 3493 * on the other module that is actually working on the mechanism. 3494 */ 3495 ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl); 3496 if (ret) { 3497 rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret); 3498 return; 3499 } 3500 3501 if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN) 3502 rtw89_pci_clkreq_set(rtwdev, true); 3503 3504 if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1) 3505 rtw89_pci_aspm_set(rtwdev, true); 3506 } 3507 3508 static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable) 3509 { 3510 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3511 int ret; 3512 3513 if (chip_id == RTL8852A || chip_id == RTL8852B) { 3514 if (enable) 3515 ret = rtw89_pci_config_byte_set(rtwdev, 3516 RTW89_PCIE_TIMER_CTRL, 3517 RTW89_PCIE_BIT_L1SUB); 3518 else 3519 ret = rtw89_pci_config_byte_clr(rtwdev, 3520 RTW89_PCIE_TIMER_CTRL, 3521 RTW89_PCIE_BIT_L1SUB); 3522 if (ret) 3523 rtw89_err(rtwdev, "failed to %s L1SS, ret=%d", 3524 enable ? "set" : "unset", ret); 3525 } else if (chip_id == RTL8852C) { 3526 ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1, 3527 RTW89_PCIE_BIT_ASPM_L11 | 3528 RTW89_PCIE_BIT_PCI_L11); 3529 if (ret) 3530 rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d", ret); 3531 if (enable) 3532 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3533 B_AX_L1SUB_DISABLE); 3534 else 3535 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3536 B_AX_L1SUB_DISABLE); 3537 } 3538 } 3539 3540 static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev) 3541 { 3542 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3543 struct pci_dev *pdev = rtwpci->pdev; 3544 u32 l1ss_cap_ptr, l1ss_ctrl; 3545 3546 if (rtw89_pci_disable_l1ss) 3547 return; 3548 3549 l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); 3550 if (!l1ss_cap_ptr) 3551 return; 3552 3553 pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl); 3554 3555 if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK) 3556 rtw89_pci_l1ss_set(rtwdev, true); 3557 } 3558 3559 static int rtw89_pci_poll_io_idle(struct rtw89_dev *rtwdev) 3560 { 3561 int ret = 0; 3562 u32 sts; 3563 u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY; 3564 3565 ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0, 3566 10, 1000, false, rtwdev, 3567 R_AX_PCIE_DMA_BUSY1); 3568 if (ret) { 3569 rtw89_err(rtwdev, "pci dmach busy1 0x%X\n", 3570 rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1)); 3571 return -EINVAL; 3572 } 3573 return ret; 3574 } 3575 3576 static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev) 3577 { 3578 u32 val; 3579 int ret; 3580 3581 if (rtwdev->chip->chip_id == RTL8852C) 3582 return 0; 3583 3584 rtw89_pci_ctrl_dma_all(rtwdev, false); 3585 ret = rtw89_pci_poll_io_idle(rtwdev); 3586 if (ret) { 3587 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 3588 rtw89_debug(rtwdev, RTW89_DBG_HCI, 3589 "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n", 3590 
R_AX_DBG_ERR_FLAG, val); 3591 if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0) 3592 rtw89_mac_ctrl_hci_dma_tx(rtwdev, false); 3593 if (val & B_AX_RX_STUCK) 3594 rtw89_mac_ctrl_hci_dma_rx(rtwdev, false); 3595 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true); 3596 ret = rtw89_pci_poll_io_idle(rtwdev); 3597 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 3598 rtw89_debug(rtwdev, RTW89_DBG_HCI, 3599 "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n", 3600 R_AX_DBG_ERR_FLAG, val); 3601 } 3602 3603 return ret; 3604 } 3605 3606 3607 3608 static int rtw89_pci_rst_bdram(struct rtw89_dev *rtwdev) 3609 { 3610 int ret = 0; 3611 u32 val32, sts; 3612 3613 val32 = B_AX_RST_BDRAM; 3614 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 3615 3616 ret = read_poll_timeout_atomic(rtw89_read32, sts, 3617 (sts & B_AX_RST_BDRAM) == 0x0, 1, 100, 3618 true, rtwdev, R_AX_PCIE_INIT_CFG1); 3619 return ret; 3620 } 3621 3622 static int rtw89_pci_lv1rst_start_dma(struct rtw89_dev *rtwdev) 3623 { 3624 u32 ret; 3625 3626 if (rtwdev->chip->chip_id == RTL8852C) 3627 return 0; 3628 3629 rtw89_mac_ctrl_hci_dma_trx(rtwdev, false); 3630 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true); 3631 rtw89_pci_clr_idx_all(rtwdev); 3632 3633 ret = rtw89_pci_rst_bdram(rtwdev); 3634 if (ret) 3635 return ret; 3636 3637 rtw89_pci_ctrl_dma_all(rtwdev, true); 3638 return ret; 3639 } 3640 3641 static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev, 3642 enum rtw89_lv1_rcvy_step step) 3643 { 3644 int ret; 3645 3646 switch (step) { 3647 case RTW89_LV1_RCVY_STEP_1: 3648 ret = rtw89_pci_lv1rst_stop_dma(rtwdev); 3649 if (ret) 3650 rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n"); 3651 3652 break; 3653 3654 case RTW89_LV1_RCVY_STEP_2: 3655 ret = rtw89_pci_lv1rst_start_dma(rtwdev); 3656 if (ret) 3657 rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n"); 3658 break; 3659 3660 default: 3661 return -EINVAL; 3662 } 3663 3664 return ret; 3665 } 3666 3667 static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev) 3668 { 3669 rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX =0x%08x\n", 3670 rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX)); 3671 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n", 3672 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG)); 3673 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n", 3674 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG)); 3675 } 3676 3677 static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget) 3678 { 3679 struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi); 3680 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3681 unsigned long flags; 3682 int work_done; 3683 3684 rtwdev->napi_budget_countdown = budget; 3685 3686 rtw89_pci_clear_isr0(rtwdev, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT); 3687 work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 3688 if (work_done == budget) 3689 return budget; 3690 3691 rtw89_pci_clear_isr0(rtwdev, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT | B_AX_RDU_INT); 3692 work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 3693 if (work_done < budget && napi_complete_done(napi, work_done)) { 3694 spin_lock_irqsave(&rtwpci->irq_lock, flags); 3695 if (likely(rtwpci->running)) 3696 rtw89_chip_enable_intr(rtwdev, rtwpci); 3697 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 3698 } 3699 3700 return work_done; 3701 } 3702 3703 static int __maybe_unused rtw89_pci_suspend(struct device *dev) 3704 { 3705 struct ieee80211_hw *hw = dev_get_drvdata(dev); 3706 struct rtw89_dev *rtwdev = hw->priv; 3707 enum rtw89_core_chip_id chip_id = 
rtwdev->chip->chip_id; 3708 3709 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3710 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); 3711 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3712 if (chip_id == RTL8852A || chip_id == RTL8852B) { 3713 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 3714 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 3715 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 3716 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); 3717 } else { 3718 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, 3719 B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN); 3720 } 3721 3722 return 0; 3723 } 3724 3725 static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev) 3726 { 3727 if (rtwdev->chip->chip_id == RTL8852C) 3728 return; 3729 3730 /* Hardware need write the reg twice to ensure the setting work */ 3731 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE, 3732 RTW89_PCIE_BIT_CFG_RST_MSTATE); 3733 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE, 3734 RTW89_PCIE_BIT_CFG_RST_MSTATE); 3735 } 3736 3737 static int __maybe_unused rtw89_pci_resume(struct device *dev) 3738 { 3739 struct ieee80211_hw *hw = dev_get_drvdata(dev); 3740 struct rtw89_dev *rtwdev = hw->priv; 3741 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3742 3743 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3744 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); 3745 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3746 if (chip_id == RTL8852A || chip_id == RTL8852B) { 3747 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 3748 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 3749 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, 3750 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); 3751 } else { 3752 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, 3753 B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN); 3754 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, 3755 B_AX_SEL_REQ_ENTR_L1); 3756 } 3757 rtw89_pci_l2_hci_ldo(rtwdev); 3758 rtw89_pci_filter_out(rtwdev); 3759 rtw89_pci_link_cfg(rtwdev); 3760 rtw89_pci_l1ss_cfg(rtwdev); 3761 3762 return 0; 3763 } 3764 3765 SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume); 3766 EXPORT_SYMBOL(rtw89_pm_ops); 3767 3768 static const struct rtw89_hci_ops rtw89_pci_ops = { 3769 .tx_write = rtw89_pci_ops_tx_write, 3770 .tx_kick_off = rtw89_pci_ops_tx_kick_off, 3771 .flush_queues = rtw89_pci_ops_flush_queues, 3772 .reset = rtw89_pci_ops_reset, 3773 .start = rtw89_pci_ops_start, 3774 .stop = rtw89_pci_ops_stop, 3775 .pause = rtw89_pci_ops_pause, 3776 .switch_mode = rtw89_pci_ops_switch_mode, 3777 .recalc_int_mit = rtw89_pci_recalc_int_mit, 3778 3779 .read8 = rtw89_pci_ops_read8, 3780 .read16 = rtw89_pci_ops_read16, 3781 .read32 = rtw89_pci_ops_read32, 3782 .write8 = rtw89_pci_ops_write8, 3783 .write16 = rtw89_pci_ops_write16, 3784 .write32 = rtw89_pci_ops_write32, 3785 3786 .mac_pre_init = rtw89_pci_ops_mac_pre_init, 3787 .mac_post_init = rtw89_pci_ops_mac_post_init, 3788 .deinit = rtw89_pci_ops_deinit, 3789 3790 .check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource, 3791 .mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery, 3792 .dump_err_status = rtw89_pci_ops_dump_err_status, 3793 .napi_poll = rtw89_pci_napi_poll, 3794 3795 .recovery_start = rtw89_pci_ops_recovery_start, 3796 .recovery_complete = rtw89_pci_ops_recovery_complete, 3797 3798 .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_pcie, 3799 .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_pcie, 3800 .ctrl_trxhci = rtw89_pci_ctrl_dma_trx, 3801 
.poll_txdma_ch = rtw89_poll_txdma_ch_idle_pcie, 3802 .clr_idx_all = rtw89_pci_clr_idx_all, 3803 .clear = rtw89_pci_clear_resource, 3804 .disable_intr = rtw89_pci_disable_intr_lock, 3805 .enable_intr = rtw89_pci_enable_intr_lock, 3806 .rst_bdram = rtw89_pci_rst_bdram_pcie, 3807 }; 3808 3809 int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 3810 { 3811 struct rtw89_dev *rtwdev; 3812 const struct rtw89_driver_info *info; 3813 const struct rtw89_pci_info *pci_info; 3814 int ret; 3815 3816 info = (const struct rtw89_driver_info *)id->driver_data; 3817 3818 rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev, 3819 sizeof(struct rtw89_pci), 3820 info->chip); 3821 if (!rtwdev) { 3822 dev_err(&pdev->dev, "failed to allocate hw\n"); 3823 return -ENOMEM; 3824 } 3825 3826 pci_info = info->bus.pci; 3827 3828 rtwdev->pci_info = info->bus.pci; 3829 rtwdev->hci.ops = &rtw89_pci_ops; 3830 rtwdev->hci.type = RTW89_HCI_TYPE_PCIE; 3831 rtwdev->hci.rpwm_addr = pci_info->rpwm_addr; 3832 rtwdev->hci.cpwm_addr = pci_info->cpwm_addr; 3833 3834 SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev); 3835 3836 ret = rtw89_core_init(rtwdev); 3837 if (ret) { 3838 rtw89_err(rtwdev, "failed to initialise core\n"); 3839 goto err_release_hw; 3840 } 3841 3842 ret = rtw89_pci_claim_device(rtwdev, pdev); 3843 if (ret) { 3844 rtw89_err(rtwdev, "failed to claim pci device\n"); 3845 goto err_core_deinit; 3846 } 3847 3848 ret = rtw89_pci_setup_resource(rtwdev, pdev); 3849 if (ret) { 3850 rtw89_err(rtwdev, "failed to setup pci resource\n"); 3851 goto err_declaim_pci; 3852 } 3853 3854 ret = rtw89_chip_info_setup(rtwdev); 3855 if (ret) { 3856 rtw89_err(rtwdev, "failed to setup chip information\n"); 3857 goto err_clear_resource; 3858 } 3859 3860 rtw89_pci_filter_out(rtwdev); 3861 rtw89_pci_link_cfg(rtwdev); 3862 rtw89_pci_l1ss_cfg(rtwdev); 3863 3864 ret = rtw89_core_register(rtwdev); 3865 if (ret) { 3866 rtw89_err(rtwdev, "failed to register core\n"); 3867 goto err_clear_resource; 3868 } 3869 3870 rtw89_core_napi_init(rtwdev); 3871 3872 ret = rtw89_pci_request_irq(rtwdev, pdev); 3873 if (ret) { 3874 rtw89_err(rtwdev, "failed to request pci irq\n"); 3875 goto err_unregister; 3876 } 3877 3878 return 0; 3879 3880 err_unregister: 3881 rtw89_core_napi_deinit(rtwdev); 3882 rtw89_core_unregister(rtwdev); 3883 err_clear_resource: 3884 rtw89_pci_clear_resource(rtwdev, pdev); 3885 err_declaim_pci: 3886 rtw89_pci_declaim_device(rtwdev, pdev); 3887 err_core_deinit: 3888 rtw89_core_deinit(rtwdev); 3889 err_release_hw: 3890 rtw89_free_ieee80211_hw(rtwdev); 3891 3892 return ret; 3893 } 3894 EXPORT_SYMBOL(rtw89_pci_probe); 3895 3896 void rtw89_pci_remove(struct pci_dev *pdev) 3897 { 3898 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 3899 struct rtw89_dev *rtwdev; 3900 3901 rtwdev = hw->priv; 3902 3903 rtw89_pci_free_irq(rtwdev, pdev); 3904 rtw89_core_napi_deinit(rtwdev); 3905 rtw89_core_unregister(rtwdev); 3906 rtw89_pci_clear_resource(rtwdev, pdev); 3907 rtw89_pci_declaim_device(rtwdev, pdev); 3908 rtw89_core_deinit(rtwdev); 3909 rtw89_free_ieee80211_hw(rtwdev); 3910 } 3911 EXPORT_SYMBOL(rtw89_pci_remove); 3912 3913 MODULE_AUTHOR("Realtek Corporation"); 3914 MODULE_DESCRIPTION("Realtek 802.11ax wireless PCI driver"); 3915 MODULE_LICENSE("Dual BSD/GPL"); 3916