// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include <linux/module.h>
#include <linux/pci.h>
#include "main.h"
#include "pci.h"
#include "tx.h"
#include "rx.h"
#include "debug.h"

static u32 rtw_pci_tx_queue_idx_addr[] = {
	[RTW_TX_QUEUE_BK]	= RTK_PCI_TXBD_IDX_BKQ,
	[RTW_TX_QUEUE_BE]	= RTK_PCI_TXBD_IDX_BEQ,
	[RTW_TX_QUEUE_VI]	= RTK_PCI_TXBD_IDX_VIQ,
	[RTW_TX_QUEUE_VO]	= RTK_PCI_TXBD_IDX_VOQ,
	[RTW_TX_QUEUE_MGMT]	= RTK_PCI_TXBD_IDX_MGMTQ,
	[RTW_TX_QUEUE_HI0]	= RTK_PCI_TXBD_IDX_HI0Q,
	[RTW_TX_QUEUE_H2C]	= RTK_PCI_TXBD_IDX_H2CQ,
};

static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
{
	switch (queue) {
	case RTW_TX_QUEUE_BCN:
		return TX_DESC_QSEL_BEACON;
	case RTW_TX_QUEUE_H2C:
		return TX_DESC_QSEL_H2C;
	case RTW_TX_QUEUE_MGMT:
		return TX_DESC_QSEL_MGMT;
	case RTW_TX_QUEUE_HI0:
		return TX_DESC_QSEL_HIGH;
	default:
		return skb->priority;
	}
}

static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readb(rtwpci->mmap + addr);
}

static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readw(rtwpci->mmap + addr);
}

static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readl(rtwpci->mmap + addr);
}

static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writeb(val, rtwpci->mmap + addr);
}

static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writew(val, rtwpci->mmap + addr);
}

static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writel(val, rtwpci->mmap + addr);
}

static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
{
	int offset = tx_ring->r.desc_size * idx;

	return tx_ring->r.head + offset;
}

static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_tx_ring *tx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb, *tmp;
	dma_addr_t dma;
	u8 *head = tx_ring->r.head;
	u32 len = tx_ring->r.len;
	int ring_sz = len * tx_ring->r.desc_size;

	/* free every skb remaining in the tx list */
	skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
		__skb_unlink(skb, &tx_ring->queue);
		tx_data = rtw_pci_get_tx_data(skb);
		dma = tx_data->dma;

		pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_any(skb);
	}

	/* free the ring itself */
	pci_free_consistent(pdev, ring_sz, head, tx_ring->r.dma);
	tx_ring->r.head = NULL;
}

static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_rx_ring *rx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct sk_buff *skb;
	dma_addr_t dma;
	u8 *head = rx_ring->r.head;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;
	int i;

	for (i = 0; i < rx_ring->r.len; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;

		dma = *((dma_addr_t *)skb->cb);
		pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
		dev_kfree_skb(skb);
		rx_ring->buf[i] = NULL;
	}

	pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma);
}

static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	struct rtw_pci_rx_ring *rx_ring;
	int i;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		rtw_pci_free_tx_ring(rtwdev, tx_ring);
	}

	for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		rtw_pci_free_rx_ring(rtwdev, rx_ring);
	}
}

static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
				struct rtw_pci_tx_ring *tx_ring,
				u8 desc_size, u32 len)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	int ring_sz = desc_size * len;
	dma_addr_t dma;
	u8 *head;

	head = pci_zalloc_consistent(pdev, ring_sz, &dma);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate tx ring\n");
		return -ENOMEM;
	}

	skb_queue_head_init(&tx_ring->queue);
	tx_ring->r.head = head;
	tx_ring->r.dma = dma;
	tx_ring->r.len = len;
	tx_ring->r.desc_size = desc_size;
	tx_ring->r.wp = 0;
	tx_ring->r.rp = 0;

	return 0;
}

static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
				 struct rtw_pci_rx_ring *rx_ring,
				 u32 idx, u32 desc_sz)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_rx_buffer_desc *buf_desc;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	dma_addr_t dma;

	if (!skb)
		return -EINVAL;

	dma = pci_map_single(pdev, skb->data, buf_sz, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma))
		return -EBUSY;

	*((dma_addr_t *)skb->cb) = dma;
	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	memset(buf_desc, 0, sizeof(*buf_desc));
	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
	buf_desc->dma = cpu_to_le32(dma);

	return 0;
}

static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
				struct rtw_pci_rx_ring *rx_ring,
				u8 desc_size, u32 len)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct sk_buff *skb = NULL;
	dma_addr_t dma;
	u8 *head;
	int ring_sz = desc_size * len;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	int i, allocated;
	int ret = 0;

	head = pci_zalloc_consistent(pdev, ring_sz, &dma);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate rx ring\n");
		return -ENOMEM;
	}
	rx_ring->r.head = head;

	for (i = 0; i < len; i++) {
		skb = dev_alloc_skb(buf_sz);
		if (!skb) {
			allocated = i;
			ret = -ENOMEM;
			goto err_out;
		}

		memset(skb->data, 0, buf_sz);
		rx_ring->buf[i] = skb;
		ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
		if (ret) {
			allocated = i;
			dev_kfree_skb_any(skb);
			goto err_out;
		}
	}

	rx_ring->r.dma = dma;
	rx_ring->r.len = len;
	rx_ring->r.desc_size = desc_size;
	rx_ring->r.wp = 0;
	rx_ring->r.rp = 0;

	return 0;

err_out:
	for (i = 0; i < allocated; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;
		/* keep 'dma' holding the ring address for pci_free_consistent() */
		pci_unmap_single(pdev, *((dma_addr_t *)skb->cb), buf_sz,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		rx_ring->buf[i] = NULL;
	}
	pci_free_consistent(pdev, ring_sz, head, dma);

	rtw_err(rtwdev, "failed to init rx buffer\n");

	return ret;
}

static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	struct rtw_pci_rx_ring *rx_ring;
	struct rtw_chip_info *chip = rtwdev->chip;
	int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
	int tx_desc_size, rx_desc_size;
	u32 len;
	int ret;

	tx_desc_size = chip->tx_buf_desc_sz;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		len = max_num_of_tx_queue(i);
		ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
		if (ret)
			goto out;
	}

	rx_desc_size = chip->rx_buf_desc_sz;

	for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
					   RTK_MAX_RX_DESC_NUM);
		if (ret)
			goto out;
	}

	return 0;

out:
	tx_alloced = i;
	for (i = 0; i < tx_alloced; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		rtw_pci_free_tx_ring(rtwdev, tx_ring);
	}

	rx_alloced = j;
	for (j = 0; j < rx_alloced; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		rtw_pci_free_rx_ring(rtwdev, rx_ring);
	}

	return ret;
}

static void rtw_pci_deinit(struct rtw_dev *rtwdev)
{
	rtw_pci_free_trx_ring(rtwdev);
}

static int rtw_pci_init(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	int ret = 0;

	rtwpci->irq_mask[0] = IMR_HIGHDOK |
			      IMR_MGNTDOK |
			      IMR_BKDOK |
			      IMR_BEDOK |
			      IMR_VIDOK |
			      IMR_VODOK |
			      IMR_ROK |
			      IMR_BCNDMAINT_E |
			      0;
	rtwpci->irq_mask[1] = IMR_TXFOVW |
			      0;
	rtwpci->irq_mask[3] = IMR_H2CDOK |
			      0;
	spin_lock_init(&rtwpci->irq_lock);
	ret = rtw_pci_init_trx_ring(rtwdev);

	return ret;
}

static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u32 len;
	u8 tmp;
	dma_addr_t dma;

	tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
	rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);

	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);

	len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
	dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & 0xfff);
	rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);

	/* reset read/write point */
	rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);

	/* reset H2C queue index */
	rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HOST_IDX);
	rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HW_IDX);
}

static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
{
	rtw_pci_reset_buf_desc(rtwdev);
}

static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
				     struct rtw_pci *rtwpci)
{
	rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0]);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
	rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
	rtwpci->irq_enabled = true;
}

static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
				      struct rtw_pci *rtwpci)
{
	rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
	rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
	rtwpci->irq_enabled = false;
}

static int rtw_pci_setup(struct rtw_dev *rtwdev)
{
	rtw_pci_reset_trx_ring(rtwdev);

	return 0;
}

static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
	/* reset dma and rx tag */
	rtw_write32_set(rtwdev, RTK_PCI_CTRL,
			BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
	rtwpci->rx_tag = 0;
}

static int rtw_pci_start(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long flags;

	rtw_pci_dma_reset(rtwdev, rtwpci);

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw_pci_enable_interrupt(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	return 0;
}

static void rtw_pci_stop(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static u8 ac_to_hwq[] = {
	[IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
	[IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
	[IEEE80211_AC_BE] = RTW_TX_QUEUE_BE,
	[IEEE80211_AC_BK] = RTW_TX_QUEUE_BK,
};

static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;
	u8 q_mapping = skb_get_queue_mapping(skb);
	u8 queue;

	if (unlikely(ieee80211_is_beacon(fc)))
		queue = RTW_TX_QUEUE_BCN;
	else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
		queue = RTW_TX_QUEUE_MGMT;
	else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq)))
		queue = ac_to_hwq[IEEE80211_AC_BE];
	else
		queue = ac_to_hwq[q_mapping];

	return queue;
}

static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
				      struct rtw_pci_tx_ring *ring)
{
	struct sk_buff *prev = skb_dequeue(&ring->queue);
	struct rtw_pci_tx_data *tx_data;
	dma_addr_t dma;

	if (!prev)
		return;

	tx_data = rtw_pci_get_tx_data(prev);
	dma = tx_data->dma;
	pci_unmap_single(rtwpci->pdev, dma, prev->len,
			 PCI_DMA_TODEVICE);
	dev_kfree_skb_any(prev);
}

static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
			      struct rtw_pci_rx_ring *rx_ring,
			      u32 idx)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_rx_buffer_desc *buf_desc;
	u32 desc_sz = chip->rx_buf_desc_sz;
	u16 total_pkt_size;

	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);

	/* rx tag mismatch, throw a warning */
	if (total_pkt_size != rtwpci->rx_tag)
		rtw_warn(rtwdev, "pci bus timeout, check dma status\n");

	rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
}

static int rtw_pci_xmit(struct rtw_dev *rtwdev,
			struct rtw_tx_pkt_info *pkt_info,
			struct sk_buff *skb, u8 queue)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	dma_addr_t dma;
	u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
	u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
	u32 size;
	u32 psb_len;
	u8 *pkt_desc;
	struct rtw_pci_tx_buffer_desc *buf_desc;
	u32 bd_idx;

	ring = &rtwpci->tx_rings[queue];

	size = skb->len;

	if (queue == RTW_TX_QUEUE_BCN)
		rtw_pci_release_rsvd_page(rtwpci, ring);
	else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
		return -ENOSPC;

	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
	memset(pkt_desc, 0, tx_pkt_desc_sz);
	pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
	rtw_tx_fill_tx_desc(pkt_info, skb);
	dma = pci_map_single(rtwpci->pdev, skb->data, skb->len,
			     PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(rtwpci->pdev, dma))
		return -EBUSY;

	/* after this we got dma mapped, there is no way back */
	buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
	memset(buf_desc, 0, tx_buf_desc_sz);
	psb_len = (skb->len - 1) / 128 + 1;
	if (queue == RTW_TX_QUEUE_BCN)
		psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;

	buf_desc[0].psb_len = cpu_to_le16(psb_len);
	buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
	buf_desc[0].dma = cpu_to_le32(dma);
	buf_desc[1].buf_size = cpu_to_le16(size);
	buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);

	tx_data = rtw_pci_get_tx_data(skb);
	tx_data->dma = dma;
	tx_data->sn = pkt_info->sn;
	skb_queue_tail(&ring->queue, skb);

	/* kick off tx queue */
	if (queue != RTW_TX_QUEUE_BCN) {
		if (++ring->r.wp >= ring->r.len)
			ring->r.wp = 0;
		bd_idx = rtw_pci_tx_queue_idx_addr[queue];
		rtw_write16(rtwdev, bd_idx, ring->r.wp & 0xfff);
	} else {
		u32 reg_bcn_work;

		reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
		reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
		rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
	}

	return 0;
}

static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
					u32 size)
{
	struct sk_buff *skb;
	struct rtw_tx_pkt_info pkt_info;
	u32 tx_pkt_desc_sz;
	u32 length;

	tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
	length = size + tx_pkt_desc_sz;
	skb = dev_alloc_skb(length);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, tx_pkt_desc_sz);
	memcpy((u8 *)skb_put(skb, size), buf, size);
	memset(&pkt_info, 0, sizeof(pkt_info));
	pkt_info.tx_pkt_size = size;
	pkt_info.offset = tx_pkt_desc_sz;

	return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
}

static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	struct sk_buff *skb;
	struct rtw_tx_pkt_info pkt_info;
	u32 tx_pkt_desc_sz;
	u32 length;

	tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
	length = size + tx_pkt_desc_sz;
	skb = dev_alloc_skb(length);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, tx_pkt_desc_sz);
	memcpy((u8 *)skb_put(skb, size), buf, size);
	memset(&pkt_info, 0, sizeof(pkt_info));
	pkt_info.tx_pkt_size = size;

	return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
}

static int rtw_pci_tx(struct rtw_dev *rtwdev,
		      struct rtw_tx_pkt_info *pkt_info,
		      struct sk_buff *skb)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring;
	u8 queue = rtw_hw_queue_mapping(skb);
	int ret;

	ret = rtw_pci_xmit(rtwdev, pkt_info, skb, queue);
	if (ret)
		return ret;

	ring = &rtwpci->tx_rings[queue];
	if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
		ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
		ring->queue_stopped = true;
	}

	return 0;
}

static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	struct ieee80211_tx_info *info;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb;
	u32 count;
	u32 bd_idx_addr;
	u32 bd_idx, cur_rp;
	u16 q_map;

	ring = &rtwpci->tx_rings[hw_queue];

	bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
	bd_idx = rtw_read32(rtwdev, bd_idx_addr);
	cur_rp = bd_idx >> 16;
	cur_rp &= 0xfff;
	if (cur_rp >= ring->r.rp)
		count = cur_rp - ring->r.rp;
	else
		count = ring->r.len - (ring->r.rp - cur_rp);

	while (count--) {
		skb = skb_dequeue(&ring->queue);
		tx_data = rtw_pci_get_tx_data(skb);
		pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
				 PCI_DMA_TODEVICE);

		/* just free command packets from host to card */
		if (hw_queue == RTW_TX_QUEUE_H2C) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		if (ring->queue_stopped &&
		    avail_desc(ring->r.wp, ring->r.rp, ring->r.len) > 4) {
			q_map = skb_get_queue_mapping(skb);
			ieee80211_wake_queue(hw, q_map);
			ring->queue_stopped = false;
		}

		skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);

		info = IEEE80211_SKB_CB(skb);

		/* enqueue to wait for tx report */
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
			rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
			continue;
		}

		/* always ACK for others, then they won't be marked as dropped */
		if (info->flags & IEEE80211_TX_CTL_NO_ACK)
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		else
			info->flags |= IEEE80211_TX_STAT_ACK;

		ieee80211_tx_info_clear_status(info);
		ieee80211_tx_status_irqsafe(hw, skb);
	}

	ring->r.rp = cur_rp;
}

static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_rx_ring *ring;
	struct rtw_rx_pkt_stat pkt_stat;
	struct ieee80211_rx_status rx_status;
	struct sk_buff *skb, *new;
	u32 cur_wp, cur_rp, tmp;
	u32 count;
	u32 pkt_offset;
	u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
	u32 buf_desc_sz = chip->rx_buf_desc_sz;
	u8 *rx_desc;
	dma_addr_t dma;

	ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];

	tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
	cur_wp = tmp >> 16;
	cur_wp &= 0xfff;
	if (cur_wp >= ring->r.wp)
		count = cur_wp - ring->r.wp;
	else
		count = ring->r.len - (ring->r.wp - cur_wp);

	cur_rp = ring->r.rp;
	while (count--) {
		rtw_pci_dma_check(rtwdev, ring, cur_rp);
		skb = ring->buf[cur_rp];
		dma = *((dma_addr_t *)skb->cb);
		pci_unmap_single(rtwpci->pdev, dma, RTK_PCI_RX_BUF_SIZE,
				 PCI_DMA_FROMDEVICE);
		rx_desc = skb->data;
		chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);

		/* offset from rx_desc to payload */
		pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
			     pkt_stat.shift;

		if (pkt_stat.is_c2h) {
			/* keep rx_desc, halmac needs it */
			skb_put(skb, pkt_stat.pkt_len + pkt_offset);

			/* pass offset for further operation */
			*((u32 *)skb->cb) = pkt_offset;
			skb_queue_tail(&rtwdev->c2h_queue, skb);
			ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
		} else {
			/* remove rx_desc, maybe use skb_pull? */
			skb_put(skb, pkt_stat.pkt_len);
			skb_reserve(skb, pkt_offset);

			/* alloc a smaller skb to mac80211 */
			new = dev_alloc_skb(pkt_stat.pkt_len);
			if (!new) {
				new = skb;
			} else {
				skb_put_data(new, skb->data, skb->len);
				dev_kfree_skb_any(skb);
			}
			/* TODO: merge into rx.c */
			rtw_rx_stats(rtwdev, pkt_stat.vif, new);
			memcpy(new->cb, &rx_status, sizeof(rx_status));
			ieee80211_rx_irqsafe(rtwdev->hw, new);
		}

		/* skb delivered to mac80211, alloc a new one in rx ring */
		new = dev_alloc_skb(RTK_PCI_RX_BUF_SIZE);
		if (WARN(!new, "rx routine starvation\n"))
			return;

		ring->buf[cur_rp] = new;
		rtw_pci_reset_rx_desc(rtwdev, new, ring, cur_rp, buf_desc_sz);

		/* host read next element in ring */
		if (++cur_rp >= ring->r.len)
			cur_rp = 0;
	}

	ring->r.rp = cur_rp;
	ring->r.wp = cur_wp;
	rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
}

static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
				   struct rtw_pci *rtwpci, u32 *irq_status)
{
	irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
	irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
	irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
	irq_status[0] &= rtwpci->irq_mask[0];
	irq_status[1] &= rtwpci->irq_mask[1];
	irq_status[3] &= rtwpci->irq_mask[3];
	rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
	rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
	rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
}

static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw_dev *rtwdev = dev;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u32 irq_status[4];

	spin_lock(&rtwpci->irq_lock);
	if (!rtwpci->irq_enabled)
		goto out;

	rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);

	if (irq_status[0] & IMR_MGNTDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
	if (irq_status[0] & IMR_HIGHDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
	if (irq_status[0] & IMR_BEDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
	if (irq_status[0] & IMR_BKDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
	if (irq_status[0] & IMR_VODOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
	if (irq_status[0] & IMR_VIDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
	if (irq_status[3] & IMR_H2CDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
	if (irq_status[0] & IMR_ROK)
		rtw_pci_rx_isr(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU);

out:
	spin_unlock(&rtwpci->irq_lock);

	return IRQ_HANDLED;
}

static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
			      struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long len;
	u8 bar_id = 2;
	int ret;

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci regions\n");
		return ret;
	}

	len = pci_resource_len(pdev, bar_id);
	rtwpci->mmap = pci_iomap(pdev, bar_id, len);
	if (!rtwpci->mmap) {
		pci_release_regions(pdev);
		rtw_err(rtwdev, "failed to map pci memory\n");
		return -ENOMEM;
	}

	return 0;
}

static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
				 struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	if (rtwpci->mmap) {
		pci_iounmap(pdev, rtwpci->mmap);
		pci_release_regions(pdev);
	}
}

static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
{
	u16 write_addr;
	u16 remainder = addr & 0x3;
	u8 flag;
	u8 cnt = 20;

	write_addr = ((addr & 0x0ffc) | (BIT(0) << (remainder + 12)));
	rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
	rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, 0x01);

	flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
	while (flag && (cnt != 0)) {
		udelay(10);
		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
		cnt--;
	}

	WARN(flag, "DBI write fail\n");
}

static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
{
	u8 page;
	u8 wflag;
	u8 cnt;

	rtw_write16(rtwdev, REG_MDIO_V1, data);

	page = addr < 0x20 ? 0 : 1;
	page += g1 ? 0 : 2;
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & 0x1f);
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);

	rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);
	wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1);

	cnt = 20;
	while (wflag && (cnt != 0)) {
		udelay(10);
		wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
					BIT_MDIO_WFLAG_V1);
		cnt--;
	}

	WARN(wflag, "MDIO write fail\n");
}

static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_intf_phy_para *para;
	u16 cut;
	u16 value;
	u16 offset;
	u16 ip_sel;
	int i;

	cut = BIT(0) << rtwdev->hal.cut_version;

	for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
		para = &chip->intf_table->gen1_para[i];
		if (!(para->cut_mask & cut))
			continue;
		if (para->offset == 0xffff)
			break;
		offset = para->offset;
		value = para->value;
		ip_sel = para->ip_sel;
		if (para->ip_sel == RTW_IP_SEL_PHY)
			rtw_mdio_write(rtwdev, offset, value, true);
		else
			rtw_dbi_write8(rtwdev, offset, value);
	}

	for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
		para = &chip->intf_table->gen2_para[i];
		if (!(para->cut_mask & cut))
			continue;
		if (para->offset == 0xffff)
			break;
		offset = para->offset;
		value = para->value;
		ip_sel = para->ip_sel;
		if (para->ip_sel == RTW_IP_SEL_PHY)
			rtw_mdio_write(rtwdev, offset, value, false);
		else
			rtw_dbi_write8(rtwdev, offset, value);
	}
}

static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to enable pci device\n");
		return ret;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, rtwdev->hw);
	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);

	return 0;
}

static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}

static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci;
	int ret;

	rtwpci = (struct rtw_pci *)rtwdev->priv;
	rtwpci->pdev = pdev;

	/* after this, the driver can access hw registers */
	ret = rtw_pci_io_mapping(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci io region\n");
		goto err_out;
	}

	ret = rtw_pci_init(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to allocate pci resources\n");
		goto err_io_unmap;
	}

	rtw_pci_phy_cfg(rtwdev);

	return 0;

err_io_unmap:
	rtw_pci_io_unmapping(rtwdev, pdev);

err_out:
	return ret;
}

static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	rtw_pci_deinit(rtwdev);
	rtw_pci_io_unmapping(rtwdev, pdev);
}

static struct rtw_hci_ops rtw_pci_ops = {
	.tx = rtw_pci_tx,
	.setup = rtw_pci_setup,
	.start = rtw_pci_start,
	.stop = rtw_pci_stop,

	.read8 = rtw_pci_read8,
	.read16 = rtw_pci_read16,
	.read32 = rtw_pci_read32,
	.write8 = rtw_pci_write8,
	.write16 = rtw_pci_write16,
	.write32 = rtw_pci_write32,
	.write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
	.write_data_h2c = rtw_pci_write_data_h2c,
};

static int rtw_pci_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	struct ieee80211_hw *hw;
	struct rtw_dev *rtwdev;
	int drv_data_size;
	int ret;

	drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
	hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
	if (!hw) {
		dev_err(&pdev->dev, "failed to allocate hw\n");
		return -ENOMEM;
	}

	rtwdev = hw->priv;
	rtwdev->hw = hw;
	rtwdev->dev = &pdev->dev;
	rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
	rtwdev->hci.ops = &rtw_pci_ops;
	rtwdev->hci.type = RTW_HCI_TYPE_PCIE;

	ret = rtw_core_init(rtwdev);
	if (ret)
		goto err_release_hw;

	rtw_dbg(rtwdev, RTW_DBG_PCI,
		"rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
		pdev->vendor, pdev->device, pdev->revision);

	ret = rtw_pci_claim(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to claim pci device\n");
		goto err_deinit_core;
	}

	ret = rtw_pci_setup_resource(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup pci resources\n");
		goto err_pci_declaim;
	}

	ret = rtw_chip_info_setup(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup chip information\n");
		goto err_destroy_pci;
	}

	ret = rtw_register_hw(rtwdev, hw);
	if (ret) {
		rtw_err(rtwdev, "failed to register hw\n");
		goto err_destroy_pci;
	}

	ret = request_irq(pdev->irq, &rtw_pci_interrupt_handler,
			  IRQF_SHARED, KBUILD_MODNAME, rtwdev);
	if (ret) {
		ieee80211_unregister_hw(hw);
		goto err_destroy_pci;
	}

	return 0;

err_destroy_pci:
	rtw_pci_destroy(rtwdev, pdev);

err_pci_declaim:
	rtw_pci_declaim(rtwdev, pdev);

err_deinit_core:
	rtw_core_deinit(rtwdev);

err_release_hw:
	ieee80211_free_hw(hw);

	return ret;
}

static void rtw_pci_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw_dev *rtwdev;
	struct rtw_pci *rtwpci;

	if (!hw)
		return;

	rtwdev = hw->priv;
	rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_unregister_hw(rtwdev, hw);
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	rtw_pci_destroy(rtwdev, pdev);
	rtw_pci_declaim(rtwdev, pdev);
	free_irq(rtwpci->pdev->irq, rtwdev);
	rtw_core_deinit(rtwdev);
	ieee80211_free_hw(hw);
}

static const struct pci_device_id rtw_pci_id_table[] = {
#ifdef CONFIG_RTW88_8822BE
	{ RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB822, rtw8822b_hw_spec) },
#endif
#ifdef CONFIG_RTW88_8822CE
	{ RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC822, rtw8822c_hw_spec) },
#endif
	{},
};
MODULE_DEVICE_TABLE(pci, rtw_pci_id_table);

static struct pci_driver rtw_pci_driver = {
	.name = "rtw_pci",
	.id_table = rtw_pci_id_table,
	.probe = rtw_pci_probe,
	.remove = rtw_pci_remove,
};
module_pci_driver(rtw_pci_driver);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
MODULE_LICENSE("Dual BSD/GPL");