// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)

#define Q_READ(_dev, _q, _field) ({					\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	u32 _val;							\
	if ((_q)->flags & MT_QFLAG_WED)					\
		_val = mtk_wed_device_reg_read(&(_dev)->mmio.wed,	\
					       ((_q)->wed_regs +	\
						_offset));		\
	else								\
		_val = readl(&(_q)->regs->_field);			\
	_val;								\
})

#define Q_WRITE(_dev, _q, _field, _val) do {				\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	if ((_q)->flags & MT_QFLAG_WED)					\
		mtk_wed_device_reg_write(&(_dev)->mmio.wed,		\
					 ((_q)->wed_regs + _offset),	\
					 _val);				\
	else								\
		writel(_val, &(_q)->regs->_field);			\
} while (0)

#else

#define Q_READ(_dev, _q, _field)	readl(&(_q)->regs->_field)
#define Q_WRITE(_dev, _q, _field, _val)	writel(_val, &(_q)->regs->_field)

#endif

static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = kzalloc(size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}

static struct mt76_txwi_cache *
mt76_alloc_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
	if (!t)
		return NULL;

	t->ptr = NULL;
	return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock(&dev->lock);

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock_bh(&dev->wed_lock);
	if (!list_empty(&dev->rxwi_cache)) {
		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock_bh(&dev->wed_lock);

	return t;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

struct mt76_txwi_cache *
mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);

	if (t)
		return t;

	return mt76_alloc_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_get_rxwi);

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

void
mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->wed_lock);
	list_add(&t->list, &dev->rxwi_cache);
	spin_unlock_bh(&dev->wed_lock);
}
EXPORT_SYMBOL_GPL(mt76_put_rxwi);

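/*
 * Teardown helpers for the txwi/rxwi caches above: drain every cached
 * entry, undo the txwi DMA mapping or release the rxwi page pool buffer,
 * and free the backing allocation. mt76_dma_cleanup() below calls both.
 */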
static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_txwi(dev)) != NULL) {
		dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
		kfree(mt76_get_txwi_ptr(dev, t));
	}
	local_bh_enable();
}

void
mt76_free_pending_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_rxwi(dev)) != NULL) {
		if (t->ptr)
			mt76_put_page_pool_buf(t->ptr, false);
		kfree(t);
	}
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);

static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	Q_WRITE(dev, q, desc_base, q->desc_dma);
	Q_WRITE(dev, q, ring_size, q->ndesc);
	q->head = Q_READ(dev, q, dma_idx);
	q->tail = q->head;
}

static void
mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
	int i;

	if (!q || !q->ndesc)
		return;

	/* clear descriptors */
	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	Q_WRITE(dev, q, cpu_idx, 0);
	Q_WRITE(dev, q, dma_idx, 0);
	mt76_dma_sync_idx(dev, q);
}

static int
mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
		    struct mt76_queue_buf *buf, void *data)
{
	struct mt76_desc *desc = &q->desc[q->head];
	struct mt76_queue_entry *entry = &q->entry[q->head];
	struct mt76_txwi_cache *txwi = NULL;
	u32 buf1 = 0, ctrl;
	int idx = q->head;
	int rx_token;

	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);

	if (mt76_queue_is_wed_rx(q)) {
		txwi = mt76_get_rxwi(dev);
		if (!txwi)
			return -ENOMEM;

		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
		if (rx_token < 0) {
			mt76_put_rxwi(dev, txwi);
			return -ENOMEM;
		}

		buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
		ctrl |= MT_DMA_CTL_TO_HOST;
	}

	WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
	WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
	WRITE_ONCE(desc->info, 0);

	entry->dma_addr[0] = buf->addr;
	entry->dma_len[0] = buf->len;
	entry->txwi = txwi;
	entry->buf = data;
	entry->wcid = 0xffff;
	entry->skip_buf1 = true;
	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	return idx;
}

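/*
 * Queue up to nbufs scatter-gather buffers. Each hardware descriptor
 * carries two buffer pointers (buf0/buf1), so buffers are consumed in
 * pairs; SD_LEN0/SD_LEN1 hold the lengths and LAST_SEC0/LAST_SEC1 mark
 * the final segment. The txwi/skb are attached to the last entry so the
 * TX cleanup path can unmap and complete the frame.
 */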
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;
	int i, idx = -1;
	u32 ctrl, next;

	if (txwi) {
		q->entry[q->head].txwi = DMA_DUMMY_DATA;
		q->entry[q->head].skip_buf0 = true;
	}

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		idx = q->head;
		next = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];
		entry = &q->entry[idx];

		if (buf[0].skip_unmap)
			entry->skip_buf0 = true;
		entry->skip_buf1 = i == nbufs - 1;

		entry->dma_addr[0] = buf[0].addr;
		entry->dma_len[0] = buf[0].len;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
		if (i < nbufs - 1) {
			entry->dma_addr[1] = buf[1].addr;
			entry->dma_len[1] = buf[1].len;
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
			if (buf[1].skip_unmap)
				entry->skip_buf1 = true;
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->head = next;
		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;
	q->entry[idx].wcid = 0xffff;

	return idx;
}

static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];

	if (!e->skip_buf0)
		dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
				 DMA_TO_DEVICE);

	if (!e->skip_buf1)
		dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
				 DMA_TO_DEVICE);

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	wmb();
	Q_WRITE(dev, q, cpu_idx, q->head);
}

static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
	struct mt76_queue_entry entry;
	int last;

	if (!q || !q->ndesc)
		return;

	spin_lock_bh(&q->cleanup_lock);
	if (flush)
		last = -1;
	else
		last = Q_READ(dev, q, dma_idx);

	while (q->queued > 0 && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		mt76_queue_tx_complete(dev, q, &entry);

		if (entry.txwi) {
			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
		}

		if (!flush && q->tail == last)
			last = Q_READ(dev, q, dma_idx);
	}
	spin_unlock_bh(&q->cleanup_lock);

	if (flush) {
		spin_lock_bh(&q->lock);
		mt76_dma_sync_idx(dev, q);
		mt76_dma_kick_queue(dev, q);
		spin_unlock_bh(&q->lock);
	}

	if (!q->queued)
		wake_up(&dev->tx_wait);
}

static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more, bool *drop)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	void *buf;

	if (len) {
		u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
		*more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	if (mt76_queue_is_wed_rx(q)) {
		u32 buf1 = le32_to_cpu(desc->buf1);
		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
		struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);

		if (!t)
			return NULL;

		dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
					SKB_WITH_OVERHEAD(q->buf_size),
					page_pool_get_dma_dir(q->page_pool));

		buf = t->ptr;
		t->dma_addr = 0;
		t->ptr = NULL;

		mt76_put_rxwi(dev, t);

		if (drop) {
			u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));

			*drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A |
					   MT_DMA_CTL_DROP));

			*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
		}
	} else {
		buf = e->buf;
		e->buf = NULL;
		dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
					SKB_WITH_OVERHEAD(q->buf_size),
					page_pool_get_dma_dir(q->page_pool));
	}

	return buf;
}

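/*
 * Pop the entry at q->tail if the hardware has finished with it
 * (MT_DMA_CTL_DMA_DONE set), or unconditionally when flushing, and hand
 * the underlying buffer back via mt76_dma_get_buf().
 */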
static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more, bool *drop)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (flush)
		q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
		return NULL;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
}

static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue_buf buf = {};
	dma_addr_t addr;

	if (test_bit(MT76_MCU_RESET, &dev->phy.state))
		goto error;

	if (q->queued + 1 >= q->ndesc - 1)
		goto error;

	addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto error;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);
	return -ENOMEM;
}

static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      enum mt76_txq_id qid, struct sk_buff *skb,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_status status = {
		.sta = sta,
	};
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct ieee80211_hw *hw;
	int len, n = 0, ret = -ENOMEM;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	if (test_bit(MT76_RESET, &dev->phy.state))
		goto free_skb;

	t = mt76_get_txwi(dev);
	if (!t)
		goto free_skb;

	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
	/* fix tx_done accounting on queue overflow */
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (tx_info.skb == phy->test.tx_skb)
			phy->test.tx_done--;
	}
#endif

	mt76_put_txwi(dev, t);

free_skb:
	status.skb = tx_info.skb;
	hw = mt76_tx_status_get_hw(dev, tx_info.skb);
	spin_lock_bh(&dev->rx_lock);
	ieee80211_tx_status_ext(hw, &status);
	spin_unlock_bh(&dev->rx_lock);

	return ret;
}

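/*
 * Refill an RX ring from the queue's page pool until it is full (one
 * slot is always left empty). Each buffer is synced for device access
 * and queued at q->buf_offset; the doorbell is only rung if at least
 * one frame was added.
 */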
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
		 bool allow_direct)
{
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int frames = 0;

	if (!q->ndesc)
		return 0;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		enum dma_data_direction dir;
		struct mt76_queue_buf qbuf;
		dma_addr_t addr;
		int offset;
		void *buf;

		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
		if (!buf)
			break;

		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
		dir = page_pool_get_dma_dir(q->page_pool);
		dma_sync_single_for_device(dev->dma_dev, addr, len, dir);

		qbuf.addr = addr + q->buf_offset;
		qbuf.len = len - q->buf_offset;
		qbuf.skip_unmap = false;
		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
			mt76_put_page_pool_buf(buf, allow_direct);
			break;
		}
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}

int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	struct mtk_wed_device *wed = &dev->mmio.wed;
	int ret, type, ring;
	u8 flags;

	if (!q || !q->ndesc)
		return -EINVAL;

	flags = q->flags;
	if (!mtk_wed_device_active(wed))
		q->flags &= ~MT_QFLAG_WED;

	if (!(q->flags & MT_QFLAG_WED))
		return 0;

	type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
	ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);

	switch (type) {
	case MT76_WED_Q_TX:
		ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs, reset);
		if (!ret)
			q->wed_regs = wed->tx_ring[ring].reg_base;
		break;
	case MT76_WED_Q_TXFREE:
		/* WED txfree queue needs ring to be initialized before setup */
		q->flags = 0;
		mt76_dma_queue_reset(dev, q);
		mt76_dma_rx_fill(dev, q, false);
		q->flags = flags;

		ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
		if (!ret)
			q->wed_regs = wed->txfree_ring.reg_base;
		break;
	case MT76_WED_Q_RX:
		ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, reset);
		if (!ret)
			q->wed_regs = wed->rx_ring[ring].reg_base;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
#else
	return 0;
#endif
}
EXPORT_SYMBOL_GPL(mt76_dma_wed_setup);

static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int ret, size;

	spin_lock_init(&q->lock);
	spin_lock_init(&q->cleanup_lock);

	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = q->ndesc * sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	ret = mt76_create_page_pool(dev, q);
	if (ret)
		return ret;

	ret = mt76_dma_wed_setup(dev, q, false);
	if (ret)
		return ret;

	if (q->flags != MT_WED_Q_TXFREE)
		mt76_dma_queue_reset(dev, q);

	return 0;
}

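/*
 * Drain an RX ring: dequeue every outstanding buffer in flush mode,
 * return it to the page pool and drop any partially assembled frame
 * still sitting in q->rx_head.
 */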
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	void *buf;
	bool more;

	if (!q->ndesc)
		return;

	spin_lock_bh(&q->lock);

	do {
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
		if (!buf)
			break;

		mt76_put_page_pool_buf(buf, false);
	} while (1);

	if (q->rx_head) {
		dev_kfree_skb(q->rx_head);
		q->rx_head = NULL;
	}

	spin_unlock_bh(&q->lock);
}

static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i;

	if (!q->ndesc)
		return;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	mt76_dma_rx_cleanup(dev, q);

	/* reset WED rx queues */
	mt76_dma_wed_setup(dev, q, true);
	if (q->flags != MT_WED_Q_TXFREE) {
		mt76_dma_sync_idx(dev, q);
		mt76_dma_rx_fill(dev, q, false);
	}
}

static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more, u32 info, bool allow_direct)
{
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;

	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
		struct page *page = virt_to_head_page(data);
		int offset = data - page_address(page) + q->buf_offset;

		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
	} else {
		mt76_put_page_pool_buf(data, allow_direct);
	}

	if (more)
		return;

	q->rx_head = NULL;
	if (nr_frags < ARRAY_SIZE(shinfo->frags))
		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
	else
		dev_kfree_skb(skb);
}

static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0, dma_idx;
	struct sk_buff *skb;
	unsigned char *data;
	bool check_ddone = false;
	bool allow_direct = !mt76_queue_is_wed_rx(q);
	bool more;

	if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
	    q->flags == MT_WED_Q_TXFREE) {
		dma_idx = Q_READ(dev, q, dma_idx);
		check_ddone = true;
	}

	while (done < budget) {
		bool drop = false;
		u32 info;

		if (check_ddone) {
			if (q->tail == dma_idx)
				dma_idx = Q_READ(dev, q, dma_idx);

			if (q->tail == dma_idx)
				break;
		}

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more,
					&drop);
		if (!data)
			break;

		if (drop)
			goto free_frag;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;
			goto free_frag;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more, info,
					  allow_direct);
			continue;
		}

		if (!more && dev->drv->rx_check &&
		    !(dev->drv->rx_check(dev, data, len)))
			goto free_frag;

		skb = napi_build_skb(data, q->buf_size);
		if (!skb)
			goto free_frag;

		skb_reserve(skb, q->buf_offset);
		skb_mark_for_recycle(skb);

		*(u32 *)skb->cb = info;

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
		continue;

free_frag:
		mt76_put_page_pool_buf(data, allow_direct);
	}

	mt76_dma_rx_fill(dev, q, true);
	return done;
}

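/*
 * NAPI poll handler: process up to @budget frames on the RX queue that
 * maps to this NAPI instance, then let the driver re-arm the queue via
 * its rx_poll_complete() callback once the budget is not exhausted.
 */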
int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget && napi_complete(napi))
		dev->drv->rx_poll_complete(dev, qid);

	return done;
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);

static int
mt76_dma_init(struct mt76_dev *dev,
	      int (*poll)(struct napi_struct *napi, int budget))
{
	int i;

	init_dummy_netdev(&dev->napi_dev);
	init_dummy_netdev(&dev->tx_napi_dev);
	snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
		 wiphy_name(dev->hw->wiphy));
	dev->napi_dev.threaded = 1;
	init_completion(&dev->mmio.wed_reset);
	init_completion(&dev->mmio.wed_reset_complete);

	mt76_for_each_q_rx(dev, i) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
		mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.reset_q = mt76_dma_queue_reset,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_cleanup = mt76_dma_rx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->tx_worker);
	netif_napi_del(&dev->tx_napi);

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *phy = dev->phys[i];
		int j;

		if (!phy)
			continue;

		for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++)
			mt76_dma_tx_cleanup(dev, phy->q_tx[j], true);
	}

	for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
		mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];

		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, q);

		page_pool_destroy(q->page_pool);
	}

	mt76_free_pending_txwi(dev);
	mt76_free_pending_rxwi(dev);

	if (mtk_wed_device_active(&dev->mmio.wed))
		mtk_wed_device_detach(&dev->mmio.wed);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);