// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock(&dev->lock);

	return t;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	while ((t = __mt76_get_txwi(dev)) != NULL)
		dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
}

static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int size;
	int i;

	spin_lock_init(&q->lock);

	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = q->ndesc * sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	/* clear descriptors */
	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	writel(q->desc_dma, &q->regs->desc_base);
	writel(0, &q->regs->cpu_idx);
	writel(0, &q->regs->dma_idx);
	writel(q->ndesc, &q->regs->ring_size);

	return 0;
}

static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;
	u32 ctrl;
	int i, idx = -1;

	if (txwi) {
		q->entry[q->head].txwi = DMA_DUMMY_DATA;
		q->entry[q->head].skip_buf0 = true;
	}

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		idx = q->head;
		q->head = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];
		entry = &q->entry[idx];

		if (buf[0].skip_unmap)
			entry->skip_buf0 = true;
		entry->skip_buf1 = i == nbufs - 1;

		entry->dma_addr[0] = buf[0].addr;
		entry->dma_len[0] = buf[0].len;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
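		/* Each hardware descriptor can point at two buffers; when
		 * another fragment follows, fill the second slot of this
		 * descriptor as well.
		 */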
		if (i < nbufs - 1) {
			entry->dma_addr[1] = buf[1].addr;
			entry->dma_len[1] = buf[1].len;
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
			if (buf[1].skip_unmap)
				entry->skip_buf1 = true;
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;

	return idx;
}

static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];

	if (!e->skip_buf0)
		dma_unmap_single(dev->dev, e->dma_addr[0], e->dma_len[0],
				 DMA_TO_DEVICE);

	if (!e->skip_buf1)
		dma_unmap_single(dev->dev, e->dma_addr[1], e->dma_len[1],
				 DMA_TO_DEVICE);

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;

	if (e->skb == DMA_DUMMY_DATA)
		e->skb = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	writel(q->desc_dma, &q->regs->desc_base);
	writel(q->ndesc, &q->regs->ring_size);
	q->head = readl(&q->regs->dma_idx);
	q->tail = q->head;
}

static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	wmb();
	writel(q->head, &q->regs->cpu_idx);
}

static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
{
	struct mt76_queue *q = dev->q_tx[qid];
	struct mt76_queue_entry entry;
	bool wake = false;
	int last;

	if (!q)
		return;

	if (flush)
		last = -1;
	else
		last = readl(&q->regs->dma_idx);

	while (q->queued > 0 && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		mt76_queue_tx_complete(dev, q, &entry);

		if (entry.txwi) {
			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
			wake = !flush;
		}

		if (!flush && q->tail == last)
			last = readl(&q->regs->dma_idx);
	}

	if (flush) {
		spin_lock_bh(&q->lock);
		mt76_dma_sync_idx(dev, q);
		mt76_dma_kick_queue(dev, q);
		spin_unlock_bh(&q->lock);
	}

	wake = wake && q->stopped &&
	       qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
	if (wake)
		q->stopped = false;

	if (!q->queued)
		wake_up(&dev->tx_wait);

	if (wake)
		ieee80211_wake_queue(dev->hw, qid);
}

static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	dma_addr_t buf_addr;
	void *buf = e->buf;
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);

	buf_addr = e->dma_addr[0];
	if (len) {
		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
		*more = !(ctl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
	e->buf = NULL;

	return buf;
}

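/* Pop the oldest entry off the ring.  When flushing, the DMA_DONE bit is
 * forced so pending buffers can be reclaimed even if the hardware never
 * completed the descriptor.
 */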
static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (flush)
		q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
		return NULL;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more);
}

static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue *q = dev->q_tx[qid];
	struct mt76_queue_buf buf;
	dma_addr_t addr;

	if (q->queued + 1 >= q->ndesc - 1)
		goto error;

	addr = dma_map_single(dev->dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev, addr)))
		goto error;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);
	return -ENOMEM;
}

static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
		      struct sk_buff *skb, struct mt76_wcid *wcid,
		      struct ieee80211_sta *sta)
{
	struct mt76_queue *q = dev->q_tx[qid];
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct ieee80211_hw *hw;
	int len, n = 0, ret = -ENOMEM;
	struct mt76_queue_entry e;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	t = mt76_get_txwi(dev);
	if (!t) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
		return -ENOMEM;
	}
	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
	/* fix tx_done accounting on queue overflow */
	if (tx_info.skb == dev->test.tx_skb)
		dev->test.tx_done--;
#endif

	e.skb = tx_info.skb;
	e.txwi = t;
	dev->drv->tx_complete_skb(dev, &e);
	mt76_put_txwi(dev, t);
	return ret;
}

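/* Refill the RX ring with page-fragment buffers until it is full or an
 * allocation fails, then notify the hardware about the newly queued buffers.
 */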
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
	dma_addr_t addr;
	void *buf;
	int frames = 0;
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int offset = q->buf_offset;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf;

		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
		if (!buf)
			break;

		addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev, addr))) {
			skb_free_frag(buf);
			break;
		}

		qbuf.addr = addr + offset;
		qbuf.len = len - offset;
		mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}

static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page *page;
	void *buf;
	bool more;

	spin_lock_bh(&q->lock);
	do {
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
		if (!buf)
			break;

		skb_free_frag(buf);
	} while (1);
	spin_unlock_bh(&q->lock);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	mt76_dma_rx_cleanup(dev, q);
	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill(dev, q);

	if (!q->rx_head)
		return;

	dev_kfree_skb(q->rx_head);
	q->rx_head = NULL;
}

static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more)
{
	struct page *page = virt_to_head_page(data);
	int offset = data - page_address(page);
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) {
		offset += q->buf_offset;
		skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len,
				q->buf_size);
	}

	if (more)
		return;

	q->rx_head = NULL;
	dev->drv->rx_skb(dev, q - dev->q_rx, skb);
}

static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0;
	struct sk_buff *skb;
	unsigned char *data;
	bool more;

	while (done < budget) {
		u32 info;

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
		if (!data)
			break;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;

			skb_free_frag(data);
			continue;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more);
			continue;
		}

		skb = build_skb(data, q->buf_size);
		if (!skb) {
			skb_free_frag(data);
			continue;
		}
		skb_reserve(skb, q->buf_offset);

		if (q == &dev->q_rx[MT_RXQ_MCU]) {
			u32 *rxfce = (u32 *)skb->cb;
			*rxfce = info;
		}

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
	}

	mt76_dma_rx_fill(dev, q);
	return done;
}

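/* NAPI poll handler: process up to @budget frames from the RX queue that
 * backs this NAPI context, completing NAPI and invoking the driver's
 * rx_poll_complete hook when the budget is not exhausted.
 */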
static int
mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	local_bh_disable();
	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();
	local_bh_enable();

	if (done < budget && napi_complete(napi))
		dev->drv->rx_poll_complete(dev, qid);

	return done;
}

static int
mt76_dma_init(struct mt76_dev *dev)
{
	int i;

	init_dummy_netdev(&dev->napi_dev);

	mt76_for_each_q_rx(dev, i) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
			       64);
		mt76_dma_rx_fill(dev, &dev->q_rx[i]);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->tx_worker);
	netif_napi_del(&dev->tx_napi);
	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
		mt76_dma_tx_cleanup(dev, i, true);

	mt76_for_each_q_rx(dev, i) {
		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
	}

	mt76_free_pending_txwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);