// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
 *
 * This file is written based on mt76/usb.c.
 *
 * Author: Felix Fietkau <nbd@nbd.name>
 *	   Lorenzo Bianconi <lorenzo@kernel.org>
 *	   Sean Wang <sean.wang@mediatek.com>
 */

#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/sched.h>
#include <linux/kthread.h>

#include "mt76.h"
#include "sdio.h"

/* Read the WHISR (host interrupt status) register.  Matches the
 * readx_poll_timeout() callback signature, so it can be polled below.
 * Note: the error pointer is NULL, so a bus error is indistinguishable
 * from a valid all-ones read here.
 */
static u32 mt76s_read_whisr(struct mt76_dev *dev)
{
	return sdio_readl(dev->sdio.func, MCR_WHISR, NULL);
}

/* Read the low-power control register (WHLPCR); used by callers to poll
 * driver/firmware ownership bits.
 */
u32 mt76s_read_pcr(struct mt76_dev *dev)
{
	struct mt76_sdio *sdio = &dev->sdio;

	return sdio_readl(sdio->func, MCR_WHLPCR, NULL);
}
EXPORT_SYMBOL_GPL(mt76s_read_pcr);

/* Read a chip register through the SDIO mailbox handshake:
 * write the target offset to H2DSM0R, raise the "read" software
 * interrupt via WSICR, wait for the device to acknowledge in WHISR,
 * ack it, verify the echoed offset, then fetch the value from D2HRM1R.
 * Returns ~0 on any failure or on an offset-echo mismatch.
 */
static u32 mt76s_read_mailbox(struct mt76_dev *dev, u32 offset)
{
	struct sdio_func *func = dev->sdio.func;
	u32 val = ~0, status;
	int err;

	sdio_claim_host(func);

	sdio_writel(func, offset, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting address [err=%d]\n", err);
		goto out;
	}

	sdio_writel(func, H2D_SW_INT_READ, MCR_WSICR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
		goto out;
	}

	/* Wait (up to 1s) for the device to flag read completion in WHISR */
	err = readx_poll_timeout(mt76s_read_whisr, dev, status,
				 status & H2D_SW_INT_READ, 0, 1000000);
	if (err < 0) {
		dev_err(dev->dev, "query whisr timeout\n");
		goto out;
	}

	/* Acknowledge the software interrupt */
	sdio_writel(func, H2D_SW_INT_READ, MCR_WHISR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
		goto out;
	}

	/* The device echoes the requested offset back through H2DSM0R */
	val = sdio_readl(func, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
		goto out;
	}

	if (val != offset) {
		dev_err(dev->dev, "register mismatch\n");
		val = ~0;
		goto out;
	}

	val = sdio_readl(func, MCR_D2HRM1R, &err);
	if (err < 0)
		dev_err(dev->dev, "failed reading d2hrm1r [err=%d]\n", err);

out:
	sdio_release_host(func);

	return val;
}

/* Write a chip register through the SDIO mailbox handshake (mirror of
 * mt76s_read_mailbox): offset -> H2DSM0R, value -> H2DSM1R, raise the
 * "write" software interrupt, wait for the WHISR acknowledge, ack it,
 * then verify the echoed offset.  Failures are logged but not returned.
 */
static void mt76s_write_mailbox(struct mt76_dev *dev, u32 offset, u32 val)
{
	struct sdio_func *func = dev->sdio.func;
	u32 status;
	int err;

	sdio_claim_host(func);

	sdio_writel(func, offset, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting address [err=%d]\n", err);
		goto out;
	}

	sdio_writel(func, val, MCR_H2DSM1R, &err);
	if (err < 0) {
		dev_err(dev->dev,
			"failed setting write value [err=%d]\n", err);
		goto out;
	}

	sdio_writel(func, H2D_SW_INT_WRITE, MCR_WSICR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
		goto out;
	}

	/* Wait (up to 1s) for the device to flag write completion in WHISR */
	err = readx_poll_timeout(mt76s_read_whisr, dev, status,
				 status & H2D_SW_INT_WRITE, 0, 1000000);
	if (err < 0) {
		dev_err(dev->dev, "query whisr timeout\n");
		goto out;
	}

	/* Acknowledge the software interrupt */
	sdio_writel(func, H2D_SW_INT_WRITE, MCR_WHISR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
		goto out;
	}

	/* Sanity-check the echoed offset; mismatch is logged only */
	val = sdio_readl(func, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
		goto out;
	}

	if (val != offset)
		dev_err(dev->dev, "register mismatch\n");

out:
	sdio_release_host(func);
}

/* Register read: once the MCU firmware is running, reads are routed
 * through the MCU ops; before that, the raw mailbox handshake is used.
 */
u32 mt76s_rr(struct mt76_dev *dev, u32 offset)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_rr(dev, offset);
	else
		return mt76s_read_mailbox(dev, offset);
}
EXPORT_SYMBOL_GPL(mt76s_rr);

/* Register write: same MCU-running routing as mt76s_rr() */
void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		dev->mcu_ops->mcu_wr(dev, offset, val);
	else
		mt76s_write_mailbox(dev, offset, val);
}
EXPORT_SYMBOL_GPL(mt76s_wr);

/* Read-modify-write: clear @mask bits, set @val bits, return the value
 * written.  Not atomic with respect to other register accessors.
 */
u32 mt76s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
{
	val |= mt76s_rr(dev, offset) & ~mask;
	mt76s_wr(dev, offset, val);

	return val;
}
EXPORT_SYMBOL_GPL(mt76s_rmw);

/* Copy @len bytes from @data to consecutive registers starting at
 * @offset, one u32 at a time.  A trailing partial word (len not a
 * multiple of 4) is ignored.
 */
void mt76s_write_copy(struct mt76_dev *dev, u32 offset,
		      const void *data, int len)
{
	const u32 *val = data;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		mt76s_wr(dev, offset, val[i]);
		offset += sizeof(u32);
	}
}
EXPORT_SYMBOL_GPL(mt76s_write_copy);

/* Copy @len bytes from consecutive registers starting at @offset into
 * @data, one u32 at a time (counterpart of mt76s_write_copy()).
 */
void mt76s_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len)
{
	u32 *val = data;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		val[i] = mt76s_rr(dev, offset);
		offset += sizeof(u32);
	}
}
EXPORT_SYMBOL_GPL(mt76s_read_copy);

/* Write @len register/value pairs; @base is unused on this bus.
 * Always returns 0 (mailbox errors are only logged).
 */
int mt76s_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data,
		int len)
{
	int i;

	for (i = 0; i < len; i++) {
		mt76s_wr(dev, data->reg, data->value);
		data++;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_wr_rp);

/* Read @len register/value pairs; @base is unused on this bus.
 * Always returns 0.
 */
int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
		struct mt76_reg_pair *data, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		data->value = mt76s_rr(dev, data->reg);
		data++;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_rd_rp);

/* Bring up the SDIO function: enable it, take driver ownership of the
 * device, set the 512-byte block size, enable and configure the WiFi
 * host interrupts (per-chip WHCR layout), and install the IRQ handler.
 * On failure the function is disabled again and the error returned.
 */
int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func, int hw_ver)
{
	u32 status, ctrl;
	int ret;

	dev->sdio.hw_ver = hw_ver;

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (ret < 0)
		goto release;

	/* Get ownership from the device */
	sdio_writel(func, WHLPCR_INT_EN_CLR | WHLPCR_FW_OWN_REQ_CLR,
		    MCR_WHLPCR, &ret);
	if (ret < 0)
		goto disable_func;

	ret = readx_poll_timeout(mt76s_read_pcr, dev, status,
				 status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
	if (ret < 0) {
		dev_err(dev->dev, "Cannot get ownership from device");
		goto disable_func;
	}

	ret = sdio_set_block_size(func, 512);
	if (ret < 0)
		goto disable_func;

	/* Enable interrupt */
	sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, &ret);
	if (ret < 0)
		goto disable_func;

	/* RX0-done and TX-done always; RX1-done only on CONNAC2 parts */
	ctrl = WHIER_RX0_DONE_INT_EN | WHIER_TX_DONE_INT_EN;
	if (hw_ver == MT76_CONNAC2_SDIO)
		ctrl |= WHIER_RX1_DONE_INT_EN;
	sdio_writel(func, ctrl, MCR_WHIER, &ret);
	if (ret < 0)
		goto disable_func;

	/* The WHCR bit layout differs between chip generations */
	switch (hw_ver) {
	case MT76_CONNAC_SDIO:
		/* set WHISR as read clear and Rx aggregation number as 16 */
		ctrl = FIELD_PREP(MAX_HIF_RX_LEN_NUM, 16);
		break;
	default:
		ctrl = sdio_readl(func, MCR_WHCR, &ret);
		if (ret < 0)
			goto disable_func;
		ctrl &= ~MAX_HIF_RX_LEN_NUM_CONNAC2;
		ctrl &= ~W_INT_CLR_CTRL; /* read clear */
		ctrl |= FIELD_PREP(MAX_HIF_RX_LEN_NUM_CONNAC2, 0);
		break;
	}

	sdio_writel(func, ctrl, MCR_WHCR, &ret);
	if (ret < 0)
		goto disable_func;

	ret = sdio_claim_irq(func, mt76s_sdio_irq);
	if (ret < 0)
		goto disable_func;

	sdio_release_host(func);

	return 0;

disable_func:
	sdio_disable_func(func);
release:
	sdio_release_host(func);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76s_hw_init);

/* Allocate the software rx queue @qid: a fixed ring of
 * MT76S_NUM_RX_ENTRIES entries (devm-managed, freed with the device).
 */
int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT76S_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->ndesc = MT76S_NUM_RX_ENTRIES;
	q->head = q->tail = 0;
	q->queued = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_alloc_rx_queue);

/* Allocate one tx queue with MT76S_NUM_TX_ENTRIES entries.
 * Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct mt76_queue *mt76s_alloc_tx_queue(struct mt76_dev *dev)
{
	struct mt76_queue *q;

	q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT76S_NUM_TX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return ERR_PTR(-ENOMEM);

	q->ndesc = MT76S_NUM_TX_ENTRIES;

	return q;
}

/* Allocate all tx queues: one per data AC up to MT_TXQ_PSD, plus the
 * MCU (WM) command queue.  Allocations are devm-managed, so nothing is
 * unwound explicitly on failure.
 */
int mt76s_alloc_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		q = mt76s_alloc_tx_queue(dev);
		if (IS_ERR(q))
			return PTR_ERR(q);

		dev->phy.q_tx[i] = q;
	}

	q = mt76s_alloc_tx_queue(dev);
	if (IS_ERR(q))
		return PTR_ERR(q);

	dev->q_mcu[MT_MCUQ_WM] = q;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_alloc_tx);

/* Pop the next filled entry off the rx ring tail, or NULL if the ring
 * is empty.  The entry itself is returned while only the indices are
 * updated under the lock; the caller consumes e->skb.
 */
static struct mt76_queue_entry *
mt76s_get_next_rx_entry(struct mt76_queue *q)
{
	struct mt76_queue_entry *e = NULL;

	spin_lock_bh(&q->lock);
	if (q->queued > 0) {
		e = &q->entry[q->tail];
		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_bh(&q->lock);

	return e;
}

/* Drain one rx queue, handing each skb to the driver's rx_skb()
 * callback.  Stops early if the device is no longer initialized.
 * Returns the number of frames processed.
 * NOTE(review): frames are always reported as MT_RXQ_MAIN to rx_skb()
 * even when draining another queue — presumably intentional for this
 * bus; confirm against the per-chip drivers.
 */
static int
mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int qid = q - &dev->q_rx[MT_RXQ_MAIN];
	int nframes = 0;

	while (true) {
		struct mt76_queue_entry *e;

		if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
			break;

		e = mt76s_get_next_rx_entry(q);
		if (!e || !e->skb)
			break;

		dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb, NULL);
		e->skb = NULL;
		nframes++;
	}
	if (qid == MT_RXQ_MAIN)
		mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	return nframes;
}

/* Worker that pushes received frames up the stack; loops over all rx
 * queues until a full pass processes no frames.  BHs are disabled to
 * mimic NAPI context for the rx path.
 */
static void mt76s_net_worker(struct mt76_worker *w)
{
	struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
					      net_worker);
	struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
	int i, nframes;

	do {
		nframes = 0;

		local_bh_disable();
		rcu_read_lock();

		mt76_for_each_q_rx(dev, i)
			nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]);

		rcu_read_unlock();
		local_bh_enable();
	} while (nframes > 0);
}

/* Complete finished tx entries on @q (those with ->done set), freeing
 * MCU command skbs directly and routing data frames through
 * mt76_queue_tx_complete().  Wakes tx waiters when the queue drains.
 * Returns the number of completed frames.
 */
static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76_queue_entry entry;
	int nframes = 0;
	bool mcu;

	if (!q)
		return 0;

	mcu = q == dev->q_mcu[MT_MCUQ_WM];
	while (q->queued > 0) {
		if (!q->entry[q->tail].done)
			break;

		entry = q->entry[q->tail];
		q->entry[q->tail].done = false;

		if (mcu) {
			/* MCU responses are not status-reported; just free */
			dev_kfree_skb(entry.skb);
			entry.skb = NULL;
		}

		mt76_queue_tx_complete(dev, q, &entry);
		nframes++;
	}

	if (!q->queued)
		wake_up(&dev->tx_wait);

	return nframes;
}

/* Worker that reaps tx completions for the MCU and all data queues.
 * If data frames completed, it reschedules the tx worker and — when
 * the driver provides tx_status_data and we are not suspended —
 * kicks the stat worker (guarded by MT76_READING_STATS).
 */
static void mt76s_status_worker(struct mt76_worker *w)
{
	struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
					      status_worker);
	struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
	bool resched = false;
	int i, nframes;

	do {
		int ndata_frames = 0;

		nframes = mt76s_process_tx_queue(dev, dev->q_mcu[MT_MCUQ_WM]);

		for (i = 0; i <= MT_TXQ_PSD; i++)
			ndata_frames += mt76s_process_tx_queue(dev,
							       dev->phy.q_tx[i]);
		nframes += ndata_frames;
		if (ndata_frames > 0)
			resched = true;

		if (dev->drv->tx_status_data && ndata_frames > 0 &&
		    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state) &&
		    !test_bit(MT76_STATE_SUSPEND, &dev->phy.state))
			mt76_worker_schedule(&sdio->stat_worker);
	} while (nframes > 0);

	if (resched)
		mt76_worker_schedule(&dev->tx_worker);
}

/* Stat worker: repeatedly ask the driver for tx status data until it
 * reports none left, or a reset/removal is in progress.  If anything
 * was read and the device is still running, bounce back to the status
 * worker; otherwise drop the MT76_READING_STATS guard taken by it.
 */
static void mt76s_tx_status_data(struct mt76_worker *worker)
{
	struct mt76_sdio *sdio;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	sdio = container_of(worker, struct mt76_sdio, stat_worker);
	dev = container_of(sdio, struct mt76_dev, sdio);

	while (true) {
		if (test_bit(MT76_RESET, &dev->phy.state) ||
		    test_bit(MT76_REMOVED, &dev->phy.state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
		mt76_worker_schedule(&sdio->status_worker);
	else
		clear_bit(MT76_READING_STATS, &dev->phy.state);
}

/* Queue a data frame on @q after letting the driver prepare it.
 * Returns the ring index used, or a negative errno (-ENOSPC when the
 * ring is full, or the tx_prepare_skb() error).
 * NOTE(review): unlike the raw variant below, no q->lock is taken
 * here — presumably the caller serializes; confirm against
 * mt76_queue_ops usage.
 */
static int
mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		   enum mt76_txq_id qid, struct sk_buff *skb,
		   struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	int err, len = skb->len;
	u16 idx = q->head;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	q->entry[q->head].skb = tx_info.skb;
	q->entry[q->head].buf_sz = len;
	q->entry[q->head].wcid = 0xffff;

	/* publish the entry before advancing the ring head */
	smp_wmb();

	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	return idx;
}

/* Queue a raw (MCU) frame: pad to a 4-byte boundary and push it on the
 * ring under q->lock.  Consumes @skb on both success and failure.
 * @tx_info is unused on this bus.
 */
static int
mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
		       struct sk_buff *skb, u32 tx_info)
{
	int ret = -ENOSPC, len = skb->len, pad;

	if (q->queued == q->ndesc)
		goto error;

	pad = round_up(skb->len, 4) - skb->len;
	ret = mt76_skb_adjust_pad(skb, pad);
	if (ret)
		goto error;

	spin_lock_bh(&q->lock);

	q->entry[q->head].buf_sz = len;
	q->entry[q->head].skb = skb;

	/* ensure the entry fully updated before bus access */
	smp_wmb();

	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);

	return ret;
}

/* "Kick" hook: hand queued frames to the txrx worker, which performs
 * the actual SDIO transfers.
 */
static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76_sdio *sdio = &dev->sdio;

	mt76_worker_schedule(&sdio->txrx_worker);
}

static const struct mt76_queue_ops sdio_queue_ops = {
	.tx_queue_skb = mt76s_tx_queue_skb,
	.kick = mt76s_tx_kick,
	.tx_queue_skb_raw = mt76s_tx_queue_skb_raw,
};

/* Tear down the SDIO transport: stop all workers, flush pending tx
 * status, release the SDIO interrupt, and free any skbs still sitting
 * in the rx rings.  Queue memory itself is devm-managed.
 */
void mt76s_deinit(struct mt76_dev *dev)
{
	struct mt76_sdio *sdio = &dev->sdio;
	int i;

	mt76_worker_teardown(&sdio->txrx_worker);
	mt76_worker_teardown(&sdio->status_worker);
	mt76_worker_teardown(&sdio->net_worker);
	mt76_worker_teardown(&sdio->stat_worker);

	clear_bit(MT76_READING_STATS, &dev->phy.state);

	mt76_tx_status_check(dev, true);

	sdio_claim_host(sdio->func);
	sdio_release_irq(sdio->func);
	sdio_release_host(sdio->func);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int j;

		for (j = 0; j < q->ndesc; j++) {
			struct mt76_queue_entry *e = &q->entry[j];

			if (!e->skb)
				continue;

			dev_kfree_skb(e->skb);
			e->skb = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(mt76s_deinit);

/* Initialize the SDIO transport: spawn the status/net/stat workers at
 * low FIFO priority, install the queue ops, and allocate the shared
 * xmit buffer bounded by what the SDIO host controller can transfer in
 * one request.  (The txrx_worker torn down in mt76s_deinit() is
 * presumably set up by the chip-specific driver — not here.)
 * Note: workers already created are not torn down on a later failure
 * in this function; callers are expected to invoke mt76s_deinit().
 */
int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
	       const struct mt76_bus_ops *bus_ops)
{
	struct mt76_sdio *sdio = &dev->sdio;
	u32 host_max_cap;
	int err;

	err = mt76_worker_setup(dev->hw, &sdio->status_worker,
				mt76s_status_worker, "sdio-status");
	if (err)
		return err;

	err = mt76_worker_setup(dev->hw, &sdio->net_worker, mt76s_net_worker,
				"sdio-net");
	if (err)
		return err;

	err = mt76_worker_setup(dev->hw, &sdio->stat_worker, mt76s_tx_status_data,
				"sdio-sta");
	if (err)
		return err;

	sched_set_fifo_low(sdio->status_worker.task);
	sched_set_fifo_low(sdio->net_worker.task);
	sched_set_fifo_low(sdio->stat_worker.task);

	dev->queue_ops = &sdio_queue_ops;
	dev->bus = bus_ops;
	dev->sdio.func = func;

	/* Cap the xmit buffer by the host controller's per-request limit */
	host_max_cap = min_t(u32, func->card->host->max_req_size,
			     func->cur_blksize *
			     func->card->host->max_blk_count);
	dev->sdio.xmit_buf_sz = min_t(u32, host_max_cap, MT76S_XMIT_BUF_SZ);
	dev->sdio.xmit_buf = devm_kmalloc(dev->dev, dev->sdio.xmit_buf_sz,
					  GFP_KERNEL);
	if (!dev->sdio.xmit_buf)
		err = -ENOMEM;

	return err;
}
EXPORT_SYMBOL_GPL(mt76s_init);

MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");