// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
 *
 * This file is written based on mt76/usb.c.
 *
 * Author: Felix Fietkau <nbd@nbd.name>
 *	   Lorenzo Bianconi <lorenzo@kernel.org>
 *	   Sean Wang <sean.wang@mediatek.com>
 */

#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/sched.h>
#include <linux/kthread.h>

#include "mt76.h"
#include "sdio.h"

static u32 mt76s_read_whisr(struct mt76_dev *dev)
{
	return sdio_readl(dev->sdio.func, MCR_WHISR, NULL);
}

u32 mt76s_read_pcr(struct mt76_dev *dev)
{
	struct mt76_sdio *sdio = &dev->sdio;

	return sdio_readl(sdio->func, MCR_WHLPCR, NULL);
}
EXPORT_SYMBOL_GPL(mt76s_read_pcr);

static u32 mt76s_read_mailbox(struct mt76_dev *dev, u32 offset)
{
	struct sdio_func *func = dev->sdio.func;
	u32 val = ~0, status;
	int err;

	sdio_claim_host(func);

	sdio_writel(func, offset, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting address [err=%d]\n", err);
		goto out;
	}

	sdio_writel(func, H2D_SW_INT_READ, MCR_WSICR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
		goto out;
	}

	err = readx_poll_timeout(mt76s_read_whisr, dev, status,
				 status & H2D_SW_INT_READ, 0, 1000000);
	if (err < 0) {
		dev_err(dev->dev, "query whisr timeout\n");
		goto out;
	}

	sdio_writel(func, H2D_SW_INT_READ, MCR_WHISR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
		goto out;
	}

	val = sdio_readl(func, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
		goto out;
	}

	if (val != offset) {
		dev_err(dev->dev, "register mismatch\n");
		val = ~0;
		goto out;
	}

	val = sdio_readl(func, MCR_D2HRM1R, &err);
	if (err < 0)
		dev_err(dev->dev, "failed reading d2hrm1r [err=%d]\n", err);

out:
	sdio_release_host(func);

	return val;
}

static void mt76s_write_mailbox(struct mt76_dev *dev, u32 offset, u32 val)
{
	struct sdio_func *func = dev->sdio.func;
	u32 status;
	int err;

	sdio_claim_host(func);

	sdio_writel(func, offset, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting address [err=%d]\n", err);
		goto out;
	}

	sdio_writel(func, val, MCR_H2DSM1R, &err);
	if (err < 0) {
		dev_err(dev->dev,
			"failed setting write value [err=%d]\n", err);
		goto out;
	}

	sdio_writel(func, H2D_SW_INT_WRITE, MCR_WSICR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
		goto out;
	}

	err = readx_poll_timeout(mt76s_read_whisr, dev, status,
				 status & H2D_SW_INT_WRITE, 0, 1000000);
	if (err < 0) {
		dev_err(dev->dev, "query whisr timeout\n");
		goto out;
	}

	sdio_writel(func, H2D_SW_INT_WRITE, MCR_WHISR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
		goto out;
	}

	val = sdio_readl(func, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
		goto out;
	}

	if (val != offset)
		dev_err(dev->dev, "register mismatch\n");

out:
	sdio_release_host(func);
}
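/*
 * Generic register accessors. Before the MCU is running, register reads
 * and writes go through the mailbox handshake implemented above: the
 * target address is written to MCR_H2DSM0R, a software interrupt
 * (H2D_SW_INT_READ/WRITE) is raised via MCR_WSICR, MCR_WHISR is polled
 * for completion and acknowledged, and the value is then exchanged
 * through the H2DSM1R/D2HRM1R data mailboxes. Once
 * MT76_STATE_MCU_RUNNING is set, accesses are relayed to the firmware
 * through the chip-specific mcu_rr/mcu_wr ops instead.
 *
 * A sketch of a typical read-modify-write built on these helpers
 * ("reg" is a placeholder here, not a real MCR_* define):
 *
 *	mt76s_rmw(dev, reg, GENMASK(3, 0), FIELD_PREP(GENMASK(3, 0), 5));
 *
 * updates only bits 3:0 and preserves the rest of the word.
 */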
u32 mt76s_rr(struct mt76_dev *dev, u32 offset)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_rr(dev, offset);
	else
		return mt76s_read_mailbox(dev, offset);
}
EXPORT_SYMBOL_GPL(mt76s_rr);

void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		dev->mcu_ops->mcu_wr(dev, offset, val);
	else
		mt76s_write_mailbox(dev, offset, val);
}
EXPORT_SYMBOL_GPL(mt76s_wr);

u32 mt76s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
{
	val |= mt76s_rr(dev, offset) & ~mask;
	mt76s_wr(dev, offset, val);

	return val;
}
EXPORT_SYMBOL_GPL(mt76s_rmw);

void mt76s_write_copy(struct mt76_dev *dev, u32 offset,
		      const void *data, int len)
{
	const u32 *val = data;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		mt76s_wr(dev, offset, val[i]);
		offset += sizeof(u32);
	}
}
EXPORT_SYMBOL_GPL(mt76s_write_copy);

void mt76s_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len)
{
	u32 *val = data;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		val[i] = mt76s_rr(dev, offset);
		offset += sizeof(u32);
	}
}
EXPORT_SYMBOL_GPL(mt76s_read_copy);

int mt76s_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data,
		int len)
{
	int i;

	for (i = 0; i < len; i++) {
		mt76s_wr(dev, data->reg, data->value);
		data++;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_wr_rp);

int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
		struct mt76_reg_pair *data, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		data->value = mt76s_rr(dev, data->reg);
		data++;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_rd_rp);

int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func, int hw_ver)
{
	u32 status, ctrl;
	int ret;

	dev->sdio.hw_ver = hw_ver;

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (ret < 0)
		goto release;

	/* Get ownership from the device */
	sdio_writel(func, WHLPCR_INT_EN_CLR | WHLPCR_FW_OWN_REQ_CLR,
		    MCR_WHLPCR, &ret);
	if (ret < 0)
		goto disable_func;

	ret = readx_poll_timeout(mt76s_read_pcr, dev, status,
				 status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
	if (ret < 0) {
		dev_err(dev->dev, "Cannot get ownership from device");
		goto disable_func;
	}

	ret = sdio_set_block_size(func, 512);
	if (ret < 0)
		goto disable_func;

	/* Enable interrupt */
	sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, &ret);
	if (ret < 0)
		goto disable_func;

	ctrl = WHIER_RX0_DONE_INT_EN | WHIER_TX_DONE_INT_EN;
	if (hw_ver == MT76_CONNAC2_SDIO)
		ctrl |= WHIER_RX1_DONE_INT_EN;
	sdio_writel(func, ctrl, MCR_WHIER, &ret);
	if (ret < 0)
		goto disable_func;

	switch (hw_ver) {
	case MT76_CONNAC_SDIO:
		/* set WHISR as read clear and Rx aggregation number as 16 */
		ctrl = FIELD_PREP(MAX_HIF_RX_LEN_NUM, 16);
		break;
	default:
		ctrl = sdio_readl(func, MCR_WHCR, &ret);
		if (ret < 0)
			goto disable_func;
		ctrl &= ~MAX_HIF_RX_LEN_NUM_CONNAC2;
		ctrl &= ~W_INT_CLR_CTRL; /* read clear */
		ctrl |= FIELD_PREP(MAX_HIF_RX_LEN_NUM_CONNAC2, 0);
		break;
	}

	sdio_writel(func, ctrl, MCR_WHCR, &ret);
	if (ret < 0)
		goto disable_func;

	ret = sdio_claim_irq(func, mt76s_sdio_irq);
	if (ret < 0)
		goto disable_func;

	sdio_release_host(func);

	return 0;

disable_func:
	sdio_disable_func(func);
release:
	sdio_release_host(func);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76s_hw_init);
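/*
 * Queue helpers. SDIO has no DMA descriptor rings, so TX and RX queues
 * are plain circular arrays of mt76_queue_entry slots
 * (MT76S_NUM_RX_ENTRIES / MT76S_NUM_TX_ENTRIES deep): q->head is the
 * producer index and q->tail the consumer index, both advancing modulo
 * q->ndesc, with q->queued tracking occupancy under q->lock.
 */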
int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT76S_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->ndesc = MT76S_NUM_RX_ENTRIES;
	q->head = q->tail = 0;
	q->queued = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_alloc_rx_queue);

static struct mt76_queue *mt76s_alloc_tx_queue(struct mt76_dev *dev)
{
	struct mt76_queue *q;

	q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT76S_NUM_TX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return ERR_PTR(-ENOMEM);

	q->ndesc = MT76S_NUM_TX_ENTRIES;

	return q;
}

int mt76s_alloc_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		q = mt76s_alloc_tx_queue(dev);
		if (IS_ERR(q))
			return PTR_ERR(q);

		q->qid = i;
		dev->phy.q_tx[i] = q;
	}

	q = mt76s_alloc_tx_queue(dev);
	if (IS_ERR(q))
		return PTR_ERR(q);

	q->qid = MT_MCUQ_WM;
	dev->q_mcu[MT_MCUQ_WM] = q;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_alloc_tx);

static struct mt76_queue_entry *
mt76s_get_next_rx_entry(struct mt76_queue *q)
{
	struct mt76_queue_entry *e = NULL;

	spin_lock_bh(&q->lock);
	if (q->queued > 0) {
		e = &q->entry[q->tail];
		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_bh(&q->lock);

	return e;
}

static int
mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int qid = q - &dev->q_rx[MT_RXQ_MAIN];
	int nframes = 0;

	while (true) {
		struct mt76_queue_entry *e;

		if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
			break;

		e = mt76s_get_next_rx_entry(q);
		if (!e || !e->skb)
			break;

		dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb);
		e->skb = NULL;
		nframes++;
	}
	if (qid == MT_RXQ_MAIN)
		mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	return nframes;
}

static void mt76s_net_worker(struct mt76_worker *w)
{
	struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
					      net_worker);
	struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
	int i, nframes;

	do {
		nframes = 0;

		local_bh_disable();
		rcu_read_lock();

		mt76_for_each_q_rx(dev, i)
			nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]);

		rcu_read_unlock();
		local_bh_enable();
	} while (nframes > 0);
}
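/*
 * TX completion path. Entries are flagged done elsewhere (in the bus
 * txrx path) once their payload has been handed to the SDIO host
 * controller; the status worker below then reaps them in ring order.
 * MCU frames are owned by this layer and freed directly, while data
 * frames go through mt76_queue_tx_complete() so mac80211 receives its
 * TX status callbacks.
 */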
static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76_queue_entry entry;
	int nframes = 0;
	bool mcu;

	if (!q)
		return 0;

	mcu = q == dev->q_mcu[MT_MCUQ_WM];
	while (q->queued > 0) {
		if (!q->entry[q->tail].done)
			break;

		entry = q->entry[q->tail];
		q->entry[q->tail].done = false;

		if (mcu) {
			dev_kfree_skb(entry.skb);
			entry.skb = NULL;
		}

		mt76_queue_tx_complete(dev, q, &entry);
		nframes++;
	}

	if (!q->queued)
		wake_up(&dev->tx_wait);

	return nframes;
}

static void mt76s_status_worker(struct mt76_worker *w)
{
	struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
					      status_worker);
	struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
	bool resched = false;
	int i, nframes;

	do {
		int ndata_frames = 0;

		nframes = mt76s_process_tx_queue(dev, dev->q_mcu[MT_MCUQ_WM]);

		for (i = 0; i <= MT_TXQ_PSD; i++)
			ndata_frames += mt76s_process_tx_queue(dev,
							       dev->phy.q_tx[i]);
		nframes += ndata_frames;
		if (ndata_frames > 0)
			resched = true;

		if (dev->drv->tx_status_data &&
		    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state) &&
		    !test_bit(MT76_STATE_SUSPEND, &dev->phy.state))
			queue_work(dev->wq, &dev->sdio.stat_work);
	} while (nframes > 0);

	if (resched)
		mt76_worker_schedule(&dev->sdio.txrx_worker);
}

static void mt76s_tx_status_data(struct work_struct *work)
{
	struct mt76_sdio *sdio;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	sdio = container_of(work, struct mt76_sdio, stat_work);
	dev = container_of(sdio, struct mt76_dev, sdio);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
		queue_work(dev->wq, &sdio->stat_work);
	else
		clear_bit(MT76_READING_STATS, &dev->phy.state);
}

static int
mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	int err, len = skb->len;
	u16 idx = q->head;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, q->qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	q->entry[q->head].skb = tx_info.skb;
	q->entry[q->head].buf_sz = len;
	q->entry[q->head].wcid = 0xffff;

	smp_wmb();

	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	return idx;
}

static int
mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
		       struct sk_buff *skb, u32 tx_info)
{
	int ret = -ENOSPC, len = skb->len, pad;

	if (q->queued == q->ndesc)
		goto error;

	pad = round_up(skb->len, 4) - skb->len;
	ret = mt76_skb_adjust_pad(skb, pad);
	if (ret)
		goto error;

	spin_lock_bh(&q->lock);

	q->entry[q->head].buf_sz = len;
	q->entry[q->head].skb = skb;
	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);

	return ret;
}

static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76_sdio *sdio = &dev->sdio;

	mt76_worker_schedule(&sdio->txrx_worker);
}

static const struct mt76_queue_ops sdio_queue_ops = {
	.tx_queue_skb = mt76s_tx_queue_skb,
	.kick = mt76s_tx_kick,
	.tx_queue_skb_raw = mt76s_tx_queue_skb_raw,
};
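/*
 * Teardown. Workers are stopped before the queues are touched, pending
 * TX status work is flushed, the SDIO interrupt is released with the
 * host claimed, and any RX skbs still sitting in the rings are freed.
 */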
void mt76s_deinit(struct mt76_dev *dev)
{
	struct mt76_sdio *sdio = &dev->sdio;
	int i;

	mt76_worker_teardown(&sdio->txrx_worker);
	mt76_worker_teardown(&sdio->status_worker);
	mt76_worker_teardown(&sdio->net_worker);

	cancel_work_sync(&sdio->stat_work);
	clear_bit(MT76_READING_STATS, &dev->phy.state);

	mt76_tx_status_check(dev, true);

	sdio_claim_host(sdio->func);
	sdio_release_irq(sdio->func);
	sdio_release_host(sdio->func);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int j;

		for (j = 0; j < q->ndesc; j++) {
			struct mt76_queue_entry *e = &q->entry[j];

			if (!e->skb)
				continue;

			dev_kfree_skb(e->skb);
			e->skb = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(mt76s_deinit);

int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
	       const struct mt76_bus_ops *bus_ops)
{
	struct mt76_sdio *sdio = &dev->sdio;
	int err;

	err = mt76_worker_setup(dev->hw, &sdio->status_worker,
				mt76s_status_worker, "sdio-status");
	if (err)
		return err;

	err = mt76_worker_setup(dev->hw, &sdio->net_worker, mt76s_net_worker,
				"sdio-net");
	if (err)
		return err;

	sched_set_fifo_low(sdio->status_worker.task);
	sched_set_fifo_low(sdio->net_worker.task);

	INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);

	dev->queue_ops = &sdio_queue_ops;
	dev->bus = bus_ops;
	dev->sdio.func = func;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_init);

MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");