// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 MediaTek Inc.

/*
 * Bluetooth support for MediaTek SDIO devices
 *
 * This file is written based on btsdio.c and btmtkuart.c.
 *
 * Author: Sean Wang <sean.wang@mediatek.com>
 *
 */

#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>

#include <linux/mmc/host.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "h4_recv.h"
#include "btmtk.h"

#define VERSION "0.1"

#define MTKBTSDIO_AUTOSUSPEND_DELAY	8000

static bool enable_autosuspend;

struct btmtksdio_data {
	const char *fwname;
	u16 chipid;
};

static const struct btmtksdio_data mt7663_data = {
	.fwname = FIRMWARE_MT7663,
	.chipid = 0x7663,
};

static const struct btmtksdio_data mt7668_data = {
	.fwname = FIRMWARE_MT7668,
	.chipid = 0x7668,
};

static const struct btmtksdio_data mt7921_data = {
	.fwname = FIRMWARE_MT7961,
	.chipid = 0x7921,
};

static const struct sdio_device_id btmtksdio_table[] = {
	{SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, SDIO_DEVICE_ID_MEDIATEK_MT7663),
	 .driver_data = (kernel_ulong_t)&mt7663_data },
	{SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, SDIO_DEVICE_ID_MEDIATEK_MT7668),
	 .driver_data = (kernel_ulong_t)&mt7668_data },
	{SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, SDIO_DEVICE_ID_MEDIATEK_MT7961),
	 .driver_data = (kernel_ulong_t)&mt7921_data },
	{ }	/* Terminating entry */
};
MODULE_DEVICE_TABLE(sdio, btmtksdio_table);

#define MTK_REG_CHLPCR		0x4	/* W1S */
#define C_INT_EN_SET		BIT(0)
#define C_INT_EN_CLR		BIT(1)
#define C_FW_OWN_REQ_SET	BIT(8)	/* For write */
#define C_COM_DRV_OWN		BIT(8)	/* For read */
#define C_FW_OWN_REQ_CLR	BIT(9)

#define MTK_REG_CSDIOCSR	0x8
#define SDIO_RE_INIT_EN		BIT(0)
#define SDIO_INT_CTL		BIT(2)

#define MTK_REG_CHCR		0xc
#define C_INT_CLR_CTRL		BIT(1)

/* CHISR has the same bit field definition as CHIER */
#define MTK_REG_CHISR		0x10
#define MTK_REG_CHIER		0x14
#define FW_OWN_BACK_INT		BIT(0)
#define RX_DONE_INT		BIT(1)
#define TX_EMPTY		BIT(2)
#define TX_FIFO_OVERFLOW	BIT(8)
#define RX_PKT_LEN		GENMASK(31, 16)

#define MTK_REG_CTDR		0x18

#define MTK_REG_CRDR		0x1c

#define MTK_REG_CRPLR		0x24

#define MTK_SDIO_BLOCK_SIZE	256

#define BTMTKSDIO_TX_WAIT_VND_EVT	1
#define BTMTKSDIO_HW_TX_READY		2
#define BTMTKSDIO_FUNC_ENABLED		3

struct mtkbtsdio_hdr {
	__le16	len;
	__le16	reserved;
	u8	bt_type;
} __packed;

struct btmtksdio_dev {
	struct hci_dev *hdev;
	struct sdio_func *func;
	struct device *dev;

	struct work_struct txrx_work;
	unsigned long tx_state;
	struct sk_buff_head txq;

	struct sk_buff *evt_skb;

	const struct btmtksdio_data *data;
};

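/* MediaTek WMT commands are carried in the HCI vendor command 0xfc6f and
 * are completed by a vendor specific event rather than the usual Command
 * Complete/Status. mtk_hci_wmt_sync() builds the WMT header, sets
 * BTMTKSDIO_TX_WAIT_VND_EVT, sends the command and sleeps until the event
 * path clears that bit and leaves the event in bdev->evt_skb for parsing.
 */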
static int mtk_hci_wmt_sync(struct hci_dev *hdev,
			    struct btmtk_hci_wmt_params *wmt_params)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
	struct btmtk_hci_wmt_evt_reg *wmt_evt_reg;
	u32 hlen, status = BTMTK_WMT_INVALID;
	struct btmtk_hci_wmt_evt *wmt_evt;
	struct btmtk_hci_wmt_cmd *wc;
	struct btmtk_wmt_hdr *hdr;
	int err;

	/* Send the WMT command and wait until the WMT event returns */
	hlen = sizeof(*hdr) + wmt_params->dlen;
	if (hlen > 255)
		return -EINVAL;

	wc = kzalloc(hlen, GFP_KERNEL);
	if (!wc)
		return -ENOMEM;

	hdr = &wc->hdr;
	hdr->dir = 1;
	hdr->op = wmt_params->op;
	hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
	hdr->flag = wmt_params->flag;
	memcpy(wc->data, wmt_params->data, wmt_params->dlen);

	set_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);

	err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);
	if (err < 0) {
		clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
		goto err_free_wc;
	}

	/* The vendor specific WMT commands are all answered by a vendor
	 * specific event and will not have the Command Status or Command
	 * Complete as with usual HCI command flow control.
	 *
	 * After sending the command, wait for BTMTKSDIO_TX_WAIT_VND_EVT
	 * state to be cleared. The driver specific event receive routine
	 * will clear that state and with that indicate completion of the
	 * WMT command.
	 */
	err = wait_on_bit_timeout(&bdev->tx_state, BTMTKSDIO_TX_WAIT_VND_EVT,
				  TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
	if (err == -EINTR) {
		bt_dev_err(hdev, "Execution of wmt command interrupted");
		clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
		goto err_free_wc;
	}

	if (err) {
		bt_dev_err(hdev, "Execution of wmt command timed out");
		clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
		err = -ETIMEDOUT;
		goto err_free_wc;
	}

	/* Parse and handle the return WMT event */
	wmt_evt = (struct btmtk_hci_wmt_evt *)bdev->evt_skb->data;
	if (wmt_evt->whdr.op != hdr->op) {
		bt_dev_err(hdev, "Wrong op received %d expected %d",
			   wmt_evt->whdr.op, hdr->op);
		err = -EIO;
		goto err_free_skb;
	}

	switch (wmt_evt->whdr.op) {
	case BTMTK_WMT_SEMAPHORE:
		if (wmt_evt->whdr.flag == 2)
			status = BTMTK_WMT_PATCH_UNDONE;
		else
			status = BTMTK_WMT_PATCH_DONE;
		break;
	case BTMTK_WMT_FUNC_CTRL:
		wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
		if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
			status = BTMTK_WMT_ON_DONE;
		else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420)
			status = BTMTK_WMT_ON_PROGRESS;
		else
			status = BTMTK_WMT_ON_UNDONE;
		break;
	case BTMTK_WMT_PATCH_DWNLD:
		if (wmt_evt->whdr.flag == 2)
			status = BTMTK_WMT_PATCH_DONE;
		else if (wmt_evt->whdr.flag == 1)
			status = BTMTK_WMT_PATCH_PROGRESS;
		else
			status = BTMTK_WMT_PATCH_UNDONE;
		break;
	case BTMTK_WMT_REGISTER:
		wmt_evt_reg = (struct btmtk_hci_wmt_evt_reg *)wmt_evt;
		if (le16_to_cpu(wmt_evt->whdr.dlen) == 12)
			status = le32_to_cpu(wmt_evt_reg->val);
		break;
	}

	if (wmt_params->status)
		*wmt_params->status = status;

err_free_skb:
	kfree_skb(bdev->evt_skb);
	bdev->evt_skb = NULL;
err_free_wc:
	kfree(wc);

	return err;
}

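/* TX path: each Bluetooth packet gets a MediaTek SDIO header prepended
 * (total length, a reserved word and the HCI packet type) and the transfer
 * is rounded up to the 256-byte SDIO block size before being written to
 * the TX data register MTK_REG_CTDR.
 */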
static int btmtksdio_tx_packet(struct btmtksdio_dev *bdev,
			       struct sk_buff *skb)
{
	struct mtkbtsdio_hdr *sdio_hdr;
	int err;

	/* Make sure that there is enough room for the SDIO header */
	if (unlikely(skb_headroom(skb) < sizeof(*sdio_hdr))) {
		err = pskb_expand_head(skb, sizeof(*sdio_hdr), 0,
				       GFP_ATOMIC);
		if (err < 0)
			return err;
	}

	/* Prepend MediaTek SDIO Specific Header */
	skb_push(skb, sizeof(*sdio_hdr));

	sdio_hdr = (void *)skb->data;
	sdio_hdr->len = cpu_to_le16(skb->len);
	sdio_hdr->reserved = cpu_to_le16(0);
	sdio_hdr->bt_type = hci_skb_pkt_type(skb);

	clear_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state);
	err = sdio_writesb(bdev->func, MTK_REG_CTDR, skb->data,
			   round_up(skb->len, MTK_SDIO_BLOCK_SIZE));
	if (err < 0)
		goto err_skb_pull;

	bdev->hdev->stat.byte_tx += skb->len;

	kfree_skb(skb);

	return 0;

err_skb_pull:
	skb_pull(skb, sizeof(*sdio_hdr));

	return err;
}

static u32 btmtksdio_drv_own_query(struct btmtksdio_dev *bdev)
{
	return sdio_readl(bdev->func, MTK_REG_CHLPCR, NULL);
}

static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	struct hci_event_hdr *hdr = (void *)skb->data;
	int err;

	/* Fix up the vendor event id with 0xff for vendor specific instead
	 * of 0xe4 so that events sent via the monitoring socket can be
	 * parsed properly.
	 */
	if (hdr->evt == 0xe4)
		hdr->evt = HCI_EV_VENDOR;

	/* When someone is waiting for the WMT event, the skb is cloned and
	 * the event is then processed from that clone.
	 */
	if (test_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state)) {
		bdev->evt_skb = skb_clone(skb, GFP_KERNEL);
		if (!bdev->evt_skb) {
			err = -ENOMEM;
			goto err_out;
		}
	}

	err = hci_recv_frame(hdev, skb);
	if (err < 0)
		goto err_free_skb;

	if (hdr->evt == HCI_EV_VENDOR) {
		if (test_and_clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT,
				       &bdev->tx_state)) {
			/* Barrier to sync with other CPUs */
			smp_mb__after_atomic();
			wake_up_bit(&bdev->tx_state, BTMTKSDIO_TX_WAIT_VND_EVT);
		}
	}

	return 0;

err_free_skb:
	kfree_skb(bdev->evt_skb);
	bdev->evt_skb = NULL;

err_out:
	return err;
}

static int btmtksdio_recv_acl(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	u16 handle = le16_to_cpu(hci_acl_hdr(skb)->handle);

	switch (handle) {
	case 0xfc6f:
		/* Firmware dump from device: when the firmware hangs, the
		 * device can no longer suspend, so disable auto-suspend.
		 */
		pm_runtime_forbid(bdev->dev);
		fallthrough;
	case 0x05ff:
	case 0x05fe:
		/* Firmware debug logging */
		return hci_recv_diag(hdev, skb);
	}

	return hci_recv_frame(hdev, skb);
}

static const struct h4_recv_pkt mtk_recv_pkts[] = {
	{ H4_RECV_ACL,      .recv = btmtksdio_recv_acl },
	{ H4_RECV_SCO,      .recv = hci_recv_frame },
	{ H4_RECV_EVENT,    .recv = btmtksdio_recv_event },
};

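/* RX path: one SDIO read from MTK_REG_CRDR carries exactly one Bluetooth
 * packet. The size reported by MTK_REG_CRPLR must match the length field
 * in the MediaTek SDIO header; once the header is stripped, any trailing
 * padding is trimmed based on the payload length taken from the H4 packet
 * header before the frame is handed to the core.
 */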
static int btmtksdio_rx_packet(struct btmtksdio_dev *bdev, u16 rx_size)
{
	const struct h4_recv_pkt *pkts = mtk_recv_pkts;
	int pkts_count = ARRAY_SIZE(mtk_recv_pkts);
	struct mtkbtsdio_hdr *sdio_hdr;
	int err, i, pad_size;
	struct sk_buff *skb;
	u16 dlen;

	if (rx_size < sizeof(*sdio_hdr))
		return -EILSEQ;

	/* An SDIO packet contains exactly one Bluetooth packet */
	skb = bt_skb_alloc(rx_size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, rx_size);

	err = sdio_readsb(bdev->func, skb->data, MTK_REG_CRDR, rx_size);
	if (err < 0)
		goto err_kfree_skb;

	sdio_hdr = (void *)skb->data;

	/* Assume -EILSEQ as the default error simply to keep the error path
	 * cleaner.
	 */
	err = -EILSEQ;

	if (rx_size != le16_to_cpu(sdio_hdr->len)) {
		bt_dev_err(bdev->hdev, "Rx size in sdio header is mismatched");
		goto err_kfree_skb;
	}

	hci_skb_pkt_type(skb) = sdio_hdr->bt_type;

	/* Remove MediaTek SDIO header */
	skb_pull(skb, sizeof(*sdio_hdr));

	/* We have to dig into the packet to get the payload size and thus
	 * know how many padding bytes are at the tail; these padding bytes
	 * must be removed before the packet is indicated to the core layer.
	 */
	for (i = 0; i < pkts_count; i++) {
		if (sdio_hdr->bt_type == (&pkts[i])->type)
			break;
	}

	if (i >= pkts_count) {
		bt_dev_err(bdev->hdev, "Invalid bt type 0x%02x",
			   sdio_hdr->bt_type);
		goto err_kfree_skb;
	}

	/* Remaining bytes cannot hold a header */
	if (skb->len < (&pkts[i])->hlen) {
		bt_dev_err(bdev->hdev, "The size of bt header is mismatched");
		goto err_kfree_skb;
	}

	switch ((&pkts[i])->lsize) {
	case 1:
		dlen = skb->data[(&pkts[i])->loff];
		break;
	case 2:
		dlen = get_unaligned_le16(skb->data +
					  (&pkts[i])->loff);
		break;
	default:
		goto err_kfree_skb;
	}

	pad_size = skb->len - (&pkts[i])->hlen - dlen;

	/* Remaining bytes cannot hold a payload */
	if (pad_size < 0) {
		bt_dev_err(bdev->hdev, "The size of bt payload is mismatched");
		goto err_kfree_skb;
	}

	/* Remove padding bytes */
	skb_trim(skb, skb->len - pad_size);

	/* Complete frame */
	(&pkts[i])->recv(bdev->hdev, skb);

	bdev->hdev->stat.byte_rx += rx_size;

	return 0;

err_kfree_skb:
	kfree_skb(skb);

	return err;
}

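/* All SDIO bus traffic is funnelled through a single work item. The SDIO
 * interrupt handler only masks the interrupt and schedules this work,
 * which polls and acks CHISR, drains the TX queue once TX_EMPTY has been
 * seen, reads RX packets on RX_DONE_INT and finally unmasks the interrupt
 * again.
 */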
static void btmtksdio_txrx_work(struct work_struct *work)
{
	struct btmtksdio_dev *bdev = container_of(work, struct btmtksdio_dev,
						  txrx_work);
	unsigned long txrx_timeout;
	u32 int_status, rx_size;
	struct sk_buff *skb;
	int err;

	pm_runtime_get_sync(bdev->dev);

	sdio_claim_host(bdev->func);

	/* Disable interrupt */
	sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, 0);

	txrx_timeout = jiffies + 5 * HZ;

	do {
		int_status = sdio_readl(bdev->func, MTK_REG_CHISR, NULL);

		/* Ack an interrupt as soon as possible before any operation on
		 * hardware.
		 *
		 * Note that we don't ack any status during operations to avoid a
		 * race condition between the host and the device: acking RX_DONE
		 * for the next packet by mistake would cause interrupts not to be
		 * raised again while there is still pending data in the hardware
		 * FIFO.
		 */
		sdio_writel(bdev->func, int_status, MTK_REG_CHISR, NULL);

		if (int_status & FW_OWN_BACK_INT)
			bt_dev_dbg(bdev->hdev, "Get fw own back");

		if (int_status & TX_EMPTY)
			set_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state);

		else if (unlikely(int_status & TX_FIFO_OVERFLOW))
			bt_dev_warn(bdev->hdev, "Tx fifo overflow");

		if (test_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state)) {
			skb = skb_dequeue(&bdev->txq);
			if (skb) {
				err = btmtksdio_tx_packet(bdev, skb);
				if (err < 0) {
					bdev->hdev->stat.err_tx++;
					skb_queue_head(&bdev->txq, skb);
				}
			}
		}

		if (int_status & RX_DONE_INT) {
			rx_size = sdio_readl(bdev->func, MTK_REG_CRPLR, NULL);
			rx_size = (rx_size & RX_PKT_LEN) >> 16;
			if (btmtksdio_rx_packet(bdev, rx_size) < 0)
				bdev->hdev->stat.err_rx++;
		}
	} while (int_status || time_is_before_jiffies(txrx_timeout));

	/* Enable interrupt */
	sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, 0);

	sdio_release_host(bdev->func);

	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);
}

static void btmtksdio_interrupt(struct sdio_func *func)
{
	struct btmtksdio_dev *bdev = sdio_get_drvdata(func);

	/* Disable interrupt */
	sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, 0);

	schedule_work(&bdev->txrx_work);
}

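/* Opening the interface enables the SDIO function, takes driver ownership
 * of the device (C_FW_OWN_REQ_CLR, then polling for C_COM_DRV_OWN),
 * registers the SDIO interrupt handler, sets the 256-byte block size and
 * configures CSDIOCSR/CHCR as well as the CHIER interrupt sources before
 * enabling the interrupt.
 */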
static int btmtksdio_open(struct hci_dev *hdev)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	u32 status, val;
	int err;

	sdio_claim_host(bdev->func);

	err = sdio_enable_func(bdev->func);
	if (err < 0)
		goto err_release_host;

	set_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state);

	/* Get ownership from the device */
	sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
	if (err < 0)
		goto err_disable_func;

	err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
				 status & C_COM_DRV_OWN, 2000, 1000000);
	if (err < 0) {
		bt_dev_err(bdev->hdev, "Cannot get ownership from device");
		goto err_disable_func;
	}

	/* Disable interrupt & mask out all interrupt sources */
	sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, &err);
	if (err < 0)
		goto err_disable_func;

	sdio_writel(bdev->func, 0, MTK_REG_CHIER, &err);
	if (err < 0)
		goto err_disable_func;

	err = sdio_claim_irq(bdev->func, btmtksdio_interrupt);
	if (err < 0)
		goto err_disable_func;

	err = sdio_set_block_size(bdev->func, MTK_SDIO_BLOCK_SIZE);
	if (err < 0)
		goto err_release_irq;

	/* SDIO CMD 5 allows the SDIO device back to idle state, and
	 * synchronous interrupt is supported in SDIO 4-bit mode
	 */
	val = sdio_readl(bdev->func, MTK_REG_CSDIOCSR, &err);
	if (err < 0)
		goto err_release_irq;

	val |= SDIO_INT_CTL;
	sdio_writel(bdev->func, val, MTK_REG_CSDIOCSR, &err);
	if (err < 0)
		goto err_release_irq;

	/* Explicitly set write-1-clear method */
	val = sdio_readl(bdev->func, MTK_REG_CHCR, &err);
	if (err < 0)
		goto err_release_irq;

	val |= C_INT_CLR_CTRL;
	sdio_writel(bdev->func, val, MTK_REG_CHCR, &err);
	if (err < 0)
		goto err_release_irq;

	/* Setup interrupt sources */
	sdio_writel(bdev->func, RX_DONE_INT | TX_EMPTY | TX_FIFO_OVERFLOW,
		    MTK_REG_CHIER, &err);
	if (err < 0)
		goto err_release_irq;

	/* Enable interrupt */
	sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, &err);
	if (err < 0)
		goto err_release_irq;

	sdio_release_host(bdev->func);

	return 0;

err_release_irq:
	sdio_release_irq(bdev->func);

err_disable_func:
	sdio_disable_func(bdev->func);

err_release_host:
	sdio_release_host(bdev->func);

	return err;
}

static int btmtksdio_close(struct hci_dev *hdev)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	u32 status;
	int err;

	sdio_claim_host(bdev->func);

	/* Disable interrupt */
	sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);

	sdio_release_irq(bdev->func);

	cancel_work_sync(&bdev->txrx_work);

	/* Return ownership to the device */
	sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, NULL);

	err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
				 !(status & C_COM_DRV_OWN), 2000, 1000000);
	if (err < 0)
		bt_dev_err(bdev->hdev, "Cannot return ownership to device");

	clear_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state);
	sdio_disable_func(bdev->func);

	sdio_release_host(bdev->func);

	return 0;
}

static int btmtksdio_flush(struct hci_dev *hdev)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);

	skb_queue_purge(&bdev->txq);

	cancel_work_sync(&bdev->txrx_work);

	return 0;
}

static int btmtksdio_func_query(struct hci_dev *hdev)
{
	struct btmtk_hci_wmt_params wmt_params;
	int status, err;
	u8 param = 0;

	/* Query whether the function is enabled */
	wmt_params.op = BTMTK_WMT_FUNC_CTRL;
	wmt_params.flag = 4;
	wmt_params.dlen = sizeof(param);
	wmt_params.data = &param;
	wmt_params.status = &status;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to query function status (%d)", err);
		return err;
	}

	return status;
}

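/* MT7663/MT7668 bring-up: check the WMT semaphore to see whether the patch
 * is already downloaded, download the firmware if not, wait until the
 * Bluetooth function has left the "on progress" state, enable the
 * Bluetooth protocol through WMT FUNC_CTRL and finally apply the TCI low
 * power settings.
 */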
static int mt76xx_setup(struct hci_dev *hdev, const char *fwname)
{
	struct btmtk_hci_wmt_params wmt_params;
	struct btmtk_tci_sleep tci_sleep;
	struct sk_buff *skb;
	int err, status;
	u8 param = 0x1;

	/* Query whether the firmware has already been downloaded */
	wmt_params.op = BTMTK_WMT_SEMAPHORE;
	wmt_params.flag = 1;
	wmt_params.dlen = 0;
	wmt_params.data = NULL;
	wmt_params.status = &status;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to query firmware status (%d)", err);
		return err;
	}

	if (status == BTMTK_WMT_PATCH_DONE) {
		bt_dev_info(hdev, "Firmware already downloaded");
		goto ignore_setup_fw;
	}

	/* Setup a firmware which the device definitely requires */
	err = btmtk_setup_firmware(hdev, fwname, mtk_hci_wmt_sync);
	if (err < 0)
		return err;

ignore_setup_fw:
	/* Query whether the device is already enabled */
	err = readx_poll_timeout(btmtksdio_func_query, hdev, status,
				 status < 0 || status != BTMTK_WMT_ON_PROGRESS,
				 2000, 5000000);
	/* -ETIMEDOUT happens */
	if (err < 0)
		return err;

	/* The other errors happen in btmtksdio_func_query */
	if (status < 0)
		return status;

	if (status == BTMTK_WMT_ON_DONE) {
		bt_dev_info(hdev, "function already on");
		goto ignore_func_on;
	}

	/* Enable Bluetooth protocol */
	wmt_params.op = BTMTK_WMT_FUNC_CTRL;
	wmt_params.flag = 0;
	wmt_params.dlen = sizeof(param);
	wmt_params.data = &param;
	wmt_params.status = NULL;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
		return err;
	}

ignore_func_on:
	/* Apply the low power environment setup */
	tci_sleep.mode = 0x5;
	tci_sleep.duration = cpu_to_le16(0x640);
	tci_sleep.host_duration = cpu_to_le16(0x640);
	tci_sleep.host_wakeup_pin = 0;
	tci_sleep.time_compensation = 0;

	skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "Failed to apply low power setting (%d)", err);
		return err;
	}
	kfree_skb(skb);

	return 0;
}

static int mt79xx_setup(struct hci_dev *hdev, const char *fwname)
{
	struct btmtk_hci_wmt_params wmt_params;
	u8 param = 0x1;
	int err;

	err = btmtk_setup_firmware_79xx(hdev, fwname, mtk_hci_wmt_sync);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to setup 79xx firmware (%d)", err);
		return err;
	}

	/* Enable Bluetooth protocol */
	wmt_params.op = BTMTK_WMT_FUNC_CTRL;
	wmt_params.flag = 0;
	wmt_params.dlen = sizeof(param);
	wmt_params.data = &param;
	wmt_params.status = NULL;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
		return err;
	}

	hci_set_msft_opcode(hdev, 0xFD30);
	hci_set_aosp_capable(hdev);

	return err;
}

static int btsdio_mtk_reg_read(struct hci_dev *hdev, u32 reg, u32 *val)
{
	struct btmtk_hci_wmt_params wmt_params;
	struct reg_read_cmd {
		u8 type;
		u8 rsv;
		u8 num;
		__le32 addr;
	} __packed reg_read = {
		.type = 1,
		.num = 1,
	};
	u32 status;
	int err;

	reg_read.addr = cpu_to_le32(reg);
	wmt_params.op = BTMTK_WMT_REGISTER;
	wmt_params.flag = BTMTK_WMT_REG_READ;
	wmt_params.dlen = sizeof(reg_read);
	wmt_params.data = &reg_read;
	wmt_params.status = &status;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to read reg (%d)", err);
		return err;
	}

	*val = status;

	return err;
}

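/* btmtksdio_setup() picks the bring-up flow from the chip id in the match
 * data: MT7921 reads its device id and firmware version via WMT register
 * reads to construct the firmware name and uses the 79xx flow, while
 * MT7663/MT7668 use the firmware named in the match data and the 76xx
 * flow. Runtime PM is then configured with an 8 second autosuspend delay
 * and kept forbidden unless the enable_autosuspend module parameter (or
 * sysfs) allows it.
 */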
static int btmtksdio_setup(struct hci_dev *hdev)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	ktime_t calltime, delta, rettime;
	unsigned long long duration;
	char fwname[64];
	int err;
	u32 dev_id;
	u32 fw_version = 0;

	calltime = ktime_get();
	set_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state);

	switch (bdev->data->chipid) {
	case 0x7921:
		err = btsdio_mtk_reg_read(hdev, 0x70010200, &dev_id);
		if (err < 0) {
			bt_dev_err(hdev, "Failed to get device id (%d)", err);
			return err;
		}

		err = btsdio_mtk_reg_read(hdev, 0x80021004, &fw_version);
		if (err < 0) {
			bt_dev_err(hdev, "Failed to get fw version (%d)", err);
			return err;
		}

		snprintf(fwname, sizeof(fwname),
			 "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
			 dev_id & 0xffff, (fw_version & 0xff) + 1);
		err = mt79xx_setup(hdev, fwname);
		if (err < 0)
			return err;
		break;
	case 0x7663:
	case 0x7668:
		err = mt76xx_setup(hdev, bdev->data->fwname);
		if (err < 0)
			return err;
		break;
	default:
		return -ENODEV;
	}

	rettime = ktime_get();
	delta = ktime_sub(rettime, calltime);
	duration = (unsigned long long)ktime_to_ns(delta) >> 10;

	pm_runtime_set_autosuspend_delay(bdev->dev,
					 MTKBTSDIO_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(bdev->dev);

	err = pm_runtime_set_active(bdev->dev);
	if (err < 0)
		return err;

	/* Runtime auto-suspend is forbidden by default; it can be allowed via
	 * the enable_autosuspend module parameter or the runtime PM entry
	 * under sysfs.
	 */
	pm_runtime_forbid(bdev->dev);
	pm_runtime_enable(bdev->dev);

	if (enable_autosuspend)
		pm_runtime_allow(bdev->dev);

	bt_dev_info(hdev, "Device setup in %llu usecs", duration);

	return 0;
}

static int btmtksdio_shutdown(struct hci_dev *hdev)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	struct btmtk_hci_wmt_params wmt_params;
	u8 param = 0x0;
	int err;

	/* Get back the state to be consistent with the state
	 * in btmtksdio_setup.
	 */
	pm_runtime_get_sync(bdev->dev);

	/* Disable the device */
	wmt_params.op = BTMTK_WMT_FUNC_CTRL;
	wmt_params.flag = 0;
	wmt_params.dlen = sizeof(param);
	wmt_params.data = &param;
	wmt_params.status = NULL;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
		return err;
	}

	pm_runtime_put_noidle(bdev->dev);
	pm_runtime_disable(bdev->dev);

	return 0;
}

static int btmtksdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		hdev->stat.cmd_tx++;
		break;

	case HCI_ACLDATA_PKT:
		hdev->stat.acl_tx++;
		break;

	case HCI_SCODATA_PKT:
		hdev->stat.sco_tx++;
		break;

	default:
		return -EILSEQ;
	}

	skb_queue_tail(&bdev->txq, skb);

	schedule_work(&bdev->txrx_work);

	return 0;
}

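/* btmtksdio_probe() allocates the hci_dev, wires up the driver callbacks,
 * sets the manufacturer to 70 (the Bluetooth SIG company identifier of
 * MediaTek) and HCI_QUIRK_NON_PERSISTENT_SETUP so the setup routine runs
 * again after every power cycle of the controller.
 */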
static int btmtksdio_probe(struct sdio_func *func,
			   const struct sdio_device_id *id)
{
	struct btmtksdio_dev *bdev;
	struct hci_dev *hdev;
	int err;

	bdev = devm_kzalloc(&func->dev, sizeof(*bdev), GFP_KERNEL);
	if (!bdev)
		return -ENOMEM;

	bdev->data = (void *)id->driver_data;
	if (!bdev->data)
		return -ENODEV;

	bdev->dev = &func->dev;
	bdev->func = func;

	INIT_WORK(&bdev->txrx_work, btmtksdio_txrx_work);
	skb_queue_head_init(&bdev->txq);

	/* Initialize and register HCI device */
	hdev = hci_alloc_dev();
	if (!hdev) {
		dev_err(&func->dev, "Can't allocate HCI device\n");
		return -ENOMEM;
	}

	bdev->hdev = hdev;

	hdev->bus = HCI_SDIO;
	hci_set_drvdata(hdev, bdev);

	hdev->open = btmtksdio_open;
	hdev->close = btmtksdio_close;
	hdev->flush = btmtksdio_flush;
	hdev->setup = btmtksdio_setup;
	hdev->shutdown = btmtksdio_shutdown;
	hdev->send = btmtksdio_send_frame;
	hdev->set_bdaddr = btmtk_set_bdaddr;

	SET_HCIDEV_DEV(hdev, &func->dev);

	hdev->manufacturer = 70;
	set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);

	err = hci_register_dev(hdev);
	if (err < 0) {
		dev_err(&func->dev, "Can't register HCI device\n");
		hci_free_dev(hdev);
		return err;
	}

	sdio_set_drvdata(func, bdev);

	/* pm_runtime_enable is done only after the firmware has been
	 * downloaded because the core layer may already have enabled
	 * runtime PM for this func, e.g. when host->caps includes
	 * MMC_CAP_POWER_OFF_CARD.
	 */
	if (pm_runtime_enabled(bdev->dev))
		pm_runtime_disable(bdev->dev);

	/* As the explanation in drivers/mmc/core/sdio_bus.c tells us:
	 * Unbound SDIO functions are always suspended.
	 * During probe, the function is set active and the usage count
	 * is incremented. If the driver supports runtime PM,
	 * it should call pm_runtime_put_noidle() in its probe routine and
	 * pm_runtime_get_noresume() in its remove routine.
	 *
	 * So, put a pm_runtime_put_noidle here!
	 */
	pm_runtime_put_noidle(bdev->dev);

	return 0;
}

static void btmtksdio_remove(struct sdio_func *func)
{
	struct btmtksdio_dev *bdev = sdio_get_drvdata(func);
	struct hci_dev *hdev;

	if (!bdev)
		return;

	/* Be consistent with the state in btmtksdio_probe */
	pm_runtime_get_noresume(bdev->dev);

	hdev = bdev->hdev;

	sdio_set_drvdata(func, NULL);
	hci_unregister_dev(hdev);
	hci_free_dev(hdev);
}

#ifdef CONFIG_PM
static int btmtksdio_runtime_suspend(struct device *dev)
{
	struct sdio_func *func = dev_to_sdio_func(dev);
	struct btmtksdio_dev *bdev;
	u32 status;
	int err;

	bdev = sdio_get_drvdata(func);
	if (!bdev)
		return 0;

	if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
		return 0;

	sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);

	sdio_claim_host(bdev->func);

	sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, &err);
	if (err < 0)
		goto out;

	err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
				 !(status & C_COM_DRV_OWN), 2000, 1000000);
out:
	bt_dev_info(bdev->hdev, "status (%d) return ownership to device", err);

	sdio_release_host(bdev->func);

	return err;
}

static int btmtksdio_runtime_resume(struct device *dev)
{
	struct sdio_func *func = dev_to_sdio_func(dev);
	struct btmtksdio_dev *bdev;
	u32 status;
	int err;

	bdev = sdio_get_drvdata(func);
	if (!bdev)
		return 0;

	if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state))
		return 0;

	sdio_claim_host(bdev->func);

	sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
	if (err < 0)
		goto out;

	err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
				 status & C_COM_DRV_OWN, 2000, 1000000);
out:
	bt_dev_info(bdev->hdev, "status (%d) get ownership from device", err);

	sdio_release_host(bdev->func);

	return err;
}

static UNIVERSAL_DEV_PM_OPS(btmtksdio_pm_ops, btmtksdio_runtime_suspend,
			    btmtksdio_runtime_resume, NULL);
#define BTMTKSDIO_PM_OPS (&btmtksdio_pm_ops)
#else	/* CONFIG_PM */
#define BTMTKSDIO_PM_OPS NULL
#endif	/* CONFIG_PM */

static struct sdio_driver btmtksdio_driver = {
	.name = "btmtksdio",
	.probe = btmtksdio_probe,
	.remove = btmtksdio_remove,
	.id_table = btmtksdio_table,
	.drv = {
		.owner = THIS_MODULE,
		.pm = BTMTKSDIO_PM_OPS,
	}
};

module_sdio_driver(btmtksdio_driver);

module_param(enable_autosuspend, bool, 0644);
MODULE_PARM_DESC(enable_autosuspend, "Enable autosuspend by default");

MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek Bluetooth SDIO driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");