// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 MediaTek Inc.

/*
 * Bluetooth support for MediaTek SDIO devices
 *
 * This file is written based on btsdio.c and btmtkuart.c.
 *
 * Author: Sean Wang <sean.wang@mediatek.com>
 *
 */

#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <linux/firmware.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>

#include <linux/mmc/host.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "h4_recv.h"

#define VERSION "0.1"

#define FIRMWARE_MT7663		"mediatek/mt7663pr2h.bin"
#define FIRMWARE_MT7668		"mediatek/mt7668pr2h.bin"

#define MTKBTSDIO_AUTOSUSPEND_DELAY	8000

static bool enable_autosuspend;

struct btmtksdio_data {
	const char *fwname;
};

static const struct btmtksdio_data mt7663_data = {
	.fwname = FIRMWARE_MT7663,
};

static const struct btmtksdio_data mt7668_data = {
	.fwname = FIRMWARE_MT7668,
};

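/* The driver_data of each entry below points at the btmtksdio_data above,
 * which currently only names the firmware patch file to request for that
 * chip.
 */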
static const struct sdio_device_id btmtksdio_table[] = {
	{SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7663),
	 .driver_data = (kernel_ulong_t)&mt7663_data },
	{SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7668),
	 .driver_data = (kernel_ulong_t)&mt7668_data },
	{ }	/* Terminating entry */
};

#define MTK_REG_CHLPCR		0x4	/* W1S */
#define C_INT_EN_SET		BIT(0)
#define C_INT_EN_CLR		BIT(1)
#define C_FW_OWN_REQ_SET	BIT(8)  /* For write */
#define C_COM_DRV_OWN		BIT(8)  /* For read */
#define C_FW_OWN_REQ_CLR	BIT(9)

#define MTK_REG_CSDIOCSR	0x8
#define SDIO_RE_INIT_EN		BIT(0)
#define SDIO_INT_CTL		BIT(2)

#define MTK_REG_CHCR		0xc
#define C_INT_CLR_CTRL		BIT(1)

/* CHISR has the same bit field definition as CHIER */
#define MTK_REG_CHISR		0x10
#define MTK_REG_CHIER		0x14
#define FW_OWN_BACK_INT		BIT(0)
#define RX_DONE_INT		BIT(1)
#define TX_EMPTY		BIT(2)
#define TX_FIFO_OVERFLOW	BIT(8)
#define RX_PKT_LEN		GENMASK(31, 16)

#define MTK_REG_CTDR		0x18

#define MTK_REG_CRDR		0x1c

#define MTK_SDIO_BLOCK_SIZE	256

#define BTMTKSDIO_TX_WAIT_VND_EVT	1

enum {
	MTK_WMT_PATCH_DWNLD = 0x1,
	MTK_WMT_TEST = 0x2,
	MTK_WMT_WAKEUP = 0x3,
	MTK_WMT_HIF = 0x4,
	MTK_WMT_FUNC_CTRL = 0x6,
	MTK_WMT_RST = 0x7,
	MTK_WMT_SEMAPHORE = 0x17,
};

enum {
	BTMTK_WMT_INVALID,
	BTMTK_WMT_PATCH_UNDONE,
	BTMTK_WMT_PATCH_DONE,
	BTMTK_WMT_ON_UNDONE,
	BTMTK_WMT_ON_DONE,
	BTMTK_WMT_ON_PROGRESS,
};

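/* Every packet exchanged over the CTDR/CRDR data registers starts with the
 * header below: a little-endian total length (which includes the header
 * itself), a reserved field and the H4 packet type. The HCI packet follows
 * and, on the transmit path, the transfer is rounded up to the SDIO block
 * size, so the tail may carry padding bytes.
 */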
struct mtkbtsdio_hdr {
	__le16	len;
	__le16	reserved;
	u8	bt_type;
} __packed;

struct mtk_wmt_hdr {
	u8	dir;
	u8	op;
	__le16	dlen;
	u8	flag;
} __packed;

struct mtk_hci_wmt_cmd {
	struct mtk_wmt_hdr hdr;
	u8 data[256];
} __packed;

struct btmtk_hci_wmt_evt {
	struct hci_event_hdr hhdr;
	struct mtk_wmt_hdr whdr;
} __packed;

struct btmtk_hci_wmt_evt_funcc {
	struct btmtk_hci_wmt_evt hwhdr;
	__be16 status;
} __packed;

struct btmtk_tci_sleep {
	u8 mode;
	__le16 duration;
	__le16 host_duration;
	u8 host_wakeup_pin;
	u8 time_compensation;
} __packed;

struct btmtk_hci_wmt_params {
	u8 op;
	u8 flag;
	u16 dlen;
	const void *data;
	u32 *status;
};

struct btmtksdio_dev {
	struct hci_dev *hdev;
	struct sdio_func *func;
	struct device *dev;

	struct work_struct tx_work;
	unsigned long tx_state;
	struct sk_buff_head txq;

	struct sk_buff *evt_skb;

	const struct btmtksdio_data *data;
};

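/* mtk_hci_wmt_sync() sends a vendor WMT command (HCI opcode 0xfc6f) and
 * waits for the matching vendor event, which the event receive path hands
 * over through bdev->evt_skb. A rough usage sketch, mirroring the callers
 * later in this file (the values shown are illustrative only):
 *
 *	struct btmtk_hci_wmt_params wmt_params = {
 *		.op = MTK_WMT_FUNC_CTRL,
 *		.flag = 0,
 *		.dlen = sizeof(param),
 *		.data = &param,
 *		.status = &status,
 *	};
 *	err = mtk_hci_wmt_sync(hdev, &wmt_params);
 *
 * The decoded result, if requested, is returned through wmt_params.status.
 */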
static int mtk_hci_wmt_sync(struct hci_dev *hdev,
			    struct btmtk_hci_wmt_params *wmt_params)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
	u32 hlen, status = BTMTK_WMT_INVALID;
	struct btmtk_hci_wmt_evt *wmt_evt;
	struct mtk_hci_wmt_cmd wc;
	struct mtk_wmt_hdr *hdr;
	int err;

	hlen = sizeof(*hdr) + wmt_params->dlen;
	if (hlen > 255)
		return -EINVAL;

	hdr = (struct mtk_wmt_hdr *)&wc;
	hdr->dir = 1;
	hdr->op = wmt_params->op;
	hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
	hdr->flag = wmt_params->flag;
	memcpy(wc.data, wmt_params->data, wmt_params->dlen);

	set_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);

	err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
	if (err < 0) {
		clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
		return err;
	}

	/* The vendor specific WMT commands are all answered by a vendor
	 * specific event and will not have the Command Status or Command
	 * Complete as with usual HCI command flow control.
	 *
	 * After sending the command, wait for BTMTKSDIO_TX_WAIT_VND_EVT
	 * state to be cleared. The driver specific event receive routine
	 * will clear that state and with that indicate completion of the
	 * WMT command.
	 */
	err = wait_on_bit_timeout(&bdev->tx_state, BTMTKSDIO_TX_WAIT_VND_EVT,
				  TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
	if (err == -EINTR) {
		bt_dev_err(hdev, "Execution of wmt command interrupted");
		clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
		return err;
	}

	if (err) {
		bt_dev_err(hdev, "Execution of wmt command timed out");
		clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
		return -ETIMEDOUT;
	}

	/* Parse and handle the returned WMT event */
	wmt_evt = (struct btmtk_hci_wmt_evt *)bdev->evt_skb->data;
	if (wmt_evt->whdr.op != hdr->op) {
		bt_dev_err(hdev, "Wrong op received %d expected %d",
			   wmt_evt->whdr.op, hdr->op);
		err = -EIO;
		goto err_free_skb;
	}

	switch (wmt_evt->whdr.op) {
	case MTK_WMT_SEMAPHORE:
		if (wmt_evt->whdr.flag == 2)
			status = BTMTK_WMT_PATCH_UNDONE;
		else
			status = BTMTK_WMT_PATCH_DONE;
		break;
	case MTK_WMT_FUNC_CTRL:
		wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
		if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
			status = BTMTK_WMT_ON_DONE;
		else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420)
			status = BTMTK_WMT_ON_PROGRESS;
		else
			status = BTMTK_WMT_ON_UNDONE;
		break;
	}

	if (wmt_params->status)
		*wmt_params->status = status;

err_free_skb:
	kfree_skb(bdev->evt_skb);
	bdev->evt_skb = NULL;

	return err;
}

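/* Prepend the MediaTek SDIO header to one queued HCI packet and push the
 * whole buffer out through the CTDR register, rounded up to the SDIO block
 * size. On failure the header is pulled off again so the caller can
 * requeue the skb.
 */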
static int btmtksdio_tx_packet(struct btmtksdio_dev *bdev,
			       struct sk_buff *skb)
{
	struct mtkbtsdio_hdr *sdio_hdr;
	int err;

	/* Make sure that there is enough room for the SDIO header */
	if (unlikely(skb_headroom(skb) < sizeof(*sdio_hdr))) {
		err = pskb_expand_head(skb, sizeof(*sdio_hdr), 0,
				       GFP_ATOMIC);
		if (err < 0)
			return err;
	}

	/* Prepend MediaTek SDIO Specific Header */
	skb_push(skb, sizeof(*sdio_hdr));

	sdio_hdr = (void *)skb->data;
	sdio_hdr->len = cpu_to_le16(skb->len);
	sdio_hdr->reserved = cpu_to_le16(0);
	sdio_hdr->bt_type = hci_skb_pkt_type(skb);

	err = sdio_writesb(bdev->func, MTK_REG_CTDR, skb->data,
			   round_up(skb->len, MTK_SDIO_BLOCK_SIZE));
	if (err < 0)
		goto err_skb_pull;

	bdev->hdev->stat.byte_tx += skb->len;

	kfree_skb(skb);

	return 0;

err_skb_pull:
	skb_pull(skb, sizeof(*sdio_hdr));

	return err;
}

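/* Read back CHLPCR so callers can poll the C_COM_DRV_OWN bit and find out
 * whether the host currently owns the device.
 */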
static u32 btmtksdio_drv_own_query(struct btmtksdio_dev *bdev)
{
	return sdio_readl(bdev->func, MTK_REG_CHLPCR, NULL);
}

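/* TX worker: drain the queue while holding a runtime PM reference and the
 * SDIO host; if sending fails, the packet is put back at the head of the
 * queue and the worker stops until it is scheduled again.
 */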
static void btmtksdio_tx_work(struct work_struct *work)
{
	struct btmtksdio_dev *bdev = container_of(work, struct btmtksdio_dev,
						  tx_work);
	struct sk_buff *skb;
	int err;

	pm_runtime_get_sync(bdev->dev);

	sdio_claim_host(bdev->func);

	while ((skb = skb_dequeue(&bdev->txq))) {
		err = btmtksdio_tx_packet(bdev, skb);
		if (err < 0) {
			bdev->hdev->stat.err_tx++;
			skb_queue_head(&bdev->txq, skb);
			break;
		}
	}

	sdio_release_host(bdev->func);

	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);
}

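/* Event receive path: vendor events (0xe4 on the wire) are remapped to
 * HCI_EV_VENDOR, cloned for any waiter sitting in mtk_hci_wmt_sync(), and
 * then passed on to the HCI core like any other event.
 */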
static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	struct hci_event_hdr *hdr = (void *)skb->data;
	int err;

	/* Fix up the vendor event id with 0xff for vendor specific instead
	 * of 0xe4 so that events sent via the monitoring socket can be
	 * parsed properly.
	 */
	if (hdr->evt == 0xe4)
		hdr->evt = HCI_EV_VENDOR;

	/* When someone is waiting for the WMT event, the skb is cloned here
	 * and the event is then processed from the clone.
	 */
	if (test_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state)) {
		bdev->evt_skb = skb_clone(skb, GFP_KERNEL);
		if (!bdev->evt_skb) {
			err = -ENOMEM;
			goto err_out;
		}
	}

	err = hci_recv_frame(hdev, skb);
	if (err < 0)
		goto err_free_skb;

	if (hdr->evt == HCI_EV_VENDOR) {
		if (test_and_clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT,
				       &bdev->tx_state)) {
			/* Barrier to sync with other CPUs */
			smp_mb__after_atomic();
			wake_up_bit(&bdev->tx_state, BTMTKSDIO_TX_WAIT_VND_EVT);
		}
	}

	return 0;

err_free_skb:
	kfree_skb(bdev->evt_skb);
	bdev->evt_skb = NULL;

err_out:
	return err;
}

static const struct h4_recv_pkt mtk_recv_pkts[] = {
	{ H4_RECV_ACL,      .recv = hci_recv_frame },
	{ H4_RECV_SCO,      .recv = hci_recv_frame },
	{ H4_RECV_EVENT,    .recv = btmtksdio_recv_event },
};

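/* Read one packet of rx_size bytes from the CRDR register, validate the
 * MediaTek SDIO header, strip it, and use the H4 header of the inner
 * packet to work out how many padding bytes must be trimmed from the tail
 * before handing the frame to the matching receive handler above.
 */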
static int btmtksdio_rx_packet(struct btmtksdio_dev *bdev, u16 rx_size)
{
	const struct h4_recv_pkt *pkts = mtk_recv_pkts;
	int pkts_count = ARRAY_SIZE(mtk_recv_pkts);
	struct mtkbtsdio_hdr *sdio_hdr;
	int err, i, pad_size;
	struct sk_buff *skb;
	u16 dlen;

	if (rx_size < sizeof(*sdio_hdr))
		return -EILSEQ;

	/* An SDIO packet contains exactly one Bluetooth packet */
	skb = bt_skb_alloc(rx_size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, rx_size);

	err = sdio_readsb(bdev->func, skb->data, MTK_REG_CRDR, rx_size);
	if (err < 0)
		goto err_kfree_skb;

	sdio_hdr = (void *)skb->data;

	/* Default the error to -EILSEQ simply to keep the error path
	 * cleaner.
	 */
	err = -EILSEQ;

	if (rx_size != le16_to_cpu(sdio_hdr->len)) {
		bt_dev_err(bdev->hdev, "Rx size in sdio header is mismatched");
		goto err_kfree_skb;
	}

	hci_skb_pkt_type(skb) = sdio_hdr->bt_type;

	/* Remove MediaTek SDIO header */
	skb_pull(skb, sizeof(*sdio_hdr));

	/* We have to dig into the packet to get the payload size and thus
	 * the number of padding bytes at the tail; these padding bytes
	 * should be removed before the packet is indicated to the core
	 * layer.
	 */
	for (i = 0; i < pkts_count; i++) {
		if (sdio_hdr->bt_type == (&pkts[i])->type)
			break;
	}

	if (i >= pkts_count) {
		bt_dev_err(bdev->hdev, "Invalid bt type 0x%02x",
			   sdio_hdr->bt_type);
		goto err_kfree_skb;
	}

	/* Remaining bytes cannot hold a header */
	if (skb->len < (&pkts[i])->hlen) {
		bt_dev_err(bdev->hdev, "The size of bt header is mismatched");
		goto err_kfree_skb;
	}

	switch ((&pkts[i])->lsize) {
	case 1:
		dlen = skb->data[(&pkts[i])->loff];
		break;
	case 2:
		dlen = get_unaligned_le16(skb->data +
					  (&pkts[i])->loff);
		break;
	default:
		goto err_kfree_skb;
	}

	pad_size = skb->len - (&pkts[i])->hlen - dlen;

	/* Remaining bytes cannot hold a payload */
	if (pad_size < 0) {
		bt_dev_err(bdev->hdev, "The size of bt payload is mismatched");
		goto err_kfree_skb;
	}

	/* Remove padding bytes */
	skb_trim(skb, skb->len - pad_size);

	/* Complete frame */
	(&pkts[i])->recv(bdev->hdev, skb);

	bdev->hdev->stat.byte_rx += rx_size;

	return 0;

err_kfree_skb:
	kfree_skb(skb);

	return err;
}

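/* SDIO IRQ handler: take a runtime PM reference (which resumes the device
 * if needed), mask interrupts, ack the pending status as early as
 * possible, then dispatch TX work and/or read the received packet whose
 * length is carried in the upper bits of CHISR.
 */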
static void btmtksdio_interrupt(struct sdio_func *func)
{
	struct btmtksdio_dev *bdev = sdio_get_drvdata(func);
	u32 int_status;
	u16 rx_size;

	/* It is required that the host gets ownership from the device before
	 * accessing any register. However, if the SDIO host is not released
	 * first, a potential deadlock can occur as a circular wait between
	 * the SDIO IRQ work and the PM runtime work. So, explicitly release
	 * the SDIO host here and claim it again after the PM runtime work is
	 * all done.
	 */
	sdio_release_host(bdev->func);

	pm_runtime_get_sync(bdev->dev);

	sdio_claim_host(bdev->func);

	/* Disable interrupt */
	sdio_writel(func, C_INT_EN_CLR, MTK_REG_CHLPCR, 0);

	int_status = sdio_readl(func, MTK_REG_CHISR, NULL);

	/* Ack the interrupt as soon as possible before any operation on
	 * the hardware.
	 *
	 * Note that we don't ack any status during operations to avoid a
	 * race condition between the host and the device, such as
	 * mistakenly acking RX_DONE for the next packet, which would stop
	 * the interrupt from being raised again even though there is still
	 * pending data in the hardware FIFO.
	 */
	sdio_writel(func, int_status, MTK_REG_CHISR, NULL);

	if (unlikely(!int_status))
		bt_dev_err(bdev->hdev, "CHISR is 0");

	if (int_status & FW_OWN_BACK_INT)
		bt_dev_dbg(bdev->hdev, "Get fw own back");

	if (int_status & TX_EMPTY)
		schedule_work(&bdev->tx_work);
	else if (unlikely(int_status & TX_FIFO_OVERFLOW))
		bt_dev_warn(bdev->hdev, "Tx fifo overflow");

	if (int_status & RX_DONE_INT) {
		rx_size = (int_status & RX_PKT_LEN) >> 16;

		if (btmtksdio_rx_packet(bdev, rx_size) < 0)
			bdev->hdev->stat.err_rx++;
	}

	/* Enable interrupt */
	sdio_writel(func, C_INT_EN_SET, MTK_REG_CHLPCR, 0);

	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);
}

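/* hdev->open: enable the SDIO function, claim driver ownership of the
 * device (C_FW_OWN_REQ_CLR, then poll CHLPCR until C_COM_DRV_OWN is set),
 * register the IRQ handler, set the block size and program the interrupt
 * related registers before finally unmasking the interrupt sources.
 */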
static int btmtksdio_open(struct hci_dev *hdev)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	int err;
	u32 status;

	sdio_claim_host(bdev->func);

	err = sdio_enable_func(bdev->func);
	if (err < 0)
		goto err_release_host;

	/* Get ownership from the device */
	sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
	if (err < 0)
		goto err_disable_func;

	err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
				 status & C_COM_DRV_OWN, 2000, 1000000);
	if (err < 0) {
		bt_dev_err(bdev->hdev, "Cannot get ownership from device");
		goto err_disable_func;
	}

	/* Disable interrupt & mask out all interrupt sources */
	sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, &err);
	if (err < 0)
		goto err_disable_func;

	sdio_writel(bdev->func, 0, MTK_REG_CHIER, &err);
	if (err < 0)
		goto err_disable_func;

	err = sdio_claim_irq(bdev->func, btmtksdio_interrupt);
	if (err < 0)
		goto err_disable_func;

	err = sdio_set_block_size(bdev->func, MTK_SDIO_BLOCK_SIZE);
	if (err < 0)
		goto err_release_irq;

	/* SDIO CMD 5 allows the SDIO device to go back to the idle state,
	 * and a synchronous interrupt is supported in SDIO 4-bit mode.
	 */
	sdio_writel(bdev->func, SDIO_INT_CTL | SDIO_RE_INIT_EN,
		    MTK_REG_CSDIOCSR, &err);
	if (err < 0)
		goto err_release_irq;

	/* Setup write-1-clear for CHISR register */
	sdio_writel(bdev->func, C_INT_CLR_CTRL, MTK_REG_CHCR, &err);
	if (err < 0)
		goto err_release_irq;

	/* Setup interrupt sources */
	sdio_writel(bdev->func, RX_DONE_INT | TX_EMPTY | TX_FIFO_OVERFLOW,
		    MTK_REG_CHIER, &err);
	if (err < 0)
		goto err_release_irq;

	/* Enable interrupt */
	sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, &err);
	if (err < 0)
		goto err_release_irq;

	sdio_release_host(bdev->func);

	return 0;

err_release_irq:
	sdio_release_irq(bdev->func);

err_disable_func:
	sdio_disable_func(bdev->func);

err_release_host:
	sdio_release_host(bdev->func);

	return err;
}

static int btmtksdio_close(struct hci_dev *hdev)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	u32 status;
	int err;

	sdio_claim_host(bdev->func);

	/* Disable interrupt */
	sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);

	sdio_release_irq(bdev->func);

	/* Return ownership to the device */
	sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, NULL);

	err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
				 !(status & C_COM_DRV_OWN), 2000, 1000000);
	if (err < 0)
		bt_dev_err(bdev->hdev, "Cannot return ownership to device");

	sdio_disable_func(bdev->func);

	sdio_release_host(bdev->func);

	return 0;
}

static int btmtksdio_flush(struct hci_dev *hdev)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);

	skb_queue_purge(&bdev->txq);

	cancel_work_sync(&bdev->tx_work);

	return 0;
}

static int btmtksdio_func_query(struct hci_dev *hdev)
{
	struct btmtk_hci_wmt_params wmt_params;
	int status, err;
	u8 param = 0;

	/* Query whether the function is enabled */
	wmt_params.op = MTK_WMT_FUNC_CTRL;
	wmt_params.flag = 4;
	wmt_params.dlen = sizeof(param);
	wmt_params.data = &param;
	wmt_params.status = &status;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to query function status (%d)", err);
		return err;
	}

	return status;
}

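/* Download the firmware patch through WMT commands: the 30-byte patch
 * header is skipped, the remainder is sent in chunks of at most 250 bytes
 * with flag 1/2/3 marking the first/continuation/last fragment, and a
 * final MTK_WMT_RST activates the downloaded patch.
 */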
static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
{
	struct btmtk_hci_wmt_params wmt_params;
	const struct firmware *fw;
	const u8 *fw_ptr;
	size_t fw_size;
	int err, dlen;
	u8 flag;

	err = request_firmware(&fw, fwname, &hdev->dev);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
		return err;
	}

	fw_ptr = fw->data;
	fw_size = fw->size;

	/* The size of the patch header is 30 bytes and should be skipped */
	if (fw_size < 30) {
		err = -EINVAL;
		goto free_fw;
	}

	fw_size -= 30;
	fw_ptr += 30;
	flag = 1;

	wmt_params.op = MTK_WMT_PATCH_DWNLD;
	wmt_params.status = NULL;

	while (fw_size > 0) {
		dlen = min_t(int, 250, fw_size);

		/* Tell device the position in sequence */
		if (fw_size - dlen <= 0)
			flag = 3;
		else if (fw_size < fw->size - 30)
			flag = 2;

		wmt_params.flag = flag;
		wmt_params.dlen = dlen;
		wmt_params.data = fw_ptr;

		err = mtk_hci_wmt_sync(hdev, &wmt_params);
		if (err < 0) {
			bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
				   err);
			goto free_fw;
		}

		fw_size -= dlen;
		fw_ptr += dlen;
	}

	wmt_params.op = MTK_WMT_RST;
	wmt_params.flag = 4;
	wmt_params.dlen = 0;
	wmt_params.data = NULL;
	wmt_params.status = NULL;

	/* Activate the function that the firmware provides */
	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
		goto free_fw;
	}

	/* Wait a few moments for the firmware activation to complete */
	usleep_range(10000, 12000);

free_fw:
	release_firmware(fw);
	return err;
}

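/* hdev->setup: check via the WMT semaphore whether a patch is already
 * present, download it if not, poll until the WMT function is no longer
 * reported as in progress, switch the Bluetooth function on, apply the
 * vendor low power (TCI sleep) settings and finally configure runtime PM.
 */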
static int btmtksdio_setup(struct hci_dev *hdev)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	struct btmtk_hci_wmt_params wmt_params;
	ktime_t calltime, delta, rettime;
	struct btmtk_tci_sleep tci_sleep;
	unsigned long long duration;
	struct sk_buff *skb;
	int err, status;
	u8 param = 0x1;

	calltime = ktime_get();

	/* Query whether the firmware is already downloaded */
	wmt_params.op = MTK_WMT_SEMAPHORE;
	wmt_params.flag = 1;
	wmt_params.dlen = 0;
	wmt_params.data = NULL;
	wmt_params.status = &status;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to query firmware status (%d)", err);
		return err;
	}

	if (status == BTMTK_WMT_PATCH_DONE) {
		bt_dev_info(hdev, "Firmware already downloaded");
		goto ignore_setup_fw;
	}

	/* Set up the firmware which the device definitely requires */
	err = mtk_setup_firmware(hdev, bdev->data->fwname);
	if (err < 0)
		return err;

ignore_setup_fw:
	/* Query whether the device is already enabled */
	err = readx_poll_timeout(btmtksdio_func_query, hdev, status,
				 status < 0 || status != BTMTK_WMT_ON_PROGRESS,
				 2000, 5000000);
	/* -ETIMEDOUT happens */
	if (err < 0)
		return err;

	/* The other errors happen in btmtksdio_func_query */
	if (status < 0)
		return status;

	if (status == BTMTK_WMT_ON_DONE) {
		bt_dev_info(hdev, "function already on");
		goto ignore_func_on;
	}

	/* Enable Bluetooth protocol */
	wmt_params.op = MTK_WMT_FUNC_CTRL;
	wmt_params.flag = 0;
	wmt_params.dlen = sizeof(param);
	wmt_params.data = &param;
	wmt_params.status = NULL;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
		return err;
	}

ignore_func_on:
	/* Apply the low power environment setup */
	tci_sleep.mode = 0x5;
	tci_sleep.duration = cpu_to_le16(0x640);
	tci_sleep.host_duration = cpu_to_le16(0x640);
	tci_sleep.host_wakeup_pin = 0;
	tci_sleep.time_compensation = 0;

	skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "Failed to apply low power setting (%d)", err);
		return err;
	}
	kfree_skb(skb);

	rettime = ktime_get();
	delta = ktime_sub(rettime, calltime);
	duration = (unsigned long long)ktime_to_ns(delta) >> 10;

	pm_runtime_set_autosuspend_delay(bdev->dev,
					 MTKBTSDIO_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(bdev->dev);

	err = pm_runtime_set_active(bdev->dev);
	if (err < 0)
		return err;

	/* Runtime autosuspend is forbidden by default; it can be allowed via
	 * the enable_autosuspend module parameter or the PM runtime entry
	 * under sysfs.
	 */
	pm_runtime_forbid(bdev->dev);
	pm_runtime_enable(bdev->dev);

	if (enable_autosuspend)
		pm_runtime_allow(bdev->dev);

	bt_dev_info(hdev, "Device setup in %llu usecs", duration);

	return 0;
}

static int btmtksdio_shutdown(struct hci_dev *hdev)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
	struct btmtk_hci_wmt_params wmt_params;
	u8 param = 0x0;
	int err;

	/* Get the runtime PM state back to be consistent with the state
	 * left by btmtksdio_setup.
	 */
	pm_runtime_get_sync(bdev->dev);

	/* Disable the device */
	wmt_params.op = MTK_WMT_FUNC_CTRL;
	wmt_params.flag = 0;
	wmt_params.dlen = sizeof(param);
	wmt_params.data = &param;
	wmt_params.status = NULL;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
		return err;
	}

	pm_runtime_put_noidle(bdev->dev);
	pm_runtime_disable(bdev->dev);

	return 0;
}

static int btmtksdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		hdev->stat.cmd_tx++;
		break;

	case HCI_ACLDATA_PKT:
		hdev->stat.acl_tx++;
		break;

	case HCI_SCODATA_PKT:
		hdev->stat.sco_tx++;
		break;

	default:
		return -EILSEQ;
	}

	skb_queue_tail(&bdev->txq, skb);

	schedule_work(&bdev->tx_work);

	return 0;
}

static int btmtksdio_probe(struct sdio_func *func,
			   const struct sdio_device_id *id)
{
	struct btmtksdio_dev *bdev;
	struct hci_dev *hdev;
	int err;

	bdev = devm_kzalloc(&func->dev, sizeof(*bdev), GFP_KERNEL);
	if (!bdev)
		return -ENOMEM;

	bdev->data = (void *)id->driver_data;
	if (!bdev->data)
		return -ENODEV;

	bdev->dev = &func->dev;
	bdev->func = func;

	INIT_WORK(&bdev->tx_work, btmtksdio_tx_work);
	skb_queue_head_init(&bdev->txq);

	/* Initialize and register HCI device */
	hdev = hci_alloc_dev();
	if (!hdev) {
		dev_err(&func->dev, "Can't allocate HCI device\n");
		return -ENOMEM;
	}

	bdev->hdev = hdev;

	hdev->bus = HCI_SDIO;
	hci_set_drvdata(hdev, bdev);

	hdev->open     = btmtksdio_open;
	hdev->close    = btmtksdio_close;
	hdev->flush    = btmtksdio_flush;
	hdev->setup    = btmtksdio_setup;
	hdev->shutdown = btmtksdio_shutdown;
	hdev->send     = btmtksdio_send_frame;
	SET_HCIDEV_DEV(hdev, &func->dev);

	hdev->manufacturer = 70;
	set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);

	err = hci_register_dev(hdev);
	if (err < 0) {
		dev_err(&func->dev, "Can't register HCI device\n");
		hci_free_dev(hdev);
		return err;
	}

	sdio_set_drvdata(func, bdev);

	/* pm_runtime_enable is deferred until after the firmware has been
	 * downloaded because the core layer may already have enabled runtime
	 * PM for this func, e.g. when host->caps has MMC_CAP_POWER_OFF_CARD
	 * set.
	 */
	if (pm_runtime_enabled(bdev->dev))
		pm_runtime_disable(bdev->dev);

	/* As the explanation in drivers/mmc/core/sdio_bus.c tells us:
	 * Unbound SDIO functions are always suspended.
	 * During probe, the function is set active and the usage count
	 * is incremented.  If the driver supports runtime PM,
	 * it should call pm_runtime_put_noidle() in its probe routine and
	 * pm_runtime_get_noresume() in its remove routine.
	 *
	 * So, put a pm_runtime_put_noidle here!
	 */
	pm_runtime_put_noidle(bdev->dev);

	return 0;
}

static void btmtksdio_remove(struct sdio_func *func)
{
	struct btmtksdio_dev *bdev = sdio_get_drvdata(func);
	struct hci_dev *hdev;

	if (!bdev)
		return;

	/* Be consistent with the state in btmtksdio_probe */
	pm_runtime_get_noresume(bdev->dev);

	hdev = bdev->hdev;

	sdio_set_drvdata(func, NULL);
	hci_unregister_dev(hdev);
	hci_free_dev(hdev);
}

#ifdef CONFIG_PM
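/* Runtime suspend hands ownership back to the firmware (C_FW_OWN_REQ_SET,
 * then poll until C_COM_DRV_OWN is cleared); runtime resume claims it back
 * the same way btmtksdio_open does, presumably allowing the controller to
 * enter its low power state while the host side is idle.
 */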
static int btmtksdio_runtime_suspend(struct device *dev)
{
	struct sdio_func *func = dev_to_sdio_func(dev);
	struct btmtksdio_dev *bdev;
	u32 status;
	int err;

	bdev = sdio_get_drvdata(func);
	if (!bdev)
		return 0;

	sdio_claim_host(bdev->func);

	sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, &err);
	if (err < 0)
		goto out;

	err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
				 !(status & C_COM_DRV_OWN), 2000, 1000000);
out:
	bt_dev_info(bdev->hdev, "status (%d) return ownership to device", err);

	sdio_release_host(bdev->func);

	return err;
}

static int btmtksdio_runtime_resume(struct device *dev)
{
	struct sdio_func *func = dev_to_sdio_func(dev);
	struct btmtksdio_dev *bdev;
	u32 status;
	int err;

	bdev = sdio_get_drvdata(func);
	if (!bdev)
		return 0;

	sdio_claim_host(bdev->func);

	sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
	if (err < 0)
		goto out;

	err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
				 status & C_COM_DRV_OWN, 2000, 1000000);
out:
	bt_dev_info(bdev->hdev, "status (%d) get ownership from device", err);

	sdio_release_host(bdev->func);

	return err;
}

static UNIVERSAL_DEV_PM_OPS(btmtksdio_pm_ops, btmtksdio_runtime_suspend,
			    btmtksdio_runtime_resume, NULL);
#define BTMTKSDIO_PM_OPS (&btmtksdio_pm_ops)
#else	/* CONFIG_PM */
#define BTMTKSDIO_PM_OPS NULL
#endif	/* CONFIG_PM */

static struct sdio_driver btmtksdio_driver = {
	.name		= "btmtksdio",
	.probe		= btmtksdio_probe,
	.remove		= btmtksdio_remove,
	.id_table	= btmtksdio_table,
	.drv = {
		.owner = THIS_MODULE,
		.pm = BTMTKSDIO_PM_OPS,
	}
};

module_sdio_driver(btmtksdio_driver);

module_param(enable_autosuspend, bool, 0644);
MODULE_PARM_DESC(enable_autosuspend, "Enable autosuspend by default");
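
/* A typical way to exercise the parameter (assuming the module name
 * matches the driver name):
 *
 *	modprobe btmtksdio enable_autosuspend=1
 *
 * or write to /sys/module/btmtksdio/parameters/enable_autosuspend, which
 * is consulted when btmtksdio_setup() runs.
 */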

MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek Bluetooth SDIO driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_MT7663);
MODULE_FIRMWARE(FIRMWARE_MT7668);