/*
 *
 *  Bluetooth HCI Three-wire UART driver
 *
 *  Copyright (C) 2012  Intel Corporation
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "hci_uart.h"

#define HCI_3WIRE_ACK_PKT	0
#define HCI_3WIRE_LINK_PKT	15

/* Sliding window size */
#define H5_TX_WIN_MAX		4

#define H5_ACK_TIMEOUT	msecs_to_jiffies(250)
#define H5_SYNC_TIMEOUT	msecs_to_jiffies(100)

/*
 * Maximum Three-wire packet:
 *     4 byte header + max value for 12-bit length + 2 bytes for CRC
 */
#define H5_MAX_LEN (4 + 0xfff + 2)

/* Convenience macros for reading Three-wire header values */
#define H5_HDR_SEQ(hdr)		((hdr)[0] & 0x07)
#define H5_HDR_ACK(hdr)		(((hdr)[0] >> 3) & 0x07)
#define H5_HDR_CRC(hdr)		(((hdr)[0] >> 6) & 0x01)
#define H5_HDR_RELIABLE(hdr)	(((hdr)[0] >> 7) & 0x01)
#define H5_HDR_PKT_TYPE(hdr)	((hdr)[1] & 0x0f)
#define H5_HDR_LEN(hdr)		((((hdr)[1] >> 4) & 0x0f) + ((hdr)[2] << 4))
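
/*
 * Header layout implied by the macros above (and by the checksum test in
 * h5_rx_3wire_hdr() and the header construction in h5_prepare_pkt()):
 *
 *   hdr[0]: bits 0-2 seq, bits 3-5 ack, bit 6 CRC present, bit 7 reliable
 *   hdr[1]: bits 0-3 packet type, bits 4-7 low nibble of the 12-bit length
 *   hdr[2]: upper eight bits of the 12-bit length
 *   hdr[3]: checksum, chosen so that the four header bytes sum to 0xff
 */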

#define SLIP_DELIMITER	0xc0
#define SLIP_ESC	0xdb
#define SLIP_ESC_DELIM	0xdc
#define SLIP_ESC_ESC	0xdd
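
/*
 * SLIP framing as used below: every packet is wrapped in 0xc0 delimiters,
 * and 0xc0/0xdb bytes inside the packet are escaped as the two-byte
 * sequences 0xdb 0xdc and 0xdb 0xdd (see h5_slip_one_byte() and
 * h5_unslip_one_byte()).
 */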

/* H5 state flags */
enum {
	H5_RX_ESC,	/* SLIP escape mode */
	H5_TX_ACK_REQ,	/* Pending ack to send */
};

struct h5 {
	struct sk_buff_head	unack;		/* Unack'ed packets queue */
	struct sk_buff_head	rel;		/* Reliable packets queue */
	struct sk_buff_head	unrel;		/* Unreliable packets queue */

	unsigned long		flags;

	struct sk_buff		*rx_skb;	/* Receive buffer */
	size_t			rx_pending;	/* Expecting more bytes */
	u8			rx_ack;		/* Last ack number received */

	int			(*rx_func)(struct hci_uart *hu, u8 c);

	struct timer_list	timer;		/* Retransmission timer */

	u8			tx_seq;		/* Next seq number to send */
	u8			tx_ack;		/* Next ack number to send */
	u8			tx_win;		/* Sliding window size */

	enum {
		H5_UNINITIALIZED,
		H5_INITIALIZED,
		H5_ACTIVE,
	} state;

	enum {
		H5_AWAKE,
		H5_SLEEPING,
		H5_WAKING_UP,
	} sleep;
};

static void h5_reset_rx(struct h5 *h5);

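/*
 * Queue a link control message (HCI_3WIRE_LINK_PKT) on the unreliable
 * queue; actual transmission is triggered separately, either by
 * hci_uart_tx_wakeup() or when the sync timer fires.
 */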
static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;

	nskb = alloc_skb(3, GFP_ATOMIC);
	if (!nskb)
		return;

	hci_skb_pkt_type(nskb) = HCI_3WIRE_LINK_PKT;

	skb_put_data(nskb, data, len);

	skb_queue_tail(&h5->unrel, nskb);
}

static u8 h5_cfg_field(struct h5 *h5)
{
	/* Sliding window size (first 3 bits) */
	return h5->tx_win & 0x07;
}

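/*
 * Shared timer callback: while the link is still being set up it
 * retransmits the sync/config requests, and once the link is active it
 * moves any unacked reliable packets back to the head of the reliable
 * queue (rewinding tx_seq) so that they are retransmitted.
 */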
static void h5_timed_event(unsigned long arg)
{
	const unsigned char sync_req[] = { 0x01, 0x7e };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	struct hci_uart *hu = (struct hci_uart *)arg;
	struct h5 *h5 = hu->priv;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("%s", hu->hdev->name);

	if (h5->state == H5_UNINITIALIZED)
		h5_link_control(hu, sync_req, sizeof(sync_req));

	if (h5->state == H5_INITIALIZED) {
		conf_req[2] = h5_cfg_field(h5);
		h5_link_control(hu, conf_req, sizeof(conf_req));
	}

	if (h5->state != H5_ACTIVE) {
		mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
		goto wakeup;
	}

	if (h5->sleep != H5_AWAKE) {
		h5->sleep = H5_SLEEPING;
		goto wakeup;
	}

	BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
		h5->tx_seq = (h5->tx_seq - 1) & 0x07;
		skb_queue_head(&h5->rel, skb);
	}

	spin_unlock_irqrestore(&h5->unack.lock, flags);

wakeup:
	hci_uart_tx_wakeup(hu);
}

static void h5_peer_reset(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	BT_ERR("Peer device has reset");

	h5->state = H5_UNINITIALIZED;

	del_timer(&h5->timer);

	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);
	skb_queue_purge(&h5->unack);

	h5->tx_seq = 0;
	h5->tx_ack = 0;

	/* Send reset request to upper stack */
	hci_reset_dev(hu->hdev);
}

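/*
 * Allocate and initialize the per-device H5 state and start link
 * establishment by queueing an initial sync request and arming the
 * sync timer.
 */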
static int h5_open(struct hci_uart *hu)
{
	struct h5 *h5;
	const unsigned char sync[] = { 0x01, 0x7e };

	BT_DBG("hu %p", hu);

	h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
	if (!h5)
		return -ENOMEM;

	hu->priv = h5;

	skb_queue_head_init(&h5->unack);
	skb_queue_head_init(&h5->rel);
	skb_queue_head_init(&h5->unrel);

	h5_reset_rx(h5);

	setup_timer(&h5->timer, h5_timed_event, (unsigned long)hu);

	h5->tx_win = H5_TX_WIN_MAX;

	set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);

	/* Send initial sync request */
	h5_link_control(hu, sync, sizeof(sync));
	mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);

	return 0;
}

static int h5_close(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	del_timer_sync(&h5->timer);

	skb_queue_purge(&h5->unack);
	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);

	kfree(h5);

	return 0;
}

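/*
 * Drop packets from the unack queue that are covered by the most recently
 * received ack number (rx_ack); the number of acked packets is derived by
 * walking backwards from tx_seq modulo 8.
 */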
static void h5_pkt_cull(struct h5 *h5)
{
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	int i, to_remove;
	u8 seq;

	spin_lock_irqsave(&h5->unack.lock, flags);

	to_remove = skb_queue_len(&h5->unack);
	if (to_remove == 0)
		goto unlock;

	seq = h5->tx_seq;

	while (to_remove > 0) {
		if (h5->rx_ack == seq)
			break;

		to_remove--;
		seq = (seq - 1) & 0x07;
	}

	if (seq != h5->rx_ack)
		BT_ERR("Controller acked invalid packet");

	i = 0;
	skb_queue_walk_safe(&h5->unack, skb, tmp) {
		if (i++ >= to_remove)
			break;

		__skb_unlink(skb, &h5->unack);
		kfree_skb(skb);
	}

	if (skb_queue_empty(&h5->unack))
		del_timer(&h5->timer);

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);
}

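/*
 * Handle a received link control packet. The two-byte messages below drive
 * the sync -> config -> active handshake as well as the sleep/wakeup/woken
 * signalling; anything unrecognized is only logged.
 */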
static void h5_handle_internal_rx(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char sync_req[] = { 0x01, 0x7e };
	const unsigned char sync_rsp[] = { 0x02, 0x7d };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	const unsigned char conf_rsp[] = { 0x04, 0x7b };
	const unsigned char wakeup_req[] = { 0x05, 0xfa };
	const unsigned char woken_req[] = { 0x06, 0xf9 };
	const unsigned char sleep_req[] = { 0x07, 0x78 };
	const unsigned char *hdr = h5->rx_skb->data;
	const unsigned char *data = &h5->rx_skb->data[4];

	BT_DBG("%s", hu->hdev->name);

	if (H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT)
		return;

	if (H5_HDR_LEN(hdr) < 2)
		return;

	conf_req[2] = h5_cfg_field(h5);

	if (memcmp(data, sync_req, 2) == 0) {
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5_link_control(hu, sync_rsp, 2);
	} else if (memcmp(data, sync_rsp, 2) == 0) {
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5->state = H5_INITIALIZED;
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_req, 2) == 0) {
		h5_link_control(hu, conf_rsp, 2);
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_rsp, 2) == 0) {
		if (H5_HDR_LEN(hdr) > 2)
			h5->tx_win = (data[2] & 0x07);
		BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
		h5->state = H5_ACTIVE;
		hci_uart_init_ready(hu);
		return;
	} else if (memcmp(data, sleep_req, 2) == 0) {
		BT_DBG("Peer went to sleep");
		h5->sleep = H5_SLEEPING;
		return;
	} else if (memcmp(data, woken_req, 2) == 0) {
		BT_DBG("Peer woke up");
		h5->sleep = H5_AWAKE;
	} else if (memcmp(data, wakeup_req, 2) == 0) {
		BT_DBG("Peer requested wakeup");
		h5_link_control(hu, woken_req, 2);
		h5->sleep = H5_AWAKE;
	} else {
		BT_DBG("Link Control: 0x%02hhx 0x%02hhx", data[0], data[1]);
		return;
	}

	hci_uart_tx_wakeup(hu);
}

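/*
 * A complete packet has been received: schedule an ack if it was reliable,
 * cull the unack queue based on its ack field, and then hand HCI
 * event/ACL/SCO packets to the core or link control packets to
 * h5_handle_internal_rx().
 */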
static void h5_complete_rx_pkt(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	if (H5_HDR_RELIABLE(hdr)) {
		h5->tx_ack = (h5->tx_ack + 1) % 8;
		set_bit(H5_TX_ACK_REQ, &h5->flags);
		hci_uart_tx_wakeup(hu);
	}

	h5->rx_ack = H5_HDR_ACK(hdr);

	h5_pkt_cull(h5);

	switch (H5_HDR_PKT_TYPE(hdr)) {
	case HCI_EVENT_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_SCODATA_PKT:
		hci_skb_pkt_type(h5->rx_skb) = H5_HDR_PKT_TYPE(hdr);

		/* Remove Three-wire header */
		skb_pull(h5->rx_skb, 4);

		hci_recv_frame(hu->hdev, h5->rx_skb);
		h5->rx_skb = NULL;

		break;

	default:
		h5_handle_internal_rx(hu);
		break;
	}

	h5_reset_rx(h5);
}

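/*
 * Receive state machine, driven through h5->rx_func. Each handler is called
 * with the next byte once rx_pending has drained to zero (the pending bytes
 * themselves are collected by h5_unslip_one_byte() in h5_recv()):
 * h5_rx_delimiter waits for a SLIP delimiter, h5_rx_pkt_start allocates the
 * receive skb and asks for the four header bytes, h5_rx_3wire_hdr validates
 * the header and asks for the payload, h5_rx_payload decides whether two
 * CRC bytes follow, and h5_rx_crc completes the packet.
 */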
static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
{
	h5_complete_rx_pkt(hu);

	return 0;
}

static int h5_rx_payload(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	if (H5_HDR_CRC(hdr)) {
		h5->rx_func = h5_rx_crc;
		h5->rx_pending = 2;
	} else {
		h5_complete_rx_pkt(hu);
	}

	return 0;
}

static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	BT_DBG("%s rx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
		BT_ERR("Invalid header checksum");
		h5_reset_rx(h5);
		return 0;
	}

	if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
		BT_ERR("Out-of-order packet arrived (%u != %u)",
		       H5_HDR_SEQ(hdr), h5->tx_ack);
		h5_reset_rx(h5);
		return 0;
	}

	if (h5->state != H5_ACTIVE &&
	    H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
		BT_ERR("Non-link packet received in non-active state");
		h5_reset_rx(h5);
		return 0;
	}

	h5->rx_func = h5_rx_payload;
	h5->rx_pending = H5_HDR_LEN(hdr);

	return 0;
}

static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	if (c == SLIP_DELIMITER)
		return 1;

	h5->rx_func = h5_rx_3wire_hdr;
	h5->rx_pending = 4;

	h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
	if (!h5->rx_skb) {
		BT_ERR("Can't allocate mem for new packet");
		h5_reset_rx(h5);
		return -ENOMEM;
	}

	h5->rx_skb->dev = (void *)hu->hdev;

	return 0;
}

static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	if (c == SLIP_DELIMITER)
		h5->rx_func = h5_rx_pkt_start;

	return 1;
}

static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
{
	const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC;
	const u8 *byte = &c;

	if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) {
		set_bit(H5_RX_ESC, &h5->flags);
		return;
	}

	if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) {
		switch (c) {
		case SLIP_ESC_DELIM:
			byte = &delim;
			break;
		case SLIP_ESC_ESC:
			byte = &esc;
			break;
		default:
			BT_ERR("Invalid esc byte 0x%02hhx", c);
			h5_reset_rx(h5);
			return;
		}
	}

	skb_put_data(h5->rx_skb, byte, 1);
	h5->rx_pending--;

	BT_DBG("unslipped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
}

static void h5_reset_rx(struct h5 *h5)
{
	if (h5->rx_skb) {
		kfree_skb(h5->rx_skb);
		h5->rx_skb = NULL;
	}

	h5->rx_func = h5_rx_delimiter;
	h5->rx_pending = 0;
	clear_bit(H5_RX_ESC, &h5->flags);
}

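/*
 * Feed received UART data into the receive state machine. While rx_pending
 * is non-zero the bytes are SLIP-decoded straight into the receive skb;
 * otherwise the current rx_func is called and reports how many bytes it
 * consumed (or a negative error).
 */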
static int h5_recv(struct hci_uart *hu, const void *data, int count)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *ptr = data;

	BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending,
	       count);

	while (count > 0) {
		int processed;

		if (h5->rx_pending > 0) {
			if (*ptr == SLIP_DELIMITER) {
				BT_ERR("Too short H5 packet");
				h5_reset_rx(h5);
				continue;
			}

			h5_unslip_one_byte(h5, *ptr);

			ptr++; count--;
			continue;
		}

		processed = h5->rx_func(hu, *ptr);
		if (processed < 0)
			return processed;

		ptr += processed;
		count -= processed;
	}

	return 0;
}

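/*
 * Queue an outgoing HCI packet: commands and ACL data go on the reliable
 * queue, SCO data on the unreliable queue. Packets are only accepted while
 * the three-wire link is active and if they fit the 12-bit length field.
 */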
static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	struct h5 *h5 = hu->priv;

	if (skb->len > 0xfff) {
		BT_ERR("Packet too long (%u bytes)", skb->len);
		kfree_skb(skb);
		return 0;
	}

	if (h5->state != H5_ACTIVE) {
		BT_ERR("Ignoring HCI data in non-active state");
		kfree_skb(skb);
		return 0;
	}

	switch (hci_skb_pkt_type(skb)) {
	case HCI_ACLDATA_PKT:
	case HCI_COMMAND_PKT:
		skb_queue_tail(&h5->rel, skb);
		break;

	case HCI_SCODATA_PKT:
		skb_queue_tail(&h5->unrel, skb);
		break;

	default:
		BT_ERR("Unknown packet type %u", hci_skb_pkt_type(skb));
		kfree_skb(skb);
		break;
	}

	return 0;
}

static void h5_slip_delim(struct sk_buff *skb)
{
	const char delim = SLIP_DELIMITER;

	skb_put_data(skb, &delim, 1);
}

static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
{
	const char esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM };
	const char esc_esc[2] = { SLIP_ESC, SLIP_ESC_ESC };

	switch (c) {
	case SLIP_DELIMITER:
		skb_put_data(skb, &esc_delim, 2);
		break;
	case SLIP_ESC:
		skb_put_data(skb, &esc_esc, 2);
		break;
	default:
		skb_put_data(skb, &c, 1);
	}
}

static bool valid_packet_type(u8 type)
{
	switch (type) {
	case HCI_ACLDATA_PKT:
	case HCI_COMMAND_PKT:
	case HCI_SCODATA_PKT:
	case HCI_3WIRE_LINK_PKT:
	case HCI_3WIRE_ACK_PKT:
		return true;
	default:
		return false;
	}
}

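/*
 * Build the on-the-wire form of one packet: SLIP delimiter, SLIP-escaped
 * four-byte header, SLIP-escaped payload, SLIP delimiter. Reliable packets
 * (commands and ACL data) additionally consume a sequence number here.
 */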
static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
				      const u8 *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;
	u8 hdr[4];
	int i;

	if (!valid_packet_type(pkt_type)) {
		BT_ERR("Unknown packet type %u", pkt_type);
		return NULL;
	}

	/*
	 * Max len of packet: (original len + 4 (H5 hdr) + 2 (crc)) * 2
	 * (because bytes 0xc0 and 0xdb are escaped, worst case is when
	 * the packet is all made of 0xc0 and 0xdb) + 2 (0xc0
	 * delimiters at start and end).
	 */
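	/* For example, a 10 byte payload needs at most (10 + 6) * 2 + 2 = 34 bytes. */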
	nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	hci_skb_pkt_type(nskb) = pkt_type;

	h5_slip_delim(nskb);

	hdr[0] = h5->tx_ack << 3;
	clear_bit(H5_TX_ACK_REQ, &h5->flags);

	/* Reliable packet? */
	if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) {
		hdr[0] |= 1 << 7;
		hdr[0] |= h5->tx_seq;
		h5->tx_seq = (h5->tx_seq + 1) % 8;
	}

	hdr[1] = pkt_type | ((len & 0x0f) << 4);
	hdr[2] = len >> 4;
	hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);

	BT_DBG("%s tx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	for (i = 0; i < 4; i++)
		h5_slip_one_byte(nskb, hdr[i]);

	for (i = 0; i < len; i++)
		h5_slip_one_byte(nskb, data[i]);

	h5_slip_delim(nskb);

	return nskb;
}

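/*
 * Pick the next packet to transmit. If the peer is asleep only a wakeup
 * request is sent; otherwise unreliable packets are sent first, then
 * reliable ones as long as fewer than tx_win packets are awaiting an ack,
 * and finally a pure ack packet if one is still owed to the peer.
 */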
static struct sk_buff *h5_dequeue(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	unsigned long flags;
	struct sk_buff *skb, *nskb;

	if (h5->sleep != H5_AWAKE) {
		const unsigned char wakeup_req[] = { 0x05, 0xfa };

		if (h5->sleep == H5_WAKING_UP)
			return NULL;

		h5->sleep = H5_WAKING_UP;
		BT_DBG("Sending wakeup request");

		mod_timer(&h5->timer, jiffies + HZ / 100);
		return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2);
	}

	skb = skb_dequeue(&h5->unrel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			kfree_skb(skb);
			return nskb;
		}

		skb_queue_head(&h5->unrel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	if (h5->unack.qlen >= h5->tx_win)
		goto unlock;

	skb = skb_dequeue(&h5->rel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			__skb_queue_tail(&h5->unack, skb);
			mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT);
			spin_unlock_irqrestore(&h5->unack.lock, flags);
			return nskb;
		}

		skb_queue_head(&h5->rel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);

	if (test_bit(H5_TX_ACK_REQ, &h5->flags))
		return h5_prepare_pkt(hu, HCI_3WIRE_ACK_PKT, NULL, 0);

	return NULL;
}

static int h5_flush(struct hci_uart *hu)
{
	BT_DBG("hu %p", hu);
	return 0;
}

static const struct hci_uart_proto h5p = {
	.id		= HCI_UART_3WIRE,
	.name		= "Three-wire (H5)",
	.open		= h5_open,
	.close		= h5_close,
	.recv		= h5_recv,
	.enqueue	= h5_enqueue,
	.dequeue	= h5_dequeue,
	.flush		= h5_flush,
};

int __init h5_init(void)
{
	return hci_uart_register_proto(&h5p);
}

int __exit h5_deinit(void)
{
	return hci_uart_unregister_proto(&h5p);
}