xref: /openbmc/linux/net/bluetooth/l2cap_core.c (revision 37cf4d1a)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6 
7    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 
9    This program is free software; you can redistribute it and/or modify
10    it under the terms of the GNU General Public License version 2 as
11    published by the Free Software Foundation;
12 
13    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 
22    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24    SOFTWARE IS DISCLAIMED.
25 */
26 
27 /* Bluetooth L2CAP core. */
28 
29 #include <linux/module.h>
30 
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50 
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53 
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
58 
59 int disable_ertm;
60 
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
63 
64 static struct workqueue_struct *_busy_wq;
65 
66 static LIST_HEAD(chan_list);
67 static DEFINE_RWLOCK(chan_list_lock);
68 
69 static void l2cap_busy_work(struct work_struct *work);
70 
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 				u8 code, u8 ident, u16 dlen, void *data);
73 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
74 								void *data);
75 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
76 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
77 				struct l2cap_chan *chan, int err);
78 
79 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
80 
81 /* ---- L2CAP channels ---- */
82 
83 static inline void chan_hold(struct l2cap_chan *c)
84 {
85 	atomic_inc(&c->refcnt);
86 }
87 
88 static inline void chan_put(struct l2cap_chan *c)
89 {
90 	if (atomic_dec_and_test(&c->refcnt))
91 		kfree(c);
92 }
93 
94 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
95 {
96 	struct l2cap_chan *c;
97 
98 	list_for_each_entry(c, &conn->chan_l, list) {
99 		if (c->dcid == cid)
100 			return c;
101 	}
102 	return NULL;
103 
104 }
105 
106 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
107 {
108 	struct l2cap_chan *c;
109 
110 	list_for_each_entry(c, &conn->chan_l, list) {
111 		if (c->scid == cid)
112 			return c;
113 	}
114 	return NULL;
115 }
116 
117 /* Find channel with given SCID.
118  * Returns the channel with its socket locked. */
119 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
120 {
121 	struct l2cap_chan *c;
122 
123 	read_lock(&conn->chan_lock);
124 	c = __l2cap_get_chan_by_scid(conn, cid);
125 	if (c)
126 		bh_lock_sock(c->sk);
127 	read_unlock(&conn->chan_lock);
128 	return c;
129 }
130 
131 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
132 {
133 	struct l2cap_chan *c;
134 
135 	list_for_each_entry(c, &conn->chan_l, list) {
136 		if (c->ident == ident)
137 			return c;
138 	}
139 	return NULL;
140 }
141 
142 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
143 {
144 	struct l2cap_chan *c;
145 
146 	read_lock(&conn->chan_lock);
147 	c = __l2cap_get_chan_by_ident(conn, ident);
148 	if (c)
149 		bh_lock_sock(c->sk);
150 	read_unlock(&conn->chan_lock);
151 	return c;
152 }
153 
154 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
155 {
156 	struct l2cap_chan *c;
157 
158 	list_for_each_entry(c, &chan_list, global_l) {
159 		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
160 			goto found;
161 	}
162 
163 	c = NULL;
164 found:
165 	return c;
166 }
167 
168 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
169 {
170 	int err;
171 
172 	write_lock_bh(&chan_list_lock);
173 
174 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
175 		err = -EADDRINUSE;
176 		goto done;
177 	}
178 
179 	if (psm) {
180 		chan->psm = psm;
181 		chan->sport = psm;
182 		err = 0;
183 	} else {
184 		u16 p;
185 
186 		err = -EINVAL;
187 		for (p = 0x1001; p < 0x1100; p += 2)
188 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
189 				chan->psm   = cpu_to_le16(p);
190 				chan->sport = cpu_to_le16(p);
191 				err = 0;
192 				break;
193 			}
194 	}
195 
196 done:
197 	write_unlock_bh(&chan_list_lock);
198 	return err;
199 }
200 
201 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
202 {
203 	write_lock_bh(&chan_list_lock);
204 
205 	chan->scid = scid;
206 
207 	write_unlock_bh(&chan_list_lock);
208 
209 	return 0;
210 }
211 
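/* Allocate the first unused dynamic CID on this connection,
 * or 0 if the dynamic range is exhausted. */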
212 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
213 {
214 	u16 cid = L2CAP_CID_DYN_START;
215 
216 	for (; cid < L2CAP_CID_DYN_END; cid++) {
217 		if (!__l2cap_get_chan_by_scid(conn, cid))
218 			return cid;
219 	}
220 
221 	return 0;
222 }
223 
224 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
225 {
226        BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
227 
228        if (!mod_timer(timer, jiffies + timeout))
229 	       chan_hold(chan);
230 }
231 
232 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
233 {
234        BT_DBG("chan %p state %d", chan, chan->state);
235 
236        if (timer_pending(timer) && del_timer(timer))
237 	       chan_put(chan);
238 }
239 
240 static void l2cap_state_change(struct l2cap_chan *chan, int state)
241 {
242 	chan->state = state;
243 	chan->ops->state_change(chan->data, state);
244 }
245 
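/* Channel timer expired: close the channel with an error code derived from
 * its state. If the socket is owned by user space, re-arm and retry later. */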
246 static void l2cap_chan_timeout(unsigned long arg)
247 {
248 	struct l2cap_chan *chan = (struct l2cap_chan *) arg;
249 	struct sock *sk = chan->sk;
250 	int reason;
251 
252 	BT_DBG("chan %p state %d", chan, chan->state);
253 
254 	bh_lock_sock(sk);
255 
256 	if (sock_owned_by_user(sk)) {
257 		/* sk is owned by user. Try again later */
258 		__set_chan_timer(chan, HZ / 5);
259 		bh_unlock_sock(sk);
260 		chan_put(chan);
261 		return;
262 	}
263 
264 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
265 		reason = ECONNREFUSED;
266 	else if (chan->state == BT_CONNECT &&
267 					chan->sec_level != BT_SECURITY_SDP)
268 		reason = ECONNREFUSED;
269 	else
270 		reason = ETIMEDOUT;
271 
272 	l2cap_chan_close(chan, reason);
273 
274 	bh_unlock_sock(sk);
275 
276 	chan->ops->close(chan->data);
277 	chan_put(chan);
278 }
279 
280 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
281 {
282 	struct l2cap_chan *chan;
283 
284 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
285 	if (!chan)
286 		return NULL;
287 
288 	chan->sk = sk;
289 
290 	write_lock_bh(&chan_list_lock);
291 	list_add(&chan->global_l, &chan_list);
292 	write_unlock_bh(&chan_list_lock);
293 
294 	setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
295 
296 	chan->state = BT_OPEN;
297 
298 	atomic_set(&chan->refcnt, 1);
299 
300 	return chan;
301 }
302 
303 void l2cap_chan_destroy(struct l2cap_chan *chan)
304 {
305 	write_lock_bh(&chan_list_lock);
306 	list_del(&chan->global_l);
307 	write_unlock_bh(&chan_list_lock);
308 
309 	chan_put(chan);
310 }
311 
312 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
313 {
314 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
315 			chan->psm, chan->dcid);
316 
317 	conn->disc_reason = 0x13;
318 
319 	chan->conn = conn;
320 
321 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
322 		if (conn->hcon->type == LE_LINK) {
323 			/* LE connection */
324 			chan->omtu = L2CAP_LE_DEFAULT_MTU;
325 			chan->scid = L2CAP_CID_LE_DATA;
326 			chan->dcid = L2CAP_CID_LE_DATA;
327 		} else {
328 			/* Alloc CID for connection-oriented socket */
329 			chan->scid = l2cap_alloc_cid(conn);
330 			chan->omtu = L2CAP_DEFAULT_MTU;
331 		}
332 	} else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
333 		/* Connectionless socket */
334 		chan->scid = L2CAP_CID_CONN_LESS;
335 		chan->dcid = L2CAP_CID_CONN_LESS;
336 		chan->omtu = L2CAP_DEFAULT_MTU;
337 	} else {
338 		/* Raw socket can send/recv signalling messages only */
339 		chan->scid = L2CAP_CID_SIGNALING;
340 		chan->dcid = L2CAP_CID_SIGNALING;
341 		chan->omtu = L2CAP_DEFAULT_MTU;
342 	}
343 
344 	chan_hold(chan);
345 
346 	list_add(&chan->list, &conn->chan_l);
347 }
348 
349 /* Delete channel.
350  * Must be called with the socket locked. */
351 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
352 {
353 	struct sock *sk = chan->sk;
354 	struct l2cap_conn *conn = chan->conn;
355 	struct sock *parent = bt_sk(sk)->parent;
356 
357 	__clear_chan_timer(chan);
358 
359 	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
360 
361 	if (conn) {
362 		/* Delete from channel list */
363 		write_lock_bh(&conn->chan_lock);
364 		list_del(&chan->list);
365 		write_unlock_bh(&conn->chan_lock);
366 		chan_put(chan);
367 
368 		chan->conn = NULL;
369 		hci_conn_put(conn->hcon);
370 	}
371 
372 	l2cap_state_change(chan, BT_CLOSED);
373 	sock_set_flag(sk, SOCK_ZAPPED);
374 
375 	if (err)
376 		sk->sk_err = err;
377 
378 	if (parent) {
379 		bt_accept_unlink(sk);
380 		parent->sk_data_ready(parent, 0);
381 	} else
382 		sk->sk_state_change(sk);
383 
384 	if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
385 			test_bit(CONF_INPUT_DONE, &chan->conf_state)))
386 		return;
387 
388 	skb_queue_purge(&chan->tx_q);
389 
390 	if (chan->mode == L2CAP_MODE_ERTM) {
391 		struct srej_list *l, *tmp;
392 
393 		__clear_retrans_timer(chan);
394 		__clear_monitor_timer(chan);
395 		__clear_ack_timer(chan);
396 
397 		skb_queue_purge(&chan->srej_q);
398 		skb_queue_purge(&chan->busy_q);
399 
400 		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
401 			list_del(&l->list);
402 			kfree(l);
403 		}
404 	}
405 }
406 
407 static void l2cap_chan_cleanup_listen(struct sock *parent)
408 {
409 	struct sock *sk;
410 
411 	BT_DBG("parent %p", parent);
412 
413 	/* Close not yet accepted channels */
414 	while ((sk = bt_accept_dequeue(parent, NULL))) {
415 		struct l2cap_chan *chan = l2cap_pi(sk)->chan;
416 		__clear_chan_timer(chan);
417 		lock_sock(sk);
418 		l2cap_chan_close(chan, ECONNRESET);
419 		release_sock(sk);
420 		chan->ops->close(chan->data);
421 	}
422 }
423 
424 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
425 {
426 	struct l2cap_conn *conn = chan->conn;
427 	struct sock *sk = chan->sk;
428 
429 	BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
430 
431 	switch (chan->state) {
432 	case BT_LISTEN:
433 		l2cap_chan_cleanup_listen(sk);
434 
435 		l2cap_state_change(chan, BT_CLOSED);
436 		sock_set_flag(sk, SOCK_ZAPPED);
437 		break;
438 
439 	case BT_CONNECTED:
440 	case BT_CONFIG:
441 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
442 					conn->hcon->type == ACL_LINK) {
443 			__clear_chan_timer(chan);
444 			__set_chan_timer(chan, sk->sk_sndtimeo);
445 			l2cap_send_disconn_req(conn, chan, reason);
446 		} else
447 			l2cap_chan_del(chan, reason);
448 		break;
449 
450 	case BT_CONNECT2:
451 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
452 					conn->hcon->type == ACL_LINK) {
453 			struct l2cap_conn_rsp rsp;
454 			__u16 result;
455 
456 			if (bt_sk(sk)->defer_setup)
457 				result = L2CAP_CR_SEC_BLOCK;
458 			else
459 				result = L2CAP_CR_BAD_PSM;
460 			l2cap_state_change(chan, BT_DISCONN);
461 
462 			rsp.scid   = cpu_to_le16(chan->dcid);
463 			rsp.dcid   = cpu_to_le16(chan->scid);
464 			rsp.result = cpu_to_le16(result);
465 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
466 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
467 							sizeof(rsp), &rsp);
468 		}
469 
470 		l2cap_chan_del(chan, reason);
471 		break;
472 
473 	case BT_CONNECT:
474 	case BT_DISCONN:
475 		l2cap_chan_del(chan, reason);
476 		break;
477 
478 	default:
479 		sock_set_flag(sk, SOCK_ZAPPED);
480 		break;
481 	}
482 }
483 
484 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
485 {
486 	if (chan->chan_type == L2CAP_CHAN_RAW) {
487 		switch (chan->sec_level) {
488 		case BT_SECURITY_HIGH:
489 			return HCI_AT_DEDICATED_BONDING_MITM;
490 		case BT_SECURITY_MEDIUM:
491 			return HCI_AT_DEDICATED_BONDING;
492 		default:
493 			return HCI_AT_NO_BONDING;
494 		}
495 	} else if (chan->psm == cpu_to_le16(0x0001)) {
496 		if (chan->sec_level == BT_SECURITY_LOW)
497 			chan->sec_level = BT_SECURITY_SDP;
498 
499 		if (chan->sec_level == BT_SECURITY_HIGH)
500 			return HCI_AT_NO_BONDING_MITM;
501 		else
502 			return HCI_AT_NO_BONDING;
503 	} else {
504 		switch (chan->sec_level) {
505 		case BT_SECURITY_HIGH:
506 			return HCI_AT_GENERAL_BONDING_MITM;
507 		case BT_SECURITY_MEDIUM:
508 			return HCI_AT_GENERAL_BONDING;
509 		default:
510 			return HCI_AT_NO_BONDING;
511 		}
512 	}
513 }
514 
515 /* Service level security */
516 static inline int l2cap_check_security(struct l2cap_chan *chan)
517 {
518 	struct l2cap_conn *conn = chan->conn;
519 	__u8 auth_type;
520 
521 	auth_type = l2cap_get_auth_type(chan);
522 
523 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
524 }
525 
526 static u8 l2cap_get_ident(struct l2cap_conn *conn)
527 {
528 	u8 id;
529 
530 	/* Get next available identifier.
531 	 *    1 - 128 are used by kernel.
532 	 *  129 - 199 are reserved.
533 	 *  200 - 254 are used by utilities like l2ping, etc.
534 	 */
535 
536 	spin_lock_bh(&conn->lock);
537 
538 	if (++conn->tx_ident > 128)
539 		conn->tx_ident = 1;
540 
541 	id = conn->tx_ident;
542 
543 	spin_unlock_bh(&conn->lock);
544 
545 	return id;
546 }
547 
548 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
549 {
550 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
551 	u8 flags;
552 
553 	BT_DBG("code 0x%2.2x", code);
554 
555 	if (!skb)
556 		return;
557 
558 	if (lmp_no_flush_capable(conn->hcon->hdev))
559 		flags = ACL_START_NO_FLUSH;
560 	else
561 		flags = ACL_START;
562 
563 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
564 
565 	hci_send_acl(conn->hcon, skb, flags);
566 }
567 
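/* Build and send a supervisory frame (S-frame) with the given control field,
 * setting the F and P bits as needed and appending the FCS when CRC16 is in use. */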
568 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
569 {
570 	struct sk_buff *skb;
571 	struct l2cap_hdr *lh;
572 	struct l2cap_conn *conn = chan->conn;
573 	int count, hlen = L2CAP_HDR_SIZE + 2;
574 	u8 flags;
575 
576 	if (chan->state != BT_CONNECTED)
577 		return;
578 
579 	if (chan->fcs == L2CAP_FCS_CRC16)
580 		hlen += 2;
581 
582 	BT_DBG("chan %p, control 0x%2.2x", chan, control);
583 
584 	count = min_t(unsigned int, conn->mtu, hlen);
585 	control |= L2CAP_CTRL_FRAME_TYPE;
586 
587 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
588 		control |= L2CAP_CTRL_FINAL;
589 
590 	if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
591 		control |= L2CAP_CTRL_POLL;
592 
593 	skb = bt_skb_alloc(count, GFP_ATOMIC);
594 	if (!skb)
595 		return;
596 
597 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
598 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
599 	lh->cid = cpu_to_le16(chan->dcid);
600 	put_unaligned_le16(control, skb_put(skb, 2));
601 
602 	if (chan->fcs == L2CAP_FCS_CRC16) {
603 		u16 fcs = crc16(0, (u8 *)lh, count - 2);
604 		put_unaligned_le16(fcs, skb_put(skb, 2));
605 	}
606 
607 	if (lmp_no_flush_capable(conn->hcon->hdev))
608 		flags = ACL_START_NO_FLUSH;
609 	else
610 		flags = ACL_START;
611 
612 	bt_cb(skb)->force_active = chan->force_active;
613 
614 	hci_send_acl(chan->conn->hcon, skb, flags);
615 }
616 
617 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
618 {
619 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
620 		control |= L2CAP_SUPER_RCV_NOT_READY;
621 		set_bit(CONN_RNR_SENT, &chan->conn_state);
622 	} else
623 		control |= L2CAP_SUPER_RCV_READY;
624 
625 	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
626 
627 	l2cap_send_sframe(chan, control);
628 }
629 
630 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
631 {
632 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
633 }
634 
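/* Start channel establishment: if the remote feature mask is already known,
 * send a Connection Request once security is satisfied; otherwise query the
 * feature mask first. */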
635 static void l2cap_do_start(struct l2cap_chan *chan)
636 {
637 	struct l2cap_conn *conn = chan->conn;
638 
639 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
640 		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
641 			return;
642 
643 		if (l2cap_check_security(chan) &&
644 				__l2cap_no_conn_pending(chan)) {
645 			struct l2cap_conn_req req;
646 			req.scid = cpu_to_le16(chan->scid);
647 			req.psm  = chan->psm;
648 
649 			chan->ident = l2cap_get_ident(conn);
650 			set_bit(CONF_CONNECT_PEND, &chan->conf_state);
651 
652 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
653 							sizeof(req), &req);
654 		}
655 	} else {
656 		struct l2cap_info_req req;
657 		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
658 
659 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
660 		conn->info_ident = l2cap_get_ident(conn);
661 
662 		mod_timer(&conn->info_timer, jiffies +
663 					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
664 
665 		l2cap_send_cmd(conn, conn->info_ident,
666 					L2CAP_INFO_REQ, sizeof(req), &req);
667 	}
668 }
669 
670 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
671 {
672 	u32 local_feat_mask = l2cap_feat_mask;
673 	if (!disable_ertm)
674 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
675 
676 	switch (mode) {
677 	case L2CAP_MODE_ERTM:
678 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
679 	case L2CAP_MODE_STREAMING:
680 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
681 	default:
682 		return 0x00;
683 	}
684 }
685 
686 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
687 {
688 	struct sock *sk;
689 	struct l2cap_disconn_req req;
690 
691 	if (!conn)
692 		return;
693 
694 	sk = chan->sk;
695 
696 	if (chan->mode == L2CAP_MODE_ERTM) {
697 		__clear_retrans_timer(chan);
698 		__clear_monitor_timer(chan);
699 		__clear_ack_timer(chan);
700 	}
701 
702 	req.dcid = cpu_to_le16(chan->dcid);
703 	req.scid = cpu_to_le16(chan->scid);
704 	l2cap_send_cmd(conn, l2cap_get_ident(conn),
705 			L2CAP_DISCONN_REQ, sizeof(req), &req);
706 
707 	l2cap_state_change(chan, BT_DISCONN);
708 	sk->sk_err = err;
709 }
710 
711 /* ---- L2CAP connections ---- */
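/* Walk all channels on the connection and push their setup forward:
 * send pending Connection Requests and Responses once security allows it. */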
712 static void l2cap_conn_start(struct l2cap_conn *conn)
713 {
714 	struct l2cap_chan *chan, *tmp;
715 
716 	BT_DBG("conn %p", conn);
717 
718 	read_lock(&conn->chan_lock);
719 
720 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
721 		struct sock *sk = chan->sk;
722 
723 		bh_lock_sock(sk);
724 
725 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
726 			bh_unlock_sock(sk);
727 			continue;
728 		}
729 
730 		if (chan->state == BT_CONNECT) {
731 			struct l2cap_conn_req req;
732 
733 			if (!l2cap_check_security(chan) ||
734 					!__l2cap_no_conn_pending(chan)) {
735 				bh_unlock_sock(sk);
736 				continue;
737 			}
738 
739 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
740 					&& test_bit(CONF_STATE2_DEVICE,
741 					&chan->conf_state)) {
742 				/* l2cap_chan_close() calls list_del(chan)
743 				 * so release the lock */
744 				read_unlock_bh(&conn->chan_lock);
745 				l2cap_chan_close(chan, ECONNRESET);
746 				read_lock_bh(&conn->chan_lock);
747 				bh_unlock_sock(sk);
748 				continue;
749 			}
750 
751 			req.scid = cpu_to_le16(chan->scid);
752 			req.psm  = chan->psm;
753 
754 			chan->ident = l2cap_get_ident(conn);
755 			set_bit(CONF_CONNECT_PEND, &chan->conf_state);
756 
757 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
758 							sizeof(req), &req);
759 
760 		} else if (chan->state == BT_CONNECT2) {
761 			struct l2cap_conn_rsp rsp;
762 			char buf[128];
763 			rsp.scid = cpu_to_le16(chan->dcid);
764 			rsp.dcid = cpu_to_le16(chan->scid);
765 
766 			if (l2cap_check_security(chan)) {
767 				if (bt_sk(sk)->defer_setup) {
768 					struct sock *parent = bt_sk(sk)->parent;
769 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
770 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
771 					parent->sk_data_ready(parent, 0);
772 
773 				} else {
774 					l2cap_state_change(chan, BT_CONFIG);
775 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
776 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
777 				}
778 			} else {
779 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
780 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
781 			}
782 
783 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
784 							sizeof(rsp), &rsp);
785 
786 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
787 					rsp.result != L2CAP_CR_SUCCESS) {
788 				bh_unlock_sock(sk);
789 				continue;
790 			}
791 
792 			set_bit(CONF_REQ_SENT, &chan->conf_state);
793 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
794 						l2cap_build_conf_req(chan, buf), buf);
795 			chan->num_conf_req++;
796 		}
797 
798 		bh_unlock_sock(sk);
799 	}
800 
801 	read_unlock(&conn->chan_lock);
802 }
803 
804 /* Find channel with given CID and source bdaddr.
805  * Returns closest match.
806  */
807 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
808 {
809 	struct l2cap_chan *c, *c1 = NULL;
810 
811 	read_lock(&chan_list_lock);
812 
813 	list_for_each_entry(c, &chan_list, global_l) {
814 		struct sock *sk = c->sk;
815 
816 		if (state && c->state != state)
817 			continue;
818 
819 		if (c->scid == cid) {
820 			/* Exact match. */
821 			if (!bacmp(&bt_sk(sk)->src, src)) {
822 				read_unlock(&chan_list_lock);
823 				return c;
824 			}
825 
826 			/* Closest match */
827 			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
828 				c1 = c;
829 		}
830 	}
831 
832 	read_unlock(&chan_list_lock);
833 
834 	return c1;
835 }
836 
837 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
838 {
839 	struct sock *parent, *sk;
840 	struct l2cap_chan *chan, *pchan;
841 
842 	BT_DBG("");
843 
844 	/* Check if we have a socket listening on this CID */
845 	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
846 							conn->src);
847 	if (!pchan)
848 		return;
849 
850 	parent = pchan->sk;
851 
852 	bh_lock_sock(parent);
853 
854 	/* Check for backlog size */
855 	if (sk_acceptq_is_full(parent)) {
856 		BT_DBG("backlog full %d", parent->sk_ack_backlog);
857 		goto clean;
858 	}
859 
860 	chan = pchan->ops->new_connection(pchan->data);
861 	if (!chan)
862 		goto clean;
863 
864 	sk = chan->sk;
865 
866 	write_lock_bh(&conn->chan_lock);
867 
868 	hci_conn_hold(conn->hcon);
869 
870 	bacpy(&bt_sk(sk)->src, conn->src);
871 	bacpy(&bt_sk(sk)->dst, conn->dst);
872 
873 	bt_accept_enqueue(parent, sk);
874 
875 	__l2cap_chan_add(conn, chan);
876 
877 	__set_chan_timer(chan, sk->sk_sndtimeo);
878 
879 	l2cap_state_change(chan, BT_CONNECTED);
880 	parent->sk_data_ready(parent, 0);
881 
882 	write_unlock_bh(&conn->chan_lock);
883 
884 clean:
885 	bh_unlock_sock(parent);
886 }
887 
888 static void l2cap_chan_ready(struct sock *sk)
889 {
890 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
891 	struct sock *parent = bt_sk(sk)->parent;
892 
893 	BT_DBG("sk %p, parent %p", sk, parent);
894 
895 	chan->conf_state = 0;
896 	__clear_chan_timer(chan);
897 
898 	l2cap_state_change(chan, BT_CONNECTED);
899 	sk->sk_state_change(sk);
900 
901 	if (parent)
902 		parent->sk_data_ready(parent, 0);
903 }
904 
905 static void l2cap_conn_ready(struct l2cap_conn *conn)
906 {
907 	struct l2cap_chan *chan;
908 
909 	BT_DBG("conn %p", conn);
910 
911 	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
912 		l2cap_le_conn_ready(conn);
913 
914 	read_lock(&conn->chan_lock);
915 
916 	list_for_each_entry(chan, &conn->chan_l, list) {
917 		struct sock *sk = chan->sk;
918 
919 		bh_lock_sock(sk);
920 
921 		if (conn->hcon->type == LE_LINK) {
922 			if (smp_conn_security(conn, chan->sec_level))
923 				l2cap_chan_ready(sk);
924 
925 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
926 			__clear_chan_timer(chan);
927 			l2cap_state_change(chan, BT_CONNECTED);
928 			sk->sk_state_change(sk);
929 
930 		} else if (chan->state == BT_CONNECT)
931 			l2cap_do_start(chan);
932 
933 		bh_unlock_sock(sk);
934 	}
935 
936 	read_unlock(&conn->chan_lock);
937 }
938 
939 /* Notify sockets that we cannot guarantee reliability anymore */
940 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
941 {
942 	struct l2cap_chan *chan;
943 
944 	BT_DBG("conn %p", conn);
945 
946 	read_lock(&conn->chan_lock);
947 
948 	list_for_each_entry(chan, &conn->chan_l, list) {
949 		struct sock *sk = chan->sk;
950 
951 		if (chan->force_reliable)
952 			sk->sk_err = err;
953 	}
954 
955 	read_unlock(&conn->chan_lock);
956 }
957 
958 static void l2cap_info_timeout(unsigned long arg)
959 {
960 	struct l2cap_conn *conn = (void *) arg;
961 
962 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
963 	conn->info_ident = 0;
964 
965 	l2cap_conn_start(conn);
966 }
967 
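/* Tear down the L2CAP connection: delete every channel, stop the pending
 * timers and free the state attached to the HCI connection. */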
968 static void l2cap_conn_del(struct hci_conn *hcon, int err)
969 {
970 	struct l2cap_conn *conn = hcon->l2cap_data;
971 	struct l2cap_chan *chan, *l;
972 	struct sock *sk;
973 
974 	if (!conn)
975 		return;
976 
977 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
978 
979 	kfree_skb(conn->rx_skb);
980 
981 	/* Kill channels */
982 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
983 		sk = chan->sk;
984 		bh_lock_sock(sk);
985 		l2cap_chan_del(chan, err);
986 		bh_unlock_sock(sk);
987 		chan->ops->close(chan->data);
988 	}
989 
990 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
991 		del_timer_sync(&conn->info_timer);
992 
993 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
994 		del_timer(&conn->security_timer);
995 
996 	hcon->l2cap_data = NULL;
997 	kfree(conn);
998 }
999 
1000 static void security_timeout(unsigned long arg)
1001 {
1002 	struct l2cap_conn *conn = (void *) arg;
1003 
1004 	l2cap_conn_del(conn->hcon, ETIMEDOUT);
1005 }
1006 
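/* Allocate and initialise L2CAP connection state for an HCI connection,
 * choosing the MTU and the timer (security vs. info) based on the link type. */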
1007 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1008 {
1009 	struct l2cap_conn *conn = hcon->l2cap_data;
1010 
1011 	if (conn || status)
1012 		return conn;
1013 
1014 	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1015 	if (!conn)
1016 		return NULL;
1017 
1018 	hcon->l2cap_data = conn;
1019 	conn->hcon = hcon;
1020 
1021 	BT_DBG("hcon %p conn %p", hcon, conn);
1022 
1023 	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1024 		conn->mtu = hcon->hdev->le_mtu;
1025 	else
1026 		conn->mtu = hcon->hdev->acl_mtu;
1027 
1028 	conn->src = &hcon->hdev->bdaddr;
1029 	conn->dst = &hcon->dst;
1030 
1031 	conn->feat_mask = 0;
1032 
1033 	spin_lock_init(&conn->lock);
1034 	rwlock_init(&conn->chan_lock);
1035 
1036 	INIT_LIST_HEAD(&conn->chan_l);
1037 
1038 	if (hcon->type == LE_LINK)
1039 		setup_timer(&conn->security_timer, security_timeout,
1040 						(unsigned long) conn);
1041 	else
1042 		setup_timer(&conn->info_timer, l2cap_info_timeout,
1043 						(unsigned long) conn);
1044 
1045 	conn->disc_reason = 0x13;
1046 
1047 	return conn;
1048 }
1049 
1050 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1051 {
1052 	write_lock_bh(&conn->chan_lock);
1053 	__l2cap_chan_add(conn, chan);
1054 	write_unlock_bh(&conn->chan_lock);
1055 }
1056 
1057 /* ---- Socket interface ---- */
1058 
1059 /* Find channel with given PSM and source bdaddr.
1060  * Returns closest match.
1061  */
1062 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1063 {
1064 	struct l2cap_chan *c, *c1 = NULL;
1065 
1066 	read_lock(&chan_list_lock);
1067 
1068 	list_for_each_entry(c, &chan_list, global_l) {
1069 		struct sock *sk = c->sk;
1070 
1071 		if (state && c->state != state)
1072 			continue;
1073 
1074 		if (c->psm == psm) {
1075 			/* Exact match. */
1076 			if (!bacmp(&bt_sk(sk)->src, src)) {
1077 				read_unlock(&chan_list_lock);
1078 				return c;
1079 			}
1080 
1081 			/* Closest match */
1082 			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1083 				c1 = c;
1084 		}
1085 	}
1086 
1087 	read_unlock(&chan_list_lock);
1088 
1089 	return c1;
1090 }
1091 
1092 int l2cap_chan_connect(struct l2cap_chan *chan)
1093 {
1094 	struct sock *sk = chan->sk;
1095 	bdaddr_t *src = &bt_sk(sk)->src;
1096 	bdaddr_t *dst = &bt_sk(sk)->dst;
1097 	struct l2cap_conn *conn;
1098 	struct hci_conn *hcon;
1099 	struct hci_dev *hdev;
1100 	__u8 auth_type;
1101 	int err;
1102 
1103 	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1104 							chan->psm);
1105 
1106 	hdev = hci_get_route(dst, src);
1107 	if (!hdev)
1108 		return -EHOSTUNREACH;
1109 
1110 	hci_dev_lock_bh(hdev);
1111 
1112 	auth_type = l2cap_get_auth_type(chan);
1113 
1114 	if (chan->dcid == L2CAP_CID_LE_DATA)
1115 		hcon = hci_connect(hdev, LE_LINK, dst,
1116 					chan->sec_level, auth_type);
1117 	else
1118 		hcon = hci_connect(hdev, ACL_LINK, dst,
1119 					chan->sec_level, auth_type);
1120 
1121 	if (IS_ERR(hcon)) {
1122 		err = PTR_ERR(hcon);
1123 		goto done;
1124 	}
1125 
1126 	conn = l2cap_conn_add(hcon, 0);
1127 	if (!conn) {
1128 		hci_conn_put(hcon);
1129 		err = -ENOMEM;
1130 		goto done;
1131 	}
1132 
1133 	/* Update source addr of the socket */
1134 	bacpy(src, conn->src);
1135 
1136 	l2cap_chan_add(conn, chan);
1137 
1138 	l2cap_state_change(chan, BT_CONNECT);
1139 	__set_chan_timer(chan, sk->sk_sndtimeo);
1140 
1141 	if (hcon->state == BT_CONNECTED) {
1142 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1143 			__clear_chan_timer(chan);
1144 			if (l2cap_check_security(chan))
1145 				l2cap_state_change(chan, BT_CONNECTED);
1146 		} else
1147 			l2cap_do_start(chan);
1148 	}
1149 
1150 	err = 0;
1151 
1152 done:
1153 	hci_dev_unlock_bh(hdev);
1154 	hci_dev_put(hdev);
1155 	return err;
1156 }
1157 
1158 int __l2cap_wait_ack(struct sock *sk)
1159 {
1160 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1161 	DECLARE_WAITQUEUE(wait, current);
1162 	int err = 0;
1163 	int timeo = HZ/5;
1164 
1165 	add_wait_queue(sk_sleep(sk), &wait);
1166 	while ((chan->unacked_frames > 0 && chan->conn)) {
1167 		set_current_state(TASK_INTERRUPTIBLE);
1168 
1169 		if (!timeo)
1170 			timeo = HZ/5;
1171 
1172 		if (signal_pending(current)) {
1173 			err = sock_intr_errno(timeo);
1174 			break;
1175 		}
1176 
1177 		release_sock(sk);
1178 		timeo = schedule_timeout(timeo);
1179 		lock_sock(sk);
1180 
1181 		err = sock_error(sk);
1182 		if (err)
1183 			break;
1184 	}
1185 	set_current_state(TASK_RUNNING);
1186 	remove_wait_queue(sk_sleep(sk), &wait);
1187 	return err;
1188 }
1189 
1190 static void l2cap_monitor_timeout(unsigned long arg)
1191 {
1192 	struct l2cap_chan *chan = (void *) arg;
1193 	struct sock *sk = chan->sk;
1194 
1195 	BT_DBG("chan %p", chan);
1196 
1197 	bh_lock_sock(sk);
1198 	if (chan->retry_count >= chan->remote_max_tx) {
1199 		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1200 		bh_unlock_sock(sk);
1201 		return;
1202 	}
1203 
1204 	chan->retry_count++;
1205 	__set_monitor_timer(chan);
1206 
1207 	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1208 	bh_unlock_sock(sk);
1209 }
1210 
1211 static void l2cap_retrans_timeout(unsigned long arg)
1212 {
1213 	struct l2cap_chan *chan = (void *) arg;
1214 	struct sock *sk = chan->sk;
1215 
1216 	BT_DBG("chan %p", chan);
1217 
1218 	bh_lock_sock(sk);
1219 	chan->retry_count = 1;
1220 	__set_monitor_timer(chan);
1221 
1222 	set_bit(CONN_WAIT_F, &chan->conn_state);
1223 
1224 	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1225 	bh_unlock_sock(sk);
1226 }
1227 
1228 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1229 {
1230 	struct sk_buff *skb;
1231 
1232 	while ((skb = skb_peek(&chan->tx_q)) &&
1233 			chan->unacked_frames) {
1234 		if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1235 			break;
1236 
1237 		skb = skb_dequeue(&chan->tx_q);
1238 		kfree_skb(skb);
1239 
1240 		chan->unacked_frames--;
1241 	}
1242 
1243 	if (!chan->unacked_frames)
1244 		__clear_retrans_timer(chan);
1245 }
1246 
1247 void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1248 {
1249 	struct hci_conn *hcon = chan->conn->hcon;
1250 	u16 flags;
1251 
1252 	BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1253 
1254 	if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1255 		flags = ACL_START_NO_FLUSH;
1256 	else
1257 		flags = ACL_START;
1258 
1259 	bt_cb(skb)->force_active = chan->force_active;
1260 	hci_send_acl(hcon, skb, flags);
1261 }
1262 
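/* Streaming mode: flush the transmit queue, stamping each frame with the
 * next TxSeq (and FCS if enabled) before sending it. */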
1263 void l2cap_streaming_send(struct l2cap_chan *chan)
1264 {
1265 	struct sk_buff *skb;
1266 	u16 control, fcs;
1267 
1268 	while ((skb = skb_dequeue(&chan->tx_q))) {
1269 		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1270 		control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1271 		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1272 
1273 		if (chan->fcs == L2CAP_FCS_CRC16) {
1274 			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1275 			put_unaligned_le16(fcs, skb->data + skb->len - 2);
1276 		}
1277 
1278 		l2cap_do_send(chan, skb);
1279 
1280 		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1281 	}
1282 }
1283 
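/* Retransmit the I-frame with the given TxSeq: clone it from the transmit
 * queue, refresh its ReqSeq, F-bit and FCS, and send it again. */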
1284 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1285 {
1286 	struct sk_buff *skb, *tx_skb;
1287 	u16 control, fcs;
1288 
1289 	skb = skb_peek(&chan->tx_q);
1290 	if (!skb)
1291 		return;
1292 
1293 	do {
1294 		if (bt_cb(skb)->tx_seq == tx_seq)
1295 			break;
1296 
1297 		if (skb_queue_is_last(&chan->tx_q, skb))
1298 			return;
1299 
1300 	} while ((skb = skb_queue_next(&chan->tx_q, skb)));
1301 
1302 	if (chan->remote_max_tx &&
1303 			bt_cb(skb)->retries == chan->remote_max_tx) {
1304 		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1305 		return;
1306 	}
1307 
1308 	tx_skb = skb_clone(skb, GFP_ATOMIC);
1309 	bt_cb(skb)->retries++;
1310 	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1311 	control &= L2CAP_CTRL_SAR;
1312 
1313 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1314 		control |= L2CAP_CTRL_FINAL;
1315 
1316 	control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1317 			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1318 
1319 	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1320 
1321 	if (chan->fcs == L2CAP_FCS_CRC16) {
1322 		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1323 		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1324 	}
1325 
1326 	l2cap_do_send(chan, tx_skb);
1327 }
1328 
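/* ERTM: transmit queued I-frames while the transmit window allows it.
 * Returns the number of frames sent, or a negative error if not connected. */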
1329 int l2cap_ertm_send(struct l2cap_chan *chan)
1330 {
1331 	struct sk_buff *skb, *tx_skb;
1332 	u16 control, fcs;
1333 	int nsent = 0;
1334 
1335 	if (chan->state != BT_CONNECTED)
1336 		return -ENOTCONN;
1337 
1338 	while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1339 
1340 		if (chan->remote_max_tx &&
1341 				bt_cb(skb)->retries == chan->remote_max_tx) {
1342 			l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1343 			break;
1344 		}
1345 
1346 		tx_skb = skb_clone(skb, GFP_ATOMIC);
1347 
1348 		bt_cb(skb)->retries++;
1349 
1350 		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1351 		control &= L2CAP_CTRL_SAR;
1352 
1353 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1354 			control |= L2CAP_CTRL_FINAL;
1355 
1356 		control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1357 				| (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1358 		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1359 
1360 
1361 		if (chan->fcs == L2CAP_FCS_CRC16) {
1362 			fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1363 			put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1364 		}
1365 
1366 		l2cap_do_send(chan, tx_skb);
1367 
1368 		__set_retrans_timer(chan);
1369 
1370 		bt_cb(skb)->tx_seq = chan->next_tx_seq;
1371 		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1372 
1373 		if (bt_cb(skb)->retries == 1)
1374 			chan->unacked_frames++;
1375 
1376 		chan->frames_sent++;
1377 
1378 		if (skb_queue_is_last(&chan->tx_q, skb))
1379 			chan->tx_send_head = NULL;
1380 		else
1381 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1382 
1383 		nsent++;
1384 	}
1385 
1386 	return nsent;
1387 }
1388 
1389 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1390 {
1391 	int ret;
1392 
1393 	if (!skb_queue_empty(&chan->tx_q))
1394 		chan->tx_send_head = chan->tx_q.next;
1395 
1396 	chan->next_tx_seq = chan->expected_ack_seq;
1397 	ret = l2cap_ertm_send(chan);
1398 	return ret;
1399 }
1400 
1401 static void l2cap_send_ack(struct l2cap_chan *chan)
1402 {
1403 	u16 control = 0;
1404 
1405 	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1406 
1407 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1408 		control |= L2CAP_SUPER_RCV_NOT_READY;
1409 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1410 		l2cap_send_sframe(chan, control);
1411 		return;
1412 	}
1413 
1414 	if (l2cap_ertm_send(chan) > 0)
1415 		return;
1416 
1417 	control |= L2CAP_SUPER_RCV_READY;
1418 	l2cap_send_sframe(chan, control);
1419 }
1420 
1421 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1422 {
1423 	struct srej_list *tail;
1424 	u16 control;
1425 
1426 	control = L2CAP_SUPER_SELECT_REJECT;
1427 	control |= L2CAP_CTRL_FINAL;
1428 
1429 	tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1430 	control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1431 
1432 	l2cap_send_sframe(chan, control);
1433 }
1434 
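/* Copy user data from the message iovec into the skb, allocating
 * continuation fragments when the payload exceeds the HCI MTU. */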
1435 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1436 {
1437 	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1438 	struct sk_buff **frag;
1439 	int err, sent = 0;
1440 
1441 	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1442 		return -EFAULT;
1443 
1444 	sent += count;
1445 	len  -= count;
1446 
1447 	/* Continuation fragments (no L2CAP header) */
1448 	frag = &skb_shinfo(skb)->frag_list;
1449 	while (len) {
1450 		count = min_t(unsigned int, conn->mtu, len);
1451 
1452 		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1453 		if (!*frag)
1454 			return err;
1455 		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1456 			return -EFAULT;
1457 
1458 		sent += count;
1459 		len  -= count;
1460 
1461 		frag = &(*frag)->next;
1462 	}
1463 
1464 	return sent;
1465 }
1466 
1467 struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1468 {
1469 	struct sock *sk = chan->sk;
1470 	struct l2cap_conn *conn = chan->conn;
1471 	struct sk_buff *skb;
1472 	int err, count, hlen = L2CAP_HDR_SIZE + 2;
1473 	struct l2cap_hdr *lh;
1474 
1475 	BT_DBG("sk %p len %d", sk, (int)len);
1476 
1477 	count = min_t(unsigned int, (conn->mtu - hlen), len);
1478 	skb = bt_skb_send_alloc(sk, count + hlen,
1479 			msg->msg_flags & MSG_DONTWAIT, &err);
1480 	if (!skb)
1481 		return ERR_PTR(err);
1482 
1483 	/* Create L2CAP header */
1484 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1485 	lh->cid = cpu_to_le16(chan->dcid);
1486 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1487 	put_unaligned_le16(chan->psm, skb_put(skb, 2));
1488 
1489 	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1490 	if (unlikely(err < 0)) {
1491 		kfree_skb(skb);
1492 		return ERR_PTR(err);
1493 	}
1494 	return skb;
1495 }
1496 
1497 struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1498 {
1499 	struct sock *sk = chan->sk;
1500 	struct l2cap_conn *conn = chan->conn;
1501 	struct sk_buff *skb;
1502 	int err, count, hlen = L2CAP_HDR_SIZE;
1503 	struct l2cap_hdr *lh;
1504 
1505 	BT_DBG("sk %p len %d", sk, (int)len);
1506 
1507 	count = min_t(unsigned int, (conn->mtu - hlen), len);
1508 	skb = bt_skb_send_alloc(sk, count + hlen,
1509 			msg->msg_flags & MSG_DONTWAIT, &err);
1510 	if (!skb)
1511 		return ERR_PTR(err);
1512 
1513 	/* Create L2CAP header */
1514 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1515 	lh->cid = cpu_to_le16(chan->dcid);
1516 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1517 
1518 	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1519 	if (unlikely(err < 0)) {
1520 		kfree_skb(skb);
1521 		return ERR_PTR(err);
1522 	}
1523 	return skb;
1524 }
1525 
1526 struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1527 {
1528 	struct sock *sk = chan->sk;
1529 	struct l2cap_conn *conn = chan->conn;
1530 	struct sk_buff *skb;
1531 	int err, count, hlen = L2CAP_HDR_SIZE + 2;
1532 	struct l2cap_hdr *lh;
1533 
1534 	BT_DBG("sk %p len %d", sk, (int)len);
1535 
1536 	if (!conn)
1537 		return ERR_PTR(-ENOTCONN);
1538 
1539 	if (sdulen)
1540 		hlen += 2;
1541 
1542 	if (chan->fcs == L2CAP_FCS_CRC16)
1543 		hlen += 2;
1544 
1545 	count = min_t(unsigned int, (conn->mtu - hlen), len);
1546 	skb = bt_skb_send_alloc(sk, count + hlen,
1547 			msg->msg_flags & MSG_DONTWAIT, &err);
1548 	if (!skb)
1549 		return ERR_PTR(err);
1550 
1551 	/* Create L2CAP header */
1552 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1553 	lh->cid = cpu_to_le16(chan->dcid);
1554 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1555 	put_unaligned_le16(control, skb_put(skb, 2));
1556 	if (sdulen)
1557 		put_unaligned_le16(sdulen, skb_put(skb, 2));
1558 
1559 	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1560 	if (unlikely(err < 0)) {
1561 		kfree_skb(skb);
1562 		return ERR_PTR(err);
1563 	}
1564 
1565 	if (chan->fcs == L2CAP_FCS_CRC16)
1566 		put_unaligned_le16(0, skb_put(skb, 2));
1567 
1568 	bt_cb(skb)->retries = 0;
1569 	return skb;
1570 }
1571 
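/* Segment an SDU larger than the remote MPS into Start/Continue/End PDUs
 * and append them to the transmit queue. Returns the total bytes queued. */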
1572 int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1573 {
1574 	struct sk_buff *skb;
1575 	struct sk_buff_head sar_queue;
1576 	u16 control;
1577 	size_t size = 0;
1578 
1579 	skb_queue_head_init(&sar_queue);
1580 	control = L2CAP_SDU_START;
1581 	skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1582 	if (IS_ERR(skb))
1583 		return PTR_ERR(skb);
1584 
1585 	__skb_queue_tail(&sar_queue, skb);
1586 	len -= chan->remote_mps;
1587 	size += chan->remote_mps;
1588 
1589 	while (len > 0) {
1590 		size_t buflen;
1591 
1592 		if (len > chan->remote_mps) {
1593 			control = L2CAP_SDU_CONTINUE;
1594 			buflen = chan->remote_mps;
1595 		} else {
1596 			control = L2CAP_SDU_END;
1597 			buflen = len;
1598 		}
1599 
1600 		skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1601 		if (IS_ERR(skb)) {
1602 			skb_queue_purge(&sar_queue);
1603 			return PTR_ERR(skb);
1604 		}
1605 
1606 		__skb_queue_tail(&sar_queue, skb);
1607 		len -= buflen;
1608 		size += buflen;
1609 	}
1610 	skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1611 	if (chan->tx_send_head == NULL)
1612 		chan->tx_send_head = sar_queue.next;
1613 
1614 	return size;
1615 }
1616 
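/* Entry point for outgoing data: build PDUs according to the channel mode
 * (connectionless, Basic, ERTM or Streaming) and hand them to the HCI layer. */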
1617 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1618 {
1619 	struct sk_buff *skb;
1620 	u16 control;
1621 	int err;
1622 
1623 	/* Connectionless channel */
1624 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1625 		skb = l2cap_create_connless_pdu(chan, msg, len);
1626 		if (IS_ERR(skb))
1627 			return PTR_ERR(skb);
1628 
1629 		l2cap_do_send(chan, skb);
1630 		return len;
1631 	}
1632 
1633 	switch (chan->mode) {
1634 	case L2CAP_MODE_BASIC:
1635 		/* Check outgoing MTU */
1636 		if (len > chan->omtu)
1637 			return -EMSGSIZE;
1638 
1639 		/* Create a basic PDU */
1640 		skb = l2cap_create_basic_pdu(chan, msg, len);
1641 		if (IS_ERR(skb))
1642 			return PTR_ERR(skb);
1643 
1644 		l2cap_do_send(chan, skb);
1645 		err = len;
1646 		break;
1647 
1648 	case L2CAP_MODE_ERTM:
1649 	case L2CAP_MODE_STREAMING:
1650 		/* Entire SDU fits into one PDU */
1651 		if (len <= chan->remote_mps) {
1652 			control = L2CAP_SDU_UNSEGMENTED;
1653 			skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1654 									0);
1655 			if (IS_ERR(skb))
1656 				return PTR_ERR(skb);
1657 
1658 			__skb_queue_tail(&chan->tx_q, skb);
1659 
1660 			if (chan->tx_send_head == NULL)
1661 				chan->tx_send_head = skb;
1662 
1663 		} else {
1664 			/* Segment SDU into multiple PDUs */
1665 			err = l2cap_sar_segment_sdu(chan, msg, len);
1666 			if (err < 0)
1667 				return err;
1668 		}
1669 
1670 		if (chan->mode == L2CAP_MODE_STREAMING) {
1671 			l2cap_streaming_send(chan);
1672 			err = len;
1673 			break;
1674 		}
1675 
1676 		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1677 				test_bit(CONN_WAIT_F, &chan->conn_state)) {
1678 			err = len;
1679 			break;
1680 		}
1681 
1682 		err = l2cap_ertm_send(chan);
1683 		if (err >= 0)
1684 			err = len;
1685 
1686 		break;
1687 
1688 	default:
1689 		BT_DBG("bad mode %1.1x", chan->mode);
1690 		err = -EBADFD;
1691 	}
1692 
1693 	return err;
1694 }
1695 
1696 /* Copy frame to all raw sockets on that connection */
1697 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1698 {
1699 	struct sk_buff *nskb;
1700 	struct l2cap_chan *chan;
1701 
1702 	BT_DBG("conn %p", conn);
1703 
1704 	read_lock(&conn->chan_lock);
1705 	list_for_each_entry(chan, &conn->chan_l, list) {
1706 		struct sock *sk = chan->sk;
1707 		if (chan->chan_type != L2CAP_CHAN_RAW)
1708 			continue;
1709 
1710 		/* Don't send frame to the socket it came from */
1711 		if (skb->sk == sk)
1712 			continue;
1713 		nskb = skb_clone(skb, GFP_ATOMIC);
1714 		if (!nskb)
1715 			continue;
1716 
1717 		if (chan->ops->recv(chan->data, nskb))
1718 			kfree_skb(nskb);
1719 	}
1720 	read_unlock(&conn->chan_lock);
1721 }
1722 
1723 /* ---- L2CAP signalling commands ---- */
1724 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1725 				u8 code, u8 ident, u16 dlen, void *data)
1726 {
1727 	struct sk_buff *skb, **frag;
1728 	struct l2cap_cmd_hdr *cmd;
1729 	struct l2cap_hdr *lh;
1730 	int len, count;
1731 
1732 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1733 			conn, code, ident, dlen);
1734 
1735 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1736 	count = min_t(unsigned int, conn->mtu, len);
1737 
1738 	skb = bt_skb_alloc(count, GFP_ATOMIC);
1739 	if (!skb)
1740 		return NULL;
1741 
1742 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1743 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1744 
1745 	if (conn->hcon->type == LE_LINK)
1746 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1747 	else
1748 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1749 
1750 	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1751 	cmd->code  = code;
1752 	cmd->ident = ident;
1753 	cmd->len   = cpu_to_le16(dlen);
1754 
1755 	if (dlen) {
1756 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1757 		memcpy(skb_put(skb, count), data, count);
1758 		data += count;
1759 	}
1760 
1761 	len -= skb->len;
1762 
1763 	/* Continuation fragments (no L2CAP header) */
1764 	frag = &skb_shinfo(skb)->frag_list;
1765 	while (len) {
1766 		count = min_t(unsigned int, conn->mtu, len);
1767 
1768 		*frag = bt_skb_alloc(count, GFP_ATOMIC);
1769 		if (!*frag)
1770 			goto fail;
1771 
1772 		memcpy(skb_put(*frag, count), data, count);
1773 
1774 		len  -= count;
1775 		data += count;
1776 
1777 		frag = &(*frag)->next;
1778 	}
1779 
1780 	return skb;
1781 
1782 fail:
1783 	kfree_skb(skb);
1784 	return NULL;
1785 }
1786 
1787 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1788 {
1789 	struct l2cap_conf_opt *opt = *ptr;
1790 	int len;
1791 
1792 	len = L2CAP_CONF_OPT_SIZE + opt->len;
1793 	*ptr += len;
1794 
1795 	*type = opt->type;
1796 	*olen = opt->len;
1797 
1798 	switch (opt->len) {
1799 	case 1:
1800 		*val = *((u8 *) opt->val);
1801 		break;
1802 
1803 	case 2:
1804 		*val = get_unaligned_le16(opt->val);
1805 		break;
1806 
1807 	case 4:
1808 		*val = get_unaligned_le32(opt->val);
1809 		break;
1810 
1811 	default:
1812 		*val = (unsigned long) opt->val;
1813 		break;
1814 	}
1815 
1816 	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1817 	return len;
1818 }
1819 
1820 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1821 {
1822 	struct l2cap_conf_opt *opt = *ptr;
1823 
1824 	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1825 
1826 	opt->type = type;
1827 	opt->len  = len;
1828 
1829 	switch (len) {
1830 	case 1:
1831 		*((u8 *) opt->val)  = val;
1832 		break;
1833 
1834 	case 2:
1835 		put_unaligned_le16(val, opt->val);
1836 		break;
1837 
1838 	case 4:
1839 		put_unaligned_le32(val, opt->val);
1840 		break;
1841 
1842 	default:
1843 		memcpy(opt->val, (void *) val, len);
1844 		break;
1845 	}
1846 
1847 	*ptr += L2CAP_CONF_OPT_SIZE + len;
1848 }
1849 
1850 static void l2cap_ack_timeout(unsigned long arg)
1851 {
1852 	struct l2cap_chan *chan = (void *) arg;
1853 
1854 	bh_lock_sock(chan->sk);
1855 	l2cap_send_ack(chan);
1856 	bh_unlock_sock(chan->sk);
1857 }
1858 
1859 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1860 {
1861 	struct sock *sk = chan->sk;
1862 
1863 	chan->expected_ack_seq = 0;
1864 	chan->unacked_frames = 0;
1865 	chan->buffer_seq = 0;
1866 	chan->num_acked = 0;
1867 	chan->frames_sent = 0;
1868 
1869 	setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1870 							(unsigned long) chan);
1871 	setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1872 							(unsigned long) chan);
1873 	setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1874 
1875 	skb_queue_head_init(&chan->srej_q);
1876 	skb_queue_head_init(&chan->busy_q);
1877 
1878 	INIT_LIST_HEAD(&chan->srej_l);
1879 
1880 	INIT_WORK(&chan->busy_work, l2cap_busy_work);
1881 
1882 	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1883 }
1884 
1885 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1886 {
1887 	switch (mode) {
1888 	case L2CAP_MODE_STREAMING:
1889 	case L2CAP_MODE_ERTM:
1890 		if (l2cap_mode_supported(mode, remote_feat_mask))
1891 			return mode;
1892 		/* fall through */
1893 	default:
1894 		return L2CAP_MODE_BASIC;
1895 	}
1896 }
1897 
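/* Build a Configuration Request advertising the local MTU, the selected
 * mode (RFC option) and the FCS preference. */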
1898 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1899 {
1900 	struct l2cap_conf_req *req = data;
1901 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1902 	void *ptr = req->data;
1903 
1904 	BT_DBG("chan %p", chan);
1905 
1906 	if (chan->num_conf_req || chan->num_conf_rsp)
1907 		goto done;
1908 
1909 	switch (chan->mode) {
1910 	case L2CAP_MODE_STREAMING:
1911 	case L2CAP_MODE_ERTM:
1912 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
1913 			break;
1914 
1915 		/* fall through */
1916 	default:
1917 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
1918 		break;
1919 	}
1920 
1921 done:
1922 	if (chan->imtu != L2CAP_DEFAULT_MTU)
1923 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1924 
1925 	switch (chan->mode) {
1926 	case L2CAP_MODE_BASIC:
1927 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1928 				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
1929 			break;
1930 
1931 		rfc.mode            = L2CAP_MODE_BASIC;
1932 		rfc.txwin_size      = 0;
1933 		rfc.max_transmit    = 0;
1934 		rfc.retrans_timeout = 0;
1935 		rfc.monitor_timeout = 0;
1936 		rfc.max_pdu_size    = 0;
1937 
1938 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1939 							(unsigned long) &rfc);
1940 		break;
1941 
1942 	case L2CAP_MODE_ERTM:
1943 		rfc.mode            = L2CAP_MODE_ERTM;
1944 		rfc.txwin_size      = chan->tx_win;
1945 		rfc.max_transmit    = chan->max_tx;
1946 		rfc.retrans_timeout = 0;
1947 		rfc.monitor_timeout = 0;
1948 		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1949 		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1950 			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1951 
1952 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1953 							(unsigned long) &rfc);
1954 
1955 		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1956 			break;
1957 
1958 		if (chan->fcs == L2CAP_FCS_NONE ||
1959 				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1960 			chan->fcs = L2CAP_FCS_NONE;
1961 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1962 		}
1963 		break;
1964 
1965 	case L2CAP_MODE_STREAMING:
1966 		rfc.mode            = L2CAP_MODE_STREAMING;
1967 		rfc.txwin_size      = 0;
1968 		rfc.max_transmit    = 0;
1969 		rfc.retrans_timeout = 0;
1970 		rfc.monitor_timeout = 0;
1971 		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1972 		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1973 			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1974 
1975 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1976 							(unsigned long) &rfc);
1977 
1978 		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1979 			break;
1980 
1981 		if (chan->fcs == L2CAP_FCS_NONE ||
1982 				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1983 			chan->fcs = L2CAP_FCS_NONE;
1984 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1985 		}
1986 		break;
1987 	}
1988 
1989 	req->dcid  = cpu_to_le16(chan->dcid);
1990 	req->flags = cpu_to_le16(0);
1991 
1992 	return ptr - data;
1993 }
1994 
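/* Parse the peer's Configuration Request stored in chan->conf_req and
 * build the matching Configuration Response in 'data'. */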
1995 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1996 {
1997 	struct l2cap_conf_rsp *rsp = data;
1998 	void *ptr = rsp->data;
1999 	void *req = chan->conf_req;
2000 	int len = chan->conf_len;
2001 	int type, hint, olen;
2002 	unsigned long val;
2003 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2004 	u16 mtu = L2CAP_DEFAULT_MTU;
2005 	u16 result = L2CAP_CONF_SUCCESS;
2006 
2007 	BT_DBG("chan %p", chan);
2008 
2009 	while (len >= L2CAP_CONF_OPT_SIZE) {
2010 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2011 
2012 		hint  = type & L2CAP_CONF_HINT;
2013 		type &= L2CAP_CONF_MASK;
2014 
2015 		switch (type) {
2016 		case L2CAP_CONF_MTU:
2017 			mtu = val;
2018 			break;
2019 
2020 		case L2CAP_CONF_FLUSH_TO:
2021 			chan->flush_to = val;
2022 			break;
2023 
2024 		case L2CAP_CONF_QOS:
2025 			break;
2026 
2027 		case L2CAP_CONF_RFC:
2028 			if (olen == sizeof(rfc))
2029 				memcpy(&rfc, (void *) val, olen);
2030 			break;
2031 
2032 		case L2CAP_CONF_FCS:
2033 			if (val == L2CAP_FCS_NONE)
2034 				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2035 
2036 			break;
2037 
2038 		default:
2039 			if (hint)
2040 				break;
2041 
2042 			result = L2CAP_CONF_UNKNOWN;
2043 			*((u8 *) ptr++) = type;
2044 			break;
2045 		}
2046 	}
2047 
2048 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
2049 		goto done;
2050 
2051 	switch (chan->mode) {
2052 	case L2CAP_MODE_STREAMING:
2053 	case L2CAP_MODE_ERTM:
2054 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2055 			chan->mode = l2cap_select_mode(rfc.mode,
2056 					chan->conn->feat_mask);
2057 			break;
2058 		}
2059 
2060 		if (chan->mode != rfc.mode)
2061 			return -ECONNREFUSED;
2062 
2063 		break;
2064 	}
2065 
2066 done:
2067 	if (chan->mode != rfc.mode) {
2068 		result = L2CAP_CONF_UNACCEPT;
2069 		rfc.mode = chan->mode;
2070 
2071 		if (chan->num_conf_rsp == 1)
2072 			return -ECONNREFUSED;
2073 
2074 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2075 					sizeof(rfc), (unsigned long) &rfc);
2076 	}
2077 
2078 
2079 	if (result == L2CAP_CONF_SUCCESS) {
2080 		/* Configure output options and let the other side know
2081 		 * which ones we don't like. */
2082 
2083 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
2084 			result = L2CAP_CONF_UNACCEPT;
2085 		else {
2086 			chan->omtu = mtu;
2087 			set_bit(CONF_MTU_DONE, &chan->conf_state);
2088 		}
2089 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2090 
2091 		switch (rfc.mode) {
2092 		case L2CAP_MODE_BASIC:
2093 			chan->fcs = L2CAP_FCS_NONE;
2094 			set_bit(CONF_MODE_DONE, &chan->conf_state);
2095 			break;
2096 
2097 		case L2CAP_MODE_ERTM:
2098 			chan->remote_tx_win = rfc.txwin_size;
2099 			chan->remote_max_tx = rfc.max_transmit;
2100 
2101 			if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2102 				rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2103 
2104 			chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2105 
2106 			rfc.retrans_timeout =
2107 				cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2108 			rfc.monitor_timeout =
2109 				cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2110 
2111 			set_bit(CONF_MODE_DONE, &chan->conf_state);
2112 
2113 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2114 					sizeof(rfc), (unsigned long) &rfc);
2115 
2116 			break;
2117 
2118 		case L2CAP_MODE_STREAMING:
2119 			if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2120 				rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2121 
2122 			chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2123 
2124 			set_bit(CONF_MODE_DONE, &chan->conf_state);
2125 
2126 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2127 					sizeof(rfc), (unsigned long) &rfc);
2128 
2129 			break;
2130 
2131 		default:
2132 			result = L2CAP_CONF_UNACCEPT;
2133 
2134 			memset(&rfc, 0, sizeof(rfc));
2135 			rfc.mode = chan->mode;
2136 		}
2137 
2138 		if (result == L2CAP_CONF_SUCCESS)
2139 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2140 	}
2141 	rsp->scid   = cpu_to_le16(chan->dcid);
2142 	rsp->result = cpu_to_le16(result);
2143 	rsp->flags  = cpu_to_le16(0x0000);
2144 
2145 	return ptr - data;
2146 }
2147 
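/* Walk the options of a received Configuration Response and build a new
 * Configuration Request in 'data' with adjusted values (MTU, flush timeout,
 * RFC).  Returns the length of the new request or -ECONNREFUSED.
 */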
2148 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2149 {
2150 	struct l2cap_conf_req *req = data;
2151 	void *ptr = req->data;
2152 	int type, olen;
2153 	unsigned long val;
2154 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2155 
2156 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2157 
2158 	while (len >= L2CAP_CONF_OPT_SIZE) {
2159 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2160 
2161 		switch (type) {
2162 		case L2CAP_CONF_MTU:
2163 			if (val < L2CAP_DEFAULT_MIN_MTU) {
2164 				*result = L2CAP_CONF_UNACCEPT;
2165 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2166 			} else
2167 				chan->imtu = val;
2168 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2169 			break;
2170 
2171 		case L2CAP_CONF_FLUSH_TO:
2172 			chan->flush_to = val;
2173 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2174 							2, chan->flush_to);
2175 			break;
2176 
2177 		case L2CAP_CONF_RFC:
2178 			if (olen == sizeof(rfc))
2179 				memcpy(&rfc, (void *)val, olen);
2180 
2181 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2182 							rfc.mode != chan->mode)
2183 				return -ECONNREFUSED;
2184 
2185 			chan->fcs = 0;
2186 
2187 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2188 					sizeof(rfc), (unsigned long) &rfc);
2189 			break;
2190 		}
2191 	}
2192 
2193 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2194 		return -ECONNREFUSED;
2195 
2196 	chan->mode = rfc.mode;
2197 
2198 	if (*result == L2CAP_CONF_SUCCESS) {
2199 		switch (rfc.mode) {
2200 		case L2CAP_MODE_ERTM:
2201 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2202 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2203 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
2204 			break;
2205 		case L2CAP_MODE_STREAMING:
2206 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
2207 		}
2208 	}
2209 
2210 	req->dcid   = cpu_to_le16(chan->dcid);
2211 	req->flags  = cpu_to_le16(0x0000);
2212 
2213 	return ptr - data;
2214 }
2215 
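/* Build a Configuration Response header with the given result and flags
 * and no options.  Returns the length of the response.
 */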
2216 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2217 {
2218 	struct l2cap_conf_rsp *rsp = data;
2219 	void *ptr = rsp->data;
2220 
2221 	BT_DBG("chan %p", chan);
2222 
2223 	rsp->scid   = cpu_to_le16(chan->dcid);
2224 	rsp->result = cpu_to_le16(result);
2225 	rsp->flags  = cpu_to_le16(flags);
2226 
2227 	return ptr - data;
2228 }
2229 
2230 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2231 {
2232 	struct l2cap_conn_rsp rsp;
2233 	struct l2cap_conn *conn = chan->conn;
2234 	u8 buf[128];
2235 
2236 	rsp.scid   = cpu_to_le16(chan->dcid);
2237 	rsp.dcid   = cpu_to_le16(chan->scid);
2238 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2239 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2240 	l2cap_send_cmd(conn, chan->ident,
2241 				L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2242 
2243 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2244 		return;
2245 
2246 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2247 			l2cap_build_conf_req(chan, buf), buf);
2248 	chan->num_conf_req++;
2249 }
2250 
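/* Extract the RFC option from a successful Configuration Response and
 * cache the negotiated timeouts and MPS for ERTM/Streaming channels.
 */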
2251 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2252 {
2253 	int type, olen;
2254 	unsigned long val;
2255 	/* Use sane defaults in case the remote did not include an RFC option */
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
	};
2256 
2257 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2258 
2259 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2260 		return;
2261 
2262 	while (len >= L2CAP_CONF_OPT_SIZE) {
2263 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2264 
2265 		switch (type) {
2266 		case L2CAP_CONF_RFC:
2267 			if (olen == sizeof(rfc))
2268 				memcpy(&rfc, (void *)val, olen);
2269 			goto done;
2270 		}
2271 	}
2272 
2273 done:
2274 	switch (rfc.mode) {
2275 	case L2CAP_MODE_ERTM:
2276 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2277 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2278 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
2279 		break;
2280 	case L2CAP_MODE_STREAMING:
2281 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
2282 	}
2283 }
2284 
2285 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2286 {
2287 	struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2288 
2289 	if (rej->reason != 0x0000)
2290 		return 0;
2291 
2292 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2293 					cmd->ident == conn->info_ident) {
2294 		del_timer(&conn->info_timer);
2295 
2296 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2297 		conn->info_ident = 0;
2298 
2299 		l2cap_conn_start(conn);
2300 	}
2301 
2302 	return 0;
2303 }
2304 
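/* Handle an incoming Connection Request: look up a listening channel for
 * the PSM, apply security and backlog checks, create the child channel and
 * answer with a Connection Response (success, pending or rejection).
 */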
2305 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2306 {
2307 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2308 	struct l2cap_conn_rsp rsp;
2309 	struct l2cap_chan *chan = NULL, *pchan;
2310 	struct sock *parent, *sk = NULL;
2311 	int result, status = L2CAP_CS_NO_INFO;
2312 
2313 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2314 	__le16 psm = req->psm;
2315 
2316 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
2317 
2318 	/* Check if we have socket listening on psm */
2319 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2320 	if (!pchan) {
2321 		result = L2CAP_CR_BAD_PSM;
2322 		goto sendresp;
2323 	}
2324 
2325 	parent = pchan->sk;
2326 
2327 	bh_lock_sock(parent);
2328 
2329 	/* Check if the ACL is secure enough (if not SDP) */
2330 	if (psm != cpu_to_le16(0x0001) &&
2331 				!hci_conn_check_link_mode(conn->hcon)) {
2332 		conn->disc_reason = 0x05;
2333 		result = L2CAP_CR_SEC_BLOCK;
2334 		goto response;
2335 	}
2336 
2337 	result = L2CAP_CR_NO_MEM;
2338 
2339 	/* Check for backlog size */
2340 	if (sk_acceptq_is_full(parent)) {
2341 		BT_DBG("backlog full %d", parent->sk_ack_backlog);
2342 		goto response;
2343 	}
2344 
2345 	chan = pchan->ops->new_connection(pchan->data);
2346 	if (!chan)
2347 		goto response;
2348 
2349 	sk = chan->sk;
2350 
2351 	write_lock_bh(&conn->chan_lock);
2352 
2353 	/* Check if we already have channel with that dcid */
2354 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
2355 		write_unlock_bh(&conn->chan_lock);
2356 		sock_set_flag(sk, SOCK_ZAPPED);
2357 		chan->ops->close(chan->data);
2358 		goto response;
2359 	}
2360 
2361 	hci_conn_hold(conn->hcon);
2362 
2363 	bacpy(&bt_sk(sk)->src, conn->src);
2364 	bacpy(&bt_sk(sk)->dst, conn->dst);
2365 	chan->psm  = psm;
2366 	chan->dcid = scid;
2367 
2368 	bt_accept_enqueue(parent, sk);
2369 
2370 	__l2cap_chan_add(conn, chan);
2371 
2372 	dcid = chan->scid;
2373 
2374 	__set_chan_timer(chan, sk->sk_sndtimeo);
2375 
2376 	chan->ident = cmd->ident;
2377 
2378 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2379 		if (l2cap_check_security(chan)) {
2380 			if (bt_sk(sk)->defer_setup) {
2381 				l2cap_state_change(chan, BT_CONNECT2);
2382 				result = L2CAP_CR_PEND;
2383 				status = L2CAP_CS_AUTHOR_PEND;
2384 				parent->sk_data_ready(parent, 0);
2385 			} else {
2386 				l2cap_state_change(chan, BT_CONFIG);
2387 				result = L2CAP_CR_SUCCESS;
2388 				status = L2CAP_CS_NO_INFO;
2389 			}
2390 		} else {
2391 			l2cap_state_change(chan, BT_CONNECT2);
2392 			result = L2CAP_CR_PEND;
2393 			status = L2CAP_CS_AUTHEN_PEND;
2394 		}
2395 	} else {
2396 		l2cap_state_change(chan, BT_CONNECT2);
2397 		result = L2CAP_CR_PEND;
2398 		status = L2CAP_CS_NO_INFO;
2399 	}
2400 
2401 	write_unlock_bh(&conn->chan_lock);
2402 
2403 response:
2404 	bh_unlock_sock(parent);
2405 
2406 sendresp:
2407 	rsp.scid   = cpu_to_le16(scid);
2408 	rsp.dcid   = cpu_to_le16(dcid);
2409 	rsp.result = cpu_to_le16(result);
2410 	rsp.status = cpu_to_le16(status);
2411 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2412 
2413 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2414 		struct l2cap_info_req info;
2415 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2416 
2417 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2418 		conn->info_ident = l2cap_get_ident(conn);
2419 
2420 		mod_timer(&conn->info_timer, jiffies +
2421 					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2422 
2423 		l2cap_send_cmd(conn, conn->info_ident,
2424 					L2CAP_INFO_REQ, sizeof(info), &info);
2425 	}
2426 
2427 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2428 				result == L2CAP_CR_SUCCESS) {
2429 		u8 buf[128];
2430 		set_bit(CONF_REQ_SENT, &chan->conf_state);
2431 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2432 					l2cap_build_conf_req(chan, buf), buf);
2433 		chan->num_conf_req++;
2434 	}
2435 
2436 	return 0;
2437 }
2438 
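/* Handle a Connection Response for an outgoing connection: on success move
 * to BT_CONFIG and send the first Configuration Request, otherwise tear the
 * channel down.
 */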
2439 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2440 {
2441 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2442 	u16 scid, dcid, result, status;
2443 	struct l2cap_chan *chan;
2444 	struct sock *sk;
2445 	u8 req[128];
2446 
2447 	scid   = __le16_to_cpu(rsp->scid);
2448 	dcid   = __le16_to_cpu(rsp->dcid);
2449 	result = __le16_to_cpu(rsp->result);
2450 	status = __le16_to_cpu(rsp->status);
2451 
2452 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2453 
2454 	if (scid) {
2455 		chan = l2cap_get_chan_by_scid(conn, scid);
2456 		if (!chan)
2457 			return -EFAULT;
2458 	} else {
2459 		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2460 		if (!chan)
2461 			return -EFAULT;
2462 	}
2463 
2464 	sk = chan->sk;
2465 
2466 	switch (result) {
2467 	case L2CAP_CR_SUCCESS:
2468 		l2cap_state_change(chan, BT_CONFIG);
2469 		chan->ident = 0;
2470 		chan->dcid = dcid;
2471 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2472 
2473 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2474 			break;
2475 
2476 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2477 					l2cap_build_conf_req(chan, req), req);
2478 		chan->num_conf_req++;
2479 		break;
2480 
2481 	case L2CAP_CR_PEND:
2482 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2483 		break;
2484 
2485 	default:
2486 		/* don't delete l2cap channel if sk is owned by user */
2487 		if (sock_owned_by_user(sk)) {
2488 			l2cap_state_change(chan, BT_DISCONN);
2489 			__clear_chan_timer(chan);
2490 			__set_chan_timer(chan, HZ / 5);
2491 			break;
2492 		}
2493 
2494 		l2cap_chan_del(chan, ECONNREFUSED);
2495 		break;
2496 	}
2497 
2498 	bh_unlock_sock(sk);
2499 	return 0;
2500 }
2501 
2502 static inline void set_default_fcs(struct l2cap_chan *chan)
2503 {
2504 	/* FCS is enabled only in ERTM or streaming mode, if one or both
2505 	 * sides request it.
2506 	 */
2507 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2508 		chan->fcs = L2CAP_FCS_NONE;
2509 	else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2510 		chan->fcs = L2CAP_FCS_CRC16;
2511 }
2512 
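/* Handle a Configuration Request: accumulate option fragments in
 * chan->conf_req and, once the final fragment arrives, parse them and send
 * the Configuration Response.
 */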
2513 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2514 {
2515 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2516 	u16 dcid, flags;
2517 	u8 rsp[64];
2518 	struct l2cap_chan *chan;
2519 	struct sock *sk;
2520 	int len;
2521 
2522 	dcid  = __le16_to_cpu(req->dcid);
2523 	flags = __le16_to_cpu(req->flags);
2524 
2525 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2526 
2527 	chan = l2cap_get_chan_by_scid(conn, dcid);
2528 	if (!chan)
2529 		return -ENOENT;
2530 
2531 	sk = chan->sk;
2532 
2533 	if (chan->state != BT_CONFIG) {
2534 		struct l2cap_cmd_rej rej;
2535 
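		/* Reject with reason 0x0002 (invalid CID in request) */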
2536 		rej.reason = cpu_to_le16(0x0002);
2537 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2538 				sizeof(rej), &rej);
2539 		goto unlock;
2540 	}
2541 
2542 	/* Reject if config buffer is too small. */
2543 	len = cmd_len - sizeof(*req);
2544 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
2545 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2546 				l2cap_build_conf_rsp(chan, rsp,
2547 					L2CAP_CONF_REJECT, flags), rsp);
2548 		goto unlock;
2549 	}
2550 
2551 	/* Store config. */
2552 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
2553 	chan->conf_len += len;
2554 
2555 	if (flags & 0x0001) {
2556 		/* Incomplete config. Send empty response. */
2557 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2558 				l2cap_build_conf_rsp(chan, rsp,
2559 					L2CAP_CONF_SUCCESS, 0x0001), rsp);
2560 		goto unlock;
2561 	}
2562 
2563 	/* Complete config. */
2564 	len = l2cap_parse_conf_req(chan, rsp);
2565 	if (len < 0) {
2566 		l2cap_send_disconn_req(conn, chan, ECONNRESET);
2567 		goto unlock;
2568 	}
2569 
2570 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2571 	chan->num_conf_rsp++;
2572 
2573 	/* Reset config buffer. */
2574 	chan->conf_len = 0;
2575 
2576 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2577 		goto unlock;
2578 
2579 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2580 		set_default_fcs(chan);
2581 
2582 		l2cap_state_change(chan, BT_CONNECTED);
2583 
2584 		chan->next_tx_seq = 0;
2585 		chan->expected_tx_seq = 0;
2586 		skb_queue_head_init(&chan->tx_q);
2587 		if (chan->mode == L2CAP_MODE_ERTM)
2588 			l2cap_ertm_init(chan);
2589 
2590 		l2cap_chan_ready(sk);
2591 		goto unlock;
2592 	}
2593 
2594 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2595 		u8 buf[64];
2596 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2597 					l2cap_build_conf_req(chan, buf), buf);
2598 		chan->num_conf_req++;
2599 	}
2600 
2601 unlock:
2602 	bh_unlock_sock(sk);
2603 	return 0;
2604 }
2605 
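/* Handle a Configuration Response: on success cache the negotiated RFC
 * values, on L2CAP_CONF_UNACCEPT retry with an adjusted request, otherwise
 * disconnect the channel.
 */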
2606 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2607 {
2608 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2609 	u16 scid, flags, result;
2610 	struct l2cap_chan *chan;
2611 	struct sock *sk;
2612 	int len = __le16_to_cpu(cmd->len) - sizeof(*rsp);
2613 
2614 	scid   = __le16_to_cpu(rsp->scid);
2615 	flags  = __le16_to_cpu(rsp->flags);
2616 	result = __le16_to_cpu(rsp->result);
2617 
2618 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2619 			scid, flags, result);
2620 
2621 	chan = l2cap_get_chan_by_scid(conn, scid);
2622 	if (!chan)
2623 		return 0;
2624 
2625 	sk = chan->sk;
2626 
2627 	switch (result) {
2628 	case L2CAP_CONF_SUCCESS:
2629 		l2cap_conf_rfc_get(chan, rsp->data, len);
2630 		break;
2631 
2632 	case L2CAP_CONF_UNACCEPT:
2633 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2634 			char req[64];
2635 
2636 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2637 				l2cap_send_disconn_req(conn, chan, ECONNRESET);
2638 				goto done;
2639 			}
2640 
2641 			/* throw out any old stored conf requests */
2642 			result = L2CAP_CONF_SUCCESS;
2643 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2644 								req, &result);
2645 			if (len < 0) {
2646 				l2cap_send_disconn_req(conn, chan, ECONNRESET);
2647 				goto done;
2648 			}
2649 
2650 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
2651 						L2CAP_CONF_REQ, len, req);
2652 			chan->num_conf_req++;
2653 			if (result != L2CAP_CONF_SUCCESS)
2654 				goto done;
2655 			break;
2656 		}
2657 
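		/* fall through to disconnect when too many config attempts failed */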
2658 	default:
2659 		sk->sk_err = ECONNRESET;
2660 		__set_chan_timer(chan, HZ * 5);
2661 		l2cap_send_disconn_req(conn, chan, ECONNRESET);
2662 		goto done;
2663 	}
2664 
2665 	if (flags & 0x01)
2666 		goto done;
2667 
2668 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
2669 
2670 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2671 		set_default_fcs(chan);
2672 
2673 		l2cap_state_change(chan, BT_CONNECTED);
2674 		chan->next_tx_seq = 0;
2675 		chan->expected_tx_seq = 0;
2676 		skb_queue_head_init(&chan->tx_q);
2677 		if (chan->mode ==  L2CAP_MODE_ERTM)
2678 			l2cap_ertm_init(chan);
2679 
2680 		l2cap_chan_ready(sk);
2681 	}
2682 
2683 done:
2684 	bh_unlock_sock(sk);
2685 	return 0;
2686 }
2687 
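/* Handle a Disconnection Request: acknowledge it with a Disconnection
 * Response and tear down the channel (deferred if the socket is owned by
 * user context).
 */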
2688 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2689 {
2690 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2691 	struct l2cap_disconn_rsp rsp;
2692 	u16 dcid, scid;
2693 	struct l2cap_chan *chan;
2694 	struct sock *sk;
2695 
2696 	scid = __le16_to_cpu(req->scid);
2697 	dcid = __le16_to_cpu(req->dcid);
2698 
2699 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2700 
2701 	chan = l2cap_get_chan_by_scid(conn, dcid);
2702 	if (!chan)
2703 		return 0;
2704 
2705 	sk = chan->sk;
2706 
2707 	rsp.dcid = cpu_to_le16(chan->scid);
2708 	rsp.scid = cpu_to_le16(chan->dcid);
2709 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2710 
2711 	sk->sk_shutdown = SHUTDOWN_MASK;
2712 
2713 	/* don't delete l2cap channel if sk is owned by user */
2714 	if (sock_owned_by_user(sk)) {
2715 		l2cap_state_change(chan, BT_DISCONN);
2716 		__clear_chan_timer(chan);
2717 		__set_chan_timer(chan, HZ / 5);
2718 		bh_unlock_sock(sk);
2719 		return 0;
2720 	}
2721 
2722 	l2cap_chan_del(chan, ECONNRESET);
2723 	bh_unlock_sock(sk);
2724 
2725 	chan->ops->close(chan->data);
2726 	return 0;
2727 }
2728 
2729 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2730 {
2731 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2732 	u16 dcid, scid;
2733 	struct l2cap_chan *chan;
2734 	struct sock *sk;
2735 
2736 	scid = __le16_to_cpu(rsp->scid);
2737 	dcid = __le16_to_cpu(rsp->dcid);
2738 
2739 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2740 
2741 	chan = l2cap_get_chan_by_scid(conn, scid);
2742 	if (!chan)
2743 		return 0;
2744 
2745 	sk = chan->sk;
2746 
2747 	/* don't delete l2cap channel if sk is owned by user */
2748 	if (sock_owned_by_user(sk)) {
2749 		l2cap_state_change(chan, BT_DISCONN);
2750 		__clear_chan_timer(chan);
2751 		__set_chan_timer(chan, HZ / 5);
2752 		bh_unlock_sock(sk);
2753 		return 0;
2754 	}
2755 
2756 	l2cap_chan_del(chan, 0);
2757 	bh_unlock_sock(sk);
2758 
2759 	chan->ops->close(chan->data);
2760 	return 0;
2761 }
2762 
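/* Handle an Information Request: report the supported feature mask or the
 * fixed channel map, or L2CAP_IR_NOTSUPP for unknown information types.
 */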
2763 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2764 {
2765 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2766 	u16 type;
2767 
2768 	type = __le16_to_cpu(req->type);
2769 
2770 	BT_DBG("type 0x%4.4x", type);
2771 
2772 	if (type == L2CAP_IT_FEAT_MASK) {
2773 		u8 buf[8];
2774 		u32 feat_mask = l2cap_feat_mask;
2775 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2776 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2777 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2778 		if (!disable_ertm)
2779 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2780 							 | L2CAP_FEAT_FCS;
2781 		put_unaligned_le32(feat_mask, rsp->data);
2782 		l2cap_send_cmd(conn, cmd->ident,
2783 					L2CAP_INFO_RSP, sizeof(buf), buf);
2784 	} else if (type == L2CAP_IT_FIXED_CHAN) {
2785 		u8 buf[12];
2786 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2787 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2788 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2789 		memcpy(buf + 4, l2cap_fixed_chan, 8);
2790 		l2cap_send_cmd(conn, cmd->ident,
2791 					L2CAP_INFO_RSP, sizeof(buf), buf);
2792 	} else {
2793 		struct l2cap_info_rsp rsp;
2794 		rsp.type   = cpu_to_le16(type);
2795 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2796 		l2cap_send_cmd(conn, cmd->ident,
2797 					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2798 	}
2799 
2800 	return 0;
2801 }
2802 
2803 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2804 {
2805 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2806 	u16 type, result;
2807 
2808 	type   = __le16_to_cpu(rsp->type);
2809 	result = __le16_to_cpu(rsp->result);
2810 
2811 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2812 
2813 	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
2814 	if (cmd->ident != conn->info_ident ||
2815 			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2816 		return 0;
2817 
2818 	del_timer(&conn->info_timer);
2819 
2820 	if (result != L2CAP_IR_SUCCESS) {
2821 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2822 		conn->info_ident = 0;
2823 
2824 		l2cap_conn_start(conn);
2825 
2826 		return 0;
2827 	}
2828 
2829 	if (type == L2CAP_IT_FEAT_MASK) {
2830 		conn->feat_mask = get_unaligned_le32(rsp->data);
2831 
2832 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2833 			struct l2cap_info_req req;
2834 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2835 
2836 			conn->info_ident = l2cap_get_ident(conn);
2837 
2838 			l2cap_send_cmd(conn, conn->info_ident,
2839 					L2CAP_INFO_REQ, sizeof(req), &req);
2840 		} else {
2841 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2842 			conn->info_ident = 0;
2843 
2844 			l2cap_conn_start(conn);
2845 		}
2846 	} else if (type == L2CAP_IT_FIXED_CHAN) {
2847 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2848 		conn->info_ident = 0;
2849 
2850 		l2cap_conn_start(conn);
2851 	}
2852 
2853 	return 0;
2854 }
2855 
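/* Validate LE connection parameters: the interval bounds are in units of
 * 1.25 ms (min >= 7.5 ms, max <= 4 s), the supervision timeout multiplier
 * is in units of 10 ms (100 ms to 32 s), and the timeout must be large
 * enough to cover the interval implied by max and the slave latency.
 */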
2856 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2857 							u16 to_multiplier)
2858 {
2859 	u16 max_latency;
2860 
2861 	if (min > max || min < 6 || max > 3200)
2862 		return -EINVAL;
2863 
2864 	if (to_multiplier < 10 || to_multiplier > 3200)
2865 		return -EINVAL;
2866 
2867 	if (max >= to_multiplier * 8)
2868 		return -EINVAL;
2869 
2870 	max_latency = (to_multiplier * 8 / max) - 1;
2871 	if (latency > 499 || latency > max_latency)
2872 		return -EINVAL;
2873 
2874 	return 0;
2875 }
2876 
2877 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2878 					struct l2cap_cmd_hdr *cmd, u8 *data)
2879 {
2880 	struct hci_conn *hcon = conn->hcon;
2881 	struct l2cap_conn_param_update_req *req;
2882 	struct l2cap_conn_param_update_rsp rsp;
2883 	u16 min, max, latency, to_multiplier, cmd_len;
2884 	int err;
2885 
2886 	if (!(hcon->link_mode & HCI_LM_MASTER))
2887 		return -EINVAL;
2888 
2889 	cmd_len = __le16_to_cpu(cmd->len);
2890 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2891 		return -EPROTO;
2892 
2893 	req = (struct l2cap_conn_param_update_req *) data;
2894 	min		= __le16_to_cpu(req->min);
2895 	max		= __le16_to_cpu(req->max);
2896 	latency		= __le16_to_cpu(req->latency);
2897 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
2898 
2899 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2900 						min, max, latency, to_multiplier);
2901 
2902 	memset(&rsp, 0, sizeof(rsp));
2903 
2904 	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2905 	if (err)
2906 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2907 	else
2908 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2909 
2910 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
2911 							sizeof(rsp), &rsp);
2912 
2913 	if (!err)
2914 		hci_le_conn_update(hcon, min, max, latency, to_multiplier);
2915 
2916 	return 0;
2917 }
2918 
2919 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2920 			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2921 {
2922 	int err = 0;
2923 
2924 	switch (cmd->code) {
2925 	case L2CAP_COMMAND_REJ:
2926 		l2cap_command_rej(conn, cmd, data);
2927 		break;
2928 
2929 	case L2CAP_CONN_REQ:
2930 		err = l2cap_connect_req(conn, cmd, data);
2931 		break;
2932 
2933 	case L2CAP_CONN_RSP:
2934 		err = l2cap_connect_rsp(conn, cmd, data);
2935 		break;
2936 
2937 	case L2CAP_CONF_REQ:
2938 		err = l2cap_config_req(conn, cmd, cmd_len, data);
2939 		break;
2940 
2941 	case L2CAP_CONF_RSP:
2942 		err = l2cap_config_rsp(conn, cmd, data);
2943 		break;
2944 
2945 	case L2CAP_DISCONN_REQ:
2946 		err = l2cap_disconnect_req(conn, cmd, data);
2947 		break;
2948 
2949 	case L2CAP_DISCONN_RSP:
2950 		err = l2cap_disconnect_rsp(conn, cmd, data);
2951 		break;
2952 
2953 	case L2CAP_ECHO_REQ:
2954 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2955 		break;
2956 
2957 	case L2CAP_ECHO_RSP:
2958 		break;
2959 
2960 	case L2CAP_INFO_REQ:
2961 		err = l2cap_information_req(conn, cmd, data);
2962 		break;
2963 
2964 	case L2CAP_INFO_RSP:
2965 		err = l2cap_information_rsp(conn, cmd, data);
2966 		break;
2967 
2968 	default:
2969 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2970 		err = -EINVAL;
2971 		break;
2972 	}
2973 
2974 	return err;
2975 }
2976 
2977 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2978 					struct l2cap_cmd_hdr *cmd, u8 *data)
2979 {
2980 	switch (cmd->code) {
2981 	case L2CAP_COMMAND_REJ:
2982 		return 0;
2983 
2984 	case L2CAP_CONN_PARAM_UPDATE_REQ:
2985 		return l2cap_conn_param_update_req(conn, cmd, data);
2986 
2987 	case L2CAP_CONN_PARAM_UPDATE_RSP:
2988 		return 0;
2989 
2990 	default:
2991 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2992 		return -EINVAL;
2993 	}
2994 }
2995 
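/* Process a signaling channel C-frame: iterate over the contained commands
 * and dispatch each one to the LE or BR/EDR handler, sending a Command
 * Reject if the handler fails.
 */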
2996 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2997 							struct sk_buff *skb)
2998 {
2999 	u8 *data = skb->data;
3000 	int len = skb->len;
3001 	struct l2cap_cmd_hdr cmd;
3002 	int err;
3003 
3004 	l2cap_raw_recv(conn, skb);
3005 
3006 	while (len >= L2CAP_CMD_HDR_SIZE) {
3007 		u16 cmd_len;
3008 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3009 		data += L2CAP_CMD_HDR_SIZE;
3010 		len  -= L2CAP_CMD_HDR_SIZE;
3011 
3012 		cmd_len = le16_to_cpu(cmd.len);
3013 
3014 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3015 
3016 		if (cmd_len > len || !cmd.ident) {
3017 			BT_DBG("corrupted command");
3018 			break;
3019 		}
3020 
3021 		if (conn->hcon->type == LE_LINK)
3022 			err = l2cap_le_sig_cmd(conn, &cmd, data);
3023 		else
3024 			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3025 
3026 		if (err) {
3027 			struct l2cap_cmd_rej rej;
3028 
3029 			BT_ERR("Wrong link type (%d)", err);
3030 
3031 			/* FIXME: Map err to a valid reason */
3032 			rej.reason = cpu_to_le16(0);
3033 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3034 		}
3035 
3036 		data += cmd_len;
3037 		len  -= cmd_len;
3038 	}
3039 
3040 	kfree_skb(skb);
3041 }
3042 
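/* Verify (and strip) the CRC-16 FCS of a received frame when FCS is
 * enabled on the channel.  Returns 0 on success or -EBADMSG on mismatch.
 */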
3043 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
3044 {
3045 	u16 our_fcs, rcv_fcs;
3046 	int hdr_size = L2CAP_HDR_SIZE + 2;
3047 
3048 	if (chan->fcs == L2CAP_FCS_CRC16) {
3049 		skb_trim(skb, skb->len - 2);
3050 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3051 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3052 
3053 		if (our_fcs != rcv_fcs)
3054 			return -EBADMSG;
3055 	}
3056 	return 0;
3057 }
3058 
3059 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3060 {
3061 	u16 control = 0;
3062 
3063 	chan->frames_sent = 0;
3064 
3065 	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3066 
3067 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3068 		control |= L2CAP_SUPER_RCV_NOT_READY;
3069 		l2cap_send_sframe(chan, control);
3070 		set_bit(CONN_RNR_SENT, &chan->conn_state);
3071 	}
3072 
3073 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3074 		l2cap_retransmit_frames(chan);
3075 
3076 	l2cap_ertm_send(chan);
3077 
3078 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3079 			chan->frames_sent == 0) {
3080 		control |= L2CAP_SUPER_RCV_READY;
3081 		l2cap_send_sframe(chan, control);
3082 	}
3083 }
3084 
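/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the queue
 * ordered by tx_seq (modulo 64) and rejecting duplicates.
 */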
3085 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
3086 {
3087 	struct sk_buff *next_skb;
3088 	int tx_seq_offset, next_tx_seq_offset;
3089 
3090 	bt_cb(skb)->tx_seq = tx_seq;
3091 	bt_cb(skb)->sar = sar;
3092 
3093 	next_skb = skb_peek(&chan->srej_q);
3094 	if (!next_skb) {
3095 		__skb_queue_tail(&chan->srej_q, skb);
3096 		return 0;
3097 	}
3098 
3099 	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3100 	if (tx_seq_offset < 0)
3101 		tx_seq_offset += 64;
3102 
3103 	do {
3104 		if (bt_cb(next_skb)->tx_seq == tx_seq)
3105 			return -EINVAL;
3106 
3107 		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3108 						chan->buffer_seq) % 64;
3109 		if (next_tx_seq_offset < 0)
3110 			next_tx_seq_offset += 64;
3111 
3112 		if (next_tx_seq_offset > tx_seq_offset) {
3113 			__skb_queue_before(&chan->srej_q, next_skb, skb);
3114 			return 0;
3115 		}
3116 
3117 		if (skb_queue_is_last(&chan->srej_q, next_skb))
3118 			break;
3119 
3120 	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
3121 
3122 	__skb_queue_tail(&chan->srej_q, skb);
3123 
3124 	return 0;
3125 }
3126 
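/* Reassemble ERTM SDUs from I-frames according to their SAR bits and hand
 * complete SDUs up to the channel owner.
 */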
3127 static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3128 {
3129 	struct sk_buff *_skb;
3130 	int err;
3131 
3132 	switch (control & L2CAP_CTRL_SAR) {
3133 	case L2CAP_SDU_UNSEGMENTED:
3134 		if (test_bit(CONN_SAR_SDU, &chan->conn_state))
3135 			goto drop;
3136 
3137 		return chan->ops->recv(chan->data, skb);
3138 
3139 	case L2CAP_SDU_START:
3140 		if (test_bit(CONN_SAR_SDU, &chan->conn_state))
3141 			goto drop;
3142 
3143 		chan->sdu_len = get_unaligned_le16(skb->data);
3144 
3145 		if (chan->sdu_len > chan->imtu)
3146 			goto disconnect;
3147 
3148 		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3149 		if (!chan->sdu)
3150 			return -ENOMEM;
3151 
3152 		/* Pull the sdu_len bytes only after a successful alloc: under
3153 		 * Local Busy this frame may be processed again, so the pull
3154 		 * must happen exactly once, i.e. only when alloc succeeds. */
3155 		skb_pull(skb, 2);
3156 
3157 		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3158 
3159 		set_bit(CONN_SAR_SDU, &chan->conn_state);
3160 		chan->partial_sdu_len = skb->len;
3161 		break;
3162 
3163 	case L2CAP_SDU_CONTINUE:
3164 		if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3165 			goto disconnect;
3166 
3167 		if (!chan->sdu)
3168 			goto disconnect;
3169 
3170 		chan->partial_sdu_len += skb->len;
3171 		if (chan->partial_sdu_len > chan->sdu_len)
3172 			goto drop;
3173 
3174 		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3175 
3176 		break;
3177 
3178 	case L2CAP_SDU_END:
3179 		if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3180 			goto disconnect;
3181 
3182 		if (!chan->sdu)
3183 			goto disconnect;
3184 
3185 		if (!test_bit(CONN_SAR_RETRY, &chan->conn_state)) {
3186 			chan->partial_sdu_len += skb->len;
3187 
3188 			if (chan->partial_sdu_len > chan->imtu)
3189 				goto drop;
3190 
3191 			if (chan->partial_sdu_len != chan->sdu_len)
3192 				goto drop;
3193 
3194 			memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3195 		}
3196 
3197 		_skb = skb_clone(chan->sdu, GFP_ATOMIC);
3198 		if (!_skb) {
3199 			set_bit(CONN_SAR_RETRY, &chan->conn_state);
3200 			return -ENOMEM;
3201 		}
3202 
3203 		err = chan->ops->recv(chan->data, _skb);
3204 		if (err < 0) {
3205 			kfree_skb(_skb);
3206 			set_bit(CONN_SAR_RETRY, &chan->conn_state);
3207 			return err;
3208 		}
3209 
3210 		clear_bit(CONN_SAR_RETRY, &chan->conn_state);
3211 		clear_bit(CONN_SAR_SDU, &chan->conn_state);
3212 
3213 		kfree_skb(chan->sdu);
3214 		break;
3215 	}
3216 
3217 	kfree_skb(skb);
3218 	return 0;
3219 
3220 drop:
3221 	kfree_skb(chan->sdu);
3222 	chan->sdu = NULL;
3223 
3224 disconnect:
3225 	l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3226 	kfree_skb(skb);
3227 	return 0;
3228 }
3229 
3230 static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
3231 {
3232 	struct sk_buff *skb;
3233 	u16 control;
3234 	int err;
3235 
3236 	while ((skb = skb_dequeue(&chan->busy_q))) {
3237 		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3238 		err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3239 		if (err < 0) {
3240 			skb_queue_head(&chan->busy_q, skb);
3241 			return -EBUSY;
3242 		}
3243 
3244 		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3245 	}
3246 
3247 	if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3248 		goto done;
3249 
3250 	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3251 	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3252 	l2cap_send_sframe(chan, control);
3253 	chan->retry_count = 1;
3254 
3255 	__clear_retrans_timer(chan);
3256 	__set_monitor_timer(chan);
3257 
3258 	set_bit(CONN_WAIT_F, &chan->conn_state);
3259 
3260 done:
3261 	clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3262 	clear_bit(CONN_RNR_SENT, &chan->conn_state);
3263 
3264 	BT_DBG("chan %p, Exit local busy", chan);
3265 
3266 	return 0;
3267 }
3268 
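/* Work item run while the channel is in the Local Busy state: periodically
 * retry pushing queued frames upstream until they are accepted or the
 * retry limit is hit.
 */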
3269 static void l2cap_busy_work(struct work_struct *work)
3270 {
3271 	DECLARE_WAITQUEUE(wait, current);
3272 	struct l2cap_chan *chan =
3273 		container_of(work, struct l2cap_chan, busy_work);
3274 	struct sock *sk = chan->sk;
3275 	int n_tries = 0, timeo = HZ/5, err;
3276 	struct sk_buff *skb;
3277 
3278 	lock_sock(sk);
3279 
3280 	add_wait_queue(sk_sleep(sk), &wait);
3281 	while ((skb = skb_peek(&chan->busy_q))) {
3282 		set_current_state(TASK_INTERRUPTIBLE);
3283 
3284 		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3285 			err = -EBUSY;
3286 			l2cap_send_disconn_req(chan->conn, chan, EBUSY);
3287 			break;
3288 		}
3289 
3290 		if (!timeo)
3291 			timeo = HZ/5;
3292 
3293 		if (signal_pending(current)) {
3294 			err = sock_intr_errno(timeo);
3295 			break;
3296 		}
3297 
3298 		release_sock(sk);
3299 		timeo = schedule_timeout(timeo);
3300 		lock_sock(sk);
3301 
3302 		err = sock_error(sk);
3303 		if (err)
3304 			break;
3305 
3306 		if (l2cap_try_push_rx_skb(chan) == 0)
3307 			break;
3308 	}
3309 
3310 	set_current_state(TASK_RUNNING);
3311 	remove_wait_queue(sk_sleep(sk), &wait);
3312 
3313 	release_sock(sk);
3314 }
3315 
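/* Push a received I-frame upstream; if the receiver cannot take it, enter
 * the Local Busy state, signal RNR to the peer and queue the frame for the
 * busy work item.
 */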
3316 static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3317 {
3318 	int sctrl, err;
3319 
3320 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3321 		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3322 		__skb_queue_tail(&chan->busy_q, skb);
3323 		return l2cap_try_push_rx_skb(chan);
3326 	}
3327 
3328 	err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3329 	if (err >= 0) {
3330 		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3331 		return err;
3332 	}
3333 
3334 	/* Busy Condition */
3335 	BT_DBG("chan %p, Enter local busy", chan);
3336 
3337 	set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3338 	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3339 	__skb_queue_tail(&chan->busy_q, skb);
3340 
3341 	sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3342 	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3343 	l2cap_send_sframe(chan, sctrl);
3344 
3345 	set_bit(CONN_RNR_SENT, &chan->conn_state);
3346 
3347 	__clear_ack_timer(chan);
3348 
3349 	queue_work(_busy_wq, &chan->busy_work);
3350 
3351 	return err;
3352 }
3353 
3354 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3355 {
3356 	struct sk_buff *_skb;
3357 	int err = -EINVAL;
3358 
3359 	/*
3360 	 * TODO: We have to notify the userland if some data is lost with the
3361 	 * Streaming Mode.
3362 	 */
3363 
3364 	switch (control & L2CAP_CTRL_SAR) {
3365 	case L2CAP_SDU_UNSEGMENTED:
3366 		if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3367 			kfree_skb(chan->sdu);
3368 			break;
3369 		}
3370 
3371 		err = chan->ops->recv(chan->data, skb);
3372 		if (!err)
3373 			return 0;
3374 
3375 		break;
3376 
3377 	case L2CAP_SDU_START:
3378 		if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3379 			kfree_skb(chan->sdu);
3380 			break;
3381 		}
3382 
3383 		chan->sdu_len = get_unaligned_le16(skb->data);
3384 		skb_pull(skb, 2);
3385 
3386 		if (chan->sdu_len > chan->imtu) {
3387 			err = -EMSGSIZE;
3388 			break;
3389 		}
3390 
3391 		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3392 		if (!chan->sdu) {
3393 			err = -ENOMEM;
3394 			break;
3395 		}
3396 
3397 		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3398 
3399 		set_bit(CONN_SAR_SDU, &chan->conn_state);
3400 		chan->partial_sdu_len = skb->len;
3401 		err = 0;
3402 		break;
3403 
3404 	case L2CAP_SDU_CONTINUE:
3405 		if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3406 			break;
3407 
3408 		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3409 
3410 		chan->partial_sdu_len += skb->len;
3411 		if (chan->partial_sdu_len > chan->sdu_len)
3412 			kfree_skb(chan->sdu);
3413 		else
3414 			err = 0;
3415 
3416 		break;
3417 
3418 	case L2CAP_SDU_END:
3419 		if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3420 			break;
3421 
3422 		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3423 
3424 		clear_bit(CONN_SAR_SDU, &chan->conn_state);
3425 		chan->partial_sdu_len += skb->len;
3426 
3427 		if (chan->partial_sdu_len > chan->imtu)
3428 			goto drop;
3429 
3430 		if (chan->partial_sdu_len == chan->sdu_len) {
3431 			_skb = skb_clone(chan->sdu, GFP_ATOMIC);
			/* skb_clone() may fail under memory pressure; only pass
			 * the reassembled SDU up if the clone succeeded. */
			if (_skb) {
				err = chan->ops->recv(chan->data, _skb);
				if (err < 0)
					kfree_skb(_skb);
			}
3435 		}
3436 		err = 0;
3437 
3438 drop:
3439 		kfree_skb(chan->sdu);
3440 		break;
3441 	}
3442 
3443 	kfree_skb(skb);
3444 	return err;
3445 }
3446 
3447 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3448 {
3449 	struct sk_buff *skb;
3450 	u16 control;
3451 
3452 	while ((skb = skb_peek(&chan->srej_q))) {
3453 		if (bt_cb(skb)->tx_seq != tx_seq)
3454 			break;
3455 
3456 		skb = skb_dequeue(&chan->srej_q);
3457 		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3458 		l2cap_ertm_reassembly_sdu(chan, skb, control);
3459 		chan->buffer_seq_srej =
3460 			(chan->buffer_seq_srej + 1) % 64;
3461 		tx_seq = (tx_seq + 1) % 64;
3462 	}
3463 }
3464 
3465 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3466 {
3467 	struct srej_list *l, *tmp;
3468 	u16 control;
3469 
3470 	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3471 		if (l->tx_seq == tx_seq) {
3472 			list_del(&l->list);
3473 			kfree(l);
3474 			return;
3475 		}
3476 		control = L2CAP_SUPER_SELECT_REJECT;
3477 		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3478 		l2cap_send_sframe(chan, control);
3479 		list_del(&l->list);
3480 		list_add_tail(&l->list, &chan->srej_l);
3481 	}
3482 }
3483 
3484 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3485 {
3486 	struct srej_list *new;
3487 	u16 control;
3488 
3489 	while (tx_seq != chan->expected_tx_seq) {
3490 		control = L2CAP_SUPER_SELECT_REJECT;
3491 		control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3492 		l2cap_send_sframe(chan, control);
3493 
3494 		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
		/* Defensive: avoid dereferencing a failed atomic allocation */
		if (!new)
			return;
3495 		new->tx_seq = chan->expected_tx_seq;
3496 		chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3497 		list_add_tail(&new->list, &chan->srej_l);
3498 	}
3499 	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3500 }
3501 
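/* Receive path for ERTM I-frames: acknowledge peer data, detect missing or
 * duplicated tx_seq values and drive the SREJ recovery procedure.
 */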
3502 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3503 {
3504 	u8 tx_seq = __get_txseq(rx_control);
3505 	u8 req_seq = __get_reqseq(rx_control);
3506 	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3507 	int tx_seq_offset, expected_tx_seq_offset;
3508 	int num_to_ack = (chan->tx_win/6) + 1;
3509 	int err = 0;
3510 
3511 	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3512 							tx_seq, rx_control);
3513 
3514 	if (L2CAP_CTRL_FINAL & rx_control &&
3515 			test_bit(CONN_WAIT_F, &chan->conn_state)) {
3516 		__clear_monitor_timer(chan);
3517 		if (chan->unacked_frames > 0)
3518 			__set_retrans_timer(chan);
3519 		clear_bit(CONN_WAIT_F, &chan->conn_state);
3520 	}
3521 
3522 	chan->expected_ack_seq = req_seq;
3523 	l2cap_drop_acked_frames(chan);
3524 
3525 	if (tx_seq == chan->expected_tx_seq)
3526 		goto expected;
3527 
3528 	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3529 	if (tx_seq_offset < 0)
3530 		tx_seq_offset += 64;
3531 
3532 	/* invalid tx_seq */
3533 	if (tx_seq_offset >= chan->tx_win) {
3534 		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3535 		goto drop;
3536 	}
3537 
3538 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3539 		goto drop;
3540 
3541 	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3542 		struct srej_list *first;
3543 
3544 		first = list_first_entry(&chan->srej_l,
3545 				struct srej_list, list);
3546 		if (tx_seq == first->tx_seq) {
3547 			l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3548 			l2cap_check_srej_gap(chan, tx_seq);
3549 
3550 			list_del(&first->list);
3551 			kfree(first);
3552 
3553 			if (list_empty(&chan->srej_l)) {
3554 				chan->buffer_seq = chan->buffer_seq_srej;
3555 				clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3556 				l2cap_send_ack(chan);
3557 				BT_DBG("chan %p, Exit SREJ_SENT", chan);
3558 			}
3559 		} else {
3560 			struct srej_list *l;
3561 
3562 			/* duplicated tx_seq */
3563 			if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3564 				goto drop;
3565 
3566 			list_for_each_entry(l, &chan->srej_l, list) {
3567 				if (l->tx_seq == tx_seq) {
3568 					l2cap_resend_srejframe(chan, tx_seq);
3569 					return 0;
3570 				}
3571 			}
3572 			l2cap_send_srejframe(chan, tx_seq);
3573 		}
3574 	} else {
3575 		expected_tx_seq_offset =
3576 			(chan->expected_tx_seq - chan->buffer_seq) % 64;
3577 		if (expected_tx_seq_offset < 0)
3578 			expected_tx_seq_offset += 64;
3579 
3580 		/* duplicated tx_seq */
3581 		if (tx_seq_offset < expected_tx_seq_offset)
3582 			goto drop;
3583 
3584 		set_bit(CONN_SREJ_SENT, &chan->conn_state);
3585 
3586 		BT_DBG("chan %p, Enter SREJ", chan);
3587 
3588 		INIT_LIST_HEAD(&chan->srej_l);
3589 		chan->buffer_seq_srej = chan->buffer_seq;
3590 
3591 		__skb_queue_head_init(&chan->srej_q);
3592 		__skb_queue_head_init(&chan->busy_q);
3593 		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3594 
3595 		set_bit(CONN_SEND_PBIT, &chan->conn_state);
3596 
3597 		l2cap_send_srejframe(chan, tx_seq);
3598 
3599 		__clear_ack_timer(chan);
3600 	}
3601 	return 0;
3602 
3603 expected:
3604 	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3605 
3606 	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3607 		bt_cb(skb)->tx_seq = tx_seq;
3608 		bt_cb(skb)->sar = sar;
3609 		__skb_queue_tail(&chan->srej_q, skb);
3610 		return 0;
3611 	}
3612 
3613 	err = l2cap_push_rx_skb(chan, skb, rx_control);
3614 	if (err < 0)
3615 		return 0;
3616 
3617 	if (rx_control & L2CAP_CTRL_FINAL) {
3618 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3619 			l2cap_retransmit_frames(chan);
3620 	}
3621 
3622 	__set_ack_timer(chan);
3623 
3624 	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3625 	if (chan->num_acked == num_to_ack - 1)
3626 		l2cap_send_ack(chan);
3627 
3628 	return 0;
3629 
3630 drop:
3631 	kfree_skb(skb);
3632 	return 0;
3633 }
3634 
3635 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3636 {
3637 	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3638 						rx_control);
3639 
3640 	chan->expected_ack_seq = __get_reqseq(rx_control);
3641 	l2cap_drop_acked_frames(chan);
3642 
3643 	if (rx_control & L2CAP_CTRL_POLL) {
3644 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
3645 		if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3646 			if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3647 					(chan->unacked_frames > 0))
3648 				__set_retrans_timer(chan);
3649 
3650 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3651 			l2cap_send_srejtail(chan);
3652 		} else {
3653 			l2cap_send_i_or_rr_or_rnr(chan);
3654 		}
3655 
3656 	} else if (rx_control & L2CAP_CTRL_FINAL) {
3657 		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3658 
3659 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3660 			l2cap_retransmit_frames(chan);
3661 
3662 	} else {
3663 		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3664 				(chan->unacked_frames > 0))
3665 			__set_retrans_timer(chan);
3666 
3667 		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3668 		if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3669 			l2cap_send_ack(chan);
3670 		else
3671 			l2cap_ertm_send(chan);
3672 	}
3673 }
3674 
3675 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3676 {
3677 	u8 tx_seq = __get_reqseq(rx_control);
3678 
3679 	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3680 
3681 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3682 
3683 	chan->expected_ack_seq = tx_seq;
3684 	l2cap_drop_acked_frames(chan);
3685 
3686 	if (rx_control & L2CAP_CTRL_FINAL) {
3687 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3688 			l2cap_retransmit_frames(chan);
3689 	} else {
3690 		l2cap_retransmit_frames(chan);
3691 
3692 		if (test_bit(CONN_WAIT_F, &chan->conn_state))
3693 			set_bit(CONN_REJ_ACT, &chan->conn_state);
3694 	}
3695 }
3696 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3697 {
3698 	u8 tx_seq = __get_reqseq(rx_control);
3699 
3700 	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3701 
3702 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3703 
3704 	if (rx_control & L2CAP_CTRL_POLL) {
3705 		chan->expected_ack_seq = tx_seq;
3706 		l2cap_drop_acked_frames(chan);
3707 
3708 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
3709 		l2cap_retransmit_one_frame(chan, tx_seq);
3710 
3711 		l2cap_ertm_send(chan);
3712 
3713 		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3714 			chan->srej_save_reqseq = tx_seq;
3715 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
3716 		}
3717 	} else if (rx_control & L2CAP_CTRL_FINAL) {
3718 		if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3719 				chan->srej_save_reqseq == tx_seq)
3720 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3721 		else
3722 			l2cap_retransmit_one_frame(chan, tx_seq);
3723 	} else {
3724 		l2cap_retransmit_one_frame(chan, tx_seq);
3725 		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3726 			chan->srej_save_reqseq = tx_seq;
3727 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
3728 		}
3729 	}
3730 }
3731 
3732 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3733 {
3734 	u8 tx_seq = __get_reqseq(rx_control);
3735 
3736 	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3737 
3738 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3739 	chan->expected_ack_seq = tx_seq;
3740 	l2cap_drop_acked_frames(chan);
3741 
3742 	if (rx_control & L2CAP_CTRL_POLL)
3743 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
3744 
3745 	if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3746 		__clear_retrans_timer(chan);
3747 		if (rx_control & L2CAP_CTRL_POLL)
3748 			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3749 		return;
3750 	}
3751 
3752 	if (rx_control & L2CAP_CTRL_POLL)
3753 		l2cap_send_srejtail(chan);
3754 	else
3755 		l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
3756 }
3757 
3758 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3759 {
3760 	BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3761 
3762 	if (L2CAP_CTRL_FINAL & rx_control &&
3763 			test_bit(CONN_WAIT_F, &chan->conn_state)) {
3764 		__clear_monitor_timer(chan);
3765 		if (chan->unacked_frames > 0)
3766 			__set_retrans_timer(chan);
3767 		clear_bit(CONN_WAIT_F, &chan->conn_state);
3768 	}
3769 
3770 	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3771 	case L2CAP_SUPER_RCV_READY:
3772 		l2cap_data_channel_rrframe(chan, rx_control);
3773 		break;
3774 
3775 	case L2CAP_SUPER_REJECT:
3776 		l2cap_data_channel_rejframe(chan, rx_control);
3777 		break;
3778 
3779 	case L2CAP_SUPER_SELECT_REJECT:
3780 		l2cap_data_channel_srejframe(chan, rx_control);
3781 		break;
3782 
3783 	case L2CAP_SUPER_RCV_NOT_READY:
3784 		l2cap_data_channel_rnrframe(chan, rx_control);
3785 		break;
3786 	}
3787 
3788 	kfree_skb(skb);
3789 	return 0;
3790 }
3791 
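/* Entry point for ERTM frames: validate FCS, length and req_seq, then
 * dispatch to the I-frame or S-frame handler.
 */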
3792 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3793 {
3794 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3795 	u16 control;
3796 	u8 req_seq;
3797 	int len, next_tx_seq_offset, req_seq_offset;
3798 
3799 	control = get_unaligned_le16(skb->data);
3800 	skb_pull(skb, 2);
3801 	len = skb->len;
3802 
3803 	/*
3804 	 * We can just drop the corrupted I-frame here.
3805 	 * Our receive state machine will notice the missing frame and
3806 	 * start the proper recovery procedure, requesting retransmission.
3807 	 */
3808 	if (l2cap_check_fcs(chan, skb))
3809 		goto drop;
3810 
3811 	if (__is_sar_start(control) && __is_iframe(control))
3812 		len -= 2;
3813 
3814 	if (chan->fcs == L2CAP_FCS_CRC16)
3815 		len -= 2;
3816 
3817 	if (len > chan->mps) {
3818 		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3819 		goto drop;
3820 	}
3821 
3822 	req_seq = __get_reqseq(control);
3823 	req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3824 	if (req_seq_offset < 0)
3825 		req_seq_offset += 64;
3826 
3827 	next_tx_seq_offset =
3828 		(chan->next_tx_seq - chan->expected_ack_seq) % 64;
3829 	if (next_tx_seq_offset < 0)
3830 		next_tx_seq_offset += 64;
3831 
3832 	/* check for invalid req-seq */
3833 	if (req_seq_offset > next_tx_seq_offset) {
3834 		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3835 		goto drop;
3836 	}
3837 
3838 	if (__is_iframe(control)) {
3839 		if (len < 0) {
3840 			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3841 			goto drop;
3842 		}
3843 
3844 		l2cap_data_channel_iframe(chan, control, skb);
3845 	} else {
3846 		if (len != 0) {
3847 			BT_ERR("Nonzero S-frame payload length %d", len);
3848 			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3849 			goto drop;
3850 		}
3851 
3852 		l2cap_data_channel_sframe(chan, control, skb);
3853 	}
3854 
3855 	return 0;
3856 
3857 drop:
3858 	kfree_skb(skb);
3859 	return 0;
3860 }
3861 
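/* Route a data frame to the channel identified by its CID and process it
 * according to the channel mode (Basic, ERTM or Streaming).
 */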
3862 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3863 {
3864 	struct l2cap_chan *chan;
3865 	struct sock *sk = NULL;
3866 	u16 control;
3867 	u8 tx_seq;
3868 	int len;
3869 
3870 	chan = l2cap_get_chan_by_scid(conn, cid);
3871 	if (!chan) {
3872 		BT_DBG("unknown cid 0x%4.4x", cid);
3873 		goto drop;
3874 	}
3875 
3876 	sk = chan->sk;
3877 
3878 	BT_DBG("chan %p, len %d", chan, skb->len);
3879 
3880 	if (chan->state != BT_CONNECTED)
3881 		goto drop;
3882 
3883 	switch (chan->mode) {
3884 	case L2CAP_MODE_BASIC:
3885 		/* If the socket receive buffer overflows we drop data here,
3886 		 * which is *bad* because L2CAP has to be reliable.
3887 		 * But we don't have any other choice: Basic mode L2CAP
3888 		 * provides no flow control mechanism. */
3889 
3890 		if (chan->imtu < skb->len)
3891 			goto drop;
3892 
3893 		if (!chan->ops->recv(chan->data, skb))
3894 			goto done;
3895 		break;
3896 
3897 	case L2CAP_MODE_ERTM:
3898 		if (!sock_owned_by_user(sk)) {
3899 			l2cap_ertm_data_rcv(sk, skb);
3900 		} else {
3901 			if (sk_add_backlog(sk, skb))
3902 				goto drop;
3903 		}
3904 
3905 		goto done;
3906 
3907 	case L2CAP_MODE_STREAMING:
3908 		control = get_unaligned_le16(skb->data);
3909 		skb_pull(skb, 2);
3910 		len = skb->len;
3911 
3912 		if (l2cap_check_fcs(chan, skb))
3913 			goto drop;
3914 
3915 		if (__is_sar_start(control))
3916 			len -= 2;
3917 
3918 		if (chan->fcs == L2CAP_FCS_CRC16)
3919 			len -= 2;
3920 
3921 		if (len > chan->mps || len < 0 || __is_sframe(control))
3922 			goto drop;
3923 
3924 		tx_seq = __get_txseq(control);
3925 
3926 		if (chan->expected_tx_seq == tx_seq)
3927 			chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3928 		else
3929 			chan->expected_tx_seq = (tx_seq + 1) % 64;
3930 
3931 		l2cap_streaming_reassembly_sdu(chan, skb, control);
3932 
3933 		goto done;
3934 
3935 	default:
3936 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
3937 		break;
3938 	}
3939 
3940 drop:
3941 	kfree_skb(skb);
3942 
3943 done:
3944 	if (sk)
3945 		bh_unlock_sock(sk);
3946 
3947 	return 0;
3948 }
3949 
3950 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3951 {
3952 	struct sock *sk = NULL;
3953 	struct l2cap_chan *chan;
3954 
3955 	chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3956 	if (!chan)
3957 		goto drop;
3958 
3959 	sk = chan->sk;
3960 
3961 	bh_lock_sock(sk);
3962 
3963 	BT_DBG("sk %p, len %d", sk, skb->len);
3964 
3965 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3966 		goto drop;
3967 
3968 	if (chan->imtu < skb->len)
3969 		goto drop;
3970 
3971 	if (!chan->ops->recv(chan->data, skb))
3972 		goto done;
3973 
3974 drop:
3975 	kfree_skb(skb);
3976 
3977 done:
3978 	if (sk)
3979 		bh_unlock_sock(sk);
3980 	return 0;
3981 }
3982 
3983 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3984 {
3985 	struct sock *sk = NULL;
3986 	struct l2cap_chan *chan;
3987 
3988 	chan = l2cap_global_chan_by_scid(0, cid, conn->src);
3989 	if (!chan)
3990 		goto drop;
3991 
3992 	sk = chan->sk;
3993 
3994 	bh_lock_sock(sk);
3995 
3996 	BT_DBG("sk %p, len %d", sk, skb->len);
3997 
3998 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3999 		goto drop;
4000 
4001 	if (chan->imtu < skb->len)
4002 		goto drop;
4003 
4004 	if (!chan->ops->recv(chan->data, skb))
4005 		goto done;
4006 
4007 drop:
4008 	kfree_skb(skb);
4009 
4010 done:
4011 	if (sk)
4012 		bh_unlock_sock(sk);
4013 	return 0;
4014 }
4015 
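/* Dispatch a reassembled L2CAP frame by destination CID: signaling,
 * connectionless, ATT, SMP or a connection-oriented data channel.
 */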
4016 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4017 {
4018 	struct l2cap_hdr *lh = (void *) skb->data;
4019 	u16 cid, len;
4020 	__le16 psm;
4021 
4022 	skb_pull(skb, L2CAP_HDR_SIZE);
4023 	cid = __le16_to_cpu(lh->cid);
4024 	len = __le16_to_cpu(lh->len);
4025 
4026 	if (len != skb->len) {
4027 		kfree_skb(skb);
4028 		return;
4029 	}
4030 
4031 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
4032 
4033 	switch (cid) {
4034 	case L2CAP_CID_LE_SIGNALING:
4035 	case L2CAP_CID_SIGNALING:
4036 		l2cap_sig_channel(conn, skb);
4037 		break;
4038 
4039 	case L2CAP_CID_CONN_LESS:
4040 		psm = get_unaligned_le16(skb->data);
4041 		skb_pull(skb, 2);
4042 		l2cap_conless_channel(conn, psm, skb);
4043 		break;
4044 
4045 	case L2CAP_CID_LE_DATA:
4046 		l2cap_att_channel(conn, cid, skb);
4047 		break;
4048 
4049 	case L2CAP_CID_SMP:
4050 		if (smp_sig_channel(conn, skb))
4051 			l2cap_conn_del(conn->hcon, EACCES);
4052 		break;
4053 
4054 	default:
4055 		l2cap_data_channel(conn, cid, skb);
4056 		break;
4057 	}
4058 }
4059 
4060 /* ---- L2CAP interface with lower layer (HCI) ---- */
4061 
4062 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4063 {
4064 	int exact = 0, lm1 = 0, lm2 = 0;
4065 	struct l2cap_chan *c;
4066 
4067 	if (type != ACL_LINK)
4068 		return -EINVAL;
4069 
4070 	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4071 
4072 	/* Find listening sockets and check their link_mode */
4073 	read_lock(&chan_list_lock);
4074 	list_for_each_entry(c, &chan_list, global_l) {
4075 		struct sock *sk = c->sk;
4076 
4077 		if (c->state != BT_LISTEN)
4078 			continue;
4079 
4080 		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4081 			lm1 |= HCI_LM_ACCEPT;
4082 			if (c->role_switch)
4083 				lm1 |= HCI_LM_MASTER;
4084 			exact++;
4085 		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4086 			lm2 |= HCI_LM_ACCEPT;
4087 			if (c->role_switch)
4088 				lm2 |= HCI_LM_MASTER;
4089 		}
4090 	}
4091 	read_unlock(&chan_list_lock);
4092 
4093 	return exact ? lm1 : lm2;
4094 }
4095 
4096 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4097 {
4098 	struct l2cap_conn *conn;
4099 
4100 	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4101 
4102 	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4103 		return -EINVAL;
4104 
4105 	if (!status) {
4106 		conn = l2cap_conn_add(hcon, status);
4107 		if (conn)
4108 			l2cap_conn_ready(conn);
4109 	} else
4110 		l2cap_conn_del(hcon, bt_err(status));
4111 
4112 	return 0;
4113 }
4114 
4115 static int l2cap_disconn_ind(struct hci_conn *hcon)
4116 {
4117 	struct l2cap_conn *conn = hcon->l2cap_data;
4118 
4119 	BT_DBG("hcon %p", hcon);
4120 
4121 	if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4122 		return 0x13;
4123 
4124 	return conn->disc_reason;
4125 }
4126 
4127 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4128 {
4129 	BT_DBG("hcon %p reason %d", hcon, reason);
4130 
4131 	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4132 		return -EINVAL;
4133 
4134 	l2cap_conn_del(hcon, bt_err(reason));
4135 
4136 	return 0;
4137 }
4138 
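/* React to an encryption change on a connection-oriented channel: when
 * encryption is lost, a medium-security channel gets a 5 second grace
 * timer (giving the link a chance to re-encrypt) while a high-security
 * channel is closed immediately; once encryption is back up, the grace
 * timer is cleared again.
 */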
4139 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4140 {
4141 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4142 		return;
4143 
4144 	if (encrypt == 0x00) {
4145 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
4146 			__clear_chan_timer(chan);
4147 			__set_chan_timer(chan, HZ * 5);
4148 		} else if (chan->sec_level == BT_SECURITY_HIGH)
4149 			l2cap_chan_close(chan, ECONNREFUSED);
4150 	} else {
4151 		if (chan->sec_level == BT_SECURITY_MEDIUM)
4152 			__clear_chan_timer(chan);
4153 	}
4154 }
4155 
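/* Authentication/encryption status change on the underlying link.  LE data
 * channels become ready once encryption is up; BR/EDR channels either
 * continue their pending connect/configure handshake or get torn down,
 * depending on the reported status.
 */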
4156 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4157 {
4158 	struct l2cap_conn *conn = hcon->l2cap_data;
4159 	struct l2cap_chan *chan;
4160 
4161 	if (!conn)
4162 		return 0;
4163 
4164 	BT_DBG("conn %p", conn);
4165 
4166 	read_lock(&conn->chan_lock);
4167 
4168 	list_for_each_entry(chan, &conn->chan_l, list) {
4169 		struct sock *sk = chan->sk;
4170 
4171 		bh_lock_sock(sk);
4172 
4173 		BT_DBG("chan->scid %d", chan->scid);
4174 
4175 		if (chan->scid == L2CAP_CID_LE_DATA) {
4176 			if (!status && encrypt) {
4177 				chan->sec_level = hcon->sec_level;
4178 				del_timer(&conn->security_timer);
4179 				l2cap_chan_ready(sk);
4180 			}
4181 
4182 			bh_unlock_sock(sk);
4183 			continue;
4184 		}
4185 
4186 		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4187 			bh_unlock_sock(sk);
4188 			continue;
4189 		}
4190 
4191 		if (!status && (chan->state == BT_CONNECTED ||
4192 						chan->state == BT_CONFIG)) {
4193 			l2cap_check_encryption(chan, encrypt);
4194 			bh_unlock_sock(sk);
4195 			continue;
4196 		}
4197 
4198 		if (chan->state == BT_CONNECT) {
4199 			if (!status) {
4200 				struct l2cap_conn_req req;
4201 				req.scid = cpu_to_le16(chan->scid);
4202 				req.psm  = chan->psm;
4203 
4204 				chan->ident = l2cap_get_ident(conn);
4205 				set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4206 
4207 				l2cap_send_cmd(conn, chan->ident,
4208 					L2CAP_CONN_REQ, sizeof(req), &req);
4209 			} else {
4210 				__clear_chan_timer(chan);
4211 				__set_chan_timer(chan, HZ / 10);
4212 			}
4213 		} else if (chan->state == BT_CONNECT2) {
4214 			struct l2cap_conn_rsp rsp;
4215 			__u16 res, stat;
4216 
4217 			if (!status) {
4218 				if (bt_sk(sk)->defer_setup) {
4219 					struct sock *parent = bt_sk(sk)->parent;
4220 					res = L2CAP_CR_PEND;
4221 					stat = L2CAP_CS_AUTHOR_PEND;
4222 					parent->sk_data_ready(parent, 0);
4223 				} else {
4224 					l2cap_state_change(chan, BT_CONFIG);
4225 					res = L2CAP_CR_SUCCESS;
4226 					stat = L2CAP_CS_NO_INFO;
4227 				}
4228 			} else {
4229 				l2cap_state_change(chan, BT_DISCONN);
4230 				__set_chan_timer(chan, HZ / 10);
4231 				res = L2CAP_CR_SEC_BLOCK;
4232 				stat = L2CAP_CS_NO_INFO;
4233 			}
4234 
4235 			rsp.scid   = cpu_to_le16(chan->dcid);
4236 			rsp.dcid   = cpu_to_le16(chan->scid);
4237 			rsp.result = cpu_to_le16(res);
4238 			rsp.status = cpu_to_le16(stat);
4239 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4240 							sizeof(rsp), &rsp);
4241 		}
4242 
4243 		bh_unlock_sock(sk);
4244 	}
4245 
4246 	read_unlock(&conn->chan_lock);
4247 
4248 	return 0;
4249 }
4250 
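/* Entry point for ACL data from the HCI layer.  A start fragment carries the
 * Basic L2CAP header with the total frame length; if it already holds the
 * whole frame it is processed immediately, otherwise it is copied into
 * conn->rx_skb and completed by continuation fragments until conn->rx_len
 * drops to zero.
 */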
4251 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4252 {
4253 	struct l2cap_conn *conn = hcon->l2cap_data;
4254 
4255 	if (!conn)
4256 		conn = l2cap_conn_add(hcon, 0);
4257 
4258 	if (!conn)
4259 		goto drop;
4260 
4261 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4262 
4263 	if (!(flags & ACL_CONT)) {
4264 		struct l2cap_hdr *hdr;
4265 		struct l2cap_chan *chan;
4266 		u16 cid;
4267 		int len;
4268 
4269 		if (conn->rx_len) {
4270 			BT_ERR("Unexpected start frame (len %d)", skb->len);
4271 			kfree_skb(conn->rx_skb);
4272 			conn->rx_skb = NULL;
4273 			conn->rx_len = 0;
4274 			l2cap_conn_unreliable(conn, ECOMM);
4275 		}
4276 
4277 		/* A start fragment always begins with the Basic L2CAP header */
4278 		if (skb->len < L2CAP_HDR_SIZE) {
4279 			BT_ERR("Frame is too short (len %d)", skb->len);
4280 			l2cap_conn_unreliable(conn, ECOMM);
4281 			goto drop;
4282 		}
4283 
4284 		hdr = (struct l2cap_hdr *) skb->data;
4285 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4286 		cid = __le16_to_cpu(hdr->cid);
4287 
4288 		if (len == skb->len) {
4289 			/* Complete frame received */
4290 			l2cap_recv_frame(conn, skb);
4291 			return 0;
4292 		}
4293 
4294 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4295 
4296 		if (skb->len > len) {
4297 			BT_ERR("Frame is too long (len %d, expected len %d)",
4298 				skb->len, len);
4299 			l2cap_conn_unreliable(conn, ECOMM);
4300 			goto drop;
4301 		}
4302 
4303 		chan = l2cap_get_chan_by_scid(conn, cid);
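		/* l2cap_get_chan_by_scid() is expected to return with the
		 * channel's socket locked, hence the bare bh_unlock_sock()
		 * calls below.
		 */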
4304 
4305 		if (chan && chan->sk) {
4306 			struct sock *sk = chan->sk;
4307 
4308 			if (chan->imtu < len - L2CAP_HDR_SIZE) {
4309 				BT_ERR("Frame exceeding recv MTU (len %d, "
4310 							"MTU %d)", len,
4311 							chan->imtu);
4312 				bh_unlock_sock(sk);
4313 				l2cap_conn_unreliable(conn, ECOMM);
4314 				goto drop;
4315 			}
4316 			bh_unlock_sock(sk);
4317 		}
4318 
4319 		/* Allocate skb for the complete frame (with header) */
4320 		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4321 		if (!conn->rx_skb)
4322 			goto drop;
4323 
4324 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4325 								skb->len);
4326 		conn->rx_len = len - skb->len;
4327 	} else {
4328 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4329 
4330 		if (!conn->rx_len) {
4331 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4332 			l2cap_conn_unreliable(conn, ECOMM);
4333 			goto drop;
4334 		}
4335 
4336 		if (skb->len > conn->rx_len) {
4337 			BT_ERR("Fragment is too long (len %d, expected %d)",
4338 					skb->len, conn->rx_len);
4339 			kfree_skb(conn->rx_skb);
4340 			conn->rx_skb = NULL;
4341 			conn->rx_len = 0;
4342 			l2cap_conn_unreliable(conn, ECOMM);
4343 			goto drop;
4344 		}
4345 
4346 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4347 								skb->len);
4348 		conn->rx_len -= skb->len;
4349 
4350 		if (!conn->rx_len) {
4351 			/* Complete frame received */
4352 			l2cap_recv_frame(conn, conn->rx_skb);
4353 			conn->rx_skb = NULL;
4354 		}
4355 	}
4356 
4357 drop:
4358 	kfree_skb(skb);
4359 	return 0;
4360 }
4361 
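/* Contents of the "l2cap" debugfs file (typically
 * /sys/kernel/debug/bluetooth/l2cap): one line per known channel with
 * src dst state psm scid dcid imtu omtu sec_level mode.
 */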
4362 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4363 {
4364 	struct l2cap_chan *c;
4365 
4366 	read_lock_bh(&chan_list_lock);
4367 
4368 	list_for_each_entry(c, &chan_list, global_l) {
4369 		struct sock *sk = c->sk;
4370 
4371 		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4372 					batostr(&bt_sk(sk)->src),
4373 					batostr(&bt_sk(sk)->dst),
4374 					c->state, __le16_to_cpu(c->psm),
4375 					c->scid, c->dcid, c->imtu, c->omtu,
4376 					c->sec_level, c->mode);
4377 	}
4378 
4379 	read_unlock_bh(&chan_list_lock);
4380 
4381 	return 0;
4382 }
4383 
4384 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4385 {
4386 	return single_open(file, l2cap_debugfs_show, inode->i_private);
4387 }
4388 
4389 static const struct file_operations l2cap_debugfs_fops = {
4390 	.open		= l2cap_debugfs_open,
4391 	.read		= seq_read,
4392 	.llseek		= seq_lseek,
4393 	.release	= single_release,
4394 };
4395 
4396 static struct dentry *l2cap_debugfs;
4397 
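/* Callbacks registered with the HCI core so that L2CAP gets notified about
 * incoming connections, connect/disconnect completion, security changes and
 * received ACL data.
 */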
4398 static struct hci_proto l2cap_hci_proto = {
4399 	.name		= "L2CAP",
4400 	.id		= HCI_PROTO_L2CAP,
4401 	.connect_ind	= l2cap_connect_ind,
4402 	.connect_cfm	= l2cap_connect_cfm,
4403 	.disconn_ind	= l2cap_disconn_ind,
4404 	.disconn_cfm	= l2cap_disconn_cfm,
4405 	.security_cfm	= l2cap_security_cfm,
4406 	.recv_acldata	= l2cap_recv_acldata
4407 };
4408 
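/* Module init: register the L2CAP socket layer, create the "l2cap"
 * workqueue (used for the ERTM local-busy work), hook into the HCI core
 * and expose the debugfs file.  The error path unwinds whatever has been
 * set up so far.
 */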
4409 int __init l2cap_init(void)
4410 {
4411 	int err;
4412 
4413 	err = l2cap_init_sockets();
4414 	if (err < 0)
4415 		return err;
4416 
4417 	_busy_wq = create_singlethread_workqueue("l2cap");
4418 	if (!_busy_wq) {
4419 		err = -ENOMEM;
4420 		goto error;
4421 	}
4422 
4423 	err = hci_register_proto(&l2cap_hci_proto);
4424 	if (err < 0) {
4425 		BT_ERR("L2CAP protocol registration failed");
4426 		bt_sock_unregister(BTPROTO_L2CAP);
4427 		goto error;
4428 	}
4429 
4430 	if (bt_debugfs) {
4431 		l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4432 					bt_debugfs, NULL, &l2cap_debugfs_fops);
4433 		if (!l2cap_debugfs)
4434 			BT_ERR("Failed to create L2CAP debug file");
4435 	}
4436 
4437 	return 0;
4438 
4439 error:
4440 	if (_busy_wq)		/* workqueue creation itself may have failed */
		destroy_workqueue(_busy_wq);
4441 	l2cap_cleanup_sockets();
4442 	return err;
4443 }
4444 
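/* Module exit: undo everything l2cap_init() set up (debugfs file,
 * workqueue, HCI protocol hook, sockets).
 */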
4445 void l2cap_exit(void)
4446 {
4447 	debugfs_remove(l2cap_debugfs);
4448 
4449 	flush_workqueue(_busy_wq);
4450 	destroy_workqueue(_busy_wq);
4451 
4452 	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4453 		BT_ERR("L2CAP protocol unregistration failed");
4454 
4455 	l2cap_cleanup_sockets();
4456 }
4457 
4458 module_param(disable_ertm, bool, 0644);
4459 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4460