xref: /openbmc/linux/net/bluetooth/l2cap_core.c (revision 9c1f8594)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6 
7    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 
9    This program is free software; you can redistribute it and/or modify
10    it under the terms of the GNU General Public License version 2 as
11    published by the Free Software Foundation;
12 
13    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 
22    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24    SOFTWARE IS DISCLAIMED.
25 */
26 
27 /* Bluetooth L2CAP core. */
28 
29 #include <linux/module.h>
30 
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50 
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53 
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
58 
59 int disable_ertm;
60 
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
63 
64 static LIST_HEAD(chan_list);
65 static DEFINE_RWLOCK(chan_list_lock);
66 
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 				u8 code, u8 ident, u16 dlen, void *data);
69 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
70 								void *data);
71 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 				struct l2cap_chan *chan, int err);
74 
75 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
76 
77 /* ---- L2CAP channels ---- */
78 
79 static inline void chan_hold(struct l2cap_chan *c)
80 {
81 	atomic_inc(&c->refcnt);
82 }
83 
84 static inline void chan_put(struct l2cap_chan *c)
85 {
86 	if (atomic_dec_and_test(&c->refcnt))
87 		kfree(c);
88 }
89 
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
91 {
92 	struct l2cap_chan *c;
93 
94 	list_for_each_entry(c, &conn->chan_l, list) {
95 		if (c->dcid == cid)
96 			return c;
97 	}
98 	return NULL;
99 
100 }
101 
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
103 {
104 	struct l2cap_chan *c;
105 
106 	list_for_each_entry(c, &conn->chan_l, list) {
107 		if (c->scid == cid)
108 			return c;
109 	}
110 	return NULL;
111 }
112 
113 /* Find the channel with the given SCID.
114  * Returns the channel with its socket locked. */
115 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
116 {
117 	struct l2cap_chan *c;
118 
119 	read_lock(&conn->chan_lock);
120 	c = __l2cap_get_chan_by_scid(conn, cid);
121 	if (c)
122 		bh_lock_sock(c->sk);
123 	read_unlock(&conn->chan_lock);
124 	return c;
125 }
126 
127 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
128 {
129 	struct l2cap_chan *c;
130 
131 	list_for_each_entry(c, &conn->chan_l, list) {
132 		if (c->ident == ident)
133 			return c;
134 	}
135 	return NULL;
136 }
137 
138 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
139 {
140 	struct l2cap_chan *c;
141 
142 	read_lock(&conn->chan_lock);
143 	c = __l2cap_get_chan_by_ident(conn, ident);
144 	if (c)
145 		bh_lock_sock(c->sk);
146 	read_unlock(&conn->chan_lock);
147 	return c;
148 }
149 
150 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
151 {
152 	struct l2cap_chan *c;
153 
154 	list_for_each_entry(c, &chan_list, global_l) {
155 		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
156 			goto found;
157 	}
158 
159 	c = NULL;
160 found:
161 	return c;
162 }
163 
164 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
165 {
166 	int err;
167 
168 	write_lock_bh(&chan_list_lock);
169 
170 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
171 		err = -EADDRINUSE;
172 		goto done;
173 	}
174 
175 	if (psm) {
176 		chan->psm = psm;
177 		chan->sport = psm;
178 		err = 0;
179 	} else {
180 		u16 p;
181 
182 		err = -EINVAL;
183 		for (p = 0x1001; p < 0x1100; p += 2)
184 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
185 				chan->psm   = cpu_to_le16(p);
186 				chan->sport = cpu_to_le16(p);
187 				err = 0;
188 				break;
189 			}
190 	}
191 
192 done:
193 	write_unlock_bh(&chan_list_lock);
194 	return err;
195 }
196 
197 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
198 {
199 	write_lock_bh(&chan_list_lock);
200 
201 	chan->scid = scid;
202 
203 	write_unlock_bh(&chan_list_lock);
204 
205 	return 0;
206 }
207 
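/* Allocate the first free dynamic CID on this connection (0 if none is left) */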
208 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
209 {
210 	u16 cid = L2CAP_CID_DYN_START;
211 
212 	for (; cid < L2CAP_CID_DYN_END; cid++) {
213 		if (!__l2cap_get_chan_by_scid(conn, cid))
214 			return cid;
215 	}
216 
217 	return 0;
218 }
219 
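/* A pending channel timer holds a reference on the channel: it is taken when
 * the timer is armed in l2cap_set_timer() and dropped in l2cap_clear_timer(). */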
220 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
221 {
222 	BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
223 
224 	if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
225 		chan_hold(chan);
226 }
227 
228 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
229 {
230 	BT_DBG("chan %p state %d", chan, chan->state);
231 
232 	if (timer_pending(timer) && del_timer(timer))
233 		chan_put(chan);
234 }
235 
236 static void l2cap_state_change(struct l2cap_chan *chan, int state)
237 {
238 	chan->state = state;
239 	chan->ops->state_change(chan->data, state);
240 }
241 
242 static void l2cap_chan_timeout(unsigned long arg)
243 {
244 	struct l2cap_chan *chan = (struct l2cap_chan *) arg;
245 	struct sock *sk = chan->sk;
246 	int reason;
247 
248 	BT_DBG("chan %p state %d", chan, chan->state);
249 
250 	bh_lock_sock(sk);
251 
252 	if (sock_owned_by_user(sk)) {
253 		/* sk is owned by user. Try again later */
254 		__set_chan_timer(chan, HZ / 5);
255 		bh_unlock_sock(sk);
256 		chan_put(chan);
257 		return;
258 	}
259 
260 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
261 		reason = ECONNREFUSED;
262 	else if (chan->state == BT_CONNECT &&
263 					chan->sec_level != BT_SECURITY_SDP)
264 		reason = ECONNREFUSED;
265 	else
266 		reason = ETIMEDOUT;
267 
268 	l2cap_chan_close(chan, reason);
269 
270 	bh_unlock_sock(sk);
271 
272 	chan->ops->close(chan->data);
273 	chan_put(chan);
274 }
275 
276 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
277 {
278 	struct l2cap_chan *chan;
279 
280 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
281 	if (!chan)
282 		return NULL;
283 
284 	chan->sk = sk;
285 
286 	write_lock_bh(&chan_list_lock);
287 	list_add(&chan->global_l, &chan_list);
288 	write_unlock_bh(&chan_list_lock);
289 
290 	setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
291 
292 	chan->state = BT_OPEN;
293 
294 	atomic_set(&chan->refcnt, 1);
295 
296 	return chan;
297 }
298 
299 void l2cap_chan_destroy(struct l2cap_chan *chan)
300 {
301 	write_lock_bh(&chan_list_lock);
302 	list_del(&chan->global_l);
303 	write_unlock_bh(&chan_list_lock);
304 
305 	chan_put(chan);
306 }
307 
308 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
309 {
310 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
311 			chan->psm, chan->dcid);
312 
313 	conn->disc_reason = 0x13;	/* HCI reason: remote user terminated connection */
314 
315 	chan->conn = conn;
316 
317 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
318 		if (conn->hcon->type == LE_LINK) {
319 			/* LE connection */
320 			chan->omtu = L2CAP_LE_DEFAULT_MTU;
321 			chan->scid = L2CAP_CID_LE_DATA;
322 			chan->dcid = L2CAP_CID_LE_DATA;
323 		} else {
324 			/* Alloc CID for connection-oriented socket */
325 			chan->scid = l2cap_alloc_cid(conn);
326 			chan->omtu = L2CAP_DEFAULT_MTU;
327 		}
328 	} else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
329 		/* Connectionless socket */
330 		chan->scid = L2CAP_CID_CONN_LESS;
331 		chan->dcid = L2CAP_CID_CONN_LESS;
332 		chan->omtu = L2CAP_DEFAULT_MTU;
333 	} else {
334 		/* Raw socket can send/recv signalling messages only */
335 		chan->scid = L2CAP_CID_SIGNALING;
336 		chan->dcid = L2CAP_CID_SIGNALING;
337 		chan->omtu = L2CAP_DEFAULT_MTU;
338 	}
339 
340 	chan_hold(chan);
341 
342 	list_add(&chan->list, &conn->chan_l);
343 }
344 
345 /* Delete channel.
346  * Must be called with the socket locked. */
347 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
348 {
349 	struct sock *sk = chan->sk;
350 	struct l2cap_conn *conn = chan->conn;
351 	struct sock *parent = bt_sk(sk)->parent;
352 
353 	__clear_chan_timer(chan);
354 
355 	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
356 
357 	if (conn) {
358 		/* Delete from channel list */
359 		write_lock_bh(&conn->chan_lock);
360 		list_del(&chan->list);
361 		write_unlock_bh(&conn->chan_lock);
362 		chan_put(chan);
363 
364 		chan->conn = NULL;
365 		hci_conn_put(conn->hcon);
366 	}
367 
368 	l2cap_state_change(chan, BT_CLOSED);
369 	sock_set_flag(sk, SOCK_ZAPPED);
370 
371 	if (err)
372 		sk->sk_err = err;
373 
374 	if (parent) {
375 		bt_accept_unlink(sk);
376 		parent->sk_data_ready(parent, 0);
377 	} else
378 		sk->sk_state_change(sk);
379 
380 	if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
381 			test_bit(CONF_INPUT_DONE, &chan->conf_state)))
382 		return;
383 
384 	skb_queue_purge(&chan->tx_q);
385 
386 	if (chan->mode == L2CAP_MODE_ERTM) {
387 		struct srej_list *l, *tmp;
388 
389 		__clear_retrans_timer(chan);
390 		__clear_monitor_timer(chan);
391 		__clear_ack_timer(chan);
392 
393 		skb_queue_purge(&chan->srej_q);
394 
395 		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
396 			list_del(&l->list);
397 			kfree(l);
398 		}
399 	}
400 }
401 
402 static void l2cap_chan_cleanup_listen(struct sock *parent)
403 {
404 	struct sock *sk;
405 
406 	BT_DBG("parent %p", parent);
407 
408 	/* Close not yet accepted channels */
409 	while ((sk = bt_accept_dequeue(parent, NULL))) {
410 		struct l2cap_chan *chan = l2cap_pi(sk)->chan;
411 		__clear_chan_timer(chan);
412 		lock_sock(sk);
413 		l2cap_chan_close(chan, ECONNRESET);
414 		release_sock(sk);
415 		chan->ops->close(chan->data);
416 	}
417 }
418 
419 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
420 {
421 	struct l2cap_conn *conn = chan->conn;
422 	struct sock *sk = chan->sk;
423 
424 	BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
425 
426 	switch (chan->state) {
427 	case BT_LISTEN:
428 		l2cap_chan_cleanup_listen(sk);
429 
430 		l2cap_state_change(chan, BT_CLOSED);
431 		sock_set_flag(sk, SOCK_ZAPPED);
432 		break;
433 
434 	case BT_CONNECTED:
435 	case BT_CONFIG:
436 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
437 					conn->hcon->type == ACL_LINK) {
438 			__clear_chan_timer(chan);
439 			__set_chan_timer(chan, sk->sk_sndtimeo);
440 			l2cap_send_disconn_req(conn, chan, reason);
441 		} else
442 			l2cap_chan_del(chan, reason);
443 		break;
444 
445 	case BT_CONNECT2:
446 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
447 					conn->hcon->type == ACL_LINK) {
448 			struct l2cap_conn_rsp rsp;
449 			__u16 result;
450 
451 			if (bt_sk(sk)->defer_setup)
452 				result = L2CAP_CR_SEC_BLOCK;
453 			else
454 				result = L2CAP_CR_BAD_PSM;
455 			l2cap_state_change(chan, BT_DISCONN);
456 
457 			rsp.scid   = cpu_to_le16(chan->dcid);
458 			rsp.dcid   = cpu_to_le16(chan->scid);
459 			rsp.result = cpu_to_le16(result);
460 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
461 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
462 							sizeof(rsp), &rsp);
463 		}
464 
465 		l2cap_chan_del(chan, reason);
466 		break;
467 
468 	case BT_CONNECT:
469 	case BT_DISCONN:
470 		l2cap_chan_del(chan, reason);
471 		break;
472 
473 	default:
474 		sock_set_flag(sk, SOCK_ZAPPED);
475 		break;
476 	}
477 }
478 
479 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
480 {
481 	if (chan->chan_type == L2CAP_CHAN_RAW) {
482 		switch (chan->sec_level) {
483 		case BT_SECURITY_HIGH:
484 			return HCI_AT_DEDICATED_BONDING_MITM;
485 		case BT_SECURITY_MEDIUM:
486 			return HCI_AT_DEDICATED_BONDING;
487 		default:
488 			return HCI_AT_NO_BONDING;
489 		}
490 	} else if (chan->psm == cpu_to_le16(0x0001)) {
491 		if (chan->sec_level == BT_SECURITY_LOW)
492 			chan->sec_level = BT_SECURITY_SDP;
493 
494 		if (chan->sec_level == BT_SECURITY_HIGH)
495 			return HCI_AT_NO_BONDING_MITM;
496 		else
497 			return HCI_AT_NO_BONDING;
498 	} else {
499 		switch (chan->sec_level) {
500 		case BT_SECURITY_HIGH:
501 			return HCI_AT_GENERAL_BONDING_MITM;
502 		case BT_SECURITY_MEDIUM:
503 			return HCI_AT_GENERAL_BONDING;
504 		default:
505 			return HCI_AT_NO_BONDING;
506 		}
507 	}
508 }
509 
510 /* Service level security */
511 static inline int l2cap_check_security(struct l2cap_chan *chan)
512 {
513 	struct l2cap_conn *conn = chan->conn;
514 	__u8 auth_type;
515 
516 	auth_type = l2cap_get_auth_type(chan);
517 
518 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
519 }
520 
521 static u8 l2cap_get_ident(struct l2cap_conn *conn)
522 {
523 	u8 id;
524 
525 	/* Get next available identifier.
526 	 *    1 - 128 are used by kernel.
527 	 *  129 - 199 are reserved.
528 	 *  200 - 254 are used by utilities like l2ping, etc.
529 	 */
530 
531 	spin_lock_bh(&conn->lock);
532 
533 	if (++conn->tx_ident > 128)
534 		conn->tx_ident = 1;
535 
536 	id = conn->tx_ident;
537 
538 	spin_unlock_bh(&conn->lock);
539 
540 	return id;
541 }
542 
543 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
544 {
545 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
546 	u8 flags;
547 
548 	BT_DBG("code 0x%2.2x", code);
549 
550 	if (!skb)
551 		return;
552 
553 	if (lmp_no_flush_capable(conn->hcon->hdev))
554 		flags = ACL_START_NO_FLUSH;
555 	else
556 		flags = ACL_START;
557 
558 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
559 
560 	hci_send_acl(conn->hcon, skb, flags);
561 }
562 
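/* Build and send a single S-frame (supervisory frame) on the channel,
 * setting the F/P bits and appending the CRC16 FCS when configured. */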
563 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
564 {
565 	struct sk_buff *skb;
566 	struct l2cap_hdr *lh;
567 	struct l2cap_conn *conn = chan->conn;
568 	int count, hlen = L2CAP_HDR_SIZE + 2;
569 	u8 flags;
570 
571 	if (chan->state != BT_CONNECTED)
572 		return;
573 
574 	if (chan->fcs == L2CAP_FCS_CRC16)
575 		hlen += 2;
576 
577 	BT_DBG("chan %p, control 0x%2.2x", chan, control);
578 
579 	count = min_t(unsigned int, conn->mtu, hlen);
580 	control |= L2CAP_CTRL_FRAME_TYPE;
581 
582 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
583 		control |= L2CAP_CTRL_FINAL;
584 
585 	if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
586 		control |= L2CAP_CTRL_POLL;
587 
588 	skb = bt_skb_alloc(count, GFP_ATOMIC);
589 	if (!skb)
590 		return;
591 
592 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
593 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
594 	lh->cid = cpu_to_le16(chan->dcid);
595 	put_unaligned_le16(control, skb_put(skb, 2));
596 
597 	if (chan->fcs == L2CAP_FCS_CRC16) {
598 		u16 fcs = crc16(0, (u8 *)lh, count - 2);
599 		put_unaligned_le16(fcs, skb_put(skb, 2));
600 	}
601 
602 	if (lmp_no_flush_capable(conn->hcon->hdev))
603 		flags = ACL_START_NO_FLUSH;
604 	else
605 		flags = ACL_START;
606 
607 	bt_cb(skb)->force_active = chan->force_active;
608 
609 	hci_send_acl(chan->conn->hcon, skb, flags);
610 }
611 
612 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
613 {
614 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
615 		control |= L2CAP_SUPER_RCV_NOT_READY;
616 		set_bit(CONN_RNR_SENT, &chan->conn_state);
617 	} else
618 		control |= L2CAP_SUPER_RCV_READY;
619 
620 	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
621 
622 	l2cap_send_sframe(chan, control);
623 }
624 
625 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
626 {
627 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
628 }
629 
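/* Begin channel setup: query the remote feature mask if that has not been
 * done yet, otherwise send the Connection Request once security is satisfied. */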
630 static void l2cap_do_start(struct l2cap_chan *chan)
631 {
632 	struct l2cap_conn *conn = chan->conn;
633 
634 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
635 		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
636 			return;
637 
638 		if (l2cap_check_security(chan) &&
639 				__l2cap_no_conn_pending(chan)) {
640 			struct l2cap_conn_req req;
641 			req.scid = cpu_to_le16(chan->scid);
642 			req.psm  = chan->psm;
643 
644 			chan->ident = l2cap_get_ident(conn);
645 			set_bit(CONF_CONNECT_PEND, &chan->conf_state);
646 
647 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
648 							sizeof(req), &req);
649 		}
650 	} else {
651 		struct l2cap_info_req req;
652 		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
653 
654 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
655 		conn->info_ident = l2cap_get_ident(conn);
656 
657 		mod_timer(&conn->info_timer, jiffies +
658 					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
659 
660 		l2cap_send_cmd(conn, conn->info_ident,
661 					L2CAP_INFO_REQ, sizeof(req), &req);
662 	}
663 }
664 
665 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
666 {
667 	u32 local_feat_mask = l2cap_feat_mask;
668 	if (!disable_ertm)
669 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
670 
671 	switch (mode) {
672 	case L2CAP_MODE_ERTM:
673 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
674 	case L2CAP_MODE_STREAMING:
675 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
676 	default:
677 		return 0x00;
678 	}
679 }
680 
681 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
682 {
683 	struct sock *sk;
684 	struct l2cap_disconn_req req;
685 
686 	if (!conn)
687 		return;
688 
689 	sk = chan->sk;
690 
691 	if (chan->mode == L2CAP_MODE_ERTM) {
692 		__clear_retrans_timer(chan);
693 		__clear_monitor_timer(chan);
694 		__clear_ack_timer(chan);
695 	}
696 
697 	req.dcid = cpu_to_le16(chan->dcid);
698 	req.scid = cpu_to_le16(chan->scid);
699 	l2cap_send_cmd(conn, l2cap_get_ident(conn),
700 			L2CAP_DISCONN_REQ, sizeof(req), &req);
701 
702 	l2cap_state_change(chan, BT_DISCONN);
703 	sk->sk_err = err;
704 }
705 
706 /* ---- L2CAP connections ---- */
707 static void l2cap_conn_start(struct l2cap_conn *conn)
708 {
709 	struct l2cap_chan *chan, *tmp;
710 
711 	BT_DBG("conn %p", conn);
712 
713 	read_lock(&conn->chan_lock);
714 
715 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
716 		struct sock *sk = chan->sk;
717 
718 		bh_lock_sock(sk);
719 
720 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
721 			bh_unlock_sock(sk);
722 			continue;
723 		}
724 
725 		if (chan->state == BT_CONNECT) {
726 			struct l2cap_conn_req req;
727 
728 			if (!l2cap_check_security(chan) ||
729 					!__l2cap_no_conn_pending(chan)) {
730 				bh_unlock_sock(sk);
731 				continue;
732 			}
733 
734 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
735 					&& test_bit(CONF_STATE2_DEVICE,
736 					&chan->conf_state)) {
737 				/* l2cap_chan_close() calls list_del(chan)
738 				 * so release the lock */
739 				read_unlock(&conn->chan_lock);
740 				l2cap_chan_close(chan, ECONNRESET);
741 				read_lock(&conn->chan_lock);
742 				bh_unlock_sock(sk);
743 				continue;
744 			}
745 
746 			req.scid = cpu_to_le16(chan->scid);
747 			req.psm  = chan->psm;
748 
749 			chan->ident = l2cap_get_ident(conn);
750 			set_bit(CONF_CONNECT_PEND, &chan->conf_state);
751 
752 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
753 							sizeof(req), &req);
754 
755 		} else if (chan->state == BT_CONNECT2) {
756 			struct l2cap_conn_rsp rsp;
757 			char buf[128];
758 			rsp.scid = cpu_to_le16(chan->dcid);
759 			rsp.dcid = cpu_to_le16(chan->scid);
760 
761 			if (l2cap_check_security(chan)) {
762 				if (bt_sk(sk)->defer_setup) {
763 					struct sock *parent = bt_sk(sk)->parent;
764 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
765 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
766 					if (parent)
767 						parent->sk_data_ready(parent, 0);
768 
769 				} else {
770 					l2cap_state_change(chan, BT_CONFIG);
771 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
772 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
773 				}
774 			} else {
775 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
776 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
777 			}
778 
779 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
780 							sizeof(rsp), &rsp);
781 
782 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
783 					rsp.result != L2CAP_CR_SUCCESS) {
784 				bh_unlock_sock(sk);
785 				continue;
786 			}
787 
788 			set_bit(CONF_REQ_SENT, &chan->conf_state);
789 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
790 						l2cap_build_conf_req(chan, buf), buf);
791 			chan->num_conf_req++;
792 		}
793 
794 		bh_unlock_sock(sk);
795 	}
796 
797 	read_unlock(&conn->chan_lock);
798 }
799 
800 /* Find a channel with the given CID and source bdaddr.
801  * Returns the closest match.
802  */
803 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
804 {
805 	struct l2cap_chan *c, *c1 = NULL;
806 
807 	read_lock(&chan_list_lock);
808 
809 	list_for_each_entry(c, &chan_list, global_l) {
810 		struct sock *sk = c->sk;
811 
812 		if (state && c->state != state)
813 			continue;
814 
815 		if (c->scid == cid) {
816 			/* Exact match. */
817 			if (!bacmp(&bt_sk(sk)->src, src)) {
818 				read_unlock(&chan_list_lock);
819 				return c;
820 			}
821 
822 			/* Closest match */
823 			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
824 				c1 = c;
825 		}
826 	}
827 
828 	read_unlock(&chan_list_lock);
829 
830 	return c1;
831 }
832 
833 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
834 {
835 	struct sock *parent, *sk;
836 	struct l2cap_chan *chan, *pchan;
837 
838 	BT_DBG("");
839 
840 	/* Check if we have a socket listening on this CID */
841 	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
842 							conn->src);
843 	if (!pchan)
844 		return;
845 
846 	parent = pchan->sk;
847 
848 	bh_lock_sock(parent);
849 
850 	/* Check for backlog size */
851 	if (sk_acceptq_is_full(parent)) {
852 		BT_DBG("backlog full %d", parent->sk_ack_backlog);
853 		goto clean;
854 	}
855 
856 	chan = pchan->ops->new_connection(pchan->data);
857 	if (!chan)
858 		goto clean;
859 
860 	sk = chan->sk;
861 
862 	write_lock_bh(&conn->chan_lock);
863 
864 	hci_conn_hold(conn->hcon);
865 
866 	bacpy(&bt_sk(sk)->src, conn->src);
867 	bacpy(&bt_sk(sk)->dst, conn->dst);
868 
869 	bt_accept_enqueue(parent, sk);
870 
871 	__l2cap_chan_add(conn, chan);
872 
873 	__set_chan_timer(chan, sk->sk_sndtimeo);
874 
875 	l2cap_state_change(chan, BT_CONNECTED);
876 	parent->sk_data_ready(parent, 0);
877 
878 	write_unlock_bh(&conn->chan_lock);
879 
880 clean:
881 	bh_unlock_sock(parent);
882 }
883 
884 static void l2cap_chan_ready(struct sock *sk)
885 {
886 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
887 	struct sock *parent = bt_sk(sk)->parent;
888 
889 	BT_DBG("sk %p, parent %p", sk, parent);
890 
891 	chan->conf_state = 0;
892 	__clear_chan_timer(chan);
893 
894 	l2cap_state_change(chan, BT_CONNECTED);
895 	sk->sk_state_change(sk);
896 
897 	if (parent)
898 		parent->sk_data_ready(parent, 0);
899 }
900 
901 static void l2cap_conn_ready(struct l2cap_conn *conn)
902 {
903 	struct l2cap_chan *chan;
904 
905 	BT_DBG("conn %p", conn);
906 
907 	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
908 		l2cap_le_conn_ready(conn);
909 
910 	read_lock(&conn->chan_lock);
911 
912 	list_for_each_entry(chan, &conn->chan_l, list) {
913 		struct sock *sk = chan->sk;
914 
915 		bh_lock_sock(sk);
916 
917 		if (conn->hcon->type == LE_LINK) {
918 			if (smp_conn_security(conn, chan->sec_level))
919 				l2cap_chan_ready(sk);
920 
921 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
922 			__clear_chan_timer(chan);
923 			l2cap_state_change(chan, BT_CONNECTED);
924 			sk->sk_state_change(sk);
925 
926 		} else if (chan->state == BT_CONNECT)
927 			l2cap_do_start(chan);
928 
929 		bh_unlock_sock(sk);
930 	}
931 
932 	read_unlock(&conn->chan_lock);
933 }
934 
935 /* Notify sockets that we cannot guarantee reliability anymore */
936 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
937 {
938 	struct l2cap_chan *chan;
939 
940 	BT_DBG("conn %p", conn);
941 
942 	read_lock(&conn->chan_lock);
943 
944 	list_for_each_entry(chan, &conn->chan_l, list) {
945 		struct sock *sk = chan->sk;
946 
947 		if (chan->force_reliable)
948 			sk->sk_err = err;
949 	}
950 
951 	read_unlock(&conn->chan_lock);
952 }
953 
954 static void l2cap_info_timeout(unsigned long arg)
955 {
956 	struct l2cap_conn *conn = (void *) arg;
957 
958 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
959 	conn->info_ident = 0;
960 
961 	l2cap_conn_start(conn);
962 }
963 
964 static void l2cap_conn_del(struct hci_conn *hcon, int err)
965 {
966 	struct l2cap_conn *conn = hcon->l2cap_data;
967 	struct l2cap_chan *chan, *l;
968 	struct sock *sk;
969 
970 	if (!conn)
971 		return;
972 
973 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
974 
975 	kfree_skb(conn->rx_skb);
976 
977 	/* Kill channels */
978 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
979 		sk = chan->sk;
980 		bh_lock_sock(sk);
981 		l2cap_chan_del(chan, err);
982 		bh_unlock_sock(sk);
983 		chan->ops->close(chan->data);
984 	}
985 
986 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
987 		del_timer_sync(&conn->info_timer);
988 
989 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
990 		del_timer(&conn->security_timer);
991 
992 	hcon->l2cap_data = NULL;
993 	kfree(conn);
994 }
995 
996 static void security_timeout(unsigned long arg)
997 {
998 	struct l2cap_conn *conn = (void *) arg;
999 
1000 	l2cap_conn_del(conn->hcon, ETIMEDOUT);
1001 }
1002 
1003 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1004 {
1005 	struct l2cap_conn *conn = hcon->l2cap_data;
1006 
1007 	if (conn || status)
1008 		return conn;
1009 
1010 	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1011 	if (!conn)
1012 		return NULL;
1013 
1014 	hcon->l2cap_data = conn;
1015 	conn->hcon = hcon;
1016 
1017 	BT_DBG("hcon %p conn %p", hcon, conn);
1018 
1019 	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1020 		conn->mtu = hcon->hdev->le_mtu;
1021 	else
1022 		conn->mtu = hcon->hdev->acl_mtu;
1023 
1024 	conn->src = &hcon->hdev->bdaddr;
1025 	conn->dst = &hcon->dst;
1026 
1027 	conn->feat_mask = 0;
1028 
1029 	spin_lock_init(&conn->lock);
1030 	rwlock_init(&conn->chan_lock);
1031 
1032 	INIT_LIST_HEAD(&conn->chan_l);
1033 
1034 	if (hcon->type == LE_LINK)
1035 		setup_timer(&conn->security_timer, security_timeout,
1036 						(unsigned long) conn);
1037 	else
1038 		setup_timer(&conn->info_timer, l2cap_info_timeout,
1039 						(unsigned long) conn);
1040 
1041 	conn->disc_reason = 0x13;	/* HCI reason: remote user terminated connection */
1042 
1043 	return conn;
1044 }
1045 
1046 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1047 {
1048 	write_lock_bh(&conn->chan_lock);
1049 	__l2cap_chan_add(conn, chan);
1050 	write_unlock_bh(&conn->chan_lock);
1051 }
1052 
1053 /* ---- Socket interface ---- */
1054 
1055 /* Find a channel with the given PSM and source bdaddr.
1056  * Returns closest match.
1057  */
1058 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1059 {
1060 	struct l2cap_chan *c, *c1 = NULL;
1061 
1062 	read_lock(&chan_list_lock);
1063 
1064 	list_for_each_entry(c, &chan_list, global_l) {
1065 		struct sock *sk = c->sk;
1066 
1067 		if (state && c->state != state)
1068 			continue;
1069 
1070 		if (c->psm == psm) {
1071 			/* Exact match. */
1072 			if (!bacmp(&bt_sk(sk)->src, src)) {
1073 				read_unlock(&chan_list_lock);
1074 				return c;
1075 			}
1076 
1077 			/* Closest match */
1078 			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1079 				c1 = c;
1080 		}
1081 	}
1082 
1083 	read_unlock(&chan_list_lock);
1084 
1085 	return c1;
1086 }
1087 
1088 int l2cap_chan_connect(struct l2cap_chan *chan)
1089 {
1090 	struct sock *sk = chan->sk;
1091 	bdaddr_t *src = &bt_sk(sk)->src;
1092 	bdaddr_t *dst = &bt_sk(sk)->dst;
1093 	struct l2cap_conn *conn;
1094 	struct hci_conn *hcon;
1095 	struct hci_dev *hdev;
1096 	__u8 auth_type;
1097 	int err;
1098 
1099 	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1100 							chan->psm);
1101 
1102 	hdev = hci_get_route(dst, src);
1103 	if (!hdev)
1104 		return -EHOSTUNREACH;
1105 
1106 	hci_dev_lock_bh(hdev);
1107 
1108 	auth_type = l2cap_get_auth_type(chan);
1109 
1110 	if (chan->dcid == L2CAP_CID_LE_DATA)
1111 		hcon = hci_connect(hdev, LE_LINK, dst,
1112 					chan->sec_level, auth_type);
1113 	else
1114 		hcon = hci_connect(hdev, ACL_LINK, dst,
1115 					chan->sec_level, auth_type);
1116 
1117 	if (IS_ERR(hcon)) {
1118 		err = PTR_ERR(hcon);
1119 		goto done;
1120 	}
1121 
1122 	conn = l2cap_conn_add(hcon, 0);
1123 	if (!conn) {
1124 		hci_conn_put(hcon);
1125 		err = -ENOMEM;
1126 		goto done;
1127 	}
1128 
1129 	/* Update source addr of the socket */
1130 	bacpy(src, conn->src);
1131 
1132 	l2cap_chan_add(conn, chan);
1133 
1134 	l2cap_state_change(chan, BT_CONNECT);
1135 	__set_chan_timer(chan, sk->sk_sndtimeo);
1136 
1137 	if (hcon->state == BT_CONNECTED) {
1138 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1139 			__clear_chan_timer(chan);
1140 			if (l2cap_check_security(chan))
1141 				l2cap_state_change(chan, BT_CONNECTED);
1142 		} else
1143 			l2cap_do_start(chan);
1144 	}
1145 
1146 	err = 0;
1147 
1148 done:
1149 	hci_dev_unlock_bh(hdev);
1150 	hci_dev_put(hdev);
1151 	return err;
1152 }
1153 
1154 int __l2cap_wait_ack(struct sock *sk)
1155 {
1156 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1157 	DECLARE_WAITQUEUE(wait, current);
1158 	int err = 0;
1159 	int timeo = HZ/5;
1160 
1161 	add_wait_queue(sk_sleep(sk), &wait);
1162 	set_current_state(TASK_INTERRUPTIBLE);
1163 	while (chan->unacked_frames > 0 && chan->conn) {
1164 		if (!timeo)
1165 			timeo = HZ/5;
1166 
1167 		if (signal_pending(current)) {
1168 			err = sock_intr_errno(timeo);
1169 			break;
1170 		}
1171 
1172 		release_sock(sk);
1173 		timeo = schedule_timeout(timeo);
1174 		lock_sock(sk);
1175 		set_current_state(TASK_INTERRUPTIBLE);
1176 
1177 		err = sock_error(sk);
1178 		if (err)
1179 			break;
1180 	}
1181 	set_current_state(TASK_RUNNING);
1182 	remove_wait_queue(sk_sleep(sk), &wait);
1183 	return err;
1184 }
1185 
1186 static void l2cap_monitor_timeout(unsigned long arg)
1187 {
1188 	struct l2cap_chan *chan = (void *) arg;
1189 	struct sock *sk = chan->sk;
1190 
1191 	BT_DBG("chan %p", chan);
1192 
1193 	bh_lock_sock(sk);
1194 	if (chan->retry_count >= chan->remote_max_tx) {
1195 		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1196 		bh_unlock_sock(sk);
1197 		return;
1198 	}
1199 
1200 	chan->retry_count++;
1201 	__set_monitor_timer(chan);
1202 
1203 	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1204 	bh_unlock_sock(sk);
1205 }
1206 
1207 static void l2cap_retrans_timeout(unsigned long arg)
1208 {
1209 	struct l2cap_chan *chan = (void *) arg;
1210 	struct sock *sk = chan->sk;
1211 
1212 	BT_DBG("chan %p", chan);
1213 
1214 	bh_lock_sock(sk);
1215 	chan->retry_count = 1;
1216 	__set_monitor_timer(chan);
1217 
1218 	set_bit(CONN_WAIT_F, &chan->conn_state);
1219 
1220 	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1221 	bh_unlock_sock(sk);
1222 }
1223 
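/* Free frames at the head of the tx queue that the peer has acknowledged */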
1224 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1225 {
1226 	struct sk_buff *skb;
1227 
1228 	while ((skb = skb_peek(&chan->tx_q)) &&
1229 			chan->unacked_frames) {
1230 		if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1231 			break;
1232 
1233 		skb = skb_dequeue(&chan->tx_q);
1234 		kfree_skb(skb);
1235 
1236 		chan->unacked_frames--;
1237 	}
1238 
1239 	if (!chan->unacked_frames)
1240 		__clear_retrans_timer(chan);
1241 }
1242 
1243 void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1244 {
1245 	struct hci_conn *hcon = chan->conn->hcon;
1246 	u16 flags;
1247 
1248 	BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1249 
1250 	if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1251 		flags = ACL_START_NO_FLUSH;
1252 	else
1253 		flags = ACL_START;
1254 
1255 	bt_cb(skb)->force_active = chan->force_active;
1256 	hci_send_acl(hcon, skb, flags);
1257 }
1258 
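/* Streaming mode: transmit every queued I-frame immediately, stamping the
 * TxSeq and the optional CRC16 FCS; no retransmission state is kept. */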
1259 void l2cap_streaming_send(struct l2cap_chan *chan)
1260 {
1261 	struct sk_buff *skb;
1262 	u16 control, fcs;
1263 
1264 	while ((skb = skb_dequeue(&chan->tx_q))) {
1265 		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1266 		control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1267 		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1268 
1269 		if (chan->fcs == L2CAP_FCS_CRC16) {
1270 			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1271 			put_unaligned_le16(fcs, skb->data + skb->len - 2);
1272 		}
1273 
1274 		l2cap_do_send(chan, skb);
1275 
1276 		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1277 	}
1278 }
1279 
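/* Retransmit the single I-frame with the given TxSeq, if it is still queued */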
1280 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1281 {
1282 	struct sk_buff *skb, *tx_skb;
1283 	u16 control, fcs;
1284 
1285 	skb = skb_peek(&chan->tx_q);
1286 	if (!skb)
1287 		return;
1288 
1289 	do {
1290 		if (bt_cb(skb)->tx_seq == tx_seq)
1291 			break;
1292 
1293 		if (skb_queue_is_last(&chan->tx_q, skb))
1294 			return;
1295 
1296 	} while ((skb = skb_queue_next(&chan->tx_q, skb)));
1297 
1298 	if (chan->remote_max_tx &&
1299 			bt_cb(skb)->retries == chan->remote_max_tx) {
1300 		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1301 		return;
1302 	}
1303 
1304 	tx_skb = skb_clone(skb, GFP_ATOMIC);
1305 	bt_cb(skb)->retries++;
1306 	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1307 	control &= L2CAP_CTRL_SAR;
1308 
1309 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1310 		control |= L2CAP_CTRL_FINAL;
1311 
1312 	control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1313 			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1314 
1315 	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1316 
1317 	if (chan->fcs == L2CAP_FCS_CRC16) {
1318 		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1319 		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1320 	}
1321 
1322 	l2cap_do_send(chan, tx_skb);
1323 }
1324 
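/* ERTM mode: send I-frames from tx_send_head while the transmit window is
 * open, arming the retransmission timer and tracking unacked frames.
 * Returns the number of frames sent or a negative error. */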
1325 int l2cap_ertm_send(struct l2cap_chan *chan)
1326 {
1327 	struct sk_buff *skb, *tx_skb;
1328 	u16 control, fcs;
1329 	int nsent = 0;
1330 
1331 	if (chan->state != BT_CONNECTED)
1332 		return -ENOTCONN;
1333 
1334 	while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1335 
1336 		if (chan->remote_max_tx &&
1337 				bt_cb(skb)->retries == chan->remote_max_tx) {
1338 			l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1339 			break;
1340 		}
1341 
1342 		tx_skb = skb_clone(skb, GFP_ATOMIC);
1343 
1344 		bt_cb(skb)->retries++;
1345 
1346 		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1347 		control &= L2CAP_CTRL_SAR;
1348 
1349 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1350 			control |= L2CAP_CTRL_FINAL;
1351 
1352 		control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1353 				| (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1354 		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1355 
1356 
1357 		if (chan->fcs == L2CAP_FCS_CRC16) {
1358 			fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1359 			put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1360 		}
1361 
1362 		l2cap_do_send(chan, tx_skb);
1363 
1364 		__set_retrans_timer(chan);
1365 
1366 		bt_cb(skb)->tx_seq = chan->next_tx_seq;
1367 		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1368 
1369 		if (bt_cb(skb)->retries == 1)
1370 			chan->unacked_frames++;
1371 
1372 		chan->frames_sent++;
1373 
1374 		if (skb_queue_is_last(&chan->tx_q, skb))
1375 			chan->tx_send_head = NULL;
1376 		else
1377 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1378 
1379 		nsent++;
1380 	}
1381 
1382 	return nsent;
1383 }
1384 
1385 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1386 {
1387 	int ret;
1388 
1389 	if (!skb_queue_empty(&chan->tx_q))
1390 		chan->tx_send_head = chan->tx_q.next;
1391 
1392 	chan->next_tx_seq = chan->expected_ack_seq;
1393 	ret = l2cap_ertm_send(chan);
1394 	return ret;
1395 }
1396 
1397 static void l2cap_send_ack(struct l2cap_chan *chan)
1398 {
1399 	u16 control = 0;
1400 
1401 	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1402 
1403 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1404 		control |= L2CAP_SUPER_RCV_NOT_READY;
1405 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1406 		l2cap_send_sframe(chan, control);
1407 		return;
1408 	}
1409 
1410 	if (l2cap_ertm_send(chan) > 0)
1411 		return;
1412 
1413 	control |= L2CAP_SUPER_RCV_READY;
1414 	l2cap_send_sframe(chan, control);
1415 }
1416 
1417 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1418 {
1419 	struct srej_list *tail;
1420 	u16 control;
1421 
1422 	control = L2CAP_SUPER_SELECT_REJECT;
1423 	control |= L2CAP_CTRL_FINAL;
1424 
1425 	tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1426 	control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1427 
1428 	l2cap_send_sframe(chan, control);
1429 }
1430 
1431 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1432 {
1433 	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1434 	struct sk_buff **frag;
1435 	int err, sent = 0;
1436 
1437 	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1438 		return -EFAULT;
1439 
1440 	sent += count;
1441 	len  -= count;
1442 
1443 	/* Continuation fragments (no L2CAP header) */
1444 	frag = &skb_shinfo(skb)->frag_list;
1445 	while (len) {
1446 		count = min_t(unsigned int, conn->mtu, len);
1447 
1448 		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1449 		if (!*frag)
1450 			return err;
1451 		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1452 			return -EFAULT;
1453 
1454 		sent += count;
1455 		len  -= count;
1456 
1457 		frag = &(*frag)->next;
1458 	}
1459 
1460 	return sent;
1461 }
1462 
1463 struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1464 {
1465 	struct sock *sk = chan->sk;
1466 	struct l2cap_conn *conn = chan->conn;
1467 	struct sk_buff *skb;
1468 	int err, count, hlen = L2CAP_HDR_SIZE + 2;
1469 	struct l2cap_hdr *lh;
1470 
1471 	BT_DBG("sk %p len %d", sk, (int)len);
1472 
1473 	count = min_t(unsigned int, (conn->mtu - hlen), len);
1474 	skb = bt_skb_send_alloc(sk, count + hlen,
1475 			msg->msg_flags & MSG_DONTWAIT, &err);
1476 	if (!skb)
1477 		return ERR_PTR(err);
1478 
1479 	/* Create L2CAP header */
1480 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1481 	lh->cid = cpu_to_le16(chan->dcid);
1482 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1483 	put_unaligned_le16(chan->psm, skb_put(skb, 2));
1484 
1485 	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1486 	if (unlikely(err < 0)) {
1487 		kfree_skb(skb);
1488 		return ERR_PTR(err);
1489 	}
1490 	return skb;
1491 }
1492 
1493 struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1494 {
1495 	struct sock *sk = chan->sk;
1496 	struct l2cap_conn *conn = chan->conn;
1497 	struct sk_buff *skb;
1498 	int err, count, hlen = L2CAP_HDR_SIZE;
1499 	struct l2cap_hdr *lh;
1500 
1501 	BT_DBG("sk %p len %d", sk, (int)len);
1502 
1503 	count = min_t(unsigned int, (conn->mtu - hlen), len);
1504 	skb = bt_skb_send_alloc(sk, count + hlen,
1505 			msg->msg_flags & MSG_DONTWAIT, &err);
1506 	if (!skb)
1507 		return ERR_PTR(err);
1508 
1509 	/* Create L2CAP header */
1510 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1511 	lh->cid = cpu_to_le16(chan->dcid);
1512 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1513 
1514 	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1515 	if (unlikely(err < 0)) {
1516 		kfree_skb(skb);
1517 		return ERR_PTR(err);
1518 	}
1519 	return skb;
1520 }
1521 
1522 struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1523 {
1524 	struct sock *sk = chan->sk;
1525 	struct l2cap_conn *conn = chan->conn;
1526 	struct sk_buff *skb;
1527 	int err, count, hlen = L2CAP_HDR_SIZE + 2;
1528 	struct l2cap_hdr *lh;
1529 
1530 	BT_DBG("sk %p len %d", sk, (int)len);
1531 
1532 	if (!conn)
1533 		return ERR_PTR(-ENOTCONN);
1534 
1535 	if (sdulen)
1536 		hlen += 2;
1537 
1538 	if (chan->fcs == L2CAP_FCS_CRC16)
1539 		hlen += 2;
1540 
1541 	count = min_t(unsigned int, (conn->mtu - hlen), len);
1542 	skb = bt_skb_send_alloc(sk, count + hlen,
1543 			msg->msg_flags & MSG_DONTWAIT, &err);
1544 	if (!skb)
1545 		return ERR_PTR(err);
1546 
1547 	/* Create L2CAP header */
1548 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1549 	lh->cid = cpu_to_le16(chan->dcid);
1550 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1551 	put_unaligned_le16(control, skb_put(skb, 2));
1552 	if (sdulen)
1553 		put_unaligned_le16(sdulen, skb_put(skb, 2));
1554 
1555 	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1556 	if (unlikely(err < 0)) {
1557 		kfree_skb(skb);
1558 		return ERR_PTR(err);
1559 	}
1560 
1561 	if (chan->fcs == L2CAP_FCS_CRC16)
1562 		put_unaligned_le16(0, skb_put(skb, 2));
1563 
1564 	bt_cb(skb)->retries = 0;
1565 	return skb;
1566 }
1567 
1568 int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1569 {
1570 	struct sk_buff *skb;
1571 	struct sk_buff_head sar_queue;
1572 	u16 control;
1573 	size_t size = 0;
1574 
1575 	skb_queue_head_init(&sar_queue);
1576 	control = L2CAP_SDU_START;
1577 	skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1578 	if (IS_ERR(skb))
1579 		return PTR_ERR(skb);
1580 
1581 	__skb_queue_tail(&sar_queue, skb);
1582 	len -= chan->remote_mps;
1583 	size += chan->remote_mps;
1584 
1585 	while (len > 0) {
1586 		size_t buflen;
1587 
1588 		if (len > chan->remote_mps) {
1589 			control = L2CAP_SDU_CONTINUE;
1590 			buflen = chan->remote_mps;
1591 		} else {
1592 			control = L2CAP_SDU_END;
1593 			buflen = len;
1594 		}
1595 
1596 		skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1597 		if (IS_ERR(skb)) {
1598 			skb_queue_purge(&sar_queue);
1599 			return PTR_ERR(skb);
1600 		}
1601 
1602 		__skb_queue_tail(&sar_queue, skb);
1603 		len -= buflen;
1604 		size += buflen;
1605 	}
1606 	skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1607 	if (chan->tx_send_head == NULL)
1608 		chan->tx_send_head = sar_queue.next;
1609 
1610 	return size;
1611 }
1612 
1613 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1614 {
1615 	struct sk_buff *skb;
1616 	u16 control;
1617 	int err;
1618 
1619 	/* Connectionless channel */
1620 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1621 		skb = l2cap_create_connless_pdu(chan, msg, len);
1622 		if (IS_ERR(skb))
1623 			return PTR_ERR(skb);
1624 
1625 		l2cap_do_send(chan, skb);
1626 		return len;
1627 	}
1628 
1629 	switch (chan->mode) {
1630 	case L2CAP_MODE_BASIC:
1631 		/* Check outgoing MTU */
1632 		if (len > chan->omtu)
1633 			return -EMSGSIZE;
1634 
1635 		/* Create a basic PDU */
1636 		skb = l2cap_create_basic_pdu(chan, msg, len);
1637 		if (IS_ERR(skb))
1638 			return PTR_ERR(skb);
1639 
1640 		l2cap_do_send(chan, skb);
1641 		err = len;
1642 		break;
1643 
1644 	case L2CAP_MODE_ERTM:
1645 	case L2CAP_MODE_STREAMING:
1646 		/* Entire SDU fits into one PDU */
1647 		if (len <= chan->remote_mps) {
1648 			control = L2CAP_SDU_UNSEGMENTED;
1649 			skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1650 									0);
1651 			if (IS_ERR(skb))
1652 				return PTR_ERR(skb);
1653 
1654 			__skb_queue_tail(&chan->tx_q, skb);
1655 
1656 			if (chan->tx_send_head == NULL)
1657 				chan->tx_send_head = skb;
1658 
1659 		} else {
1660 			/* Segment the SDU into multiple PDUs */
1661 			err = l2cap_sar_segment_sdu(chan, msg, len);
1662 			if (err < 0)
1663 				return err;
1664 		}
1665 
1666 		if (chan->mode == L2CAP_MODE_STREAMING) {
1667 			l2cap_streaming_send(chan);
1668 			err = len;
1669 			break;
1670 		}
1671 
1672 		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1673 				test_bit(CONN_WAIT_F, &chan->conn_state)) {
1674 			err = len;
1675 			break;
1676 		}
1677 
1678 		err = l2cap_ertm_send(chan);
1679 		if (err >= 0)
1680 			err = len;
1681 
1682 		break;
1683 
1684 	default:
1685 		BT_DBG("bad state %1.1x", chan->mode);
1686 		err = -EBADFD;
1687 	}
1688 
1689 	return err;
1690 }
1691 
1692 /* Copy frame to all raw sockets on that connection */
1693 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1694 {
1695 	struct sk_buff *nskb;
1696 	struct l2cap_chan *chan;
1697 
1698 	BT_DBG("conn %p", conn);
1699 
1700 	read_lock(&conn->chan_lock);
1701 	list_for_each_entry(chan, &conn->chan_l, list) {
1702 		struct sock *sk = chan->sk;
1703 		if (chan->chan_type != L2CAP_CHAN_RAW)
1704 			continue;
1705 
1706 		/* Don't send frame to the socket it came from */
1707 		if (skb->sk == sk)
1708 			continue;
1709 		nskb = skb_clone(skb, GFP_ATOMIC);
1710 		if (!nskb)
1711 			continue;
1712 
1713 		if (chan->ops->recv(chan->data, nskb))
1714 			kfree_skb(nskb);
1715 	}
1716 	read_unlock(&conn->chan_lock);
1717 }
1718 
1719 /* ---- L2CAP signalling commands ---- */
1720 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1721 				u8 code, u8 ident, u16 dlen, void *data)
1722 {
1723 	struct sk_buff *skb, **frag;
1724 	struct l2cap_cmd_hdr *cmd;
1725 	struct l2cap_hdr *lh;
1726 	int len, count;
1727 
1728 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1729 			conn, code, ident, dlen);
1730 
1731 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1732 	count = min_t(unsigned int, conn->mtu, len);
1733 
1734 	skb = bt_skb_alloc(count, GFP_ATOMIC);
1735 	if (!skb)
1736 		return NULL;
1737 
1738 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1739 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1740 
1741 	if (conn->hcon->type == LE_LINK)
1742 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1743 	else
1744 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1745 
1746 	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1747 	cmd->code  = code;
1748 	cmd->ident = ident;
1749 	cmd->len   = cpu_to_le16(dlen);
1750 
1751 	if (dlen) {
1752 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1753 		memcpy(skb_put(skb, count), data, count);
1754 		data += count;
1755 	}
1756 
1757 	len -= skb->len;
1758 
1759 	/* Continuation fragments (no L2CAP header) */
1760 	frag = &skb_shinfo(skb)->frag_list;
1761 	while (len) {
1762 		count = min_t(unsigned int, conn->mtu, len);
1763 
1764 		*frag = bt_skb_alloc(count, GFP_ATOMIC);
1765 		if (!*frag)
1766 			goto fail;
1767 
1768 		memcpy(skb_put(*frag, count), data, count);
1769 
1770 		len  -= count;
1771 		data += count;
1772 
1773 		frag = &(*frag)->next;
1774 	}
1775 
1776 	return skb;
1777 
1778 fail:
1779 	kfree_skb(skb);
1780 	return NULL;
1781 }
1782 
1783 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1784 {
1785 	struct l2cap_conf_opt *opt = *ptr;
1786 	int len;
1787 
1788 	len = L2CAP_CONF_OPT_SIZE + opt->len;
1789 	*ptr += len;
1790 
1791 	*type = opt->type;
1792 	*olen = opt->len;
1793 
1794 	switch (opt->len) {
1795 	case 1:
1796 		*val = *((u8 *) opt->val);
1797 		break;
1798 
1799 	case 2:
1800 		*val = get_unaligned_le16(opt->val);
1801 		break;
1802 
1803 	case 4:
1804 		*val = get_unaligned_le32(opt->val);
1805 		break;
1806 
1807 	default:
1808 		*val = (unsigned long) opt->val;
1809 		break;
1810 	}
1811 
1812 	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1813 	return len;
1814 }
1815 
1816 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1817 {
1818 	struct l2cap_conf_opt *opt = *ptr;
1819 
1820 	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1821 
1822 	opt->type = type;
1823 	opt->len  = len;
1824 
1825 	switch (len) {
1826 	case 1:
1827 		*((u8 *) opt->val)  = val;
1828 		break;
1829 
1830 	case 2:
1831 		put_unaligned_le16(val, opt->val);
1832 		break;
1833 
1834 	case 4:
1835 		put_unaligned_le32(val, opt->val);
1836 		break;
1837 
1838 	default:
1839 		memcpy(opt->val, (void *) val, len);
1840 		break;
1841 	}
1842 
1843 	*ptr += L2CAP_CONF_OPT_SIZE + len;
1844 }
1845 
1846 static void l2cap_ack_timeout(unsigned long arg)
1847 {
1848 	struct l2cap_chan *chan = (void *) arg;
1849 
1850 	bh_lock_sock(chan->sk);
1851 	l2cap_send_ack(chan);
1852 	bh_unlock_sock(chan->sk);
1853 }
1854 
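/* Reset the ERTM sequence state and set up the retransmission, monitor
 * and ack timers for the channel. */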
1855 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1856 {
1857 	struct sock *sk = chan->sk;
1858 
1859 	chan->expected_ack_seq = 0;
1860 	chan->unacked_frames = 0;
1861 	chan->buffer_seq = 0;
1862 	chan->num_acked = 0;
1863 	chan->frames_sent = 0;
1864 
1865 	setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1866 							(unsigned long) chan);
1867 	setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1868 							(unsigned long) chan);
1869 	setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1870 
1871 	skb_queue_head_init(&chan->srej_q);
1872 
1873 	INIT_LIST_HEAD(&chan->srej_l);
1874 
1875 
1876 	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1877 }
1878 
1879 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1880 {
1881 	switch (mode) {
1882 	case L2CAP_MODE_STREAMING:
1883 	case L2CAP_MODE_ERTM:
1884 		if (l2cap_mode_supported(mode, remote_feat_mask))
1885 			return mode;
1886 		/* fall through */
1887 	default:
1888 		return L2CAP_MODE_BASIC;
1889 	}
1890 }
1891 
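/* Build our Configure Request (MTU, RFC and FCS options) for the channel's
 * current mode. Returns the number of bytes written to data. */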
1892 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1893 {
1894 	struct l2cap_conf_req *req = data;
1895 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1896 	void *ptr = req->data;
1897 
1898 	BT_DBG("chan %p", chan);
1899 
1900 	if (chan->num_conf_req || chan->num_conf_rsp)
1901 		goto done;
1902 
1903 	switch (chan->mode) {
1904 	case L2CAP_MODE_STREAMING:
1905 	case L2CAP_MODE_ERTM:
1906 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
1907 			break;
1908 
1909 		/* fall through */
1910 	default:
1911 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
1912 		break;
1913 	}
1914 
1915 done:
1916 	if (chan->imtu != L2CAP_DEFAULT_MTU)
1917 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1918 
1919 	switch (chan->mode) {
1920 	case L2CAP_MODE_BASIC:
1921 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1922 				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
1923 			break;
1924 
1925 		rfc.mode            = L2CAP_MODE_BASIC;
1926 		rfc.txwin_size      = 0;
1927 		rfc.max_transmit    = 0;
1928 		rfc.retrans_timeout = 0;
1929 		rfc.monitor_timeout = 0;
1930 		rfc.max_pdu_size    = 0;
1931 
1932 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1933 							(unsigned long) &rfc);
1934 		break;
1935 
1936 	case L2CAP_MODE_ERTM:
1937 		rfc.mode            = L2CAP_MODE_ERTM;
1938 		rfc.txwin_size      = chan->tx_win;
1939 		rfc.max_transmit    = chan->max_tx;
1940 		rfc.retrans_timeout = 0;
1941 		rfc.monitor_timeout = 0;
1942 		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1943 		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1944 			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1945 
1946 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1947 							(unsigned long) &rfc);
1948 
1949 		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1950 			break;
1951 
1952 		if (chan->fcs == L2CAP_FCS_NONE ||
1953 				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1954 			chan->fcs = L2CAP_FCS_NONE;
1955 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1956 		}
1957 		break;
1958 
1959 	case L2CAP_MODE_STREAMING:
1960 		rfc.mode            = L2CAP_MODE_STREAMING;
1961 		rfc.txwin_size      = 0;
1962 		rfc.max_transmit    = 0;
1963 		rfc.retrans_timeout = 0;
1964 		rfc.monitor_timeout = 0;
1965 		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1966 		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1967 			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1968 
1969 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1970 							(unsigned long) &rfc);
1971 
1972 		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1973 			break;
1974 
1975 		if (chan->fcs == L2CAP_FCS_NONE ||
1976 				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1977 			chan->fcs = L2CAP_FCS_NONE;
1978 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1979 		}
1980 		break;
1981 	}
1982 
1983 	req->dcid  = cpu_to_le16(chan->dcid);
1984 	req->flags = cpu_to_le16(0);
1985 
1986 	return ptr - data;
1987 }
1988 
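/* Parse the peer's Configure Request (chan->conf_req) and build our Configure
 * Response in data. Returns the response length, or a negative error if the
 * requested mode cannot be accepted. */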
1989 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1990 {
1991 	struct l2cap_conf_rsp *rsp = data;
1992 	void *ptr = rsp->data;
1993 	void *req = chan->conf_req;
1994 	int len = chan->conf_len;
1995 	int type, hint, olen;
1996 	unsigned long val;
1997 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1998 	u16 mtu = L2CAP_DEFAULT_MTU;
1999 	u16 result = L2CAP_CONF_SUCCESS;
2000 
2001 	BT_DBG("chan %p", chan);
2002 
2003 	while (len >= L2CAP_CONF_OPT_SIZE) {
2004 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2005 
2006 		hint  = type & L2CAP_CONF_HINT;
2007 		type &= L2CAP_CONF_MASK;
2008 
2009 		switch (type) {
2010 		case L2CAP_CONF_MTU:
2011 			mtu = val;
2012 			break;
2013 
2014 		case L2CAP_CONF_FLUSH_TO:
2015 			chan->flush_to = val;
2016 			break;
2017 
2018 		case L2CAP_CONF_QOS:
2019 			break;
2020 
2021 		case L2CAP_CONF_RFC:
2022 			if (olen == sizeof(rfc))
2023 				memcpy(&rfc, (void *) val, olen);
2024 			break;
2025 
2026 		case L2CAP_CONF_FCS:
2027 			if (val == L2CAP_FCS_NONE)
2028 				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2029 
2030 			break;
2031 
2032 		default:
2033 			if (hint)
2034 				break;
2035 
2036 			result = L2CAP_CONF_UNKNOWN;
2037 			*((u8 *) ptr++) = type;
2038 			break;
2039 		}
2040 	}
2041 
2042 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
2043 		goto done;
2044 
2045 	switch (chan->mode) {
2046 	case L2CAP_MODE_STREAMING:
2047 	case L2CAP_MODE_ERTM:
2048 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2049 			chan->mode = l2cap_select_mode(rfc.mode,
2050 					chan->conn->feat_mask);
2051 			break;
2052 		}
2053 
2054 		if (chan->mode != rfc.mode)
2055 			return -ECONNREFUSED;
2056 
2057 		break;
2058 	}
2059 
2060 done:
2061 	if (chan->mode != rfc.mode) {
2062 		result = L2CAP_CONF_UNACCEPT;
2063 		rfc.mode = chan->mode;
2064 
2065 		if (chan->num_conf_rsp == 1)
2066 			return -ECONNREFUSED;
2067 
2068 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2069 					sizeof(rfc), (unsigned long) &rfc);
2070 	}
2071 
2072 
2073 	if (result == L2CAP_CONF_SUCCESS) {
2074 		/* Configure output options and let the other side know
2075 		 * which ones we don't like. */
2076 
2077 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
2078 			result = L2CAP_CONF_UNACCEPT;
2079 		else {
2080 			chan->omtu = mtu;
2081 			set_bit(CONF_MTU_DONE, &chan->conf_state);
2082 		}
2083 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2084 
2085 		switch (rfc.mode) {
2086 		case L2CAP_MODE_BASIC:
2087 			chan->fcs = L2CAP_FCS_NONE;
2088 			set_bit(CONF_MODE_DONE, &chan->conf_state);
2089 			break;
2090 
2091 		case L2CAP_MODE_ERTM:
2092 			chan->remote_tx_win = rfc.txwin_size;
2093 			chan->remote_max_tx = rfc.max_transmit;
2094 
2095 			if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2096 				rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2097 
2098 			chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2099 
2100 			rfc.retrans_timeout =
2101 				cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2102 			rfc.monitor_timeout =
2103 				cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2104 
2105 			set_bit(CONF_MODE_DONE, &chan->conf_state);
2106 
2107 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2108 					sizeof(rfc), (unsigned long) &rfc);
2109 
2110 			break;
2111 
2112 		case L2CAP_MODE_STREAMING:
2113 			if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2114 				rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2115 
2116 			chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2117 
2118 			set_bit(CONF_MODE_DONE, &chan->conf_state);
2119 
2120 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2121 					sizeof(rfc), (unsigned long) &rfc);
2122 
2123 			break;
2124 
2125 		default:
2126 			result = L2CAP_CONF_UNACCEPT;
2127 
2128 			memset(&rfc, 0, sizeof(rfc));
2129 			rfc.mode = chan->mode;
2130 		}
2131 
2132 		if (result == L2CAP_CONF_SUCCESS)
2133 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2134 	}
2135 	rsp->scid   = cpu_to_le16(chan->dcid);
2136 	rsp->result = cpu_to_le16(result);
2137 	rsp->flags  = cpu_to_le16(0x0000);
2138 
2139 	return ptr - data;
2140 }
2141 
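/* Parse a Configure Response that did not accept all of our options and
 * build an adjusted Configure Request in *data.  *result may be updated,
 * e.g. when the peer proposed an MTU below the allowed minimum. */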
2142 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2143 {
2144 	struct l2cap_conf_req *req = data;
2145 	void *ptr = req->data;
2146 	int type, olen;
2147 	unsigned long val;
2148 	struct l2cap_conf_rfc rfc;
2149 
2150 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2151 
2152 	while (len >= L2CAP_CONF_OPT_SIZE) {
2153 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2154 
2155 		switch (type) {
2156 		case L2CAP_CONF_MTU:
2157 			if (val < L2CAP_DEFAULT_MIN_MTU) {
2158 				*result = L2CAP_CONF_UNACCEPT;
2159 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2160 			} else
2161 				chan->imtu = val;
2162 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2163 			break;
2164 
2165 		case L2CAP_CONF_FLUSH_TO:
2166 			chan->flush_to = val;
2167 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2168 							2, chan->flush_to);
2169 			break;
2170 
2171 		case L2CAP_CONF_RFC:
2172 			if (olen == sizeof(rfc))
2173 				memcpy(&rfc, (void *)val, olen);
2174 
2175 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2176 							rfc.mode != chan->mode)
2177 				return -ECONNREFUSED;
2178 
2179 			chan->fcs = L2CAP_FCS_NONE;
2180 
2181 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2182 					sizeof(rfc), (unsigned long) &rfc);
2183 			break;
2184 		}
2185 	}
2186 
2187 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2188 		return -ECONNREFUSED;
2189 
2190 	chan->mode = rfc.mode;
2191 
2192 	if (*result == L2CAP_CONF_SUCCESS) {
2193 		switch (rfc.mode) {
2194 		case L2CAP_MODE_ERTM:
2195 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2196 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2197 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
2198 			break;
2199 		case L2CAP_MODE_STREAMING:
2200 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
2201 		}
2202 	}
2203 
2204 	req->dcid   = cpu_to_le16(chan->dcid);
2205 	req->flags  = cpu_to_le16(0x0000);
2206 
2207 	return ptr - data;
2208 }
2209 
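/* Build a bare Configure Response (no options) with the given result and
 * flags; returns its length. */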
2210 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2211 {
2212 	struct l2cap_conf_rsp *rsp = data;
2213 	void *ptr = rsp->data;
2214 
2215 	BT_DBG("chan %p", chan);
2216 
2217 	rsp->scid   = cpu_to_le16(chan->dcid);
2218 	rsp->result = cpu_to_le16(result);
2219 	rsp->flags  = cpu_to_le16(flags);
2220 
2221 	return ptr - data;
2222 }
2223 
2224 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2225 {
2226 	struct l2cap_conn_rsp rsp;
2227 	struct l2cap_conn *conn = chan->conn;
2228 	u8 buf[128];
2229 
2230 	rsp.scid   = cpu_to_le16(chan->dcid);
2231 	rsp.dcid   = cpu_to_le16(chan->scid);
2232 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2233 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2234 	l2cap_send_cmd(conn, chan->ident,
2235 				L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2236 
2237 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2238 		return;
2239 
2240 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2241 			l2cap_build_conf_req(chan, buf), buf);
2242 	chan->num_conf_req++;
2243 }
2244 
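/* Pull the RFC option out of a successful Configure Response and cache the
 * negotiated ERTM/Streaming timeouts and max PDU size. */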
2245 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2246 {
2247 	int type, olen;
2248 	unsigned long val;
2249 	struct l2cap_conf_rfc rfc;
2250 
2251 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2252 
2253 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2254 		return;
2255 
2256 	while (len >= L2CAP_CONF_OPT_SIZE) {
2257 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2258 
2259 		switch (type) {
2260 		case L2CAP_CONF_RFC:
2261 			if (olen == sizeof(rfc))
2262 				memcpy(&rfc, (void *)val, olen);
2263 			goto done;
2264 		}
2265 	}
2266 
2267 done:
2268 	switch (rfc.mode) {
2269 	case L2CAP_MODE_ERTM:
2270 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2271 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2272 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
2273 		break;
2274 	case L2CAP_MODE_STREAMING:
2275 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
2276 	}
2277 }
2278 
2279 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2280 {
2281 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2282 
2283 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2284 		return 0;
2285 
2286 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2287 					cmd->ident == conn->info_ident) {
2288 		del_timer(&conn->info_timer);
2289 
2290 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2291 		conn->info_ident = 0;
2292 
2293 		l2cap_conn_start(conn);
2294 	}
2295 
2296 	return 0;
2297 }
2298 
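/* Handle an incoming Connection Request: look for a channel listening on
 * the PSM, create the new channel and answer with a Connection Response,
 * starting configuration when security allows it. */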
2299 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2300 {
2301 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2302 	struct l2cap_conn_rsp rsp;
2303 	struct l2cap_chan *chan = NULL, *pchan;
2304 	struct sock *parent, *sk = NULL;
2305 	int result, status = L2CAP_CS_NO_INFO;
2306 
2307 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2308 	__le16 psm = req->psm;
2309 
2310 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
2311 
2312 	/* Check if we have a socket listening on this PSM */
2313 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2314 	if (!pchan) {
2315 		result = L2CAP_CR_BAD_PSM;
2316 		goto sendresp;
2317 	}
2318 
2319 	parent = pchan->sk;
2320 
2321 	bh_lock_sock(parent);
2322 
2323 	/* Check if the ACL is secure enough (if not SDP) */
2324 	if (psm != cpu_to_le16(0x0001) &&
2325 				!hci_conn_check_link_mode(conn->hcon)) {
2326 		conn->disc_reason = 0x05;
2327 		result = L2CAP_CR_SEC_BLOCK;
2328 		goto response;
2329 	}
2330 
2331 	result = L2CAP_CR_NO_MEM;
2332 
2333 	/* Check for backlog size */
2334 	if (sk_acceptq_is_full(parent)) {
2335 		BT_DBG("backlog full %d", parent->sk_ack_backlog);
2336 		goto response;
2337 	}
2338 
2339 	chan = pchan->ops->new_connection(pchan->data);
2340 	if (!chan)
2341 		goto response;
2342 
2343 	sk = chan->sk;
2344 
2345 	write_lock_bh(&conn->chan_lock);
2346 
2347 	/* Check if we already have a channel with that dcid */
2348 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
2349 		write_unlock_bh(&conn->chan_lock);
2350 		sock_set_flag(sk, SOCK_ZAPPED);
2351 		chan->ops->close(chan->data);
2352 		goto response;
2353 	}
2354 
2355 	hci_conn_hold(conn->hcon);
2356 
2357 	bacpy(&bt_sk(sk)->src, conn->src);
2358 	bacpy(&bt_sk(sk)->dst, conn->dst);
2359 	chan->psm  = psm;
2360 	chan->dcid = scid;
2361 
2362 	bt_accept_enqueue(parent, sk);
2363 
2364 	__l2cap_chan_add(conn, chan);
2365 
2366 	dcid = chan->scid;
2367 
2368 	__set_chan_timer(chan, sk->sk_sndtimeo);
2369 
2370 	chan->ident = cmd->ident;
2371 
2372 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2373 		if (l2cap_check_security(chan)) {
2374 			if (bt_sk(sk)->defer_setup) {
2375 				l2cap_state_change(chan, BT_CONNECT2);
2376 				result = L2CAP_CR_PEND;
2377 				status = L2CAP_CS_AUTHOR_PEND;
2378 				parent->sk_data_ready(parent, 0);
2379 			} else {
2380 				l2cap_state_change(chan, BT_CONFIG);
2381 				result = L2CAP_CR_SUCCESS;
2382 				status = L2CAP_CS_NO_INFO;
2383 			}
2384 		} else {
2385 			l2cap_state_change(chan, BT_CONNECT2);
2386 			result = L2CAP_CR_PEND;
2387 			status = L2CAP_CS_AUTHEN_PEND;
2388 		}
2389 	} else {
2390 		l2cap_state_change(chan, BT_CONNECT2);
2391 		result = L2CAP_CR_PEND;
2392 		status = L2CAP_CS_NO_INFO;
2393 	}
2394 
2395 	write_unlock_bh(&conn->chan_lock);
2396 
2397 response:
2398 	bh_unlock_sock(parent);
2399 
2400 sendresp:
2401 	rsp.scid   = cpu_to_le16(scid);
2402 	rsp.dcid   = cpu_to_le16(dcid);
2403 	rsp.result = cpu_to_le16(result);
2404 	rsp.status = cpu_to_le16(status);
2405 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2406 
2407 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2408 		struct l2cap_info_req info;
2409 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2410 
2411 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2412 		conn->info_ident = l2cap_get_ident(conn);
2413 
2414 		mod_timer(&conn->info_timer, jiffies +
2415 					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2416 
2417 		l2cap_send_cmd(conn, conn->info_ident,
2418 					L2CAP_INFO_REQ, sizeof(info), &info);
2419 	}
2420 
2421 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2422 				result == L2CAP_CR_SUCCESS) {
2423 		u8 buf[128];
2424 		set_bit(CONF_REQ_SENT, &chan->conf_state);
2425 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2426 					l2cap_build_conf_req(chan, buf), buf);
2427 		chan->num_conf_req++;
2428 	}
2429 
2430 	return 0;
2431 }
2432 
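/* Handle a Connection Response for a channel we initiated: on success move
 * to BT_CONFIG and send our first Configure Request; a pending result just
 * marks the channel, any other result tears it down. */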
2433 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2434 {
2435 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2436 	u16 scid, dcid, result, status;
2437 	struct l2cap_chan *chan;
2438 	struct sock *sk;
2439 	u8 req[128];
2440 
2441 	scid   = __le16_to_cpu(rsp->scid);
2442 	dcid   = __le16_to_cpu(rsp->dcid);
2443 	result = __le16_to_cpu(rsp->result);
2444 	status = __le16_to_cpu(rsp->status);
2445 
2446 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2447 
2448 	if (scid) {
2449 		chan = l2cap_get_chan_by_scid(conn, scid);
2450 		if (!chan)
2451 			return -EFAULT;
2452 	} else {
2453 		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2454 		if (!chan)
2455 			return -EFAULT;
2456 	}
2457 
2458 	sk = chan->sk;
2459 
2460 	switch (result) {
2461 	case L2CAP_CR_SUCCESS:
2462 		l2cap_state_change(chan, BT_CONFIG);
2463 		chan->ident = 0;
2464 		chan->dcid = dcid;
2465 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2466 
2467 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2468 			break;
2469 
2470 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2471 					l2cap_build_conf_req(chan, req), req);
2472 		chan->num_conf_req++;
2473 		break;
2474 
2475 	case L2CAP_CR_PEND:
2476 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2477 		break;
2478 
2479 	default:
2480 		/* don't delete l2cap channel if sk is owned by user */
2481 		if (sock_owned_by_user(sk)) {
2482 			l2cap_state_change(chan, BT_DISCONN);
2483 			__clear_chan_timer(chan);
2484 			__set_chan_timer(chan, HZ / 5);
2485 			break;
2486 		}
2487 
2488 		l2cap_chan_del(chan, ECONNREFUSED);
2489 		break;
2490 	}
2491 
2492 	bh_unlock_sock(sk);
2493 	return 0;
2494 }
2495 
2496 static inline void set_default_fcs(struct l2cap_chan *chan)
2497 {
2498 	/* FCS is enabled only in ERTM or streaming mode, if one or both
2499 	 * sides request it.
2500 	 */
2501 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2502 		chan->fcs = L2CAP_FCS_NONE;
2503 	else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2504 		chan->fcs = L2CAP_FCS_CRC16;
2505 }
2506 
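/* Handle a Configure Request: accumulate the options (they may arrive in
 * several fragments), parse them once complete and send a Configure
 * Response.  When both directions are configured the channel becomes
 * BT_CONNECTED. */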
2507 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2508 {
2509 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2510 	u16 dcid, flags;
2511 	u8 rsp[64];
2512 	struct l2cap_chan *chan;
2513 	struct sock *sk;
2514 	int len;
2515 
2516 	dcid  = __le16_to_cpu(req->dcid);
2517 	flags = __le16_to_cpu(req->flags);
2518 
2519 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2520 
2521 	chan = l2cap_get_chan_by_scid(conn, dcid);
2522 	if (!chan)
2523 		return -ENOENT;
2524 
2525 	sk = chan->sk;
2526 
2527 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2528 		struct l2cap_cmd_rej_cid rej;
2529 
2530 		rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2531 		rej.scid = cpu_to_le16(chan->scid);
2532 		rej.dcid = cpu_to_le16(chan->dcid);
2533 
2534 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2535 				sizeof(rej), &rej);
2536 		goto unlock;
2537 	}
2538 
2539 	/* Reject if config buffer is too small. */
2540 	len = cmd_len - sizeof(*req);
2541 	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2542 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2543 				l2cap_build_conf_rsp(chan, rsp,
2544 					L2CAP_CONF_REJECT, flags), rsp);
2545 		goto unlock;
2546 	}
2547 
2548 	/* Store config. */
2549 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
2550 	chan->conf_len += len;
2551 
2552 	if (flags & 0x0001) {
2553 		/* Incomplete config. Send empty response. */
2554 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2555 				l2cap_build_conf_rsp(chan, rsp,
2556 					L2CAP_CONF_SUCCESS, 0x0001), rsp);
2557 		goto unlock;
2558 	}
2559 
2560 	/* Complete config. */
2561 	len = l2cap_parse_conf_req(chan, rsp);
2562 	if (len < 0) {
2563 		l2cap_send_disconn_req(conn, chan, ECONNRESET);
2564 		goto unlock;
2565 	}
2566 
2567 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2568 	chan->num_conf_rsp++;
2569 
2570 	/* Reset config buffer. */
2571 	chan->conf_len = 0;
2572 
2573 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2574 		goto unlock;
2575 
2576 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2577 		set_default_fcs(chan);
2578 
2579 		l2cap_state_change(chan, BT_CONNECTED);
2580 
2581 		chan->next_tx_seq = 0;
2582 		chan->expected_tx_seq = 0;
2583 		skb_queue_head_init(&chan->tx_q);
2584 		if (chan->mode == L2CAP_MODE_ERTM)
2585 			l2cap_ertm_init(chan);
2586 
2587 		l2cap_chan_ready(sk);
2588 		goto unlock;
2589 	}
2590 
2591 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2592 		u8 buf[64];
2593 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2594 					l2cap_build_conf_req(chan, buf), buf);
2595 		chan->num_conf_req++;
2596 	}
2597 
2598 unlock:
2599 	bh_unlock_sock(sk);
2600 	return 0;
2601 }
2602 
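/* Handle a Configure Response: cache the negotiated RFC values on success,
 * retry with adjusted options when the peer rejected some of ours, and
 * disconnect on anything else. */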
2603 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2604 {
2605 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2606 	u16 scid, flags, result;
2607 	struct l2cap_chan *chan;
2608 	struct sock *sk;
2609 	int len = __le16_to_cpu(cmd->len) - sizeof(*rsp);
2610 
2611 	scid   = __le16_to_cpu(rsp->scid);
2612 	flags  = __le16_to_cpu(rsp->flags);
2613 	result = __le16_to_cpu(rsp->result);
2614 
2615 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2616 			scid, flags, result);
2617 
2618 	chan = l2cap_get_chan_by_scid(conn, scid);
2619 	if (!chan)
2620 		return 0;
2621 
2622 	sk = chan->sk;
2623 
2624 	switch (result) {
2625 	case L2CAP_CONF_SUCCESS:
2626 		l2cap_conf_rfc_get(chan, rsp->data, len);
2627 		break;
2628 
2629 	case L2CAP_CONF_UNACCEPT:
2630 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2631 			char req[64];
2632 
2633 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2634 				l2cap_send_disconn_req(conn, chan, ECONNRESET);
2635 				goto done;
2636 			}
2637 
2638 			/* throw out any old stored conf requests */
2639 			result = L2CAP_CONF_SUCCESS;
2640 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2641 								req, &result);
2642 			if (len < 0) {
2643 				l2cap_send_disconn_req(conn, chan, ECONNRESET);
2644 				goto done;
2645 			}
2646 
2647 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
2648 						L2CAP_CONF_REQ, len, req);
2649 			chan->num_conf_req++;
2650 			if (result != L2CAP_CONF_SUCCESS)
2651 				goto done;
2652 			break;
2653 		}
2654 
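		/* Too many configuration rounds: fall through and give up */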
2655 	default:
2656 		sk->sk_err = ECONNRESET;
2657 		__set_chan_timer(chan, HZ * 5);
2658 		l2cap_send_disconn_req(conn, chan, ECONNRESET);
2659 		goto done;
2660 	}
2661 
2662 	if (flags & 0x01)
2663 		goto done;
2664 
2665 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
2666 
2667 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2668 		set_default_fcs(chan);
2669 
2670 		l2cap_state_change(chan, BT_CONNECTED);
2671 		chan->next_tx_seq = 0;
2672 		chan->expected_tx_seq = 0;
2673 		skb_queue_head_init(&chan->tx_q);
2674 		if (chan->mode ==  L2CAP_MODE_ERTM)
2675 			l2cap_ertm_init(chan);
2676 
2677 		l2cap_chan_ready(sk);
2678 	}
2679 
2680 done:
2681 	bh_unlock_sock(sk);
2682 	return 0;
2683 }
2684 
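/* Handle a Disconnection Request: acknowledge it and delete the channel
 * (deferred via a short timer if the socket is owned by user context). */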
2685 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2686 {
2687 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2688 	struct l2cap_disconn_rsp rsp;
2689 	u16 dcid, scid;
2690 	struct l2cap_chan *chan;
2691 	struct sock *sk;
2692 
2693 	scid = __le16_to_cpu(req->scid);
2694 	dcid = __le16_to_cpu(req->dcid);
2695 
2696 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2697 
2698 	chan = l2cap_get_chan_by_scid(conn, dcid);
2699 	if (!chan)
2700 		return 0;
2701 
2702 	sk = chan->sk;
2703 
2704 	rsp.dcid = cpu_to_le16(chan->scid);
2705 	rsp.scid = cpu_to_le16(chan->dcid);
2706 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2707 
2708 	sk->sk_shutdown = SHUTDOWN_MASK;
2709 
2710 	/* don't delete l2cap channel if sk is owned by user */
2711 	if (sock_owned_by_user(sk)) {
2712 		l2cap_state_change(chan, BT_DISCONN);
2713 		__clear_chan_timer(chan);
2714 		__set_chan_timer(chan, HZ / 5);
2715 		bh_unlock_sock(sk);
2716 		return 0;
2717 	}
2718 
2719 	l2cap_chan_del(chan, ECONNRESET);
2720 	bh_unlock_sock(sk);
2721 
2722 	chan->ops->close(chan->data);
2723 	return 0;
2724 }
2725 
2726 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2727 {
2728 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2729 	u16 dcid, scid;
2730 	struct l2cap_chan *chan;
2731 	struct sock *sk;
2732 
2733 	scid = __le16_to_cpu(rsp->scid);
2734 	dcid = __le16_to_cpu(rsp->dcid);
2735 
2736 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2737 
2738 	chan = l2cap_get_chan_by_scid(conn, scid);
2739 	if (!chan)
2740 		return 0;
2741 
2742 	sk = chan->sk;
2743 
2744 	/* don't delete l2cap channel if sk is owned by user */
2745 	if (sock_owned_by_user(sk)) {
2746 		l2cap_state_change(chan, BT_DISCONN);
2747 		__clear_chan_timer(chan);
2748 		__set_chan_timer(chan, HZ / 5);
2749 		bh_unlock_sock(sk);
2750 		return 0;
2751 	}
2752 
2753 	l2cap_chan_del(chan, 0);
2754 	bh_unlock_sock(sk);
2755 
2756 	chan->ops->close(chan->data);
2757 	return 0;
2758 }
2759 
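/* Handle an Information Request: report our feature mask or fixed channel
 * map, or NOT SUPPORTED for any other type. */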
2760 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2761 {
2762 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2763 	u16 type;
2764 
2765 	type = __le16_to_cpu(req->type);
2766 
2767 	BT_DBG("type 0x%4.4x", type);
2768 
2769 	if (type == L2CAP_IT_FEAT_MASK) {
2770 		u8 buf[8];
2771 		u32 feat_mask = l2cap_feat_mask;
2772 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2773 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2774 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2775 		if (!disable_ertm)
2776 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2777 							 | L2CAP_FEAT_FCS;
2778 		put_unaligned_le32(feat_mask, rsp->data);
2779 		l2cap_send_cmd(conn, cmd->ident,
2780 					L2CAP_INFO_RSP, sizeof(buf), buf);
2781 	} else if (type == L2CAP_IT_FIXED_CHAN) {
2782 		u8 buf[12];
2783 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2784 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2785 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2786 		memcpy(buf + 4, l2cap_fixed_chan, 8);
2787 		l2cap_send_cmd(conn, cmd->ident,
2788 					L2CAP_INFO_RSP, sizeof(buf), buf);
2789 	} else {
2790 		struct l2cap_info_rsp rsp;
2791 		rsp.type   = cpu_to_le16(type);
2792 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2793 		l2cap_send_cmd(conn, cmd->ident,
2794 					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2795 	}
2796 
2797 	return 0;
2798 }
2799 
2800 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2801 {
2802 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2803 	u16 type, result;
2804 
2805 	type   = __le16_to_cpu(rsp->type);
2806 	result = __le16_to_cpu(rsp->result);
2807 
2808 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2809 
2810 	/* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
2811 	if (cmd->ident != conn->info_ident ||
2812 			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2813 		return 0;
2814 
2815 	del_timer(&conn->info_timer);
2816 
2817 	if (result != L2CAP_IR_SUCCESS) {
2818 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2819 		conn->info_ident = 0;
2820 
2821 		l2cap_conn_start(conn);
2822 
2823 		return 0;
2824 	}
2825 
2826 	if (type == L2CAP_IT_FEAT_MASK) {
2827 		conn->feat_mask = get_unaligned_le32(rsp->data);
2828 
2829 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2830 			struct l2cap_info_req req;
2831 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2832 
2833 			conn->info_ident = l2cap_get_ident(conn);
2834 
2835 			l2cap_send_cmd(conn, conn->info_ident,
2836 					L2CAP_INFO_REQ, sizeof(req), &req);
2837 		} else {
2838 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2839 			conn->info_ident = 0;
2840 
2841 			l2cap_conn_start(conn);
2842 		}
2843 	} else if (type == L2CAP_IT_FIXED_CHAN) {
2844 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2845 		conn->info_ident = 0;
2846 
2847 		l2cap_conn_start(conn);
2848 	}
2849 
2850 	return 0;
2851 }
2852 
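/* Sanity-check LE connection parameters: connection interval 7.5ms-4s (in
 * 1.25ms units), supervision timeout 100ms-32s (in 10ms units), and a
 * slave latency low enough that the link can still be supervised. */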
2853 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2854 							u16 to_multiplier)
2855 {
2856 	u16 max_latency;
2857 
2858 	if (min > max || min < 6 || max > 3200)
2859 		return -EINVAL;
2860 
2861 	if (to_multiplier < 10 || to_multiplier > 3200)
2862 		return -EINVAL;
2863 
2864 	if (max >= to_multiplier * 8)
2865 		return -EINVAL;
2866 
2867 	max_latency = (to_multiplier * 8 / max) - 1;
2868 	if (latency > 499 || latency > max_latency)
2869 		return -EINVAL;
2870 
2871 	return 0;
2872 }
2873 
2874 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2875 					struct l2cap_cmd_hdr *cmd, u8 *data)
2876 {
2877 	struct hci_conn *hcon = conn->hcon;
2878 	struct l2cap_conn_param_update_req *req;
2879 	struct l2cap_conn_param_update_rsp rsp;
2880 	u16 min, max, latency, to_multiplier, cmd_len;
2881 	int err;
2882 
2883 	if (!(hcon->link_mode & HCI_LM_MASTER))
2884 		return -EINVAL;
2885 
2886 	cmd_len = __le16_to_cpu(cmd->len);
2887 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2888 		return -EPROTO;
2889 
2890 	req = (struct l2cap_conn_param_update_req *) data;
2891 	min		= __le16_to_cpu(req->min);
2892 	max		= __le16_to_cpu(req->max);
2893 	latency		= __le16_to_cpu(req->latency);
2894 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
2895 
2896 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2897 						min, max, latency, to_multiplier);
2898 
2899 	memset(&rsp, 0, sizeof(rsp));
2900 
2901 	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2902 	if (err)
2903 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2904 	else
2905 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2906 
2907 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
2908 							sizeof(rsp), &rsp);
2909 
2910 	if (!err)
2911 		hci_le_conn_update(hcon, min, max, latency, to_multiplier);
2912 
2913 	return 0;
2914 }
2915 
2916 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2917 			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2918 {
2919 	int err = 0;
2920 
2921 	switch (cmd->code) {
2922 	case L2CAP_COMMAND_REJ:
2923 		l2cap_command_rej(conn, cmd, data);
2924 		break;
2925 
2926 	case L2CAP_CONN_REQ:
2927 		err = l2cap_connect_req(conn, cmd, data);
2928 		break;
2929 
2930 	case L2CAP_CONN_RSP:
2931 		err = l2cap_connect_rsp(conn, cmd, data);
2932 		break;
2933 
2934 	case L2CAP_CONF_REQ:
2935 		err = l2cap_config_req(conn, cmd, cmd_len, data);
2936 		break;
2937 
2938 	case L2CAP_CONF_RSP:
2939 		err = l2cap_config_rsp(conn, cmd, data);
2940 		break;
2941 
2942 	case L2CAP_DISCONN_REQ:
2943 		err = l2cap_disconnect_req(conn, cmd, data);
2944 		break;
2945 
2946 	case L2CAP_DISCONN_RSP:
2947 		err = l2cap_disconnect_rsp(conn, cmd, data);
2948 		break;
2949 
2950 	case L2CAP_ECHO_REQ:
2951 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2952 		break;
2953 
2954 	case L2CAP_ECHO_RSP:
2955 		break;
2956 
2957 	case L2CAP_INFO_REQ:
2958 		err = l2cap_information_req(conn, cmd, data);
2959 		break;
2960 
2961 	case L2CAP_INFO_RSP:
2962 		err = l2cap_information_rsp(conn, cmd, data);
2963 		break;
2964 
2965 	default:
2966 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2967 		err = -EINVAL;
2968 		break;
2969 	}
2970 
2971 	return err;
2972 }
2973 
2974 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2975 					struct l2cap_cmd_hdr *cmd, u8 *data)
2976 {
2977 	switch (cmd->code) {
2978 	case L2CAP_COMMAND_REJ:
2979 		return 0;
2980 
2981 	case L2CAP_CONN_PARAM_UPDATE_REQ:
2982 		return l2cap_conn_param_update_req(conn, cmd, data);
2983 
2984 	case L2CAP_CONN_PARAM_UPDATE_RSP:
2985 		return 0;
2986 
2987 	default:
2988 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2989 		return -EINVAL;
2990 	}
2991 }
2992 
2993 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2994 							struct sk_buff *skb)
2995 {
2996 	u8 *data = skb->data;
2997 	int len = skb->len;
2998 	struct l2cap_cmd_hdr cmd;
2999 	int err;
3000 
3001 	l2cap_raw_recv(conn, skb);
3002 
3003 	while (len >= L2CAP_CMD_HDR_SIZE) {
3004 		u16 cmd_len;
3005 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3006 		data += L2CAP_CMD_HDR_SIZE;
3007 		len  -= L2CAP_CMD_HDR_SIZE;
3008 
3009 		cmd_len = le16_to_cpu(cmd.len);
3010 
3011 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3012 
3013 		if (cmd_len > len || !cmd.ident) {
3014 			BT_DBG("corrupted command");
3015 			break;
3016 		}
3017 
3018 		if (conn->hcon->type == LE_LINK)
3019 			err = l2cap_le_sig_cmd(conn, &cmd, data);
3020 		else
3021 			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3022 
3023 		if (err) {
3024 			struct l2cap_cmd_rej_unk rej;
3025 
3026 			BT_ERR("Command 0x%2.2x failed: %d", cmd.code, err);
3027 
3028 			/* FIXME: Map err to a valid reason */
3029 			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3030 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3031 		}
3032 
3033 		data += cmd_len;
3034 		len  -= cmd_len;
3035 	}
3036 
3037 	kfree_skb(skb);
3038 }
3039 
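/* When CRC16 FCS is in use, strip it from the tail of the frame and verify
 * it over the header and payload; returns -EBADMSG on mismatch. */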
3040 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
3041 {
3042 	u16 our_fcs, rcv_fcs;
3043 	int hdr_size = L2CAP_HDR_SIZE + 2;
3044 
3045 	if (chan->fcs == L2CAP_FCS_CRC16) {
3046 		skb_trim(skb, skb->len - 2);
3047 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3048 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3049 
3050 		if (our_fcs != rcv_fcs)
3051 			return -EBADMSG;
3052 	}
3053 	return 0;
3054 }
3055 
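/* Send pending I-frames if possible and guarantee that at least one frame
 * (RR, or RNR while the local side is busy) goes out in reply. */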
3056 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3057 {
3058 	u16 control = 0;
3059 
3060 	chan->frames_sent = 0;
3061 
3062 	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3063 
3064 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3065 		control |= L2CAP_SUPER_RCV_NOT_READY;
3066 		l2cap_send_sframe(chan, control);
3067 		set_bit(CONN_RNR_SENT, &chan->conn_state);
3068 	}
3069 
3070 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3071 		l2cap_retransmit_frames(chan);
3072 
3073 	l2cap_ertm_send(chan);
3074 
3075 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3076 			chan->frames_sent == 0) {
3077 		control |= L2CAP_SUPER_RCV_READY;
3078 		l2cap_send_sframe(chan, control);
3079 	}
3080 }
3081 
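/* Queue an out-of-sequence I-frame on the SREJ queue, keeping the queue
 * sorted by tx_seq relative to buffer_seq; duplicates return -EINVAL. */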
3082 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
3083 {
3084 	struct sk_buff *next_skb;
3085 	int tx_seq_offset, next_tx_seq_offset;
3086 
3087 	bt_cb(skb)->tx_seq = tx_seq;
3088 	bt_cb(skb)->sar = sar;
3089 
3090 	next_skb = skb_peek(&chan->srej_q);
3091 	if (!next_skb) {
3092 		__skb_queue_tail(&chan->srej_q, skb);
3093 		return 0;
3094 	}
3095 
3096 	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3097 	if (tx_seq_offset < 0)
3098 		tx_seq_offset += 64;
3099 
3100 	do {
3101 		if (bt_cb(next_skb)->tx_seq == tx_seq)
3102 			return -EINVAL;
3103 
3104 		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3105 						chan->buffer_seq) % 64;
3106 		if (next_tx_seq_offset < 0)
3107 			next_tx_seq_offset += 64;
3108 
3109 		if (next_tx_seq_offset > tx_seq_offset) {
3110 			__skb_queue_before(&chan->srej_q, next_skb, skb);
3111 			return 0;
3112 		}
3113 
3114 		if (skb_queue_is_last(&chan->srej_q, next_skb))
3115 			break;
3116 
3117 	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
3118 
3119 	__skb_queue_tail(&chan->srej_q, skb);
3120 
3121 	return 0;
3122 }
3123 
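/* Reassemble an ERTM SDU from its SAR segments and hand the completed SDU
 * to the upper layer; an invalid SAR sequence drops the partial SDU and
 * requests a disconnect. */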
3124 static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3125 {
3126 	struct sk_buff *_skb;
3127 	int err;
3128 
3129 	switch (control & L2CAP_CTRL_SAR) {
3130 	case L2CAP_SDU_UNSEGMENTED:
3131 		if (test_bit(CONN_SAR_SDU, &chan->conn_state))
3132 			goto drop;
3133 
3134 		return chan->ops->recv(chan->data, skb);
3135 
3136 	case L2CAP_SDU_START:
3137 		if (test_bit(CONN_SAR_SDU, &chan->conn_state))
3138 			goto drop;
3139 
3140 		chan->sdu_len = get_unaligned_le16(skb->data);
3141 
3142 		if (chan->sdu_len > chan->imtu)
3143 			goto disconnect;
3144 
3145 		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3146 		if (!chan->sdu)
3147 			return -ENOMEM;
3148 
3149 		/* Pull the sdu_len bytes only after the alloc succeeds: because
3150 		 * of the Local Busy condition we have to be sure this pull is
3151 		 * executed only once, i.e. only when the alloc does not fail. */
3152 		skb_pull(skb, 2);
3153 
3154 		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3155 
3156 		set_bit(CONN_SAR_SDU, &chan->conn_state);
3157 		chan->partial_sdu_len = skb->len;
3158 		break;
3159 
3160 	case L2CAP_SDU_CONTINUE:
3161 		if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3162 			goto disconnect;
3163 
3164 		if (!chan->sdu)
3165 			goto disconnect;
3166 
3167 		chan->partial_sdu_len += skb->len;
3168 		if (chan->partial_sdu_len > chan->sdu_len)
3169 			goto drop;
3170 
3171 		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3172 
3173 		break;
3174 
3175 	case L2CAP_SDU_END:
3176 		if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3177 			goto disconnect;
3178 
3179 		if (!chan->sdu)
3180 			goto disconnect;
3181 
3182 		chan->partial_sdu_len += skb->len;
3183 
3184 		if (chan->partial_sdu_len > chan->imtu)
3185 			goto drop;
3186 
3187 		if (chan->partial_sdu_len != chan->sdu_len)
3188 			goto drop;
3189 
3190 		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3191 
3192 		_skb = skb_clone(chan->sdu, GFP_ATOMIC);
3193 		if (!_skb) {
3194 			return -ENOMEM;
3195 		}
3196 
3197 		err = chan->ops->recv(chan->data, _skb);
3198 		if (err < 0) {
3199 			kfree_skb(_skb);
3200 			return err;
3201 		}
3202 
3203 		clear_bit(CONN_SAR_SDU, &chan->conn_state);
3204 
3205 		kfree_skb(chan->sdu);
3206 		break;
3207 	}
3208 
3209 	kfree_skb(skb);
3210 	return 0;
3211 
3212 drop:
3213 	kfree_skb(chan->sdu);
3214 	chan->sdu = NULL;
3215 
3216 disconnect:
3217 	l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3218 	kfree_skb(skb);
3219 	return 0;
3220 }
3221 
3222 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3223 {
3224 	u16 control;
3225 
3226 	BT_DBG("chan %p, Enter local busy", chan);
3227 
3228 	set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3229 
3230 	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3231 	control |= L2CAP_SUPER_RCV_NOT_READY;
3232 	l2cap_send_sframe(chan, control);
3233 
3234 	set_bit(CONN_RNR_SENT, &chan->conn_state);
3235 
3236 	__clear_ack_timer(chan);
3237 }
3238 
3239 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3240 {
3241 	u16 control;
3242 
3243 	if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3244 		goto done;
3245 
3246 	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3247 	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3248 	l2cap_send_sframe(chan, control);
3249 	chan->retry_count = 1;
3250 
3251 	__clear_retrans_timer(chan);
3252 	__set_monitor_timer(chan);
3253 
3254 	set_bit(CONN_WAIT_F, &chan->conn_state);
3255 
3256 done:
3257 	clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3258 	clear_bit(CONN_RNR_SENT, &chan->conn_state);
3259 
3260 	BT_DBG("chan %p, Exit local busy", chan);
3261 }
3262 
3263 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3264 {
3265 	if (chan->mode == L2CAP_MODE_ERTM) {
3266 		if (busy)
3267 			l2cap_ertm_enter_local_busy(chan);
3268 		else
3269 			l2cap_ertm_exit_local_busy(chan);
3270 	}
3271 }
3272 
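/* Reassemble a Streaming mode SDU.  Unlike ERTM, errors here only discard
 * the partial SDU: lost data is acceptable in this mode. */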
3273 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3274 {
3275 	struct sk_buff *_skb;
3276 	int err = -EINVAL;
3277 
3278 	/*
3279 	 * TODO: We have to notify userspace if some data is lost in
3280 	 * Streaming mode.
3281 	 */
3282 
3283 	switch (control & L2CAP_CTRL_SAR) {
3284 	case L2CAP_SDU_UNSEGMENTED:
3285 		if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3286 			kfree_skb(chan->sdu);
3287 			break;
3288 		}
3289 
3290 		err = chan->ops->recv(chan->data, skb);
3291 		if (!err)
3292 			return 0;
3293 
3294 		break;
3295 
3296 	case L2CAP_SDU_START:
3297 		if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3298 			kfree_skb(chan->sdu);
3299 			break;
3300 		}
3301 
3302 		chan->sdu_len = get_unaligned_le16(skb->data);
3303 		skb_pull(skb, 2);
3304 
3305 		if (chan->sdu_len > chan->imtu) {
3306 			err = -EMSGSIZE;
3307 			break;
3308 		}
3309 
3310 		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3311 		if (!chan->sdu) {
3312 			err = -ENOMEM;
3313 			break;
3314 		}
3315 
3316 		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3317 
3318 		set_bit(CONN_SAR_SDU, &chan->conn_state);
3319 		chan->partial_sdu_len = skb->len;
3320 		err = 0;
3321 		break;
3322 
3323 	case L2CAP_SDU_CONTINUE:
3324 		if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3325 			break;
3326 
3327 		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3328 
3329 		chan->partial_sdu_len += skb->len;
3330 		if (chan->partial_sdu_len > chan->sdu_len)
3331 			kfree_skb(chan->sdu);
3332 		else
3333 			err = 0;
3334 
3335 		break;
3336 
3337 	case L2CAP_SDU_END:
3338 		if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3339 			break;
3340 
3341 		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3342 
3343 		clear_bit(CONN_SAR_SDU, &chan->conn_state);
3344 		chan->partial_sdu_len += skb->len;
3345 
3346 		if (chan->partial_sdu_len > chan->imtu)
3347 			goto drop;
3348 
3349 		if (chan->partial_sdu_len == chan->sdu_len) {
3350 			_skb = skb_clone(chan->sdu, GFP_ATOMIC);
			if (_skb) {
				err = chan->ops->recv(chan->data, _skb);
				if (err < 0)
					kfree_skb(_skb);
			}
3354 		}
3355 		err = 0;
3356 
3357 drop:
3358 		kfree_skb(chan->sdu);
3359 		break;
3360 	}
3361 
3362 	kfree_skb(skb);
3363 	return err;
3364 }
3365 
3366 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3367 {
3368 	struct sk_buff *skb;
3369 	u16 control;
3370 
3371 	while ((skb = skb_peek(&chan->srej_q)) &&
3372 			!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3373 		int err;
3374 
3375 		if (bt_cb(skb)->tx_seq != tx_seq)
3376 			break;
3377 
3378 		skb = skb_dequeue(&chan->srej_q);
3379 		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3380 		err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3381 
3382 		if (err < 0) {
3383 			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3384 			break;
3385 		}
3386 
3387 		chan->buffer_seq_srej =
3388 			(chan->buffer_seq_srej + 1) % 64;
3389 		tx_seq = (tx_seq + 1) % 64;
3390 	}
3391 }
3392 
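/* A frame we SREJ'ed has arrived: drop its entry from srej_l, re-sending
 * the SREJ for every entry still listed ahead of it. */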
3393 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3394 {
3395 	struct srej_list *l, *tmp;
3396 	u16 control;
3397 
3398 	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3399 		if (l->tx_seq == tx_seq) {
3400 			list_del(&l->list);
3401 			kfree(l);
3402 			return;
3403 		}
3404 		control = L2CAP_SUPER_SELECT_REJECT;
3405 		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3406 		l2cap_send_sframe(chan, control);
3407 		list_del(&l->list);
3408 		list_add_tail(&l->list, &chan->srej_l);
3409 	}
3410 }
3411 
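/* Send an SREJ for every frame missing between expected_tx_seq and the
 * received tx_seq, remembering each requested sequence number on srej_l. */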
3412 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3413 {
3414 	struct srej_list *new;
3415 	u16 control;
3416 
3417 	while (tx_seq != chan->expected_tx_seq) {
3418 		control = L2CAP_SUPER_SELECT_REJECT;
3419 		control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3420 		l2cap_send_sframe(chan, control);
3421 
3422 		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
		if (!new) {
			/* Out of memory: we cannot track this SREJ, so stop
			 * queueing further ones for now. */
			return;
		}
		new->tx_seq = chan->expected_tx_seq;
3424 		chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3425 		list_add_tail(&new->list, &chan->srej_l);
3426 	}
3427 	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3428 }
3429 
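/* Process a received I-frame: acknowledge frames the peer has seen, detect
 * out-of-sequence delivery (entering SREJ recovery if needed) and feed
 * in-sequence data to the reassembly code. */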
3430 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3431 {
3432 	u8 tx_seq = __get_txseq(rx_control);
3433 	u8 req_seq = __get_reqseq(rx_control);
3434 	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3435 	int tx_seq_offset, expected_tx_seq_offset;
3436 	int num_to_ack = (chan->tx_win/6) + 1;
3437 	int err = 0;
3438 
3439 	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3440 							tx_seq, rx_control);
3441 
3442 	if (L2CAP_CTRL_FINAL & rx_control &&
3443 			test_bit(CONN_WAIT_F, &chan->conn_state)) {
3444 		__clear_monitor_timer(chan);
3445 		if (chan->unacked_frames > 0)
3446 			__set_retrans_timer(chan);
3447 		clear_bit(CONN_WAIT_F, &chan->conn_state);
3448 	}
3449 
3450 	chan->expected_ack_seq = req_seq;
3451 	l2cap_drop_acked_frames(chan);
3452 
3453 	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3454 	if (tx_seq_offset < 0)
3455 		tx_seq_offset += 64;
3456 
3457 	/* invalid tx_seq */
3458 	if (tx_seq_offset >= chan->tx_win) {
3459 		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3460 		goto drop;
3461 	}
3462 
3463 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3464 		goto drop;
3465 
3466 	if (tx_seq == chan->expected_tx_seq)
3467 		goto expected;
3468 
3469 	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3470 		struct srej_list *first;
3471 
3472 		first = list_first_entry(&chan->srej_l,
3473 				struct srej_list, list);
3474 		if (tx_seq == first->tx_seq) {
3475 			l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3476 			l2cap_check_srej_gap(chan, tx_seq);
3477 
3478 			list_del(&first->list);
3479 			kfree(first);
3480 
3481 			if (list_empty(&chan->srej_l)) {
3482 				chan->buffer_seq = chan->buffer_seq_srej;
3483 				clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3484 				l2cap_send_ack(chan);
3485 				BT_DBG("chan %p, Exit SREJ_SENT", chan);
3486 			}
3487 		} else {
3488 			struct srej_list *l;
3489 
3490 			/* duplicated tx_seq */
3491 			if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3492 				goto drop;
3493 
3494 			list_for_each_entry(l, &chan->srej_l, list) {
3495 				if (l->tx_seq == tx_seq) {
3496 					l2cap_resend_srejframe(chan, tx_seq);
3497 					return 0;
3498 				}
3499 			}
3500 			l2cap_send_srejframe(chan, tx_seq);
3501 		}
3502 	} else {
3503 		expected_tx_seq_offset =
3504 			(chan->expected_tx_seq - chan->buffer_seq) % 64;
3505 		if (expected_tx_seq_offset < 0)
3506 			expected_tx_seq_offset += 64;
3507 
3508 		/* duplicated tx_seq */
3509 		if (tx_seq_offset < expected_tx_seq_offset)
3510 			goto drop;
3511 
3512 		set_bit(CONN_SREJ_SENT, &chan->conn_state);
3513 
3514 		BT_DBG("chan %p, Enter SREJ", chan);
3515 
3516 		INIT_LIST_HEAD(&chan->srej_l);
3517 		chan->buffer_seq_srej = chan->buffer_seq;
3518 
3519 		__skb_queue_head_init(&chan->srej_q);
3520 		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3521 
3522 		set_bit(CONN_SEND_PBIT, &chan->conn_state);
3523 
3524 		l2cap_send_srejframe(chan, tx_seq);
3525 
3526 		__clear_ack_timer(chan);
3527 	}
3528 	return 0;
3529 
3530 expected:
3531 	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3532 
3533 	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3534 		bt_cb(skb)->tx_seq = tx_seq;
3535 		bt_cb(skb)->sar = sar;
3536 		__skb_queue_tail(&chan->srej_q, skb);
3537 		return 0;
3538 	}
3539 
3540 	err = l2cap_ertm_reassembly_sdu(chan, skb, rx_control);
3541 	chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3542 	if (err < 0) {
3543 		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3544 		return err;
3545 	}
3546 
3547 	if (rx_control & L2CAP_CTRL_FINAL) {
3548 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3549 			l2cap_retransmit_frames(chan);
3550 	}
3551 
3552 	__set_ack_timer(chan);
3553 
3554 	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3555 	if (chan->num_acked == num_to_ack - 1)
3556 		l2cap_send_ack(chan);
3557 
3558 	return 0;
3559 
3560 drop:
3561 	kfree_skb(skb);
3562 	return 0;
3563 }
3564 
3565 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3566 {
3567 	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3568 						rx_control);
3569 
3570 	chan->expected_ack_seq = __get_reqseq(rx_control);
3571 	l2cap_drop_acked_frames(chan);
3572 
3573 	if (rx_control & L2CAP_CTRL_POLL) {
3574 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
3575 		if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3576 			if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3577 					(chan->unacked_frames > 0))
3578 				__set_retrans_timer(chan);
3579 
3580 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3581 			l2cap_send_srejtail(chan);
3582 		} else {
3583 			l2cap_send_i_or_rr_or_rnr(chan);
3584 		}
3585 
3586 	} else if (rx_control & L2CAP_CTRL_FINAL) {
3587 		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3588 
3589 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3590 			l2cap_retransmit_frames(chan);
3591 
3592 	} else {
3593 		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3594 				(chan->unacked_frames > 0))
3595 			__set_retrans_timer(chan);
3596 
3597 		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3598 		if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3599 			l2cap_send_ack(chan);
3600 		else
3601 			l2cap_ertm_send(chan);
3602 	}
3603 }
3604 
3605 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3606 {
3607 	u8 tx_seq = __get_reqseq(rx_control);
3608 
3609 	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3610 
3611 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3612 
3613 	chan->expected_ack_seq = tx_seq;
3614 	l2cap_drop_acked_frames(chan);
3615 
3616 	if (rx_control & L2CAP_CTRL_FINAL) {
3617 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3618 			l2cap_retransmit_frames(chan);
3619 	} else {
3620 		l2cap_retransmit_frames(chan);
3621 
3622 		if (test_bit(CONN_WAIT_F, &chan->conn_state))
3623 			set_bit(CONN_REJ_ACT, &chan->conn_state);
3624 	}
3625 }
3626 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3627 {
3628 	u8 tx_seq = __get_reqseq(rx_control);
3629 
3630 	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3631 
3632 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3633 
3634 	if (rx_control & L2CAP_CTRL_POLL) {
3635 		chan->expected_ack_seq = tx_seq;
3636 		l2cap_drop_acked_frames(chan);
3637 
3638 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
3639 		l2cap_retransmit_one_frame(chan, tx_seq);
3640 
3641 		l2cap_ertm_send(chan);
3642 
3643 		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3644 			chan->srej_save_reqseq = tx_seq;
3645 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
3646 		}
3647 	} else if (rx_control & L2CAP_CTRL_FINAL) {
3648 		if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3649 				chan->srej_save_reqseq == tx_seq)
3650 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3651 		else
3652 			l2cap_retransmit_one_frame(chan, tx_seq);
3653 	} else {
3654 		l2cap_retransmit_one_frame(chan, tx_seq);
3655 		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3656 			chan->srej_save_reqseq = tx_seq;
3657 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
3658 		}
3659 	}
3660 }
3661 
3662 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3663 {
3664 	u8 tx_seq = __get_reqseq(rx_control);
3665 
3666 	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3667 
3668 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3669 	chan->expected_ack_seq = tx_seq;
3670 	l2cap_drop_acked_frames(chan);
3671 
3672 	if (rx_control & L2CAP_CTRL_POLL)
3673 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
3674 
3675 	if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3676 		__clear_retrans_timer(chan);
3677 		if (rx_control & L2CAP_CTRL_POLL)
3678 			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3679 		return;
3680 	}
3681 
3682 	if (rx_control & L2CAP_CTRL_POLL)
3683 		l2cap_send_srejtail(chan);
3684 	else
3685 		l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
3686 }
3687 
3688 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3689 {
3690 	BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3691 
3692 	if (L2CAP_CTRL_FINAL & rx_control &&
3693 			test_bit(CONN_WAIT_F, &chan->conn_state)) {
3694 		__clear_monitor_timer(chan);
3695 		if (chan->unacked_frames > 0)
3696 			__set_retrans_timer(chan);
3697 		clear_bit(CONN_WAIT_F, &chan->conn_state);
3698 	}
3699 
3700 	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3701 	case L2CAP_SUPER_RCV_READY:
3702 		l2cap_data_channel_rrframe(chan, rx_control);
3703 		break;
3704 
3705 	case L2CAP_SUPER_REJECT:
3706 		l2cap_data_channel_rejframe(chan, rx_control);
3707 		break;
3708 
3709 	case L2CAP_SUPER_SELECT_REJECT:
3710 		l2cap_data_channel_srejframe(chan, rx_control);
3711 		break;
3712 
3713 	case L2CAP_SUPER_RCV_NOT_READY:
3714 		l2cap_data_channel_rnrframe(chan, rx_control);
3715 		break;
3716 	}
3717 
3718 	kfree_skb(skb);
3719 	return 0;
3720 }
3721 
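/* Entry point for ERTM traffic: validate FCS, length and req_seq, then
 * dispatch the frame to the I-frame or S-frame handler. */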
3722 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3723 {
3724 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3725 	u16 control;
3726 	u8 req_seq;
3727 	int len, next_tx_seq_offset, req_seq_offset;
3728 
3729 	control = get_unaligned_le16(skb->data);
3730 	skb_pull(skb, 2);
3731 	len = skb->len;
3732 
3733 	/*
3734 	 * We can just drop the corrupted I-frame here.
3735 	 * The receiver state machine will see it as a missing frame and
3736 	 * start the normal recovery procedure, requesting a retransmission.
3737 	 */
3738 	if (l2cap_check_fcs(chan, skb))
3739 		goto drop;
3740 
3741 	if (__is_sar_start(control) && __is_iframe(control))
3742 		len -= 2;
3743 
3744 	if (chan->fcs == L2CAP_FCS_CRC16)
3745 		len -= 2;
3746 
3747 	if (len > chan->mps) {
3748 		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3749 		goto drop;
3750 	}
3751 
3752 	req_seq = __get_reqseq(control);
3753 	req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3754 	if (req_seq_offset < 0)
3755 		req_seq_offset += 64;
3756 
3757 	next_tx_seq_offset =
3758 		(chan->next_tx_seq - chan->expected_ack_seq) % 64;
3759 	if (next_tx_seq_offset < 0)
3760 		next_tx_seq_offset += 64;
3761 
3762 	/* check for invalid req-seq */
3763 	if (req_seq_offset > next_tx_seq_offset) {
3764 		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3765 		goto drop;
3766 	}
3767 
3768 	if (__is_iframe(control)) {
3769 		if (len < 0) {
3770 			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3771 			goto drop;
3772 		}
3773 
3774 		l2cap_data_channel_iframe(chan, control, skb);
3775 	} else {
3776 		if (len != 0) {
3777 			BT_ERR("S-frame with unexpected payload length %d", len);
3778 			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3779 			goto drop;
3780 		}
3781 
3782 		l2cap_data_channel_sframe(chan, control, skb);
3783 	}
3784 
3785 	return 0;
3786 
3787 drop:
3788 	kfree_skb(skb);
3789 	return 0;
3790 }
3791 
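/* Deliver an incoming frame on a connection-oriented channel according to
 * its mode: Basic, ERTM or Streaming. */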
3792 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3793 {
3794 	struct l2cap_chan *chan;
3795 	struct sock *sk = NULL;
3796 	u16 control;
3797 	u8 tx_seq;
3798 	int len;
3799 
3800 	chan = l2cap_get_chan_by_scid(conn, cid);
3801 	if (!chan) {
3802 		BT_DBG("unknown cid 0x%4.4x", cid);
3803 		goto drop;
3804 	}
3805 
3806 	sk = chan->sk;
3807 
3808 	BT_DBG("chan %p, len %d", chan, skb->len);
3809 
3810 	if (chan->state != BT_CONNECTED)
3811 		goto drop;
3812 
3813 	switch (chan->mode) {
3814 	case L2CAP_MODE_BASIC:
3815 		/* If the socket recv buffer overflows we drop data here,
3816 		 * which is *bad* because L2CAP has to be reliable.
3817 		 * But we don't have any other choice: Basic mode L2CAP
3818 		 * doesn't provide a flow control mechanism. */
3819 
3820 		if (chan->imtu < skb->len)
3821 			goto drop;
3822 
3823 		if (!chan->ops->recv(chan->data, skb))
3824 			goto done;
3825 		break;
3826 
3827 	case L2CAP_MODE_ERTM:
3828 		if (!sock_owned_by_user(sk)) {
3829 			l2cap_ertm_data_rcv(sk, skb);
3830 		} else {
3831 			if (sk_add_backlog(sk, skb))
3832 				goto drop;
3833 		}
3834 
3835 		goto done;
3836 
3837 	case L2CAP_MODE_STREAMING:
3838 		control = get_unaligned_le16(skb->data);
3839 		skb_pull(skb, 2);
3840 		len = skb->len;
3841 
3842 		if (l2cap_check_fcs(chan, skb))
3843 			goto drop;
3844 
3845 		if (__is_sar_start(control))
3846 			len -= 2;
3847 
3848 		if (chan->fcs == L2CAP_FCS_CRC16)
3849 			len -= 2;
3850 
3851 		if (len > chan->mps || len < 0 || __is_sframe(control))
3852 			goto drop;
3853 
3854 		tx_seq = __get_txseq(control);
3855 
3856 		if (chan->expected_tx_seq == tx_seq)
3857 			chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3858 		else
3859 			chan->expected_tx_seq = (tx_seq + 1) % 64;
3860 
3861 		l2cap_streaming_reassembly_sdu(chan, skb, control);
3862 
3863 		goto done;
3864 
3865 	default:
3866 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
3867 		break;
3868 	}
3869 
3870 drop:
3871 	kfree_skb(skb);
3872 
3873 done:
3874 	if (sk)
3875 		bh_unlock_sock(sk);
3876 
3877 	return 0;
3878 }
3879 
3880 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3881 {
3882 	struct sock *sk = NULL;
3883 	struct l2cap_chan *chan;
3884 
3885 	chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3886 	if (!chan)
3887 		goto drop;
3888 
3889 	sk = chan->sk;
3890 
3891 	bh_lock_sock(sk);
3892 
3893 	BT_DBG("sk %p, len %d", sk, skb->len);
3894 
3895 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3896 		goto drop;
3897 
3898 	if (chan->imtu < skb->len)
3899 		goto drop;
3900 
3901 	if (!chan->ops->recv(chan->data, skb))
3902 		goto done;
3903 
3904 drop:
3905 	kfree_skb(skb);
3906 
3907 done:
3908 	if (sk)
3909 		bh_unlock_sock(sk);
3910 	return 0;
3911 }
3912 
3913 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3914 {
3915 	struct sock *sk = NULL;
3916 	struct l2cap_chan *chan;
3917 
3918 	chan = l2cap_global_chan_by_scid(0, cid, conn->src);
3919 	if (!chan)
3920 		goto drop;
3921 
3922 	sk = chan->sk;
3923 
3924 	bh_lock_sock(sk);
3925 
3926 	BT_DBG("sk %p, len %d", sk, skb->len);
3927 
3928 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3929 		goto drop;
3930 
3931 	if (chan->imtu < skb->len)
3932 		goto drop;
3933 
3934 	if (!chan->ops->recv(chan->data, skb))
3935 		goto done;
3936 
3937 drop:
3938 	kfree_skb(skb);
3939 
3940 done:
3941 	if (sk)
3942 		bh_unlock_sock(sk);
3943 	return 0;
3944 }
3945 
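/* Demultiplex a complete L2CAP frame by CID to the signalling,
 * connectionless, ATT, SMP or connection-oriented data handlers. */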
3946 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3947 {
3948 	struct l2cap_hdr *lh = (void *) skb->data;
3949 	u16 cid, len;
3950 	__le16 psm;
3951 
3952 	skb_pull(skb, L2CAP_HDR_SIZE);
3953 	cid = __le16_to_cpu(lh->cid);
3954 	len = __le16_to_cpu(lh->len);
3955 
3956 	if (len != skb->len) {
3957 		kfree_skb(skb);
3958 		return;
3959 	}
3960 
3961 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
3962 
3963 	switch (cid) {
3964 	case L2CAP_CID_LE_SIGNALING:
3965 	case L2CAP_CID_SIGNALING:
3966 		l2cap_sig_channel(conn, skb);
3967 		break;
3968 
3969 	case L2CAP_CID_CONN_LESS:
3970 		psm = get_unaligned_le16(skb->data);
3971 		skb_pull(skb, 2);
3972 		l2cap_conless_channel(conn, psm, skb);
3973 		break;
3974 
3975 	case L2CAP_CID_LE_DATA:
3976 		l2cap_att_channel(conn, cid, skb);
3977 		break;
3978 
3979 	case L2CAP_CID_SMP:
3980 		if (smp_sig_channel(conn, skb))
3981 			l2cap_conn_del(conn->hcon, EACCES);
3982 		break;
3983 
3984 	default:
3985 		l2cap_data_channel(conn, cid, skb);
3986 		break;
3987 	}
3988 }
3989 
3990 /* ---- L2CAP interface with lower layer (HCI) ---- */
3991 
3992 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3993 {
3994 	int exact = 0, lm1 = 0, lm2 = 0;
3995 	struct l2cap_chan *c;
3996 
3997 	if (type != ACL_LINK)
3998 		return -EINVAL;
3999 
4000 	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4001 
4002 	/* Find listening sockets and check their link_mode */
4003 	read_lock(&chan_list_lock);
4004 	list_for_each_entry(c, &chan_list, global_l) {
4005 		struct sock *sk = c->sk;
4006 
4007 		if (c->state != BT_LISTEN)
4008 			continue;
4009 
4010 		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4011 			lm1 |= HCI_LM_ACCEPT;
4012 			if (c->role_switch)
4013 				lm1 |= HCI_LM_MASTER;
4014 			exact++;
4015 		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4016 			lm2 |= HCI_LM_ACCEPT;
4017 			if (c->role_switch)
4018 				lm2 |= HCI_LM_MASTER;
4019 		}
4020 	}
4021 	read_unlock(&chan_list_lock);
4022 
4023 	return exact ? lm1 : lm2;
4024 }
4025 
4026 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4027 {
4028 	struct l2cap_conn *conn;
4029 
4030 	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4031 
4032 	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4033 		return -EINVAL;
4034 
4035 	if (!status) {
4036 		conn = l2cap_conn_add(hcon, status);
4037 		if (conn)
4038 			l2cap_conn_ready(conn);
4039 	} else
4040 		l2cap_conn_del(hcon, bt_to_errno(status));
4041 
4042 	return 0;
4043 }
4044 
4045 static int l2cap_disconn_ind(struct hci_conn *hcon)
4046 {
4047 	struct l2cap_conn *conn = hcon->l2cap_data;
4048 
4049 	BT_DBG("hcon %p", hcon);
4050 
4051 	if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4052 		return 0x13;
4053 
4054 	return conn->disc_reason;
4055 }
4056 
4057 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4058 {
4059 	BT_DBG("hcon %p reason %d", hcon, reason);
4060 
4061 	if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4062 		return -EINVAL;
4063 
4064 	l2cap_conn_del(hcon, bt_to_errno(reason));
4065 
4066 	return 0;
4067 }
4068 
4069 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4070 {
4071 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4072 		return;
4073 
4074 	if (encrypt == 0x00) {
4075 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
4076 			__clear_chan_timer(chan);
4077 			__set_chan_timer(chan, HZ * 5);
4078 		} else if (chan->sec_level == BT_SECURITY_HIGH)
4079 			l2cap_chan_close(chan, ECONNREFUSED);
4080 	} else {
4081 		if (chan->sec_level == BT_SECURITY_MEDIUM)
4082 			__clear_chan_timer(chan);
4083 	}
4084 }
4085 
4086 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4087 {
4088 	struct l2cap_conn *conn = hcon->l2cap_data;
4089 	struct l2cap_chan *chan;
4090 
4091 	if (!conn)
4092 		return 0;
4093 
4094 	BT_DBG("conn %p", conn);
4095 
4096 	read_lock(&conn->chan_lock);
4097 
4098 	list_for_each_entry(chan, &conn->chan_l, list) {
4099 		struct sock *sk = chan->sk;
4100 
4101 		bh_lock_sock(sk);
4102 
4103 		BT_DBG("chan->scid %d", chan->scid);
4104 
4105 		if (chan->scid == L2CAP_CID_LE_DATA) {
4106 			if (!status && encrypt) {
4107 				chan->sec_level = hcon->sec_level;
4108 				del_timer(&conn->security_timer);
4109 				l2cap_chan_ready(sk);
4110 				smp_distribute_keys(conn, 0);
4111 			}
4112 
4113 			bh_unlock_sock(sk);
4114 			continue;
4115 		}
4116 
4117 		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4118 			bh_unlock_sock(sk);
4119 			continue;
4120 		}
4121 
4122 		if (!status && (chan->state == BT_CONNECTED ||
4123 						chan->state == BT_CONFIG)) {
4124 			l2cap_check_encryption(chan, encrypt);
4125 			bh_unlock_sock(sk);
4126 			continue;
4127 		}
4128 
4129 		if (chan->state == BT_CONNECT) {
4130 			if (!status) {
4131 				struct l2cap_conn_req req;
4132 				req.scid = cpu_to_le16(chan->scid);
4133 				req.psm  = chan->psm;
4134 
4135 				chan->ident = l2cap_get_ident(conn);
4136 				set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4137 
4138 				l2cap_send_cmd(conn, chan->ident,
4139 					L2CAP_CONN_REQ, sizeof(req), &req);
4140 			} else {
4141 				__clear_chan_timer(chan);
4142 				__set_chan_timer(chan, HZ / 10);
4143 			}
4144 		} else if (chan->state == BT_CONNECT2) {
4145 			struct l2cap_conn_rsp rsp;
4146 			__u16 res, stat;
4147 
4148 			if (!status) {
4149 				if (bt_sk(sk)->defer_setup) {
4150 					struct sock *parent = bt_sk(sk)->parent;
4151 					res = L2CAP_CR_PEND;
4152 					stat = L2CAP_CS_AUTHOR_PEND;
4153 					if (parent)
4154 						parent->sk_data_ready(parent, 0);
4155 				} else {
4156 					l2cap_state_change(chan, BT_CONFIG);
4157 					res = L2CAP_CR_SUCCESS;
4158 					stat = L2CAP_CS_NO_INFO;
4159 				}
4160 			} else {
4161 				l2cap_state_change(chan, BT_DISCONN);
4162 				__set_chan_timer(chan, HZ / 10);
4163 				res = L2CAP_CR_SEC_BLOCK;
4164 				stat = L2CAP_CS_NO_INFO;
4165 			}
4166 
4167 			rsp.scid   = cpu_to_le16(chan->dcid);
4168 			rsp.dcid   = cpu_to_le16(chan->scid);
4169 			rsp.result = cpu_to_le16(res);
4170 			rsp.status = cpu_to_le16(stat);
4171 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4172 							sizeof(rsp), &rsp);
4173 		}
4174 
4175 		bh_unlock_sock(sk);
4176 	}
4177 
4178 	read_unlock(&conn->chan_lock);
4179 
4180 	return 0;
4181 }
4182 
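/* Reassemble L2CAP frames from incoming ACL data.  A start fragment
 * carries the Basic L2CAP header with the total frame length; continuation
 * fragments are appended to conn->rx_skb until rx_len drops to zero and
 * the complete frame is dispatched to l2cap_recv_frame().
 */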
4183 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4184 {
4185 	struct l2cap_conn *conn = hcon->l2cap_data;
4186 
4187 	if (!conn)
4188 		conn = l2cap_conn_add(hcon, 0);
4189 
4190 	if (!conn)
4191 		goto drop;
4192 
4193 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4194 
4195 	if (!(flags & ACL_CONT)) {
4196 		struct l2cap_hdr *hdr;
4197 		struct l2cap_chan *chan;
4198 		u16 cid;
4199 		int len;
4200 
4201 		if (conn->rx_len) {
4202 			BT_ERR("Unexpected start frame (len %d)", skb->len);
4203 			kfree_skb(conn->rx_skb);
4204 			conn->rx_skb = NULL;
4205 			conn->rx_len = 0;
4206 			l2cap_conn_unreliable(conn, ECOMM);
4207 		}
4208 
4209 		/* A start fragment always begins with the Basic L2CAP header */
4210 		if (skb->len < L2CAP_HDR_SIZE) {
4211 			BT_ERR("Frame is too short (len %d)", skb->len);
4212 			l2cap_conn_unreliable(conn, ECOMM);
4213 			goto drop;
4214 		}
4215 
4216 		hdr = (struct l2cap_hdr *) skb->data;
4217 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4218 		cid = __le16_to_cpu(hdr->cid);
4219 
4220 		if (len == skb->len) {
4221 			/* Complete frame received */
4222 			l2cap_recv_frame(conn, skb);
4223 			return 0;
4224 		}
4225 
4226 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4227 
4228 		if (skb->len > len) {
4229 			BT_ERR("Frame is too long (len %d, expected len %d)",
4230 				skb->len, len);
4231 			l2cap_conn_unreliable(conn, ECOMM);
4232 			goto drop;
4233 		}
4234 
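		/* Note: the bh_unlock_sock() calls below pair with a socket
		 * lock taken inside l2cap_get_chan_by_scid() when a channel
		 * is found for this CID.
		 */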
4235 		chan = l2cap_get_chan_by_scid(conn, cid);
4236 
4237 		if (chan && chan->sk) {
4238 			struct sock *sk = chan->sk;
4239 
4240 			if (chan->imtu < len - L2CAP_HDR_SIZE) {
4241 				BT_ERR("Frame exceeding recv MTU (len %d, "
4242 							"MTU %d)", len,
4243 							chan->imtu);
4244 				bh_unlock_sock(sk);
4245 				l2cap_conn_unreliable(conn, ECOMM);
4246 				goto drop;
4247 			}
4248 			bh_unlock_sock(sk);
4249 		}
4250 
4251 		/* Allocate skb for the complete frame (with header) */
4252 		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4253 		if (!conn->rx_skb)
4254 			goto drop;
4255 
4256 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4257 								skb->len);
4258 		conn->rx_len = len - skb->len;
4259 	} else {
4260 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4261 
4262 		if (!conn->rx_len) {
4263 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4264 			l2cap_conn_unreliable(conn, ECOMM);
4265 			goto drop;
4266 		}
4267 
4268 		if (skb->len > conn->rx_len) {
4269 			BT_ERR("Fragment is too long (len %d, expected %d)",
4270 					skb->len, conn->rx_len);
4271 			kfree_skb(conn->rx_skb);
4272 			conn->rx_skb = NULL;
4273 			conn->rx_len = 0;
4274 			l2cap_conn_unreliable(conn, ECOMM);
4275 			goto drop;
4276 		}
4277 
4278 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4279 								skb->len);
4280 		conn->rx_len -= skb->len;
4281 
4282 		if (!conn->rx_len) {
4283 			/* Complete frame received */
4284 			l2cap_recv_frame(conn, conn->rx_skb);
4285 			conn->rx_skb = NULL;
4286 		}
4287 	}
4288 
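/* Error paths and fragment paths fall through to here: the incoming skb has
 * served its purpose (copied into conn->rx_skb or rejected) and is freed.
 * A frame that arrived complete in a single fragment was handed off above
 * and never reaches this label.
 */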
4289 drop:
4290 	kfree_skb(skb);
4291 	return 0;
4292 }
4293 
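/* debugfs "l2cap" file: dump one line per known channel with its addresses,
 * state, PSM, CIDs, MTUs, security level and mode.
 */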
4294 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4295 {
4296 	struct l2cap_chan *c;
4297 
4298 	read_lock_bh(&chan_list_lock);
4299 
4300 	list_for_each_entry(c, &chan_list, global_l) {
4301 		struct sock *sk = c->sk;
4302 
4303 		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4304 					batostr(&bt_sk(sk)->src),
4305 					batostr(&bt_sk(sk)->dst),
4306 					c->state, __le16_to_cpu(c->psm),
4307 					c->scid, c->dcid, c->imtu, c->omtu,
4308 					c->sec_level, c->mode);
4309 	}
4310 
4311 	read_unlock_bh(&chan_list_lock);
4312 
4313 	return 0;
4314 }
4315 
4316 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4317 {
4318 	return single_open(file, l2cap_debugfs_show, inode->i_private);
4319 }
4320 
4321 static const struct file_operations l2cap_debugfs_fops = {
4322 	.open		= l2cap_debugfs_open,
4323 	.read		= seq_read,
4324 	.llseek		= seq_lseek,
4325 	.release	= single_release,
4326 };
4327 
4328 static struct dentry *l2cap_debugfs;
4329 
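/* Callback table registered with the HCI core; it routes connection,
 * disconnection, security and ACL data events into L2CAP.
 */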
4330 static struct hci_proto l2cap_hci_proto = {
4331 	.name		= "L2CAP",
4332 	.id		= HCI_PROTO_L2CAP,
4333 	.connect_ind	= l2cap_connect_ind,
4334 	.connect_cfm	= l2cap_connect_cfm,
4335 	.disconn_ind	= l2cap_disconn_ind,
4336 	.disconn_cfm	= l2cap_disconn_cfm,
4337 	.security_cfm	= l2cap_security_cfm,
4338 	.recv_acldata	= l2cap_recv_acldata
4339 };
4340 
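/* Module init: register the L2CAP socket layer, hook into the HCI core as a
 * protocol and, when available, expose channel state through debugfs.
 */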
4341 int __init l2cap_init(void)
4342 {
4343 	int err;
4344 
4345 	err = l2cap_init_sockets();
4346 	if (err < 0)
4347 		return err;
4348 
4349 	err = hci_register_proto(&l2cap_hci_proto);
4350 	if (err < 0) {
4351 		BT_ERR("L2CAP protocol registration failed");
4352 		bt_sock_unregister(BTPROTO_L2CAP);
4353 		goto error;
4354 	}
4355 
4356 	if (bt_debugfs) {
4357 		l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4358 					bt_debugfs, NULL, &l2cap_debugfs_fops);
4359 		if (!l2cap_debugfs)
4360 			BT_ERR("Failed to create L2CAP debug file");
4361 	}
4362 
4363 	return 0;
4364 
4365 error:
4366 	l2cap_cleanup_sockets();
4367 	return err;
4368 }
4369 
4370 void l2cap_exit(void)
4371 {
4372 	debugfs_remove(l2cap_debugfs);
4373 
4374 	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4375 		BT_ERR("L2CAP protocol unregistration failed");
4376 
4377 	l2cap_cleanup_sockets();
4378 }
4379 
4380 module_param(disable_ertm, bool, 0644);
4381 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4382