xref: /openbmc/linux/net/bluetooth/l2cap_core.c (revision 7e035230)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41 
/* Module-level switch: when set, ERTM and streaming modes are not
 * offered (see l2cap_mode_supported()).
 */
bool disable_ertm;

/* Local feature mask; ERTM/streaming bits are ORed in at query time
 * when ERTM is enabled (see l2cap_mode_supported()).
 */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed channel bitmap; only the L2CAP signalling channel by default */
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };

/* Global list of all L2CAP channels, protected by chan_list_lock */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for signalling helpers defined later in the file */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
								void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
				   struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff_head *skbs, u8 event);
61 /* ---- L2CAP channels ---- */
62 
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
64 {
65 	struct l2cap_chan *c;
66 
67 	list_for_each_entry(c, &conn->chan_l, list) {
68 		if (c->dcid == cid)
69 			return c;
70 	}
71 	return NULL;
72 }
73 
74 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
75 {
76 	struct l2cap_chan *c;
77 
78 	list_for_each_entry(c, &conn->chan_l, list) {
79 		if (c->scid == cid)
80 			return c;
81 	}
82 	return NULL;
83 }
84 
85 /* Find channel with given SCID.
86  * Returns locked channel. */
87 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
88 {
89 	struct l2cap_chan *c;
90 
91 	mutex_lock(&conn->chan_lock);
92 	c = __l2cap_get_chan_by_scid(conn, cid);
93 	if (c)
94 		l2cap_chan_lock(c);
95 	mutex_unlock(&conn->chan_lock);
96 
97 	return c;
98 }
99 
100 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
101 {
102 	struct l2cap_chan *c;
103 
104 	list_for_each_entry(c, &conn->chan_l, list) {
105 		if (c->ident == ident)
106 			return c;
107 	}
108 	return NULL;
109 }
110 
111 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
112 {
113 	struct l2cap_chan *c;
114 
115 	list_for_each_entry(c, &chan_list, global_l) {
116 		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
117 			return c;
118 	}
119 	return NULL;
120 }
121 
122 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
123 {
124 	int err;
125 
126 	write_lock(&chan_list_lock);
127 
128 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
129 		err = -EADDRINUSE;
130 		goto done;
131 	}
132 
133 	if (psm) {
134 		chan->psm = psm;
135 		chan->sport = psm;
136 		err = 0;
137 	} else {
138 		u16 p;
139 
140 		err = -EINVAL;
141 		for (p = 0x1001; p < 0x1100; p += 2)
142 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
143 				chan->psm   = cpu_to_le16(p);
144 				chan->sport = cpu_to_le16(p);
145 				err = 0;
146 				break;
147 			}
148 	}
149 
150 done:
151 	write_unlock(&chan_list_lock);
152 	return err;
153 }
154 
155 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
156 {
157 	write_lock(&chan_list_lock);
158 
159 	chan->scid = scid;
160 
161 	write_unlock(&chan_list_lock);
162 
163 	return 0;
164 }
165 
166 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
167 {
168 	u16 cid = L2CAP_CID_DYN_START;
169 
170 	for (; cid < L2CAP_CID_DYN_END; cid++) {
171 		if (!__l2cap_get_chan_by_scid(conn, cid))
172 			return cid;
173 	}
174 
175 	return 0;
176 }
177 
/* Move the channel to @state and notify the owner via the state_change
 * callback. Lockless variant; callers take the socket lock first
 * (see l2cap_state_change()).
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
						state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state);
}
186 
187 static void l2cap_state_change(struct l2cap_chan *chan, int state)
188 {
189 	struct sock *sk = chan->sk;
190 
191 	lock_sock(sk);
192 	__l2cap_state_change(chan, state);
193 	release_sock(sk);
194 }
195 
/* Record @err on the channel's socket. Lockless variant; callers take
 * the socket lock first (see l2cap_chan_set_err()).
 */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
202 
203 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
204 {
205 	struct sock *sk = chan->sk;
206 
207 	lock_sock(sk);
208 	__l2cap_chan_set_err(chan, err);
209 	release_sock(sk);
210 }
211 
/* Arm the ERTM retransmission timer — but only if the monitor timer is
 * not already pending (while monitoring, retransmission is driven by
 * the monitor timer instead), and only when a retransmission timeout
 * has actually been configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
220 
/* Arm the ERTM monitor timer. The retransmission timer is cancelled
 * first — the two are mutually exclusive (see __set_retrans_timer()).
 * A zero monitor_timeout means the timer is simply not armed.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
229 
230 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
231 					       u16 seq)
232 {
233 	struct sk_buff *skb;
234 
235 	skb_queue_walk(head, skb) {
236 		if (bt_cb(skb)->control.txseq == seq)
237 			return skb;
238 	}
239 
240 	return NULL;
241 }
242 
243 /* ---- L2CAP sequence number lists ---- */
244 
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246  * SREJ requests that are received and for frames that are to be
247  * retransmitted. These seq_list functions implement a singly-linked
248  * list in an array, where membership in the list can also be checked
249  * in constant time. Items can also be added to the tail of the list
250  * and removed from the head in constant time, without further memory
251  * allocs or frees.
252  */
253 
254 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
255 {
256 	size_t alloc_size, i;
257 
258 	/* Allocated size is a power of 2 to map sequence numbers
259 	 * (which may be up to 14 bits) in to a smaller array that is
260 	 * sized for the negotiated ERTM transmit windows.
261 	 */
262 	alloc_size = roundup_pow_of_two(size);
263 
264 	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
265 	if (!seq_list->list)
266 		return -ENOMEM;
267 
268 	seq_list->mask = alloc_size - 1;
269 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
270 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
271 	for (i = 0; i < alloc_size; i++)
272 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
273 
274 	return 0;
275 }
276 
/* Release the backing array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
281 
/* O(1) membership test: a slot holds L2CAP_SEQ_LIST_CLEAR when its
 * sequence number is not on the list.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
288 
/* Remove @seq from the list and return it.
 *
 * Head removal is O(1); removing an interior element walks the chain
 * from the head. Returns L2CAP_SEQ_LIST_CLEAR when the list is empty
 * or @seq is not found. Each array slot stores the *next* sequence
 * number in the chain (L2CAP_SEQ_LIST_TAIL marks the last element).
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* Removed the last element: reset to the empty state */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
322 
/* Pop and return the first sequence number on the list, or
 * L2CAP_SEQ_LIST_CLEAR when the list is empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
328 
329 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
330 {
331 	u16 i;
332 
333 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
334 		return;
335 
336 	for (i = 0; i <= seq_list->mask; i++)
337 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
338 
339 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 }
342 
/* Append @seq to the tail of the list in O(1). Duplicate appends are
 * silently ignored (the slot is already occupied).
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	/* Empty list: new element becomes the head; otherwise link it
	 * after the current tail.
	 */
	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
360 
/* Delayed-work handler for chan_timer: closes the channel when its
 * timer expires. Lock order is conn->chan_lock then the channel lock,
 * matching the rest of the file; the channel reference taken when the
 * timer was armed is dropped at the end.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
							chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Pick the error reported to the owner: refusal while connected,
	 * configuring, or connecting (non-SDP); plain timeout otherwise.
	 */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
					chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
390 
/* Allocate a new channel, link it on the global channel list and set
 * up its lock, timer and initial refcount. Returns NULL on allocation
 * failure. The caller owns the initial reference (kref_init).
 *
 * NOTE(review): GFP_ATOMIC is used here — if all callers run in
 * sleepable context, GFP_KERNEL would be preferable; confirm calling
 * contexts before changing.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	/* Visible on the global list from this point on */
	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
418 
/* kref release callback: unlink the channel from the global list and
 * free it. Runs when the last reference is dropped via
 * l2cap_chan_put().
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
431 
/* Take a reference on the channel. Paired with l2cap_chan_put(). */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
438 
/* Drop a channel reference; the last put frees the channel via
 * l2cap_chan_destroy().
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
445 
446 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
447 {
448 	chan->fcs  = L2CAP_FCS_CRC16;
449 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
450 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
451 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
452 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
453 	chan->sec_level = BT_SECURITY_LOW;
454 
455 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
456 }
457 
/* Attach @chan to @conn: assign CIDs and MTUs according to the channel
 * type, initialize the local extended-flow-spec defaults, take a
 * channel reference (dropped in l2cap_chan_del()) and link the channel
 * on the connection's list. Callers hold conn->chan_lock (see
 * l2cap_chan_add()).
 */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection: fixed LE data channel CIDs */
			chan->omtu = L2CAP_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		/* A2MP fixed channel with its own default MTUs */
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default best-effort extended flow specification */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
513 
/* Locked wrapper around __l2cap_chan_add(): attaches the channel to
 * the connection under conn->chan_lock.
 */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
520 
521 void l2cap_chan_del(struct l2cap_chan *chan, int err)
522 {
523 	struct l2cap_conn *conn = chan->conn;
524 
525 	__clear_chan_timer(chan);
526 
527 	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
528 
529 	if (conn) {
530 		/* Delete from channel list */
531 		list_del(&chan->list);
532 
533 		l2cap_chan_put(chan);
534 
535 		chan->conn = NULL;
536 
537 		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
538 			hci_conn_put(conn->hcon);
539 	}
540 
541 	if (chan->ops->teardown)
542 		chan->ops->teardown(chan, err);
543 
544 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
545 		return;
546 
547 	switch(chan->mode) {
548 	case L2CAP_MODE_BASIC:
549 		break;
550 
551 	case L2CAP_MODE_ERTM:
552 		__clear_retrans_timer(chan);
553 		__clear_monitor_timer(chan);
554 		__clear_ack_timer(chan);
555 
556 		skb_queue_purge(&chan->srej_q);
557 
558 		l2cap_seq_list_free(&chan->srej_list);
559 		l2cap_seq_list_free(&chan->retrans_list);
560 
561 		/* fall through */
562 
563 	case L2CAP_MODE_STREAMING:
564 		skb_queue_purge(&chan->tx_q);
565 		break;
566 	}
567 
568 	return;
569 }
570 
/* Close a channel according to its current state.
 *
 * Connected/configuring ACL channels send a Disconnect Request and arm
 * the channel timer; a half-open incoming ACL channel (BT_CONNECT2)
 * first answers the pending Connect Request with a reject; every other
 * state tears the channel down directly. @reason is forwarded to
 * l2cap_chan_del()/teardown.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan,
					state_to_string(chan->state), sk);

	switch (chan->state) {
	case BT_LISTEN:
		if (chan->ops->teardown)
			chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* Graceful disconnect only for connection-oriented ACL
		 * channels; everything else is deleted immediately.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
					conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Security block when setup was deferred to the
			 * owner, bad PSM otherwise.
			 */
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		if (chan->ops->teardown)
			chan->ops->teardown(chan, 0);
		break;
	}
}
629 
/* Map the channel type / PSM / security level to an HCI authentication
 * requirement: raw channels request dedicated bonding, SDP requests no
 * bonding, everything else general bonding (MITM variants at
 * BT_SECURITY_HIGH).
 *
 * Side effect: a BT_SECURITY_LOW channel on the SDP PSM is promoted to
 * BT_SECURITY_SDP.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	if (chan->chan_type == L2CAP_CHAN_RAW) {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	} else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
		if (chan->sec_level == BT_SECURITY_LOW)
			chan->sec_level = BT_SECURITY_SDP;

		if (chan->sec_level == BT_SECURITY_HIGH)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
	} else {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	}
}
660 
661 /* Service level security */
662 int l2cap_chan_check_security(struct l2cap_chan *chan)
663 {
664 	struct l2cap_conn *conn = chan->conn;
665 	__u8 auth_type;
666 
667 	auth_type = l2cap_get_auth_type(chan);
668 
669 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
670 }
671 
/* Allocate the next signalling command identifier for this connection,
 * cycling through 1..128 under conn->lock (0 is never returned).
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
693 
/* Build and transmit a signalling command on the connection's HCI
 * channel. Silently drops the command when the skb cannot be built.
 * Signalling traffic is sent at maximum priority and forces the radio
 * active; no-flush is used when the controller supports it.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
714 
/* Transmit a data skb for this channel over ACL. No-flush is used only
 * when the channel is not marked flushable and the controller supports
 * it; force-active follows the channel's FLAG_FORCE_ACTIVE setting.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
							skb->priority);

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
					lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
732 
/* Decode a 16-bit enhanced ERTM control field into @control. The
 * frame-type bit selects S-frame (poll/supervise valid) or I-frame
 * (sar/txseq valid); the unused fields are zeroed so stale values
 * never leak through.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
756 
/* Decode a 32-bit extended ERTM control field into @control; same
 * structure as __unpack_enhanced_control() but with the extended
 * (wider) field layout.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
780 
781 static inline void __unpack_control(struct l2cap_chan *chan,
782 				    struct sk_buff *skb)
783 {
784 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
785 		__unpack_extended_control(get_unaligned_le32(skb->data),
786 					  &bt_cb(skb)->control);
787 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
788 	} else {
789 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
790 					  &bt_cb(skb)->control);
791 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
792 	}
793 }
794 
/* Encode @control as a 32-bit extended ERTM control field; inverse of
 * __unpack_extended_control().
 */
static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
813 
/* Encode @control as a 16-bit enhanced ERTM control field; inverse of
 * __unpack_enhanced_control().
 */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
832 
833 static inline void __pack_control(struct l2cap_chan *chan,
834 				  struct l2cap_ctrl *control,
835 				  struct sk_buff *skb)
836 {
837 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
838 		put_unaligned_le32(__pack_extended_control(control),
839 				   skb->data + L2CAP_HDR_SIZE);
840 	} else {
841 		put_unaligned_le16(__pack_enhanced_control(control),
842 				   skb->data + L2CAP_HDR_SIZE);
843 	}
844 }
845 
846 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
847 {
848 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
849 		return L2CAP_EXT_HDR_SIZE;
850 	else
851 		return L2CAP_ENH_HDR_SIZE;
852 }
853 
/* Build an ERTM S-frame PDU carrying the pre-packed @control field:
 * basic L2CAP header, control field (16 or 32 bit per FLAG_EXT_CTRL)
 * and, when CRC16 FCS is negotiated, the FCS computed over everything
 * appended so far. Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers the header and control field written above */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
886 
/* Send an ERTM S-frame described by @control, updating the related
 * channel bookkeeping first: a pending F-bit is attached to non-poll
 * frames, RR/RNR toggles the CONN_RNR_SENT flag, and any frame other
 * than SREJ acknowledges up to reqseq (cancelling the ack timer).
 * Non-S-frame controls are ignored; skb build failure drops the frame.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* Deliver a queued final bit on the first non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
924 
925 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
926 {
927 	struct l2cap_ctrl control;
928 
929 	BT_DBG("chan %p, poll %d", chan, poll);
930 
931 	memset(&control, 0, sizeof(control));
932 	control.sframe = 1;
933 	control.poll = poll;
934 
935 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
936 		control.super = L2CAP_SUPER_RNR;
937 	else
938 		control.super = L2CAP_SUPER_RR;
939 
940 	control.reqseq = chan->buffer_seq;
941 	l2cap_send_sframe(chan, &control);
942 }
943 
/* True when no Connect Request is outstanding for this channel. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
948 
/* Send an L2CAP Connect Request for this channel. A fresh command
 * identifier is stored in chan->ident so the response can be matched,
 * and CONF_CONNECT_PEND is set before the command goes out.
 */
static void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
963 
/* Mark the channel fully connected: clear all configuration state
 * (including CONF_NOT_COMPLETE set at creation), stop the channel
 * timer and notify the owner via the ready callback.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
974 
/* Kick off channel establishment. LE links skip configuration and go
 * straight to ready. On BR/EDR, a Connect Request is only sent once
 * the feature-mask exchange has completed and security allows it;
 * otherwise the feature-mask Information Request is (first) issued and
 * the info timer armed.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Wait for the feature exchange to finish before
		 * connecting; l2cap_conn_start() resumes us later.
		 */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
				__l2cap_no_conn_pending(chan))
			l2cap_send_conn_req(chan);
	} else {
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
1004 
1005 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1006 {
1007 	u32 local_feat_mask = l2cap_feat_mask;
1008 	if (!disable_ertm)
1009 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1010 
1011 	switch (mode) {
1012 	case L2CAP_MODE_ERTM:
1013 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1014 	case L2CAP_MODE_STREAMING:
1015 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1016 	default:
1017 		return 0x00;
1018 	}
1019 }
1020 
/* Send an L2CAP Disconnect Request for @chan and move it to
 * BT_DISCONN, recording @err on its socket. ERTM timers are stopped
 * first. The A2MP fixed channel has no disconnect signalling — it only
 * changes state. A NULL @conn makes this a no-op.
 */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		__l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1050 
1051 /* ---- L2CAP connections ---- */
1052 static void l2cap_conn_start(struct l2cap_conn *conn)
1053 {
1054 	struct l2cap_chan *chan, *tmp;
1055 
1056 	BT_DBG("conn %p", conn);
1057 
1058 	mutex_lock(&conn->chan_lock);
1059 
1060 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1061 		struct sock *sk = chan->sk;
1062 
1063 		l2cap_chan_lock(chan);
1064 
1065 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1066 			l2cap_chan_unlock(chan);
1067 			continue;
1068 		}
1069 
1070 		if (chan->state == BT_CONNECT) {
1071 			if (!l2cap_chan_check_security(chan) ||
1072 					!__l2cap_no_conn_pending(chan)) {
1073 				l2cap_chan_unlock(chan);
1074 				continue;
1075 			}
1076 
1077 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1078 					&& test_bit(CONF_STATE2_DEVICE,
1079 					&chan->conf_state)) {
1080 				l2cap_chan_close(chan, ECONNRESET);
1081 				l2cap_chan_unlock(chan);
1082 				continue;
1083 			}
1084 
1085 			l2cap_send_conn_req(chan);
1086 
1087 		} else if (chan->state == BT_CONNECT2) {
1088 			struct l2cap_conn_rsp rsp;
1089 			char buf[128];
1090 			rsp.scid = cpu_to_le16(chan->dcid);
1091 			rsp.dcid = cpu_to_le16(chan->scid);
1092 
1093 			if (l2cap_chan_check_security(chan)) {
1094 				lock_sock(sk);
1095 				if (test_bit(BT_SK_DEFER_SETUP,
1096 					     &bt_sk(sk)->flags)) {
1097 					struct sock *parent = bt_sk(sk)->parent;
1098 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1099 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1100 					if (parent)
1101 						parent->sk_data_ready(parent, 0);
1102 
1103 				} else {
1104 					__l2cap_state_change(chan, BT_CONFIG);
1105 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1106 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1107 				}
1108 				release_sock(sk);
1109 			} else {
1110 				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1111 				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1112 			}
1113 
1114 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1115 							sizeof(rsp), &rsp);
1116 
1117 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1118 					rsp.result != L2CAP_CR_SUCCESS) {
1119 				l2cap_chan_unlock(chan);
1120 				continue;
1121 			}
1122 
1123 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1124 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1125 						l2cap_build_conf_req(chan, buf), buf);
1126 			chan->num_conf_req++;
1127 		}
1128 
1129 		l2cap_chan_unlock(chan);
1130 	}
1131 
1132 	mutex_unlock(&conn->chan_lock);
1133 }
1134 
1135 /* Find socket with cid and source/destination bdaddr.
1136  * Returns closest match, locked.
1137  */
1138 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1139 						    bdaddr_t *src,
1140 						    bdaddr_t *dst)
1141 {
1142 	struct l2cap_chan *c, *c1 = NULL;
1143 
1144 	read_lock(&chan_list_lock);
1145 
1146 	list_for_each_entry(c, &chan_list, global_l) {
1147 		struct sock *sk = c->sk;
1148 
1149 		if (state && c->state != state)
1150 			continue;
1151 
1152 		if (c->scid == cid) {
1153 			int src_match, dst_match;
1154 			int src_any, dst_any;
1155 
1156 			/* Exact match. */
1157 			src_match = !bacmp(&bt_sk(sk)->src, src);
1158 			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1159 			if (src_match && dst_match) {
1160 				read_unlock(&chan_list_lock);
1161 				return c;
1162 			}
1163 
1164 			/* Closest match */
1165 			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1166 			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1167 			if ((src_match && dst_any) || (src_any && dst_match) ||
1168 			    (src_any && dst_any))
1169 				c1 = c;
1170 		}
1171 	}
1172 
1173 	read_unlock(&chan_list_lock);
1174 
1175 	return c1;
1176 }
1177 
/* Accept an incoming LE connection on a listening LE data channel:
 * create a child channel, enqueue its socket on the listener's accept
 * queue and mark the channel ready.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	parent = pchan->sk;

	lock_sock(parent);

	/* Ask the socket layer for a child channel; give up if it
	 * cannot create one.
	 */
	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	sk = chan->sk;

	/* Keep the link referenced while the channel exists */
	hci_conn_hold(conn->hcon);
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	/* Make the new socket visible to accept() on the listener */
	bt_accept_enqueue(parent, sk);

	l2cap_chan_add(conn, chan);

	l2cap_chan_ready(chan);

clean:
	release_sock(parent);
}
1216 
/* The underlying link came up: bring every channel on this connection
 * forward according to its type (LE security, connectionless, or the
 * normal connection-oriented signalling).
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: hand it to a listening LE data channel */
	if (!hcon->out && hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Outgoing LE link: start SMP with the pending security level */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP fixed channels are handled elsewhere */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			/* LE channels become ready once the security
			 * request succeeds.
			 */
			if (smp_conn_security(hcon, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless/raw channels need no further
			 * signalling; mark them connected right away.
			 */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1261 
1262 /* Notify sockets that we cannot guaranty reliability anymore */
1263 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1264 {
1265 	struct l2cap_chan *chan;
1266 
1267 	BT_DBG("conn %p", conn);
1268 
1269 	mutex_lock(&conn->chan_lock);
1270 
1271 	list_for_each_entry(chan, &conn->chan_l, list) {
1272 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1273 			__l2cap_chan_set_err(chan, err);
1274 	}
1275 
1276 	mutex_unlock(&conn->chan_lock);
1277 }
1278 
1279 static void l2cap_info_timeout(struct work_struct *work)
1280 {
1281 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1282 							info_timer.work);
1283 
1284 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1285 	conn->info_ident = 0;
1286 
1287 	l2cap_conn_start(conn);
1288 }
1289 
/* Tear down an L2CAP connection: close every channel, release the HCI
 * channel and free the connection object.  err is the reason reported
 * to each channel.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Discard any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a reference so the channel survives until the
		 * close callback has run.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* Stop any pending delayed work before freeing its context */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1332 
1333 static void security_timeout(struct work_struct *work)
1334 {
1335 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1336 						security_timer.work);
1337 
1338 	BT_DBG("conn %p", conn);
1339 
1340 	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1341 		smp_chan_destroy(conn);
1342 		l2cap_conn_del(conn->hcon, ETIMEDOUT);
1343 	}
1344 }
1345 
/* Create and initialise the l2cap_conn object for an HCI connection.
 * Returns the already-attached object if one exists, NULL if the HCI
 * connection failed (non-zero status) or on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn || status)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE links may advertise their own (typically smaller) MTU */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* LE links use the SMP security timeout, BR/EDR links the
	 * information-request timeout.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1394 
1395 /* ---- Socket interface ---- */
1396 
1397 /* Find socket with psm and source / destination bdaddr.
1398  * Returns closest match.
1399  */
1400 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1401 						   bdaddr_t *src,
1402 						   bdaddr_t *dst)
1403 {
1404 	struct l2cap_chan *c, *c1 = NULL;
1405 
1406 	read_lock(&chan_list_lock);
1407 
1408 	list_for_each_entry(c, &chan_list, global_l) {
1409 		struct sock *sk = c->sk;
1410 
1411 		if (state && c->state != state)
1412 			continue;
1413 
1414 		if (c->psm == psm) {
1415 			int src_match, dst_match;
1416 			int src_any, dst_any;
1417 
1418 			/* Exact match. */
1419 			src_match = !bacmp(&bt_sk(sk)->src, src);
1420 			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1421 			if (src_match && dst_match) {
1422 				read_unlock(&chan_list_lock);
1423 				return c;
1424 			}
1425 
1426 			/* Closest match */
1427 			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1428 			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1429 			if ((src_match && dst_any) || (src_any && dst_match) ||
1430 			    (src_any && dst_any))
1431 				c1 = c;
1432 		}
1433 	}
1434 
1435 	read_unlock(&chan_list_lock);
1436 
1437 	return c1;
1438 }
1439 
/* Initiate an outgoing L2CAP connection on the given channel.
 *
 * psm or cid selects the remote service, dst/dst_type the remote
 * device.  Returns 0 on success (including when a connection attempt
 * is already in progress) or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
	       dst_type, __le16_to_cpu(psm));

	/* Pick the local adapter that routes to dst */
	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
					chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	/* ERTM/streaming modes are refused when globally disabled */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	lock_sock(sk);
	bacpy(&bt_sk(sk)->dst, dst);
	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID selects an LE link; anything else uses ACL */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (hcon->type == LE_LINK) {
		err = 0;

		/* Only a single channel is allowed on an LE link */
		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* Drop the channel lock while adding to the connection's
	 * channel list — l2cap_chan_add() presumably takes
	 * conn->chan_lock first (NOTE(review): confirm lock order).
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	/* If the link is already up, start channel setup immediately */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1578 
/* Wait interruptibly until all transmitted ERTM frames have been
 * acknowledged or the channel loses its connection.  The socket lock
 * is held on entry and released while sleeping.  Returns 0 on success
 * or a negative errno (pending signal or socket error).
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* Re-arm the per-iteration timeout if it expired */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Sleep without the socket lock, then re-check state */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1610 
1611 static void l2cap_monitor_timeout(struct work_struct *work)
1612 {
1613 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1614 					       monitor_timer.work);
1615 
1616 	BT_DBG("chan %p", chan);
1617 
1618 	l2cap_chan_lock(chan);
1619 
1620 	if (!chan->conn) {
1621 		l2cap_chan_unlock(chan);
1622 		l2cap_chan_put(chan);
1623 		return;
1624 	}
1625 
1626 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1627 
1628 	l2cap_chan_unlock(chan);
1629 	l2cap_chan_put(chan);
1630 }
1631 
1632 static void l2cap_retrans_timeout(struct work_struct *work)
1633 {
1634 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1635 					       retrans_timer.work);
1636 
1637 	BT_DBG("chan %p", chan);
1638 
1639 	l2cap_chan_lock(chan);
1640 
1641 	if (!chan->conn) {
1642 		l2cap_chan_unlock(chan);
1643 		l2cap_chan_put(chan);
1644 		return;
1645 	}
1646 
1647 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1648 	l2cap_chan_unlock(chan);
1649 	l2cap_chan_put(chan);
1650 }
1651 
1652 static void l2cap_streaming_send(struct l2cap_chan *chan,
1653 				 struct sk_buff_head *skbs)
1654 {
1655 	struct sk_buff *skb;
1656 	struct l2cap_ctrl *control;
1657 
1658 	BT_DBG("chan %p, skbs %p", chan, skbs);
1659 
1660 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
1661 
1662 	while (!skb_queue_empty(&chan->tx_q)) {
1663 
1664 		skb = skb_dequeue(&chan->tx_q);
1665 
1666 		bt_cb(skb)->control.retries = 1;
1667 		control = &bt_cb(skb)->control;
1668 
1669 		control->reqseq = 0;
1670 		control->txseq = chan->next_tx_seq;
1671 
1672 		__pack_control(chan, control, skb);
1673 
1674 		if (chan->fcs == L2CAP_FCS_CRC16) {
1675 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1676 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1677 		}
1678 
1679 		l2cap_do_send(chan, skb);
1680 
1681 		BT_DBG("Sent txseq %u", control->txseq);
1682 
1683 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1684 		chan->frames_sent++;
1685 	}
1686 }
1687 
/* Transmit queued I-frames while the remote's transmit window allows.
 * Returns the number of frames sent, 0 if nothing could be sent, or
 * -ENOTCONN if the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Remote is busy (RNR): hold transmissions */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggyback a pending F-bit on this frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame also acknowledges received frames */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head to the next unsent frame */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1754 
/* Retransmit every frame whose sequence number is queued on
 * chan->retrans_list.  Disconnects the channel once any frame exceeds
 * the max_tx retry limit.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	/* Remote is busy (RNR): hold retransmissions */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
				seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* max_tx == 0 means an unlimited number of retries */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the piggybacked ack and F-bit before resending */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_ATOMIC);
		} else {
			tx_skb = skb_clone(skb, GFP_ATOMIC);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents: rewrite the control field with the
		 * refreshed reqseq/final bits.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
1830 
/* Queue the single frame named by control->reqseq for retransmission
 * and resend immediately.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
1839 
/* Rebuild the retransmission list with every already-sent frame from
 * control->reqseq up to (not including) tx_send_head, then resend.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll (P-bit) must be answered with the F-bit set */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Start from a clean slate; frames are re-queued below */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame to retransmit: the one carrying
		 * reqseq, or tx_send_head if reqseq is not in the queue.
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
				skb == chan->tx_send_head)
				break;
		}

		/* Queue everything from there up to the first unsent frame */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
1873 
/* Acknowledge received I-frames: either piggyback the ack on outgoing
 * data, send an explicit RR/RNR S-frame, or arm the ack timer to
 * batch the acknowledgement.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* We cannot receive right now: tell the remote with RNR */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Otherwise delay the ack, hoping to piggyback it */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
1923 
/* Copy len bytes of user data from msg into skb; anything beyond the
 * first count bytes is spilled into a frag_list of continuation skbs,
 * each at most conn->mtu bytes.  Returns the number of bytes copied
 * or a negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len  -= count;

		/* Account the fragment in the head skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
1968 
1969 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1970 						 struct msghdr *msg, size_t len,
1971 						 u32 priority)
1972 {
1973 	struct l2cap_conn *conn = chan->conn;
1974 	struct sk_buff *skb;
1975 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1976 	struct l2cap_hdr *lh;
1977 
1978 	BT_DBG("chan %p len %zu priority %u", chan, len, priority);
1979 
1980 	count = min_t(unsigned int, (conn->mtu - hlen), len);
1981 
1982 	skb = chan->ops->alloc_skb(chan, count + hlen,
1983 				   msg->msg_flags & MSG_DONTWAIT);
1984 	if (IS_ERR(skb))
1985 		return skb;
1986 
1987 	skb->priority = priority;
1988 
1989 	/* Create L2CAP header */
1990 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1991 	lh->cid = cpu_to_le16(chan->dcid);
1992 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1993 	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1994 
1995 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1996 	if (unlikely(err < 0)) {
1997 		kfree_skb(skb);
1998 		return ERR_PTR(err);
1999 	}
2000 	return skb;
2001 }
2002 
2003 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2004 					      struct msghdr *msg, size_t len,
2005 					      u32 priority)
2006 {
2007 	struct l2cap_conn *conn = chan->conn;
2008 	struct sk_buff *skb;
2009 	int err, count;
2010 	struct l2cap_hdr *lh;
2011 
2012 	BT_DBG("chan %p len %zu", chan, len);
2013 
2014 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2015 
2016 	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2017 				   msg->msg_flags & MSG_DONTWAIT);
2018 	if (IS_ERR(skb))
2019 		return skb;
2020 
2021 	skb->priority = priority;
2022 
2023 	/* Create L2CAP header */
2024 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2025 	lh->cid = cpu_to_le16(chan->dcid);
2026 	lh->len = cpu_to_le16(len);
2027 
2028 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2029 	if (unlikely(err < 0)) {
2030 		kfree_skb(skb);
2031 		return ERR_PTR(err);
2032 	}
2033 	return skb;
2034 }
2035 
/* Build a single ERTM/streaming I-frame PDU carrying one segment of an
 * SDU.  sdulen is non-zero only for the first (SAR start) segment, in
 * which case the total SDU length field is inserted.  The control
 * field is written as zero here and populated at transmit time.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on control field width, SDU length field
	 * and FCS presence.
	 */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2089 
/* Split an outgoing SDU into I-frame PDUs queued on seg_queue, setting
 * the SAR bits (unsegmented/start/continue/end) on each.  Returns 0 on
 * success or a negative errno; seg_queue is purged on failure.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* The whole SDU fits into a single PDU */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* The first PDU carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the start PDU has the SDU length field;
			 * subsequent PDUs regain that space for payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2157 
/* Send user data on a channel.  Builds the PDU(s) appropriate for the
 * channel type and mode and hands them to the transmit path.  Returns
 * the number of bytes accepted for sending or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
								u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2237 
2238 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2239 {
2240 	struct l2cap_ctrl control;
2241 	u16 seq;
2242 
2243 	BT_DBG("chan %p, txseq %u", chan, txseq);
2244 
2245 	memset(&control, 0, sizeof(control));
2246 	control.sframe = 1;
2247 	control.super = L2CAP_SUPER_SREJ;
2248 
2249 	for (seq = chan->expected_tx_seq; seq != txseq;
2250 	     seq = __next_seq(chan, seq)) {
2251 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2252 			control.reqseq = seq;
2253 			l2cap_send_sframe(chan, &control);
2254 			l2cap_seq_list_append(&chan->srej_list, seq);
2255 		}
2256 	}
2257 
2258 	chan->expected_tx_seq = __next_seq(chan, txseq);
2259 }
2260 
2261 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2262 {
2263 	struct l2cap_ctrl control;
2264 
2265 	BT_DBG("chan %p", chan);
2266 
2267 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2268 		return;
2269 
2270 	memset(&control, 0, sizeof(control));
2271 	control.sframe = 1;
2272 	control.super = L2CAP_SUPER_SREJ;
2273 	control.reqseq = chan->srej_list.tail;
2274 	l2cap_send_sframe(chan, &control);
2275 }
2276 
/* Re-send an SREJ for every sequence number still on srej_list,
 * stopping once txseq is reached.  Each entry except txseq is popped,
 * re-requested and re-appended, so those entries remain queued.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2302 
/* Process an acknowledgment (ReqSeq) from the peer: free every
 * transmitted-but-unacked frame with a sequence number earlier than
 * reqseq, advance expected_ack_seq, and stop the retransmission timer
 * once no unacked frames remain.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack is a duplicate. */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	/* All outstanding frames acknowledged: retransmission timer off. */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2334 
2335 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2336 {
2337 	BT_DBG("chan %p", chan);
2338 
2339 	chan->expected_tx_seq = chan->buffer_seq;
2340 	l2cap_seq_list_clear(&chan->srej_list);
2341 	skb_queue_purge(&chan->srej_q);
2342 	chan->rx_state = L2CAP_RX_STATE_RECV;
2343 }
2344 
/* ERTM transmit state machine handler for the XMIT state (normal
 * transmission, no poll outstanding).  Events carrying an F-bit or
 * poll transition the channel to WAIT_F where appropriate.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Queue the new frames and try to send immediately. */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Tell the peer we are busy (RNR is sent while busy). */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the peer we were busy; poll it with an
			 * RR so it resumes, and wait for the F-bit reply.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Send a poll and enter WAIT_F until the F-bit comes back. */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer for its state. */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2416 
2417 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2418 				  struct l2cap_ctrl *control,
2419 				  struct sk_buff_head *skbs, u8 event)
2420 {
2421 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2422 	       event);
2423 
2424 	switch (event) {
2425 	case L2CAP_EV_DATA_REQUEST:
2426 		if (chan->tx_send_head == NULL)
2427 			chan->tx_send_head = skb_peek(skbs);
2428 		/* Queue data, but don't send. */
2429 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2430 		break;
2431 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2432 		BT_DBG("Enter LOCAL_BUSY");
2433 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2434 
2435 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2436 			/* The SREJ_SENT state must be aborted if we are to
2437 			 * enter the LOCAL_BUSY state.
2438 			 */
2439 			l2cap_abort_rx_srej_sent(chan);
2440 		}
2441 
2442 		l2cap_send_ack(chan);
2443 
2444 		break;
2445 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2446 		BT_DBG("Exit LOCAL_BUSY");
2447 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2448 
2449 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2450 			struct l2cap_ctrl local_control;
2451 			memset(&local_control, 0, sizeof(local_control));
2452 			local_control.sframe = 1;
2453 			local_control.super = L2CAP_SUPER_RR;
2454 			local_control.poll = 1;
2455 			local_control.reqseq = chan->buffer_seq;
2456 			l2cap_send_sframe(chan, &local_control);
2457 
2458 			chan->retry_count = 1;
2459 			__set_monitor_timer(chan);
2460 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2461 		}
2462 		break;
2463 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2464 		l2cap_process_reqseq(chan, control->reqseq);
2465 
2466 		/* Fall through */
2467 
2468 	case L2CAP_EV_RECV_FBIT:
2469 		if (control && control->final) {
2470 			__clear_monitor_timer(chan);
2471 			if (chan->unacked_frames > 0)
2472 				__set_retrans_timer(chan);
2473 			chan->retry_count = 0;
2474 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2475 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2476 		}
2477 		break;
2478 	case L2CAP_EV_EXPLICIT_POLL:
2479 		/* Ignore */
2480 		break;
2481 	case L2CAP_EV_MONITOR_TO:
2482 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2483 			l2cap_send_rr_or_rnr(chan, 1);
2484 			__set_monitor_timer(chan);
2485 			chan->retry_count++;
2486 		} else {
2487 			l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2488 		}
2489 		break;
2490 	default:
2491 		break;
2492 	}
2493 }
2494 
2495 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2496 		     struct sk_buff_head *skbs, u8 event)
2497 {
2498 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2499 	       chan, control, skbs, event, chan->tx_state);
2500 
2501 	switch (chan->tx_state) {
2502 	case L2CAP_TX_STATE_XMIT:
2503 		l2cap_tx_state_xmit(chan, control, skbs, event);
2504 		break;
2505 	case L2CAP_TX_STATE_WAIT_F:
2506 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2507 		break;
2508 	default:
2509 		/* Ignore event */
2510 		break;
2511 	}
2512 }
2513 
/* Feed a received frame's ReqSeq (and F-bit, if set) into the transmit
 * state machine so acknowledged frames are released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2520 
/* Feed only a received frame's F-bit into the transmit state machine
 * (no ReqSeq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2527 
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* chan_lock protects the channel list during the walk. */
	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		/* Best effort: a failed clone just skips this socket. */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* recv takes ownership on success; free on failure. */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
2556 
/* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header + command header +
 * dlen bytes of payload from data.  Payload exceeding conn->mtu is
 * carried in continuation fragments (frag_list) without headers.
 * Returns NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	/* L2CAP length field covers command header + payload. */
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* First fragment carries whatever payload fits after
		 * the two headers.
		 */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb along with any fragments already chained. */
	kfree_skb(skb);
	return NULL;
}
2620 
/* Parse one configuration option at *ptr, advancing *ptr past it.
 * Returns the total option length consumed; type and olen report the
 * option header, and val holds the decoded value (1/2/4-byte options
 * are decoded little-endian, anything else is returned as a pointer
 * to the raw value bytes).
 *
 * NOTE(review): opt->len originates from remote data and is not
 * validated against the remaining buffer length here; callers bound
 * the loop with their own 'len' but a crafted option length could
 * still index past the option list — confirm callers validate.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a raw pointer. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2653 
2654 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2655 {
2656 	struct l2cap_conf_opt *opt = *ptr;
2657 
2658 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2659 
2660 	opt->type = type;
2661 	opt->len  = len;
2662 
2663 	switch (len) {
2664 	case 1:
2665 		*((u8 *) opt->val)  = val;
2666 		break;
2667 
2668 	case 2:
2669 		put_unaligned_le16(val, opt->val);
2670 		break;
2671 
2672 	case 4:
2673 		put_unaligned_le32(val, opt->val);
2674 		break;
2675 
2676 	default:
2677 		memcpy(opt->val, (void *) val, len);
2678 		break;
2679 	}
2680 
2681 	*ptr += L2CAP_CONF_OPT_SIZE + len;
2682 }
2683 
/* Append an Extended Flow Specification option built from the
 * channel's local parameters.  Only ERTM and streaming modes carry an
 * EFS; any other mode adds nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		/* Streaming uses fixed id/type and no latency/flush values. */
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
							(unsigned long) &efs);
}
2714 
/* Delayed-work handler for the ERTM acknowledgment timer: if any
 * received frames are still unacknowledged, send an RR (or RNR when
 * locally busy) to acknowledge them.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Frames received since the last acknowledgment we sent. */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* Presumably drops the reference taken when the timer was
	 * armed — confirm against the timer-set helpers.
	 */
	l2cap_chan_put(chan);
}
2734 
/* Reset a channel's sequence state and queues for (re)configuration.
 * For ERTM mode, additionally initialize the state machines, timers
 * and the SREJ/retransmission sequence lists.  Returns 0 on success
 * or a negative errno if a sequence list allocation fails.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Everything below applies to ERTM only. */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Don't leak the first list on partial failure. */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
2774 
2775 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2776 {
2777 	switch (mode) {
2778 	case L2CAP_MODE_STREAMING:
2779 	case L2CAP_MODE_ERTM:
2780 		if (l2cap_mode_supported(mode, remote_feat_mask))
2781 			return mode;
2782 		/* fall through */
2783 	default:
2784 		return L2CAP_MODE_BASIC;
2785 	}
2786 }
2787 
/* Extended Window Size is usable only with high-speed support enabled
 * and the remote advertising the feature.
 */
static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
{
	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
}
2792 
/* Extended Flow Specification is usable only with high-speed support
 * enabled and the remote advertising the feature.
 */
static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
{
	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
}
2797 
2798 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2799 {
2800 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2801 						__l2cap_ews_supported(chan)) {
2802 		/* use extended control field */
2803 		set_bit(FLAG_EXT_CTRL, &chan->flags);
2804 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2805 	} else {
2806 		chan->tx_win = min_t(u16, chan->tx_win,
2807 						L2CAP_DEFAULT_TX_WINDOW);
2808 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2809 	}
2810 	chan->ack_win = chan->tx_win;
2811 }
2812 
/* Build an outgoing Configure Request for the channel into data:
 * select the channel mode (falling back if the remote lacks support),
 * then append MTU, RFC, and — mode permitting — EFS, FCS and EWS
 * options.  Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only on the first config exchange. */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A "state 2" device keeps its requested mode as-is. */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Only send an explicit basic-mode RFC if the remote
		 * supports the reliable modes at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* PDU size bounded by the link MTU minus worst-case
		 * header overhead.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		/* The RFC field is one byte; larger windows go in EWS. */
		rfc.txwin_size = min_t(u16, chan->tx_win,
						L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
								chan->tx_win);
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
2934 
/* Parse the peer's buffered Configure Request (chan->conf_req) and
 * build our Configure Response into data.  Walks every option, checks
 * the requested mode/MTU/EFS against what we support, and fills in our
 * accepted values for the reverse direction.  Returns the response
 * length, or -ECONNREFUSED when the request cannot be reconciled.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: decode every option the peer sent. */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hint options may be ignored; others must be known. */
		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* EWS requires high-speed support on our side. */
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Unknown non-hint option: echo its type back. */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode reconciliation happens only on the first exchange. */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
					chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Second disagreement on mode: give up. */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			/* Service types must match unless one side is
			 * no-traffic.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
							sizeof(efs),
							(unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* With EWS, the real window came in that option;
			 * the one-byte RFC field is just the default.
			 */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
						chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
						le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
						le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					sizeof(efs), (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
						chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = __constant_cpu_to_le16(0);

	return ptr - data;
}
3154 
/* Parse the peer's Configure Response (rsp, len bytes) and build the
 * follow-up Configure Request into data, adopting the values the peer
 * offered.  *result may be downgraded to UNACCEPT (bad MTU).  Returns
 * the request length, or -ECONNREFUSED on an unacceptable response.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Refuse an MTU below the spec minimum. */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
							2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* A "state 2" device cannot change its mode. */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
							rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service types must be compatible. */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					sizeof(efs), (unsigned long) &efs);
			break;
		}
	}

	/* We asked for basic mode but the peer wants something else. */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
						le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
						le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = __constant_cpu_to_le16(0);

	return ptr - data;
}
3255 
3256 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3257 {
3258 	struct l2cap_conf_rsp *rsp = data;
3259 	void *ptr = rsp->data;
3260 
3261 	BT_DBG("chan %p", chan);
3262 
3263 	rsp->scid   = cpu_to_le16(chan->dcid);
3264 	rsp->result = cpu_to_le16(result);
3265 	rsp->flags  = cpu_to_le16(flags);
3266 
3267 	return ptr - data;
3268 }
3269 
/* Send the deferred success Connect Response for a channel whose
 * acceptance was delayed, then kick off configuration by sending our
 * first Configure Request (unless one was already sent).
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
	l2cap_send_cmd(conn, chan->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Only the first caller past this point sends the config req. */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3290 
/* Extract the final RFC (and extended window) parameters from a
 * Configure Response and apply them to the channel.  Sane defaults
 * pre-fill the rfc in case the remote omitted the options entirely.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	/* Only the reliable modes carry RFC timing parameters. */
	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* Extended control uses the EWS value; otherwise the
		 * one-byte RFC window applies.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
3341 
3342 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3343 {
3344 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3345 
3346 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3347 		return 0;
3348 
3349 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3350 					cmd->ident == conn->info_ident) {
3351 		cancel_delayed_work(&conn->info_timer);
3352 
3353 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3354 		conn->info_ident = 0;
3355 
3356 		l2cap_conn_start(conn);
3357 	}
3358 
3359 	return 0;
3360 }
3361 
/* Handle an incoming L2CAP Connect Request.
 *
 * Looks up a listening channel for the requested PSM, performs a link
 * security check, creates a child channel and replies with a Connect
 * Response.  If feature discovery with the peer has not completed, the
 * response carries result "pending" and an Information Request is sent
 * first.  Always returns 0; errors are reported to the peer via the
 * result field of the response.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	/* Lock order: conn->chan_lock before the parent socket lock */
	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	/* Keep the ACL alive while the new channel exists */
	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	/* The peer's source CID is our destination CID */
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	/* Our allocated source CID is reported back as the peer's dcid */
	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			/* Deferred setup lets userspace accept/reject */
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				__l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Wait for authentication to finish */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature discovery still in progress */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Pending with no info means we still need the peer's features:
	 * kick off the Information Request exchange now.
	 */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* On immediate success, start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
3482 
/* Handle an incoming L2CAP Connect Response.
 *
 * Matches the response to a local channel (by our scid if the peer
 * echoed one, otherwise by the command ident we used), then advances
 * the channel: success moves it to BT_CONFIG and sends the first
 * Configure Request, pending just marks the state, anything else tears
 * the channel down.  Returns -EFAULT if no matching channel is found.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
						dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* scid may be 0 in a reject; fall back to matching by ident */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EFAULT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		/* Peer's allocated CID becomes our destination CID */
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the initial Configure Request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a refusal */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3550 
3551 static inline void set_default_fcs(struct l2cap_chan *chan)
3552 {
3553 	/* FCS is enabled only in ERTM or streaming mode, if one or both
3554 	 * sides request it.
3555 	 */
3556 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3557 		chan->fcs = L2CAP_FCS_NONE;
3558 	else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3559 		chan->fcs = L2CAP_FCS_CRC16;
3560 }
3561 
/* Handle an incoming L2CAP Configure Request.
 *
 * Accumulates (possibly fragmented) configuration options into
 * chan->conf_req, and once the final fragment arrives parses them,
 * sends a Configure Response, and — when both directions are done —
 * finishes channel setup (FCS default, ERTM init, ready callback).
 * Returns -ENOENT when the CID does not match a local channel.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	/* Configuration is only legal in BT_CONFIG/BT_CONNECT2 */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		/* Unrecoverable option set: give up on the channel */
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: bring the channel up */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own Configure Request may not have gone out yet */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
			test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
					l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, flags), rsp);
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
3669 
/* Handle an incoming L2CAP Configure Response.
 *
 * Processes the peer's verdict on our Configure Request: success
 * records the negotiated RFC values, pending re-runs negotiation,
 * unaccept retries with adjusted options (up to a retry limit), and
 * anything else disconnects the channel.  When both directions finish,
 * completes channel setup.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
	int err = 0;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Record the RFC values the peer accepted */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* check compatibility */

			clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

			l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
						l2cap_build_conf_rsp(chan, buf,
						L2CAP_CONF_SUCCESS, 0x0000), buf);
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through - retry limit exceeded, treat as failure */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}

	/* More option fragments still to come */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: bring the channel up */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan->conn, chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
3775 
/* Handle an incoming L2CAP Disconnect Request.
 *
 * Acknowledges with a Disconnect Response, shuts down the backing
 * socket and removes the channel.  An extra channel reference is held
 * across l2cap_chan_del() so the ops->close() callback can still run
 * after the channel is unlinked and unlocked.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	/* Echo the CIDs back, swapped to the peer's point of view */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Hold keeps chan valid for close() after it is deleted */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3821 
/* Handle an incoming L2CAP Disconnect Response.
 *
 * The peer confirmed our disconnect: remove the channel.  As in
 * l2cap_disconnect_req(), an extra reference is held so ops->close()
 * can run safely after the channel is unlinked and unlocked.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold keeps chan valid for close() after it is deleted */
	l2cap_chan_hold(chan);
	/* err 0: this is a clean, locally initiated disconnect */
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
3855 
3856 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3857 {
3858 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3859 	u16 type;
3860 
3861 	type = __le16_to_cpu(req->type);
3862 
3863 	BT_DBG("type 0x%4.4x", type);
3864 
3865 	if (type == L2CAP_IT_FEAT_MASK) {
3866 		u8 buf[8];
3867 		u32 feat_mask = l2cap_feat_mask;
3868 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3869 		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3870 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3871 		if (!disable_ertm)
3872 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3873 							 | L2CAP_FEAT_FCS;
3874 		if (enable_hs)
3875 			feat_mask |= L2CAP_FEAT_EXT_FLOW
3876 						| L2CAP_FEAT_EXT_WINDOW;
3877 
3878 		put_unaligned_le32(feat_mask, rsp->data);
3879 		l2cap_send_cmd(conn, cmd->ident,
3880 					L2CAP_INFO_RSP, sizeof(buf), buf);
3881 	} else if (type == L2CAP_IT_FIXED_CHAN) {
3882 		u8 buf[12];
3883 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3884 
3885 		if (enable_hs)
3886 			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3887 		else
3888 			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3889 
3890 		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3891 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3892 		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3893 		l2cap_send_cmd(conn, cmd->ident,
3894 					L2CAP_INFO_RSP, sizeof(buf), buf);
3895 	} else {
3896 		struct l2cap_info_rsp rsp;
3897 		rsp.type   = cpu_to_le16(type);
3898 		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3899 		l2cap_send_cmd(conn, cmd->ident,
3900 					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3901 	}
3902 
3903 	return 0;
3904 }
3905 
3906 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3907 {
3908 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3909 	u16 type, result;
3910 
3911 	type   = __le16_to_cpu(rsp->type);
3912 	result = __le16_to_cpu(rsp->result);
3913 
3914 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3915 
3916 	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
3917 	if (cmd->ident != conn->info_ident ||
3918 			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3919 		return 0;
3920 
3921 	cancel_delayed_work(&conn->info_timer);
3922 
3923 	if (result != L2CAP_IR_SUCCESS) {
3924 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3925 		conn->info_ident = 0;
3926 
3927 		l2cap_conn_start(conn);
3928 
3929 		return 0;
3930 	}
3931 
3932 	switch (type) {
3933 	case L2CAP_IT_FEAT_MASK:
3934 		conn->feat_mask = get_unaligned_le32(rsp->data);
3935 
3936 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3937 			struct l2cap_info_req req;
3938 			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3939 
3940 			conn->info_ident = l2cap_get_ident(conn);
3941 
3942 			l2cap_send_cmd(conn, conn->info_ident,
3943 					L2CAP_INFO_REQ, sizeof(req), &req);
3944 		} else {
3945 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3946 			conn->info_ident = 0;
3947 
3948 			l2cap_conn_start(conn);
3949 		}
3950 		break;
3951 
3952 	case L2CAP_IT_FIXED_CHAN:
3953 		conn->fixed_chan_mask = rsp->data[0];
3954 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3955 		conn->info_ident = 0;
3956 
3957 		l2cap_conn_start(conn);
3958 		break;
3959 	}
3960 
3961 	return 0;
3962 }
3963 
3964 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3965 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3966 					void *data)
3967 {
3968 	struct l2cap_create_chan_req *req = data;
3969 	struct l2cap_create_chan_rsp rsp;
3970 	u16 psm, scid;
3971 
3972 	if (cmd_len != sizeof(*req))
3973 		return -EPROTO;
3974 
3975 	if (!enable_hs)
3976 		return -EINVAL;
3977 
3978 	psm = le16_to_cpu(req->psm);
3979 	scid = le16_to_cpu(req->scid);
3980 
3981 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
3982 
3983 	/* Placeholder: Always reject */
3984 	rsp.dcid = 0;
3985 	rsp.scid = cpu_to_le16(scid);
3986 	rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3987 	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3988 
3989 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3990 		       sizeof(rsp), &rsp);
3991 
3992 	return 0;
3993 }
3994 
/* Handle an incoming Create Channel Response (AMP).
 *
 * The response layout matches a regular Connect Response, so the
 * existing handler does all the work.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
4002 
4003 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4004 				     u16 icid, u16 result)
4005 {
4006 	struct l2cap_move_chan_rsp rsp;
4007 
4008 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4009 
4010 	rsp.icid = cpu_to_le16(icid);
4011 	rsp.result = cpu_to_le16(result);
4012 
4013 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4014 }
4015 
4016 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4017 				     struct l2cap_chan *chan,
4018 				     u16 icid, u16 result)
4019 {
4020 	struct l2cap_move_chan_cfm cfm;
4021 	u8 ident;
4022 
4023 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4024 
4025 	ident = l2cap_get_ident(conn);
4026 	if (chan)
4027 		chan->ident = ident;
4028 
4029 	cfm.icid = cpu_to_le16(icid);
4030 	cfm.result = cpu_to_le16(result);
4031 
4032 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4033 }
4034 
4035 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4036 					 u16 icid)
4037 {
4038 	struct l2cap_move_chan_cfm_rsp rsp;
4039 
4040 	BT_DBG("icid 0x%4.4x", icid);
4041 
4042 	rsp.icid = cpu_to_le16(icid);
4043 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4044 }
4045 
4046 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4047 					 struct l2cap_cmd_hdr *cmd,
4048 					 u16 cmd_len, void *data)
4049 {
4050 	struct l2cap_move_chan_req *req = data;
4051 	u16 icid = 0;
4052 	u16 result = L2CAP_MR_NOT_ALLOWED;
4053 
4054 	if (cmd_len != sizeof(*req))
4055 		return -EPROTO;
4056 
4057 	icid = le16_to_cpu(req->icid);
4058 
4059 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4060 
4061 	if (!enable_hs)
4062 		return -EINVAL;
4063 
4064 	/* Placeholder: Always refuse */
4065 	l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4066 
4067 	return 0;
4068 }
4069 
4070 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4071 					 struct l2cap_cmd_hdr *cmd,
4072 					 u16 cmd_len, void *data)
4073 {
4074 	struct l2cap_move_chan_rsp *rsp = data;
4075 	u16 icid, result;
4076 
4077 	if (cmd_len != sizeof(*rsp))
4078 		return -EPROTO;
4079 
4080 	icid = le16_to_cpu(rsp->icid);
4081 	result = le16_to_cpu(rsp->result);
4082 
4083 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4084 
4085 	/* Placeholder: Always unconfirmed */
4086 	l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4087 
4088 	return 0;
4089 }
4090 
4091 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4092 					     struct l2cap_cmd_hdr *cmd,
4093 					     u16 cmd_len, void *data)
4094 {
4095 	struct l2cap_move_chan_cfm *cfm = data;
4096 	u16 icid, result;
4097 
4098 	if (cmd_len != sizeof(*cfm))
4099 		return -EPROTO;
4100 
4101 	icid = le16_to_cpu(cfm->icid);
4102 	result = le16_to_cpu(cfm->result);
4103 
4104 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4105 
4106 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4107 
4108 	return 0;
4109 }
4110 
4111 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4112 						 struct l2cap_cmd_hdr *cmd,
4113 						 u16 cmd_len, void *data)
4114 {
4115 	struct l2cap_move_chan_cfm_rsp *rsp = data;
4116 	u16 icid;
4117 
4118 	if (cmd_len != sizeof(*rsp))
4119 		return -EPROTO;
4120 
4121 	icid = le16_to_cpu(rsp->icid);
4122 
4123 	BT_DBG("icid 0x%4.4x", icid);
4124 
4125 	return 0;
4126 }
4127 
4128 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4129 							u16 to_multiplier)
4130 {
4131 	u16 max_latency;
4132 
4133 	if (min > max || min < 6 || max > 3200)
4134 		return -EINVAL;
4135 
4136 	if (to_multiplier < 10 || to_multiplier > 3200)
4137 		return -EINVAL;
4138 
4139 	if (max >= to_multiplier * 8)
4140 		return -EINVAL;
4141 
4142 	max_latency = (to_multiplier * 8 / max) - 1;
4143 	if (latency > 499 || latency > max_latency)
4144 		return -EINVAL;
4145 
4146 	return 0;
4147 }
4148 
4149 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4150 					struct l2cap_cmd_hdr *cmd, u8 *data)
4151 {
4152 	struct hci_conn *hcon = conn->hcon;
4153 	struct l2cap_conn_param_update_req *req;
4154 	struct l2cap_conn_param_update_rsp rsp;
4155 	u16 min, max, latency, to_multiplier, cmd_len;
4156 	int err;
4157 
4158 	if (!(hcon->link_mode & HCI_LM_MASTER))
4159 		return -EINVAL;
4160 
4161 	cmd_len = __le16_to_cpu(cmd->len);
4162 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4163 		return -EPROTO;
4164 
4165 	req = (struct l2cap_conn_param_update_req *) data;
4166 	min		= __le16_to_cpu(req->min);
4167 	max		= __le16_to_cpu(req->max);
4168 	latency		= __le16_to_cpu(req->latency);
4169 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
4170 
4171 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4172 						min, max, latency, to_multiplier);
4173 
4174 	memset(&rsp, 0, sizeof(rsp));
4175 
4176 	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4177 	if (err)
4178 		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4179 	else
4180 		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4181 
4182 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4183 							sizeof(rsp), &rsp);
4184 
4185 	if (!err)
4186 		hci_le_conn_update(hcon, min, max, latency, to_multiplier);
4187 
4188 	return 0;
4189 }
4190 
4191 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4192 			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4193 {
4194 	int err = 0;
4195 
4196 	switch (cmd->code) {
4197 	case L2CAP_COMMAND_REJ:
4198 		l2cap_command_rej(conn, cmd, data);
4199 		break;
4200 
4201 	case L2CAP_CONN_REQ:
4202 		err = l2cap_connect_req(conn, cmd, data);
4203 		break;
4204 
4205 	case L2CAP_CONN_RSP:
4206 		err = l2cap_connect_rsp(conn, cmd, data);
4207 		break;
4208 
4209 	case L2CAP_CONF_REQ:
4210 		err = l2cap_config_req(conn, cmd, cmd_len, data);
4211 		break;
4212 
4213 	case L2CAP_CONF_RSP:
4214 		err = l2cap_config_rsp(conn, cmd, data);
4215 		break;
4216 
4217 	case L2CAP_DISCONN_REQ:
4218 		err = l2cap_disconnect_req(conn, cmd, data);
4219 		break;
4220 
4221 	case L2CAP_DISCONN_RSP:
4222 		err = l2cap_disconnect_rsp(conn, cmd, data);
4223 		break;
4224 
4225 	case L2CAP_ECHO_REQ:
4226 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4227 		break;
4228 
4229 	case L2CAP_ECHO_RSP:
4230 		break;
4231 
4232 	case L2CAP_INFO_REQ:
4233 		err = l2cap_information_req(conn, cmd, data);
4234 		break;
4235 
4236 	case L2CAP_INFO_RSP:
4237 		err = l2cap_information_rsp(conn, cmd, data);
4238 		break;
4239 
4240 	case L2CAP_CREATE_CHAN_REQ:
4241 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4242 		break;
4243 
4244 	case L2CAP_CREATE_CHAN_RSP:
4245 		err = l2cap_create_channel_rsp(conn, cmd, data);
4246 		break;
4247 
4248 	case L2CAP_MOVE_CHAN_REQ:
4249 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4250 		break;
4251 
4252 	case L2CAP_MOVE_CHAN_RSP:
4253 		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4254 		break;
4255 
4256 	case L2CAP_MOVE_CHAN_CFM:
4257 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4258 		break;
4259 
4260 	case L2CAP_MOVE_CHAN_CFM_RSP:
4261 		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4262 		break;
4263 
4264 	default:
4265 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
4266 		err = -EINVAL;
4267 		break;
4268 	}
4269 
4270 	return err;
4271 }
4272 
4273 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4274 					struct l2cap_cmd_hdr *cmd, u8 *data)
4275 {
4276 	switch (cmd->code) {
4277 	case L2CAP_COMMAND_REJ:
4278 		return 0;
4279 
4280 	case L2CAP_CONN_PARAM_UPDATE_REQ:
4281 		return l2cap_conn_param_update_req(conn, cmd, data);
4282 
4283 	case L2CAP_CONN_PARAM_UPDATE_RSP:
4284 		return 0;
4285 
4286 	default:
4287 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4288 		return -EINVAL;
4289 	}
4290 }
4291 
/* Process all signaling commands packed into one signaling-channel PDU.
 *
 * Each command is copied out of the skb, length-checked against the
 * remaining payload, and dispatched to the LE or BR/EDR handler.  A
 * handler error triggers a Command Reject toward the peer.  The skb is
 * consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
							struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	/* A single PDU may carry several commands back to back */
	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Truncated command or illegal ident 0: stop parsing */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload */
		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
4338 
/* Verify the CRC16 FCS trailing a received ERTM/streaming frame.
 *
 * Returns 0 when FCS is disabled or matches, -EBADMSG on mismatch.
 * Note the ordering: skb_trim() removes the 2 FCS bytes from skb->len
 * first, so skb->data + skb->len then points exactly at the received
 * FCS, which is still present in the underlying buffer.  The CRC is
 * computed over the L2CAP header (in front of skb->data) plus the
 * remaining payload.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
4359 
/* Answer a poll by sending a frame with the F-bit set.
 *
 * Chooses RNR when locally busy, otherwise lets pending I-frames carry
 * the F-bit, falling back to an RR S-frame if nothing consumed it.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just cleared its busy condition; restart the
	 * retransmission timer if frames are still unacked.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
4393 
/* Append new_frag to skb's fragment list and update the length
 * accounting.  *last_frag tracks the current tail of the chain
 * (initially the head skb itself — NOTE(review): callers pass
 * &chan->sdu_last_frag, which l2cap_reassemble_sdu() sets to the head
 * skb when reassembly starts; confirm for any new caller).
 */
static void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
4412 
/* Reassemble one SDU from (possibly segmented) ERTM/streaming frames.
 *
 * control->sar drives a small state machine: unsegmented frames are
 * delivered directly, START frames open a partial SDU in chan->sdu,
 * CONTINUE/END frames append to it, and END delivers the completed SDU
 * via chan->ops->recv().  On success the skb's ownership passes to the
 * channel (skb is set to NULL so it is not freed here); on any error
 * both the skb and any partial SDU are discarded and the error is
 * returned (-EINVAL for a SAR sequence violation, -EMSGSIZE for an SDU
 * exceeding the MTU, or the recv callback's error).
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* An SDU already in progress makes this a protocol error */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First two bytes of a START frame carry the SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A START frame may not already contain the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* Ownership transferred to chan->sdu */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Overrunning the announced SDU length is an error */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Total length must now match exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop both the offending frame and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
4494 
4495 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4496 {
4497 	u8 event;
4498 
4499 	if (chan->mode != L2CAP_MODE_ERTM)
4500 		return;
4501 
4502 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4503 	l2cap_tx(chan, NULL, NULL, event);
4504 }
4505 
/* Drain in-sequence I-frames from the SREJ queue.
 *
 * Frames are pulled out of srej_q in buffer_seq order and fed to
 * l2cap_reassemble_sdu() until a sequence gap, local-busy condition,
 * or reassembly error stops the loop.  Once the queue is empty the
 * channel returns to the normal receive state and acks the peer.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap: the next expected frame has not arrived yet */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
4539 
/* Handle a received SREJ S-frame: the peer is selectively requesting
 * retransmission of the single I-frame with sequence number
 * control->reqseq.  Validates the request, enforces the retry limit,
 * and retransmits, tracking P/F-bit state so that a retransmission
 * triggered by a poll is not repeated when the matching final arrives.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control);

	/* reqseq == next_tx_seq would mean the peer is rejecting a frame
	 * we never sent - protocol violation, tear the channel down.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* SREJ with P=1: retransmit and answer with F=1 */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		/* Remember the reqseq so a later SREJ with F=1 for the
		 * same frame is not retransmitted twice.
		 */
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if it already happened
			 * in response to the poll for this same reqseq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
4597 
/* Handle a received REJ S-frame: the peer requests retransmission of
 * all unacked I-frames starting at control->reqseq.  Validates the
 * request and retry budget, then retransmits everything outstanding,
 * using CONN_REJ_ACT to avoid a duplicate retransmit-all when the
 * F-bit response to an earlier poll arrives.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* Rejecting a frame that was never sent is a protocol violation */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retries; skb may legitimately be
	 * absent if the frame was already acked and freed.
	 */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* If REJ_ACT is set, this final answers a poll we already
		 * serviced with a retransmit-all - don't do it again.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
4634 
/* Classify the TxSeq of a received I-frame relative to the receive
 * window and SREJ recovery state.  Returns one of the L2CAP_TXSEQ_*
 * classifications that drive the rx state machines: expected,
 * duplicate, unexpected (gap - triggers SREJ), or invalid (outside the
 * tx window; ignorable only when the window is at most half the
 * sequence space, see the "double poll" discussion below).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
								chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		/* Head of the SREJ list is the retransmission we are
		 * waiting for next.
		 */
		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq earlier than expected_tx_seq (mod sequence space) means
	 * we already received this frame.
	 */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
		__seq_offset(chan, chan->expected_tx_seq,
			     chan->last_acked_seq)){
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
4721 
4722 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4723 			       struct l2cap_ctrl *control,
4724 			       struct sk_buff *skb, u8 event)
4725 {
4726 	int err = 0;
4727 	bool skb_in_use = 0;
4728 
4729 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4730 	       event);
4731 
4732 	switch (event) {
4733 	case L2CAP_EV_RECV_IFRAME:
4734 		switch (l2cap_classify_txseq(chan, control->txseq)) {
4735 		case L2CAP_TXSEQ_EXPECTED:
4736 			l2cap_pass_to_tx(chan, control);
4737 
4738 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4739 				BT_DBG("Busy, discarding expected seq %d",
4740 				       control->txseq);
4741 				break;
4742 			}
4743 
4744 			chan->expected_tx_seq = __next_seq(chan,
4745 							   control->txseq);
4746 
4747 			chan->buffer_seq = chan->expected_tx_seq;
4748 			skb_in_use = 1;
4749 
4750 			err = l2cap_reassemble_sdu(chan, skb, control);
4751 			if (err)
4752 				break;
4753 
4754 			if (control->final) {
4755 				if (!test_and_clear_bit(CONN_REJ_ACT,
4756 							&chan->conn_state)) {
4757 					control->final = 0;
4758 					l2cap_retransmit_all(chan, control);
4759 					l2cap_ertm_send(chan);
4760 				}
4761 			}
4762 
4763 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4764 				l2cap_send_ack(chan);
4765 			break;
4766 		case L2CAP_TXSEQ_UNEXPECTED:
4767 			l2cap_pass_to_tx(chan, control);
4768 
4769 			/* Can't issue SREJ frames in the local busy state.
4770 			 * Drop this frame, it will be seen as missing
4771 			 * when local busy is exited.
4772 			 */
4773 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4774 				BT_DBG("Busy, discarding unexpected seq %d",
4775 				       control->txseq);
4776 				break;
4777 			}
4778 
4779 			/* There was a gap in the sequence, so an SREJ
4780 			 * must be sent for each missing frame.  The
4781 			 * current frame is stored for later use.
4782 			 */
4783 			skb_queue_tail(&chan->srej_q, skb);
4784 			skb_in_use = 1;
4785 			BT_DBG("Queued %p (queue len %d)", skb,
4786 			       skb_queue_len(&chan->srej_q));
4787 
4788 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4789 			l2cap_seq_list_clear(&chan->srej_list);
4790 			l2cap_send_srej(chan, control->txseq);
4791 
4792 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4793 			break;
4794 		case L2CAP_TXSEQ_DUPLICATE:
4795 			l2cap_pass_to_tx(chan, control);
4796 			break;
4797 		case L2CAP_TXSEQ_INVALID_IGNORE:
4798 			break;
4799 		case L2CAP_TXSEQ_INVALID:
4800 		default:
4801 			l2cap_send_disconn_req(chan->conn, chan,
4802 					       ECONNRESET);
4803 			break;
4804 		}
4805 		break;
4806 	case L2CAP_EV_RECV_RR:
4807 		l2cap_pass_to_tx(chan, control);
4808 		if (control->final) {
4809 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4810 
4811 			if (!test_and_clear_bit(CONN_REJ_ACT,
4812 						&chan->conn_state)) {
4813 				control->final = 0;
4814 				l2cap_retransmit_all(chan, control);
4815 			}
4816 
4817 			l2cap_ertm_send(chan);
4818 		} else if (control->poll) {
4819 			l2cap_send_i_or_rr_or_rnr(chan);
4820 		} else {
4821 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
4822 					       &chan->conn_state) &&
4823 			    chan->unacked_frames)
4824 				__set_retrans_timer(chan);
4825 
4826 			l2cap_ertm_send(chan);
4827 		}
4828 		break;
4829 	case L2CAP_EV_RECV_RNR:
4830 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4831 		l2cap_pass_to_tx(chan, control);
4832 		if (control && control->poll) {
4833 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
4834 			l2cap_send_rr_or_rnr(chan, 0);
4835 		}
4836 		__clear_retrans_timer(chan);
4837 		l2cap_seq_list_clear(&chan->retrans_list);
4838 		break;
4839 	case L2CAP_EV_RECV_REJ:
4840 		l2cap_handle_rej(chan, control);
4841 		break;
4842 	case L2CAP_EV_RECV_SREJ:
4843 		l2cap_handle_srej(chan, control);
4844 		break;
4845 	default:
4846 		break;
4847 	}
4848 
4849 	if (skb && !skb_in_use) {
4850 		BT_DBG("Freeing %p", skb);
4851 		kfree_skb(skb);
4852 	}
4853 
4854 	return err;
4855 }
4856 
4857 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4858 				    struct l2cap_ctrl *control,
4859 				    struct sk_buff *skb, u8 event)
4860 {
4861 	int err = 0;
4862 	u16 txseq = control->txseq;
4863 	bool skb_in_use = 0;
4864 
4865 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4866 	       event);
4867 
4868 	switch (event) {
4869 	case L2CAP_EV_RECV_IFRAME:
4870 		switch (l2cap_classify_txseq(chan, txseq)) {
4871 		case L2CAP_TXSEQ_EXPECTED:
4872 			/* Keep frame for reassembly later */
4873 			l2cap_pass_to_tx(chan, control);
4874 			skb_queue_tail(&chan->srej_q, skb);
4875 			skb_in_use = 1;
4876 			BT_DBG("Queued %p (queue len %d)", skb,
4877 			       skb_queue_len(&chan->srej_q));
4878 
4879 			chan->expected_tx_seq = __next_seq(chan, txseq);
4880 			break;
4881 		case L2CAP_TXSEQ_EXPECTED_SREJ:
4882 			l2cap_seq_list_pop(&chan->srej_list);
4883 
4884 			l2cap_pass_to_tx(chan, control);
4885 			skb_queue_tail(&chan->srej_q, skb);
4886 			skb_in_use = 1;
4887 			BT_DBG("Queued %p (queue len %d)", skb,
4888 			       skb_queue_len(&chan->srej_q));
4889 
4890 			err = l2cap_rx_queued_iframes(chan);
4891 			if (err)
4892 				break;
4893 
4894 			break;
4895 		case L2CAP_TXSEQ_UNEXPECTED:
4896 			/* Got a frame that can't be reassembled yet.
4897 			 * Save it for later, and send SREJs to cover
4898 			 * the missing frames.
4899 			 */
4900 			skb_queue_tail(&chan->srej_q, skb);
4901 			skb_in_use = 1;
4902 			BT_DBG("Queued %p (queue len %d)", skb,
4903 			       skb_queue_len(&chan->srej_q));
4904 
4905 			l2cap_pass_to_tx(chan, control);
4906 			l2cap_send_srej(chan, control->txseq);
4907 			break;
4908 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4909 			/* This frame was requested with an SREJ, but
4910 			 * some expected retransmitted frames are
4911 			 * missing.  Request retransmission of missing
4912 			 * SREJ'd frames.
4913 			 */
4914 			skb_queue_tail(&chan->srej_q, skb);
4915 			skb_in_use = 1;
4916 			BT_DBG("Queued %p (queue len %d)", skb,
4917 			       skb_queue_len(&chan->srej_q));
4918 
4919 			l2cap_pass_to_tx(chan, control);
4920 			l2cap_send_srej_list(chan, control->txseq);
4921 			break;
4922 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
4923 			/* We've already queued this frame.  Drop this copy. */
4924 			l2cap_pass_to_tx(chan, control);
4925 			break;
4926 		case L2CAP_TXSEQ_DUPLICATE:
4927 			/* Expecting a later sequence number, so this frame
4928 			 * was already received.  Ignore it completely.
4929 			 */
4930 			break;
4931 		case L2CAP_TXSEQ_INVALID_IGNORE:
4932 			break;
4933 		case L2CAP_TXSEQ_INVALID:
4934 		default:
4935 			l2cap_send_disconn_req(chan->conn, chan,
4936 					       ECONNRESET);
4937 			break;
4938 		}
4939 		break;
4940 	case L2CAP_EV_RECV_RR:
4941 		l2cap_pass_to_tx(chan, control);
4942 		if (control->final) {
4943 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4944 
4945 			if (!test_and_clear_bit(CONN_REJ_ACT,
4946 						&chan->conn_state)) {
4947 				control->final = 0;
4948 				l2cap_retransmit_all(chan, control);
4949 			}
4950 
4951 			l2cap_ertm_send(chan);
4952 		} else if (control->poll) {
4953 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
4954 					       &chan->conn_state) &&
4955 			    chan->unacked_frames) {
4956 				__set_retrans_timer(chan);
4957 			}
4958 
4959 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
4960 			l2cap_send_srej_tail(chan);
4961 		} else {
4962 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
4963 					       &chan->conn_state) &&
4964 			    chan->unacked_frames)
4965 				__set_retrans_timer(chan);
4966 
4967 			l2cap_send_ack(chan);
4968 		}
4969 		break;
4970 	case L2CAP_EV_RECV_RNR:
4971 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4972 		l2cap_pass_to_tx(chan, control);
4973 		if (control->poll) {
4974 			l2cap_send_srej_tail(chan);
4975 		} else {
4976 			struct l2cap_ctrl rr_control;
4977 			memset(&rr_control, 0, sizeof(rr_control));
4978 			rr_control.sframe = 1;
4979 			rr_control.super = L2CAP_SUPER_RR;
4980 			rr_control.reqseq = chan->buffer_seq;
4981 			l2cap_send_sframe(chan, &rr_control);
4982 		}
4983 
4984 		break;
4985 	case L2CAP_EV_RECV_REJ:
4986 		l2cap_handle_rej(chan, control);
4987 		break;
4988 	case L2CAP_EV_RECV_SREJ:
4989 		l2cap_handle_srej(chan, control);
4990 		break;
4991 	}
4992 
4993 	if (skb && !skb_in_use) {
4994 		BT_DBG("Freeing %p", skb);
4995 		kfree_skb(skb);
4996 	}
4997 
4998 	return err;
4999 }
5000 
5001 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5002 {
5003 	/* Make sure reqseq is for a packet that has been sent but not acked */
5004 	u16 unacked;
5005 
5006 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5007 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
5008 }
5009 
5010 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5011 		    struct sk_buff *skb, u8 event)
5012 {
5013 	int err = 0;
5014 
5015 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5016 	       control, skb, event, chan->rx_state);
5017 
5018 	if (__valid_reqseq(chan, control->reqseq)) {
5019 		switch (chan->rx_state) {
5020 		case L2CAP_RX_STATE_RECV:
5021 			err = l2cap_rx_state_recv(chan, control, skb, event);
5022 			break;
5023 		case L2CAP_RX_STATE_SREJ_SENT:
5024 			err = l2cap_rx_state_srej_sent(chan, control, skb,
5025 						       event);
5026 			break;
5027 		default:
5028 			/* shut it down */
5029 			break;
5030 		}
5031 	} else {
5032 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5033 		       control->reqseq, chan->next_tx_seq,
5034 		       chan->expected_ack_seq);
5035 		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5036 	}
5037 
5038 	return err;
5039 }
5040 
/* Receive path for streaming mode: deliver the I-frame if its TxSeq is
 * the expected one, otherwise discard it together with any partially
 * reassembled SDU (streaming mode never retransmits).  Always returns 0
 * - reassembly errors are deliberately ignored in this mode.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		/* Return value intentionally ignored: a lost fragment just
		 * drops this SDU, the stream keeps going.
		 */
		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out-of-sequence frame: abandon any in-progress SDU */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize expectations to the frame just seen */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
5078 
5079 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5080 {
5081 	struct l2cap_ctrl *control = &bt_cb(skb)->control;
5082 	u16 len;
5083 	u8 event;
5084 
5085 	__unpack_control(chan, skb);
5086 
5087 	len = skb->len;
5088 
5089 	/*
5090 	 * We can just drop the corrupted I-frame here.
5091 	 * Receiver will miss it and start proper recovery
5092 	 * procedures and ask for retransmission.
5093 	 */
5094 	if (l2cap_check_fcs(chan, skb))
5095 		goto drop;
5096 
5097 	if (!control->sframe && control->sar == L2CAP_SAR_START)
5098 		len -= L2CAP_SDULEN_SIZE;
5099 
5100 	if (chan->fcs == L2CAP_FCS_CRC16)
5101 		len -= L2CAP_FCS_SIZE;
5102 
5103 	if (len > chan->mps) {
5104 		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5105 		goto drop;
5106 	}
5107 
5108 	if (!control->sframe) {
5109 		int err;
5110 
5111 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5112 		       control->sar, control->reqseq, control->final,
5113 		       control->txseq);
5114 
5115 		/* Validate F-bit - F=0 always valid, F=1 only
5116 		 * valid in TX WAIT_F
5117 		 */
5118 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5119 			goto drop;
5120 
5121 		if (chan->mode != L2CAP_MODE_STREAMING) {
5122 			event = L2CAP_EV_RECV_IFRAME;
5123 			err = l2cap_rx(chan, control, skb, event);
5124 		} else {
5125 			err = l2cap_stream_rx(chan, control, skb);
5126 		}
5127 
5128 		if (err)
5129 			l2cap_send_disconn_req(chan->conn, chan,
5130 					       ECONNRESET);
5131 	} else {
5132 		const u8 rx_func_to_event[4] = {
5133 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5134 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5135 		};
5136 
5137 		/* Only I-frames are expected in streaming mode */
5138 		if (chan->mode == L2CAP_MODE_STREAMING)
5139 			goto drop;
5140 
5141 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5142 		       control->reqseq, control->final, control->poll,
5143 		       control->super);
5144 
5145 		if (len != 0) {
5146 			BT_ERR("%d", len);
5147 			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5148 			goto drop;
5149 		}
5150 
5151 		/* Validate F and P bits */
5152 		if (control->final && (control->poll ||
5153 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5154 			goto drop;
5155 
5156 		event = rx_func_to_event[control->super];
5157 		if (l2cap_rx(chan, control, skb, event))
5158 			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5159 	}
5160 
5161 	return 0;
5162 
5163 drop:
5164 	kfree_skb(skb);
5165 	return 0;
5166 }
5167 
/* Deliver an inbound data frame to the channel identified by scid.
 * Unknown CIDs are dropped, except L2CAP_CID_A2MP which may create a
 * new A2MP channel on demand.  The skb is consumed on every path.
 *
 * NOTE(review): the channel appears to be returned locked by
 * l2cap_get_chan_by_scid (the A2MP branch locks explicitly, and all
 * exits go through l2cap_chan_unlock) - confirm against its definition.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			/* Match the lock state of the scid-lookup path */
			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* recv() == 0 means the skb was consumed */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() takes ownership of the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
5226 
5227 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5228 				  struct sk_buff *skb)
5229 {
5230 	struct l2cap_chan *chan;
5231 
5232 	chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5233 	if (!chan)
5234 		goto drop;
5235 
5236 	BT_DBG("chan %p, len %d", chan, skb->len);
5237 
5238 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5239 		goto drop;
5240 
5241 	if (chan->imtu < skb->len)
5242 		goto drop;
5243 
5244 	if (!chan->ops->recv(chan, skb))
5245 		return;
5246 
5247 drop:
5248 	kfree_skb(skb);
5249 }
5250 
5251 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5252 			      struct sk_buff *skb)
5253 {
5254 	struct l2cap_chan *chan;
5255 
5256 	chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5257 	if (!chan)
5258 		goto drop;
5259 
5260 	BT_DBG("chan %p, len %d", chan, skb->len);
5261 
5262 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5263 		goto drop;
5264 
5265 	if (chan->imtu < skb->len)
5266 		goto drop;
5267 
5268 	if (!chan->ops->recv(chan, skb))
5269 		return;
5270 
5271 drop:
5272 	kfree_skb(skb);
5273 }
5274 
/* Demultiplex one complete L2CAP frame to the handler for its CID.
 * The caller (l2cap_recv_acldata) guarantees at least a full basic
 * header is present.  The skb is consumed on every path.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	/* lh still points at the header data after the pull; the header
	 * bytes remain valid in the skb's buffer.
	 */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the remaining payload exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry a leading PSM field */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		/* Security Manager errors terminate the connection */
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
5318 
5319 /* ---- L2CAP interface with lower layer (HCI) ---- */
5320 
5321 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5322 {
5323 	int exact = 0, lm1 = 0, lm2 = 0;
5324 	struct l2cap_chan *c;
5325 
5326 	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5327 
5328 	/* Find listening sockets and check their link_mode */
5329 	read_lock(&chan_list_lock);
5330 	list_for_each_entry(c, &chan_list, global_l) {
5331 		struct sock *sk = c->sk;
5332 
5333 		if (c->state != BT_LISTEN)
5334 			continue;
5335 
5336 		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5337 			lm1 |= HCI_LM_ACCEPT;
5338 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5339 				lm1 |= HCI_LM_MASTER;
5340 			exact++;
5341 		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5342 			lm2 |= HCI_LM_ACCEPT;
5343 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5344 				lm2 |= HCI_LM_MASTER;
5345 		}
5346 	}
5347 	read_unlock(&chan_list_lock);
5348 
5349 	return exact ? lm1 : lm2;
5350 }
5351 
5352 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5353 {
5354 	struct l2cap_conn *conn;
5355 
5356 	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5357 
5358 	if (!status) {
5359 		conn = l2cap_conn_add(hcon, status);
5360 		if (conn)
5361 			l2cap_conn_ready(conn);
5362 	} else
5363 		l2cap_conn_del(hcon, bt_to_errno(status));
5364 
5365 }
5366 
5367 int l2cap_disconn_ind(struct hci_conn *hcon)
5368 {
5369 	struct l2cap_conn *conn = hcon->l2cap_data;
5370 
5371 	BT_DBG("hcon %p", hcon);
5372 
5373 	if (!conn)
5374 		return HCI_ERROR_REMOTE_USER_TERM;
5375 	return conn->disc_reason;
5376 }
5377 
5378 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5379 {
5380 	BT_DBG("hcon %p reason %d", hcon, reason);
5381 
5382 	l2cap_conn_del(hcon, bt_to_errno(reason));
5383 }
5384 
5385 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5386 {
5387 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5388 		return;
5389 
5390 	if (encrypt == 0x00) {
5391 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
5392 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5393 		} else if (chan->sec_level == BT_SECURITY_HIGH)
5394 			l2cap_chan_close(chan, ECONNREFUSED);
5395 	} else {
5396 		if (chan->sec_level == BT_SECURITY_MEDIUM)
5397 			__clear_chan_timer(chan);
5398 	}
5399 }
5400 
/* HCI security (authentication/encryption) change callback.  Walks all
 * channels on the connection and advances each one according to its
 * state: LE channels become ready, pending outgoing connects are sent
 * or timed out, and deferred incoming connects (BT_CONNECT2) are
 * answered with a connection response.  Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		/* Encrypted LE link: start SMP key distribution and stop
		 * the pairing security timer.
		 */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP fixed channels are not subject to link security */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* A connect request is already in flight for this channel */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
						chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security done for an outgoing connect: send the
			 * request now, or give up after a disconnect timeout.
			 */
			if (!status) {
				l2cap_send_conn_req(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect that was waiting on security:
			 * build and send the deferred connection response.
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must still authorize */
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);

			/* On success, immediately start configuration */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
5518 
/* Entry point for ACL data from HCI.  Reassembles L2CAP frames that
 * arrive split across multiple ACL packets: a start packet (no
 * ACL_CONT flag) carries the basic L2CAP header giving the total
 * length; continuation packets are appended to conn->rx_skb until the
 * frame is complete, then handed to l2cap_recv_frame().  Malformed
 * sequences mark the connection unreliable and drop the fragment.
 * The input skb is always consumed.  Always returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start while reassembly is pending means the tail
		 * of the previous frame was lost - discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		/* Bytes still missing before the frame is complete */
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start frame */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
5610 
5611 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5612 {
5613 	struct l2cap_chan *c;
5614 
5615 	read_lock(&chan_list_lock);
5616 
5617 	list_for_each_entry(c, &chan_list, global_l) {
5618 		struct sock *sk = c->sk;
5619 
5620 		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5621 					batostr(&bt_sk(sk)->src),
5622 					batostr(&bt_sk(sk)->dst),
5623 					c->state, __le16_to_cpu(c->psm),
5624 					c->scid, c->dcid, c->imtu, c->omtu,
5625 					c->sec_level, c->mode);
5626 	}
5627 
5628 	read_unlock(&chan_list_lock);
5629 
5630 	return 0;
5631 }
5632 
/* debugfs open callback: bind the seq_file single-show handler */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
5637 
/* File operations for the read-only "l2cap" debugfs entry */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
5644 
/* Dentry of the debugfs entry, kept for removal in l2cap_exit() */
static struct dentry *l2cap_debugfs;
5646 
5647 int __init l2cap_init(void)
5648 {
5649 	int err;
5650 
5651 	err = l2cap_init_sockets();
5652 	if (err < 0)
5653 		return err;
5654 
5655 	if (bt_debugfs) {
5656 		l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5657 					bt_debugfs, NULL, &l2cap_debugfs_fops);
5658 		if (!l2cap_debugfs)
5659 			BT_ERR("Failed to create L2CAP debug file");
5660 	}
5661 
5662 	return 0;
5663 }
5664 
/* Module teardown: remove the debugfs entry and unregister sockets */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
5670 
/* Runtime-writable module parameter to force basic mode channels */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
5673