xref: /openbmc/linux/net/bluetooth/l2cap_core.c (revision 930beb5a)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43 
44 bool disable_ertm;
45 
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
48 
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
51 
52 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
53 				       u8 code, u8 ident, u16 dlen, void *data);
54 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
55 			   void *data);
56 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
57 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
58 
59 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
60 		     struct sk_buff_head *skbs, u8 event);
61 
62 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
63 {
64 	if (hcon->type == LE_LINK) {
65 		if (type == ADDR_LE_DEV_PUBLIC)
66 			return BDADDR_LE_PUBLIC;
67 		else
68 			return BDADDR_LE_RANDOM;
69 	}
70 
71 	return BDADDR_BREDR;
72 }
73 
74 /* ---- L2CAP channels ---- */
75 
76 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
77 						   u16 cid)
78 {
79 	struct l2cap_chan *c;
80 
81 	list_for_each_entry(c, &conn->chan_l, list) {
82 		if (c->dcid == cid)
83 			return c;
84 	}
85 	return NULL;
86 }
87 
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
89 						   u16 cid)
90 {
91 	struct l2cap_chan *c;
92 
93 	list_for_each_entry(c, &conn->chan_l, list) {
94 		if (c->scid == cid)
95 			return c;
96 	}
97 	return NULL;
98 }
99 
100 /* Find channel with given SCID.
101  * Returns locked channel. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
103 						 u16 cid)
104 {
105 	struct l2cap_chan *c;
106 
107 	mutex_lock(&conn->chan_lock);
108 	c = __l2cap_get_chan_by_scid(conn, cid);
109 	if (c)
110 		l2cap_chan_lock(c);
111 	mutex_unlock(&conn->chan_lock);
112 
113 	return c;
114 }
115 
116 /* Find channel with given DCID.
117  * Returns locked channel.
118  */
119 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
120 						 u16 cid)
121 {
122 	struct l2cap_chan *c;
123 
124 	mutex_lock(&conn->chan_lock);
125 	c = __l2cap_get_chan_by_dcid(conn, cid);
126 	if (c)
127 		l2cap_chan_lock(c);
128 	mutex_unlock(&conn->chan_lock);
129 
130 	return c;
131 }
132 
133 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
134 						    u8 ident)
135 {
136 	struct l2cap_chan *c;
137 
138 	list_for_each_entry(c, &conn->chan_l, list) {
139 		if (c->ident == ident)
140 			return c;
141 	}
142 	return NULL;
143 }
144 
145 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
146 						  u8 ident)
147 {
148 	struct l2cap_chan *c;
149 
150 	mutex_lock(&conn->chan_lock);
151 	c = __l2cap_get_chan_by_ident(conn, ident);
152 	if (c)
153 		l2cap_chan_lock(c);
154 	mutex_unlock(&conn->chan_lock);
155 
156 	return c;
157 }
158 
159 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
160 {
161 	struct l2cap_chan *c;
162 
163 	list_for_each_entry(c, &chan_list, global_l) {
164 		if (c->sport == psm && !bacmp(&c->src, src))
165 			return c;
166 	}
167 	return NULL;
168 }
169 
170 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
171 {
172 	int err;
173 
174 	write_lock(&chan_list_lock);
175 
176 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
177 		err = -EADDRINUSE;
178 		goto done;
179 	}
180 
181 	if (psm) {
182 		chan->psm = psm;
183 		chan->sport = psm;
184 		err = 0;
185 	} else {
186 		u16 p;
187 
188 		err = -EINVAL;
189 		for (p = 0x1001; p < 0x1100; p += 2)
190 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
191 				chan->psm   = cpu_to_le16(p);
192 				chan->sport = cpu_to_le16(p);
193 				err = 0;
194 				break;
195 			}
196 	}
197 
198 done:
199 	write_unlock(&chan_list_lock);
200 	return err;
201 }
202 
203 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
204 {
205 	write_lock(&chan_list_lock);
206 
207 	chan->scid = scid;
208 
209 	write_unlock(&chan_list_lock);
210 
211 	return 0;
212 }
213 
214 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
215 {
216 	u16 cid = L2CAP_CID_DYN_START;
217 
218 	for (; cid < L2CAP_CID_DYN_END; cid++) {
219 		if (!__l2cap_get_chan_by_scid(conn, cid))
220 			return cid;
221 	}
222 
223 	return 0;
224 }
225 
226 static void l2cap_state_change(struct l2cap_chan *chan, int state)
227 {
228 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
229 	       state_to_string(state));
230 
231 	chan->state = state;
232 	chan->ops->state_change(chan, state, 0);
233 }
234 
235 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
236 						int state, int err)
237 {
238 	chan->state = state;
239 	chan->ops->state_change(chan, chan->state, err);
240 }
241 
242 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
243 {
244 	chan->ops->state_change(chan, chan->state, err);
245 }
246 
247 static void __set_retrans_timer(struct l2cap_chan *chan)
248 {
249 	if (!delayed_work_pending(&chan->monitor_timer) &&
250 	    chan->retrans_timeout) {
251 		l2cap_set_timer(chan, &chan->retrans_timer,
252 				msecs_to_jiffies(chan->retrans_timeout));
253 	}
254 }
255 
256 static void __set_monitor_timer(struct l2cap_chan *chan)
257 {
258 	__clear_retrans_timer(chan);
259 	if (chan->monitor_timeout) {
260 		l2cap_set_timer(chan, &chan->monitor_timer,
261 				msecs_to_jiffies(chan->monitor_timeout));
262 	}
263 }
264 
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
266 					       u16 seq)
267 {
268 	struct sk_buff *skb;
269 
270 	skb_queue_walk(head, skb) {
271 		if (bt_cb(skb)->control.txseq == seq)
272 			return skb;
273 	}
274 
275 	return NULL;
276 }
277 
278 /* ---- L2CAP sequence number lists ---- */
279 
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281  * SREJ requests that are received and for frames that are to be
282  * retransmitted. These seq_list functions implement a singly-linked
283  * list in an array, where membership in the list can also be checked
284  * in constant time. Items can also be added to the tail of the list
285  * and removed from the head in constant time, without further memory
286  * allocs or frees.
287  */
288 
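/* A minimal usage sketch (illustrative only; "list", "tx_win" and "seq" are
 * hypothetical locals, the helpers are the functions defined below):
 *
 *	struct l2cap_seq_list list;
 *	u16 seq;
 *
 *	if (l2cap_seq_list_init(&list, tx_win))
 *		return -ENOMEM;
 *	l2cap_seq_list_append(&list, 5);
 *	if (l2cap_seq_list_contains(&list, 5))
 *		seq = l2cap_seq_list_pop(&list);
 *	l2cap_seq_list_free(&list);
 *
 * The append, contains and pop calls all run in constant time; pop removes
 * and returns the head of the list, here 5.
 */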
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
290 {
291 	size_t alloc_size, i;
292 
293 	/* Allocated size is a power of 2 to map sequence numbers
294 	 * (which may be up to 14 bits) into a smaller array that is
295 	 * sized for the negotiated ERTM transmit windows.
296 	 */
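	/* For example, a negotiated transmit window of 63 gives alloc_size 64
	 * and mask 0x3f, so sequence number 100 lands in slot
	 * 100 & 0x3f == 36 (the numbers here are only illustrative).
	 */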
297 	alloc_size = roundup_pow_of_two(size);
298 
299 	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
300 	if (!seq_list->list)
301 		return -ENOMEM;
302 
303 	seq_list->mask = alloc_size - 1;
304 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 	for (i = 0; i < alloc_size; i++)
307 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
308 
309 	return 0;
310 }
311 
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
313 {
314 	kfree(seq_list->list);
315 }
316 
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
318 					   u16 seq)
319 {
320 	/* Constant-time check for list membership */
321 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
322 }
323 
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
325 {
326 	u16 mask = seq_list->mask;
327 
328 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 		/* In case someone tries to pop the head of an empty list */
330 		return L2CAP_SEQ_LIST_CLEAR;
331 	} else if (seq_list->head == seq) {
332 		/* Head can be removed in constant time */
333 		seq_list->head = seq_list->list[seq & mask];
334 		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
335 
336 		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
339 		}
340 	} else {
341 		/* Walk the list to find the sequence number */
342 		u16 prev = seq_list->head;
343 		while (seq_list->list[prev & mask] != seq) {
344 			prev = seq_list->list[prev & mask];
345 			if (prev == L2CAP_SEQ_LIST_TAIL)
346 				return L2CAP_SEQ_LIST_CLEAR;
347 		}
348 
349 		/* Unlink the number from the list and clear it */
350 		seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 		if (seq_list->tail == seq)
353 			seq_list->tail = prev;
354 	}
355 	return seq;
356 }
357 
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
359 {
360 	/* Remove the head in constant time */
361 	return l2cap_seq_list_remove(seq_list, seq_list->head);
362 }
363 
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
365 {
366 	u16 i;
367 
368 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
369 		return;
370 
371 	for (i = 0; i <= seq_list->mask; i++)
372 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
373 
374 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
376 }
377 
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
379 {
380 	u16 mask = seq_list->mask;
381 
382 	/* All appends happen in constant time */
383 
384 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
385 		return;
386 
387 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 		seq_list->head = seq;
389 	else
390 		seq_list->list[seq_list->tail & mask] = seq;
391 
392 	seq_list->tail = seq;
393 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
394 }
395 
396 static void l2cap_chan_timeout(struct work_struct *work)
397 {
398 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
399 					       chan_timer.work);
400 	struct l2cap_conn *conn = chan->conn;
401 	int reason;
402 
403 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
404 
405 	mutex_lock(&conn->chan_lock);
406 	l2cap_chan_lock(chan);
407 
408 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
409 		reason = ECONNREFUSED;
410 	else if (chan->state == BT_CONNECT &&
411 		 chan->sec_level != BT_SECURITY_SDP)
412 		reason = ECONNREFUSED;
413 	else
414 		reason = ETIMEDOUT;
415 
416 	l2cap_chan_close(chan, reason);
417 
418 	l2cap_chan_unlock(chan);
419 
420 	chan->ops->close(chan);
421 	mutex_unlock(&conn->chan_lock);
422 
423 	l2cap_chan_put(chan);
424 }
425 
426 struct l2cap_chan *l2cap_chan_create(void)
427 {
428 	struct l2cap_chan *chan;
429 
430 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
431 	if (!chan)
432 		return NULL;
433 
434 	mutex_init(&chan->lock);
435 
436 	write_lock(&chan_list_lock);
437 	list_add(&chan->global_l, &chan_list);
438 	write_unlock(&chan_list_lock);
439 
440 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
441 
442 	chan->state = BT_OPEN;
443 
444 	kref_init(&chan->kref);
445 
446 	/* This flag is cleared in l2cap_chan_ready() */
447 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
448 
449 	BT_DBG("chan %p", chan);
450 
451 	return chan;
452 }
453 
454 static void l2cap_chan_destroy(struct kref *kref)
455 {
456 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
457 
458 	BT_DBG("chan %p", chan);
459 
460 	write_lock(&chan_list_lock);
461 	list_del(&chan->global_l);
462 	write_unlock(&chan_list_lock);
463 
464 	kfree(chan);
465 }
466 
467 void l2cap_chan_hold(struct l2cap_chan *c)
468 {
469 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
470 
471 	kref_get(&c->kref);
472 }
473 
474 void l2cap_chan_put(struct l2cap_chan *c)
475 {
476 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
477 
478 	kref_put(&c->kref, l2cap_chan_destroy);
479 }
480 
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
482 {
483 	chan->fcs  = L2CAP_FCS_CRC16;
484 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 	chan->sec_level = BT_SECURITY_LOW;
489 
490 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
491 }
492 
493 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
494 {
495 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
496 	       __le16_to_cpu(chan->psm), chan->dcid);
497 
498 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
499 
500 	chan->conn = conn;
501 
502 	switch (chan->chan_type) {
503 	case L2CAP_CHAN_CONN_ORIENTED:
504 		if (conn->hcon->type == LE_LINK) {
505 			/* LE connection */
506 			chan->omtu = L2CAP_DEFAULT_MTU;
507 			if (chan->dcid == L2CAP_CID_ATT)
508 				chan->scid = L2CAP_CID_ATT;
509 			else
510 				chan->scid = l2cap_alloc_cid(conn);
511 		} else {
512 			/* Alloc CID for connection-oriented socket */
513 			chan->scid = l2cap_alloc_cid(conn);
514 			chan->omtu = L2CAP_DEFAULT_MTU;
515 		}
516 		break;
517 
518 	case L2CAP_CHAN_CONN_LESS:
519 		/* Connectionless socket */
520 		chan->scid = L2CAP_CID_CONN_LESS;
521 		chan->dcid = L2CAP_CID_CONN_LESS;
522 		chan->omtu = L2CAP_DEFAULT_MTU;
523 		break;
524 
525 	case L2CAP_CHAN_CONN_FIX_A2MP:
526 		chan->scid = L2CAP_CID_A2MP;
527 		chan->dcid = L2CAP_CID_A2MP;
528 		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
529 		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
530 		break;
531 
532 	default:
533 		/* Raw socket can send/recv signalling messages only */
534 		chan->scid = L2CAP_CID_SIGNALING;
535 		chan->dcid = L2CAP_CID_SIGNALING;
536 		chan->omtu = L2CAP_DEFAULT_MTU;
537 	}
538 
539 	chan->local_id		= L2CAP_BESTEFFORT_ID;
540 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
541 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
542 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
543 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
544 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
545 
546 	l2cap_chan_hold(chan);
547 
548 	hci_conn_hold(conn->hcon);
549 
550 	list_add(&chan->list, &conn->chan_l);
551 }
552 
553 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
554 {
555 	mutex_lock(&conn->chan_lock);
556 	__l2cap_chan_add(conn, chan);
557 	mutex_unlock(&conn->chan_lock);
558 }
559 
560 void l2cap_chan_del(struct l2cap_chan *chan, int err)
561 {
562 	struct l2cap_conn *conn = chan->conn;
563 
564 	__clear_chan_timer(chan);
565 
566 	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
567 
568 	if (conn) {
569 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
570 		/* Delete from channel list */
571 		list_del(&chan->list);
572 
573 		l2cap_chan_put(chan);
574 
575 		chan->conn = NULL;
576 
577 		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
578 			hci_conn_drop(conn->hcon);
579 
580 		if (mgr && mgr->bredr_chan == chan)
581 			mgr->bredr_chan = NULL;
582 	}
583 
584 	if (chan->hs_hchan) {
585 		struct hci_chan *hs_hchan = chan->hs_hchan;
586 
587 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
588 		amp_disconnect_logical_link(hs_hchan);
589 	}
590 
591 	chan->ops->teardown(chan, err);
592 
593 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
594 		return;
595 
596 	switch (chan->mode) {
597 	case L2CAP_MODE_BASIC:
598 		break;
599 
600 	case L2CAP_MODE_ERTM:
601 		__clear_retrans_timer(chan);
602 		__clear_monitor_timer(chan);
603 		__clear_ack_timer(chan);
604 
605 		skb_queue_purge(&chan->srej_q);
606 
607 		l2cap_seq_list_free(&chan->srej_list);
608 		l2cap_seq_list_free(&chan->retrans_list);
609 
610 		/* fall through */
611 
612 	case L2CAP_MODE_STREAMING:
613 		skb_queue_purge(&chan->tx_q);
614 		break;
615 	}
616 
617 	return;
618 }
619 
620 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
621 {
622 	struct l2cap_conn *conn = chan->conn;
623 
624 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
625 
626 	switch (chan->state) {
627 	case BT_LISTEN:
628 		chan->ops->teardown(chan, 0);
629 		break;
630 
631 	case BT_CONNECTED:
632 	case BT_CONFIG:
633 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
634 		    conn->hcon->type == ACL_LINK) {
635 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
636 			l2cap_send_disconn_req(chan, reason);
637 		} else
638 			l2cap_chan_del(chan, reason);
639 		break;
640 
641 	case BT_CONNECT2:
642 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
643 		    conn->hcon->type == ACL_LINK) {
644 			struct l2cap_conn_rsp rsp;
645 			__u16 result;
646 
647 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
648 				result = L2CAP_CR_SEC_BLOCK;
649 			else
650 				result = L2CAP_CR_BAD_PSM;
651 
652 			l2cap_state_change(chan, BT_DISCONN);
653 
654 			rsp.scid   = cpu_to_le16(chan->dcid);
655 			rsp.dcid   = cpu_to_le16(chan->scid);
656 			rsp.result = cpu_to_le16(result);
657 			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
658 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
659 				       sizeof(rsp), &rsp);
660 		}
661 
662 		l2cap_chan_del(chan, reason);
663 		break;
664 
665 	case BT_CONNECT:
666 	case BT_DISCONN:
667 		l2cap_chan_del(chan, reason);
668 		break;
669 
670 	default:
671 		chan->ops->teardown(chan, 0);
672 		break;
673 	}
674 }
675 
676 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
677 {
678 	switch (chan->chan_type) {
679 	case L2CAP_CHAN_RAW:
680 		switch (chan->sec_level) {
681 		case BT_SECURITY_HIGH:
682 			return HCI_AT_DEDICATED_BONDING_MITM;
683 		case BT_SECURITY_MEDIUM:
684 			return HCI_AT_DEDICATED_BONDING;
685 		default:
686 			return HCI_AT_NO_BONDING;
687 		}
688 		break;
689 	case L2CAP_CHAN_CONN_LESS:
690 		if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
691 			if (chan->sec_level == BT_SECURITY_LOW)
692 				chan->sec_level = BT_SECURITY_SDP;
693 		}
694 		if (chan->sec_level == BT_SECURITY_HIGH)
695 			return HCI_AT_NO_BONDING_MITM;
696 		else
697 			return HCI_AT_NO_BONDING;
698 		break;
699 	case L2CAP_CHAN_CONN_ORIENTED:
700 		if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
701 			if (chan->sec_level == BT_SECURITY_LOW)
702 				chan->sec_level = BT_SECURITY_SDP;
703 
704 			if (chan->sec_level == BT_SECURITY_HIGH)
705 				return HCI_AT_NO_BONDING_MITM;
706 			else
707 				return HCI_AT_NO_BONDING;
708 		}
709 		/* fall through */
710 	default:
711 		switch (chan->sec_level) {
712 		case BT_SECURITY_HIGH:
713 			return HCI_AT_GENERAL_BONDING_MITM;
714 		case BT_SECURITY_MEDIUM:
715 			return HCI_AT_GENERAL_BONDING;
716 		default:
717 			return HCI_AT_NO_BONDING;
718 		}
719 		break;
720 	}
721 }
722 
723 /* Service level security */
724 int l2cap_chan_check_security(struct l2cap_chan *chan)
725 {
726 	struct l2cap_conn *conn = chan->conn;
727 	__u8 auth_type;
728 
729 	auth_type = l2cap_get_auth_type(chan);
730 
731 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
732 }
733 
734 static u8 l2cap_get_ident(struct l2cap_conn *conn)
735 {
736 	u8 id;
737 
738 	/* Get next available identifier.
739 	 *    1 - 128 are used by kernel.
740 	 *  129 - 199 are reserved.
741 	 *  200 - 254 are used by utilities like l2ping, etc.
742 	 */
743 
744 	spin_lock(&conn->lock);
745 
746 	if (++conn->tx_ident > 128)
747 		conn->tx_ident = 1;
748 
749 	id = conn->tx_ident;
750 
751 	spin_unlock(&conn->lock);
752 
753 	return id;
754 }
755 
756 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
757 			   void *data)
758 {
759 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
760 	u8 flags;
761 
762 	BT_DBG("code 0x%2.2x", code);
763 
764 	if (!skb)
765 		return;
766 
767 	if (lmp_no_flush_capable(conn->hcon->hdev))
768 		flags = ACL_START_NO_FLUSH;
769 	else
770 		flags = ACL_START;
771 
772 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
773 	skb->priority = HCI_PRIO_MAX;
774 
775 	hci_send_acl(conn->hchan, skb, flags);
776 }
777 
778 static bool __chan_is_moving(struct l2cap_chan *chan)
779 {
780 	return chan->move_state != L2CAP_MOVE_STABLE &&
781 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
782 }
783 
784 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
785 {
786 	struct hci_conn *hcon = chan->conn->hcon;
787 	u16 flags;
788 
789 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
790 	       skb->priority);
791 
792 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
793 		if (chan->hs_hchan)
794 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
795 		else
796 			kfree_skb(skb);
797 
798 		return;
799 	}
800 
801 	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
802 	    lmp_no_flush_capable(hcon->hdev))
803 		flags = ACL_START_NO_FLUSH;
804 	else
805 		flags = ACL_START;
806 
807 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
808 	hci_send_acl(chan->conn->hchan, skb, flags);
809 }
810 
811 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
812 {
813 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
814 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
815 
816 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
817 		/* S-Frame */
818 		control->sframe = 1;
819 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
820 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
821 
822 		control->sar = 0;
823 		control->txseq = 0;
824 	} else {
825 		/* I-Frame */
826 		control->sframe = 0;
827 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
828 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
829 
830 		control->poll = 0;
831 		control->super = 0;
832 	}
833 }
834 
835 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
836 {
837 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
838 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
839 
840 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
841 		/* S-Frame */
842 		control->sframe = 1;
843 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
844 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
845 
846 		control->sar = 0;
847 		control->txseq = 0;
848 	} else {
849 		/* I-Frame */
850 		control->sframe = 0;
851 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
852 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
853 
854 		control->poll = 0;
855 		control->super = 0;
856 	}
857 }
858 
859 static inline void __unpack_control(struct l2cap_chan *chan,
860 				    struct sk_buff *skb)
861 {
862 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
863 		__unpack_extended_control(get_unaligned_le32(skb->data),
864 					  &bt_cb(skb)->control);
865 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
866 	} else {
867 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
868 					  &bt_cb(skb)->control);
869 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
870 	}
871 }
872 
873 static u32 __pack_extended_control(struct l2cap_ctrl *control)
874 {
875 	u32 packed;
876 
877 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
878 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
879 
880 	if (control->sframe) {
881 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
882 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
883 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
884 	} else {
885 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
886 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
887 	}
888 
889 	return packed;
890 }
891 
892 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
893 {
894 	u16 packed;
895 
896 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
897 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
898 
899 	if (control->sframe) {
900 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
901 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
902 		packed |= L2CAP_CTRL_FRAME_TYPE;
903 	} else {
904 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
905 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
906 	}
907 
908 	return packed;
909 }
910 
911 static inline void __pack_control(struct l2cap_chan *chan,
912 				  struct l2cap_ctrl *control,
913 				  struct sk_buff *skb)
914 {
915 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
916 		put_unaligned_le32(__pack_extended_control(control),
917 				   skb->data + L2CAP_HDR_SIZE);
918 	} else {
919 		put_unaligned_le16(__pack_enhanced_control(control),
920 				   skb->data + L2CAP_HDR_SIZE);
921 	}
922 }
923 
924 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
925 {
926 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
927 		return L2CAP_EXT_HDR_SIZE;
928 	else
929 		return L2CAP_ENH_HDR_SIZE;
930 }
931 
932 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
933 					       u32 control)
934 {
935 	struct sk_buff *skb;
936 	struct l2cap_hdr *lh;
937 	int hlen = __ertm_hdr_size(chan);
938 
939 	if (chan->fcs == L2CAP_FCS_CRC16)
940 		hlen += L2CAP_FCS_SIZE;
941 
942 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
943 
944 	if (!skb)
945 		return ERR_PTR(-ENOMEM);
946 
947 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
948 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
949 	lh->cid = cpu_to_le16(chan->dcid);
950 
951 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
952 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
953 	else
954 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
955 
956 	if (chan->fcs == L2CAP_FCS_CRC16) {
957 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
958 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
959 	}
960 
961 	skb->priority = HCI_PRIO_MAX;
962 	return skb;
963 }
964 
965 static void l2cap_send_sframe(struct l2cap_chan *chan,
966 			      struct l2cap_ctrl *control)
967 {
968 	struct sk_buff *skb;
969 	u32 control_field;
970 
971 	BT_DBG("chan %p, control %p", chan, control);
972 
973 	if (!control->sframe)
974 		return;
975 
976 	if (__chan_is_moving(chan))
977 		return;
978 
979 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
980 	    !control->poll)
981 		control->final = 1;
982 
983 	if (control->super == L2CAP_SUPER_RR)
984 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
985 	else if (control->super == L2CAP_SUPER_RNR)
986 		set_bit(CONN_RNR_SENT, &chan->conn_state);
987 
988 	if (control->super != L2CAP_SUPER_SREJ) {
989 		chan->last_acked_seq = control->reqseq;
990 		__clear_ack_timer(chan);
991 	}
992 
993 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
994 	       control->final, control->poll, control->super);
995 
996 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
997 		control_field = __pack_extended_control(control);
998 	else
999 		control_field = __pack_enhanced_control(control);
1000 
1001 	skb = l2cap_create_sframe_pdu(chan, control_field);
1002 	if (!IS_ERR(skb))
1003 		l2cap_do_send(chan, skb);
1004 }
1005 
1006 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1007 {
1008 	struct l2cap_ctrl control;
1009 
1010 	BT_DBG("chan %p, poll %d", chan, poll);
1011 
1012 	memset(&control, 0, sizeof(control));
1013 	control.sframe = 1;
1014 	control.poll = poll;
1015 
1016 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1017 		control.super = L2CAP_SUPER_RNR;
1018 	else
1019 		control.super = L2CAP_SUPER_RR;
1020 
1021 	control.reqseq = chan->buffer_seq;
1022 	l2cap_send_sframe(chan, &control);
1023 }
1024 
1025 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1026 {
1027 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1028 }
1029 
1030 static bool __amp_capable(struct l2cap_chan *chan)
1031 {
1032 	struct l2cap_conn *conn = chan->conn;
1033 	struct hci_dev *hdev;
1034 	bool amp_available = false;
1035 
1036 	if (!conn->hs_enabled)
1037 		return false;
1038 
1039 	if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1040 		return false;
1041 
1042 	read_lock(&hci_dev_list_lock);
1043 	list_for_each_entry(hdev, &hci_dev_list, list) {
1044 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1045 		    test_bit(HCI_UP, &hdev->flags)) {
1046 			amp_available = true;
1047 			break;
1048 		}
1049 	}
1050 	read_unlock(&hci_dev_list_lock);
1051 
1052 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1053 		return amp_available;
1054 
1055 	return false;
1056 }
1057 
1058 static bool l2cap_check_efs(struct l2cap_chan *chan)
1059 {
1060 	/* Check EFS parameters */
1061 	return true;
1062 }
1063 
1064 void l2cap_send_conn_req(struct l2cap_chan *chan)
1065 {
1066 	struct l2cap_conn *conn = chan->conn;
1067 	struct l2cap_conn_req req;
1068 
1069 	req.scid = cpu_to_le16(chan->scid);
1070 	req.psm  = chan->psm;
1071 
1072 	chan->ident = l2cap_get_ident(conn);
1073 
1074 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1075 
1076 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1077 }
1078 
1079 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1080 {
1081 	struct l2cap_create_chan_req req;
1082 	req.scid = cpu_to_le16(chan->scid);
1083 	req.psm  = chan->psm;
1084 	req.amp_id = amp_id;
1085 
1086 	chan->ident = l2cap_get_ident(chan->conn);
1087 
1088 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1089 		       sizeof(req), &req);
1090 }
1091 
1092 static void l2cap_move_setup(struct l2cap_chan *chan)
1093 {
1094 	struct sk_buff *skb;
1095 
1096 	BT_DBG("chan %p", chan);
1097 
1098 	if (chan->mode != L2CAP_MODE_ERTM)
1099 		return;
1100 
1101 	__clear_retrans_timer(chan);
1102 	__clear_monitor_timer(chan);
1103 	__clear_ack_timer(chan);
1104 
1105 	chan->retry_count = 0;
1106 	skb_queue_walk(&chan->tx_q, skb) {
1107 		if (bt_cb(skb)->control.retries)
1108 			bt_cb(skb)->control.retries = 1;
1109 		else
1110 			break;
1111 	}
1112 
1113 	chan->expected_tx_seq = chan->buffer_seq;
1114 
1115 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1116 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1117 	l2cap_seq_list_clear(&chan->retrans_list);
1118 	l2cap_seq_list_clear(&chan->srej_list);
1119 	skb_queue_purge(&chan->srej_q);
1120 
1121 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1122 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1123 
1124 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1125 }
1126 
1127 static void l2cap_move_done(struct l2cap_chan *chan)
1128 {
1129 	u8 move_role = chan->move_role;
1130 	BT_DBG("chan %p", chan);
1131 
1132 	chan->move_state = L2CAP_MOVE_STABLE;
1133 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1134 
1135 	if (chan->mode != L2CAP_MODE_ERTM)
1136 		return;
1137 
1138 	switch (move_role) {
1139 	case L2CAP_MOVE_ROLE_INITIATOR:
1140 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1141 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1142 		break;
1143 	case L2CAP_MOVE_ROLE_RESPONDER:
1144 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1145 		break;
1146 	}
1147 }
1148 
1149 static void l2cap_chan_ready(struct l2cap_chan *chan)
1150 {
1151 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1152 	chan->conf_state = 0;
1153 	__clear_chan_timer(chan);
1154 
1155 	chan->state = BT_CONNECTED;
1156 
1157 	chan->ops->ready(chan);
1158 }
1159 
1160 static void l2cap_start_connection(struct l2cap_chan *chan)
1161 {
1162 	if (__amp_capable(chan)) {
1163 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1164 		a2mp_discover_amp(chan);
1165 	} else {
1166 		l2cap_send_conn_req(chan);
1167 	}
1168 }
1169 
1170 static void l2cap_do_start(struct l2cap_chan *chan)
1171 {
1172 	struct l2cap_conn *conn = chan->conn;
1173 
1174 	if (conn->hcon->type == LE_LINK) {
1175 		l2cap_chan_ready(chan);
1176 		return;
1177 	}
1178 
1179 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1180 		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1181 			return;
1182 
1183 		if (l2cap_chan_check_security(chan) &&
1184 		    __l2cap_no_conn_pending(chan)) {
1185 			l2cap_start_connection(chan);
1186 		}
1187 	} else {
1188 		struct l2cap_info_req req;
1189 		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1190 
1191 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1192 		conn->info_ident = l2cap_get_ident(conn);
1193 
1194 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1195 
1196 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1197 			       sizeof(req), &req);
1198 	}
1199 }
1200 
1201 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1202 {
1203 	u32 local_feat_mask = l2cap_feat_mask;
1204 	if (!disable_ertm)
1205 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1206 
1207 	switch (mode) {
1208 	case L2CAP_MODE_ERTM:
1209 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1210 	case L2CAP_MODE_STREAMING:
1211 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1212 	default:
1213 		return 0x00;
1214 	}
1215 }
1216 
1217 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1218 {
1219 	struct l2cap_conn *conn = chan->conn;
1220 	struct l2cap_disconn_req req;
1221 
1222 	if (!conn)
1223 		return;
1224 
1225 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1226 		__clear_retrans_timer(chan);
1227 		__clear_monitor_timer(chan);
1228 		__clear_ack_timer(chan);
1229 	}
1230 
1231 	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1232 		l2cap_state_change(chan, BT_DISCONN);
1233 		return;
1234 	}
1235 
1236 	req.dcid = cpu_to_le16(chan->dcid);
1237 	req.scid = cpu_to_le16(chan->scid);
1238 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1239 		       sizeof(req), &req);
1240 
1241 	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1242 }
1243 
1244 /* ---- L2CAP connections ---- */
1245 static void l2cap_conn_start(struct l2cap_conn *conn)
1246 {
1247 	struct l2cap_chan *chan, *tmp;
1248 
1249 	BT_DBG("conn %p", conn);
1250 
1251 	mutex_lock(&conn->chan_lock);
1252 
1253 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1254 		l2cap_chan_lock(chan);
1255 
1256 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1257 			l2cap_chan_unlock(chan);
1258 			continue;
1259 		}
1260 
1261 		if (chan->state == BT_CONNECT) {
1262 			if (!l2cap_chan_check_security(chan) ||
1263 			    !__l2cap_no_conn_pending(chan)) {
1264 				l2cap_chan_unlock(chan);
1265 				continue;
1266 			}
1267 
1268 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask) &&
1269 			    test_bit(CONF_STATE2_DEVICE,
1270 					&chan->conf_state)) {
1271 				l2cap_chan_close(chan, ECONNRESET);
1272 				l2cap_chan_unlock(chan);
1273 				continue;
1274 			}
1275 
1276 			l2cap_start_connection(chan);
1277 
1278 		} else if (chan->state == BT_CONNECT2) {
1279 			struct l2cap_conn_rsp rsp;
1280 			char buf[128];
1281 			rsp.scid = cpu_to_le16(chan->dcid);
1282 			rsp.dcid = cpu_to_le16(chan->scid);
1283 
1284 			if (l2cap_chan_check_security(chan)) {
1285 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1286 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1287 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1288 					chan->ops->defer(chan);
1289 
1290 				} else {
1291 					l2cap_state_change(chan, BT_CONFIG);
1292 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1293 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1294 				}
1295 			} else {
1296 				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1297 				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1298 			}
1299 
1300 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1301 				       sizeof(rsp), &rsp);
1302 
1303 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1304 			    rsp.result != L2CAP_CR_SUCCESS) {
1305 				l2cap_chan_unlock(chan);
1306 				continue;
1307 			}
1308 
1309 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1310 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1311 				       l2cap_build_conf_req(chan, buf), buf);
1312 			chan->num_conf_req++;
1313 		}
1314 
1315 		l2cap_chan_unlock(chan);
1316 	}
1317 
1318 	mutex_unlock(&conn->chan_lock);
1319 }
1320 
1321 /* Find socket with cid and source/destination bdaddr.
1322  * Returns closest match.
1323  */
1324 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1325 						    bdaddr_t *src,
1326 						    bdaddr_t *dst)
1327 {
1328 	struct l2cap_chan *c, *c1 = NULL;
1329 
1330 	read_lock(&chan_list_lock);
1331 
1332 	list_for_each_entry(c, &chan_list, global_l) {
1333 		if (state && c->state != state)
1334 			continue;
1335 
1336 		if (c->scid == cid) {
1337 			int src_match, dst_match;
1338 			int src_any, dst_any;
1339 
1340 			/* Exact match. */
1341 			src_match = !bacmp(&c->src, src);
1342 			dst_match = !bacmp(&c->dst, dst);
1343 			if (src_match && dst_match) {
1344 				read_unlock(&chan_list_lock);
1345 				return c;
1346 			}
1347 
1348 			/* Closest match */
1349 			src_any = !bacmp(&c->src, BDADDR_ANY);
1350 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
1351 			if ((src_match && dst_any) || (src_any && dst_match) ||
1352 			    (src_any && dst_any))
1353 				c1 = c;
1354 		}
1355 	}
1356 
1357 	read_unlock(&chan_list_lock);
1358 
1359 	return c1;
1360 }
1361 
1362 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1363 {
1364 	struct hci_conn *hcon = conn->hcon;
1365 	struct l2cap_chan *chan, *pchan;
1366 	u8 dst_type;
1367 
1368 	BT_DBG("");
1369 
1370 	/* Check if we have socket listening on cid */
1371 	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1372 					  &hcon->src, &hcon->dst);
1373 	if (!pchan)
1374 		return;
1375 
1376 	/* Client ATT sockets should override the server one */
1377 	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1378 		return;
1379 
1380 	dst_type = bdaddr_type(hcon, hcon->dst_type);
1381 
1382 	/* If device is blocked, do not create a channel for it */
1383 	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
1384 		return;
1385 
1386 	l2cap_chan_lock(pchan);
1387 
1388 	chan = pchan->ops->new_connection(pchan);
1389 	if (!chan)
1390 		goto clean;
1391 
1392 	chan->dcid = L2CAP_CID_ATT;
1393 
1394 	bacpy(&chan->src, &hcon->src);
1395 	bacpy(&chan->dst, &hcon->dst);
1396 	chan->src_type = bdaddr_type(hcon, hcon->src_type);
1397 	chan->dst_type = dst_type;
1398 
1399 	__l2cap_chan_add(conn, chan);
1400 
1401 clean:
1402 	l2cap_chan_unlock(pchan);
1403 }
1404 
1405 static void l2cap_conn_ready(struct l2cap_conn *conn)
1406 {
1407 	struct l2cap_chan *chan;
1408 	struct hci_conn *hcon = conn->hcon;
1409 
1410 	BT_DBG("conn %p", conn);
1411 
1412 	/* For outgoing pairing which doesn't necessarily have an
1413 	 * associated socket (e.g. mgmt_pair_device).
1414 	 */
1415 	if (hcon->out && hcon->type == LE_LINK)
1416 		smp_conn_security(hcon, hcon->pending_sec_level);
1417 
1418 	mutex_lock(&conn->chan_lock);
1419 
1420 	if (hcon->type == LE_LINK)
1421 		l2cap_le_conn_ready(conn);
1422 
1423 	list_for_each_entry(chan, &conn->chan_l, list) {
1424 
1425 		l2cap_chan_lock(chan);
1426 
1427 		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1428 			l2cap_chan_unlock(chan);
1429 			continue;
1430 		}
1431 
1432 		if (hcon->type == LE_LINK) {
1433 			if (smp_conn_security(hcon, chan->sec_level))
1434 				l2cap_chan_ready(chan);
1435 
1436 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1437 			l2cap_chan_ready(chan);
1438 
1439 		} else if (chan->state == BT_CONNECT) {
1440 			l2cap_do_start(chan);
1441 		}
1442 
1443 		l2cap_chan_unlock(chan);
1444 	}
1445 
1446 	mutex_unlock(&conn->chan_lock);
1447 }
1448 
1449 /* Notify sockets that we cannot guarantee reliability anymore */
1450 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1451 {
1452 	struct l2cap_chan *chan;
1453 
1454 	BT_DBG("conn %p", conn);
1455 
1456 	mutex_lock(&conn->chan_lock);
1457 
1458 	list_for_each_entry(chan, &conn->chan_l, list) {
1459 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1460 			l2cap_chan_set_err(chan, err);
1461 	}
1462 
1463 	mutex_unlock(&conn->chan_lock);
1464 }
1465 
1466 static void l2cap_info_timeout(struct work_struct *work)
1467 {
1468 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1469 					       info_timer.work);
1470 
1471 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1472 	conn->info_ident = 0;
1473 
1474 	l2cap_conn_start(conn);
1475 }
1476 
1477 /*
1478  * l2cap_user
1479  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1480  * callback is called during registration. The ->remove callback is called
1481  * during unregistration.
1482  * An l2cap_user object can be unregistered either explicitly or implicitly when
1483  * the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1484  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1485  * External modules must own a reference to the l2cap_conn object if they intend
1486  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1487  * any time if they don't.
1488  */
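
/* A minimal sketch of how an external module would hook in (illustrative
 * only; my_probe(), my_remove() and my_user are hypothetical, their
 * signatures follow the probe/remove callbacks invoked below). The probe
 * callback may start using conn->hcon/conn->hchan; the remove callback
 * must stop using them:
 *
 *	static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *	}
 *
 *	static struct l2cap_user my_user = {
 *		.probe  = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	l2cap_conn_get(conn);
 *	err = l2cap_register_user(conn, &my_user);
 *	...
 *	l2cap_unregister_user(conn, &my_user);
 *	l2cap_conn_put(conn);
 */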
1489 
1490 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1491 {
1492 	struct hci_dev *hdev = conn->hcon->hdev;
1493 	int ret;
1494 
1495 	/* We need to check whether l2cap_conn is registered. If it is not, we
1496 	 * must not register the l2cap_user. l2cap_conn_del() unregisters
1497 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1498 	 * relies on the parent hci_conn object to be locked. This itself relies
1499 	 * on the hci_dev object to be locked. So we must lock the hci device
1500 	 * here, too. */
1501 
1502 	hci_dev_lock(hdev);
1503 
1504 	if (user->list.next || user->list.prev) {
1505 		ret = -EINVAL;
1506 		goto out_unlock;
1507 	}
1508 
1509 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1510 	if (!conn->hchan) {
1511 		ret = -ENODEV;
1512 		goto out_unlock;
1513 	}
1514 
1515 	ret = user->probe(conn, user);
1516 	if (ret)
1517 		goto out_unlock;
1518 
1519 	list_add(&user->list, &conn->users);
1520 	ret = 0;
1521 
1522 out_unlock:
1523 	hci_dev_unlock(hdev);
1524 	return ret;
1525 }
1526 EXPORT_SYMBOL(l2cap_register_user);
1527 
1528 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1529 {
1530 	struct hci_dev *hdev = conn->hcon->hdev;
1531 
1532 	hci_dev_lock(hdev);
1533 
1534 	if (!user->list.next || !user->list.prev)
1535 		goto out_unlock;
1536 
1537 	list_del(&user->list);
1538 	user->list.next = NULL;
1539 	user->list.prev = NULL;
1540 	user->remove(conn, user);
1541 
1542 out_unlock:
1543 	hci_dev_unlock(hdev);
1544 }
1545 EXPORT_SYMBOL(l2cap_unregister_user);
1546 
1547 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1548 {
1549 	struct l2cap_user *user;
1550 
1551 	while (!list_empty(&conn->users)) {
1552 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1553 		list_del(&user->list);
1554 		user->list.next = NULL;
1555 		user->list.prev = NULL;
1556 		user->remove(conn, user);
1557 	}
1558 }
1559 
1560 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1561 {
1562 	struct l2cap_conn *conn = hcon->l2cap_data;
1563 	struct l2cap_chan *chan, *l;
1564 
1565 	if (!conn)
1566 		return;
1567 
1568 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1569 
1570 	kfree_skb(conn->rx_skb);
1571 
1572 	l2cap_unregister_all_users(conn);
1573 
1574 	mutex_lock(&conn->chan_lock);
1575 
1576 	/* Kill channels */
1577 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1578 		l2cap_chan_hold(chan);
1579 		l2cap_chan_lock(chan);
1580 
1581 		l2cap_chan_del(chan, err);
1582 
1583 		l2cap_chan_unlock(chan);
1584 
1585 		chan->ops->close(chan);
1586 		l2cap_chan_put(chan);
1587 	}
1588 
1589 	mutex_unlock(&conn->chan_lock);
1590 
1591 	hci_chan_del(conn->hchan);
1592 
1593 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1594 		cancel_delayed_work_sync(&conn->info_timer);
1595 
1596 	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1597 		cancel_delayed_work_sync(&conn->security_timer);
1598 		smp_chan_destroy(conn);
1599 	}
1600 
1601 	hcon->l2cap_data = NULL;
1602 	conn->hchan = NULL;
1603 	l2cap_conn_put(conn);
1604 }
1605 
1606 static void security_timeout(struct work_struct *work)
1607 {
1608 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1609 					       security_timer.work);
1610 
1611 	BT_DBG("conn %p", conn);
1612 
1613 	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1614 		smp_chan_destroy(conn);
1615 		l2cap_conn_del(conn->hcon, ETIMEDOUT);
1616 	}
1617 }
1618 
1619 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1620 {
1621 	struct l2cap_conn *conn = hcon->l2cap_data;
1622 	struct hci_chan *hchan;
1623 
1624 	if (conn)
1625 		return conn;
1626 
1627 	hchan = hci_chan_create(hcon);
1628 	if (!hchan)
1629 		return NULL;
1630 
1631 	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
1632 	if (!conn) {
1633 		hci_chan_del(hchan);
1634 		return NULL;
1635 	}
1636 
1637 	kref_init(&conn->ref);
1638 	hcon->l2cap_data = conn;
1639 	conn->hcon = hcon;
1640 	hci_conn_get(conn->hcon);
1641 	conn->hchan = hchan;
1642 
1643 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1644 
1645 	switch (hcon->type) {
1646 	case LE_LINK:
1647 		if (hcon->hdev->le_mtu) {
1648 			conn->mtu = hcon->hdev->le_mtu;
1649 			break;
1650 		}
1651 		/* fall through */
1652 	default:
1653 		conn->mtu = hcon->hdev->acl_mtu;
1654 		break;
1655 	}
1656 
1657 	conn->feat_mask = 0;
1658 
1659 	if (hcon->type == ACL_LINK)
1660 		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
1661 					    &hcon->hdev->dev_flags);
1662 
1663 	spin_lock_init(&conn->lock);
1664 	mutex_init(&conn->chan_lock);
1665 
1666 	INIT_LIST_HEAD(&conn->chan_l);
1667 	INIT_LIST_HEAD(&conn->users);
1668 
1669 	if (hcon->type == LE_LINK)
1670 		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1671 	else
1672 		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1673 
1674 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1675 
1676 	return conn;
1677 }
1678 
1679 static void l2cap_conn_free(struct kref *ref)
1680 {
1681 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1682 
1683 	hci_conn_put(conn->hcon);
1684 	kfree(conn);
1685 }
1686 
1687 void l2cap_conn_get(struct l2cap_conn *conn)
1688 {
1689 	kref_get(&conn->ref);
1690 }
1691 EXPORT_SYMBOL(l2cap_conn_get);
1692 
1693 void l2cap_conn_put(struct l2cap_conn *conn)
1694 {
1695 	kref_put(&conn->ref, l2cap_conn_free);
1696 }
1697 EXPORT_SYMBOL(l2cap_conn_put);
1698 
1699 /* ---- Socket interface ---- */
1700 
1701 /* Find socket with psm and source / destination bdaddr.
1702  * Returns closest match.
1703  */
1704 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1705 						   bdaddr_t *src,
1706 						   bdaddr_t *dst)
1707 {
1708 	struct l2cap_chan *c, *c1 = NULL;
1709 
1710 	read_lock(&chan_list_lock);
1711 
1712 	list_for_each_entry(c, &chan_list, global_l) {
1713 		if (state && c->state != state)
1714 			continue;
1715 
1716 		if (c->psm == psm) {
1717 			int src_match, dst_match;
1718 			int src_any, dst_any;
1719 
1720 			/* Exact match. */
1721 			src_match = !bacmp(&c->src, src);
1722 			dst_match = !bacmp(&c->dst, dst);
1723 			if (src_match && dst_match) {
1724 				read_unlock(&chan_list_lock);
1725 				return c;
1726 			}
1727 
1728 			/* Closest match */
1729 			src_any = !bacmp(&c->src, BDADDR_ANY);
1730 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
1731 			if ((src_match && dst_any) || (src_any && dst_match) ||
1732 			    (src_any && dst_any))
1733 				c1 = c;
1734 		}
1735 	}
1736 
1737 	read_unlock(&chan_list_lock);
1738 
1739 	return c1;
1740 }
1741 
1742 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1743 		       bdaddr_t *dst, u8 dst_type)
1744 {
1745 	struct l2cap_conn *conn;
1746 	struct hci_conn *hcon;
1747 	struct hci_dev *hdev;
1748 	__u8 auth_type;
1749 	int err;
1750 
1751 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
1752 	       dst_type, __le16_to_cpu(psm));
1753 
1754 	hdev = hci_get_route(dst, &chan->src);
1755 	if (!hdev)
1756 		return -EHOSTUNREACH;
1757 
1758 	hci_dev_lock(hdev);
1759 
1760 	l2cap_chan_lock(chan);
1761 
1762 	/* PSM must be odd and lsb of upper byte must be 0 */
1763 	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1764 	    chan->chan_type != L2CAP_CHAN_RAW) {
1765 		err = -EINVAL;
1766 		goto done;
1767 	}
1768 
1769 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1770 		err = -EINVAL;
1771 		goto done;
1772 	}
1773 
1774 	switch (chan->mode) {
1775 	case L2CAP_MODE_BASIC:
1776 		break;
1777 	case L2CAP_MODE_ERTM:
1778 	case L2CAP_MODE_STREAMING:
1779 		if (!disable_ertm)
1780 			break;
1781 		/* fall through */
1782 	default:
1783 		err = -ENOTSUPP;
1784 		goto done;
1785 	}
1786 
1787 	switch (chan->state) {
1788 	case BT_CONNECT:
1789 	case BT_CONNECT2:
1790 	case BT_CONFIG:
1791 		/* Already connecting */
1792 		err = 0;
1793 		goto done;
1794 
1795 	case BT_CONNECTED:
1796 		/* Already connected */
1797 		err = -EISCONN;
1798 		goto done;
1799 
1800 	case BT_OPEN:
1801 	case BT_BOUND:
1802 		/* Can connect */
1803 		break;
1804 
1805 	default:
1806 		err = -EBADFD;
1807 		goto done;
1808 	}
1809 
1810 	/* Set destination address and psm */
1811 	bacpy(&chan->dst, dst);
1812 	chan->dst_type = dst_type;
1813 
1814 	chan->psm = psm;
1815 	chan->dcid = cid;
1816 
1817 	auth_type = l2cap_get_auth_type(chan);
1818 
1819 	if (bdaddr_type_is_le(dst_type))
1820 		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1821 				   chan->sec_level, auth_type);
1822 	else
1823 		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1824 				   chan->sec_level, auth_type);
1825 
1826 	if (IS_ERR(hcon)) {
1827 		err = PTR_ERR(hcon);
1828 		goto done;
1829 	}
1830 
1831 	conn = l2cap_conn_add(hcon);
1832 	if (!conn) {
1833 		hci_conn_drop(hcon);
1834 		err = -ENOMEM;
1835 		goto done;
1836 	}
1837 
1838 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
1839 		hci_conn_drop(hcon);
1840 		err = -EBUSY;
1841 		goto done;
1842 	}
1843 
1844 	/* Update source addr of the socket */
1845 	bacpy(&chan->src, &hcon->src);
1846 	chan->src_type = bdaddr_type(hcon, hcon->src_type);
1847 
1848 	l2cap_chan_unlock(chan);
1849 	l2cap_chan_add(conn, chan);
1850 	l2cap_chan_lock(chan);
1851 
1852 	/* l2cap_chan_add takes its own ref so we can drop this one */
1853 	hci_conn_drop(hcon);
1854 
1855 	l2cap_state_change(chan, BT_CONNECT);
1856 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
1857 
1858 	if (hcon->state == BT_CONNECTED) {
1859 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1860 			__clear_chan_timer(chan);
1861 			if (l2cap_chan_check_security(chan))
1862 				l2cap_state_change(chan, BT_CONNECTED);
1863 		} else
1864 			l2cap_do_start(chan);
1865 	}
1866 
1867 	err = 0;
1868 
1869 done:
1870 	l2cap_chan_unlock(chan);
1871 	hci_dev_unlock(hdev);
1872 	hci_dev_put(hdev);
1873 	return err;
1874 }
1875 
1876 static void l2cap_monitor_timeout(struct work_struct *work)
1877 {
1878 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1879 					       monitor_timer.work);
1880 
1881 	BT_DBG("chan %p", chan);
1882 
1883 	l2cap_chan_lock(chan);
1884 
1885 	if (!chan->conn) {
1886 		l2cap_chan_unlock(chan);
1887 		l2cap_chan_put(chan);
1888 		return;
1889 	}
1890 
1891 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1892 
1893 	l2cap_chan_unlock(chan);
1894 	l2cap_chan_put(chan);
1895 }
1896 
1897 static void l2cap_retrans_timeout(struct work_struct *work)
1898 {
1899 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1900 					       retrans_timer.work);
1901 
1902 	BT_DBG("chan %p", chan);
1903 
1904 	l2cap_chan_lock(chan);
1905 
1906 	if (!chan->conn) {
1907 		l2cap_chan_unlock(chan);
1908 		l2cap_chan_put(chan);
1909 		return;
1910 	}
1911 
1912 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1913 	l2cap_chan_unlock(chan);
1914 	l2cap_chan_put(chan);
1915 }
1916 
1917 static void l2cap_streaming_send(struct l2cap_chan *chan,
1918 				 struct sk_buff_head *skbs)
1919 {
1920 	struct sk_buff *skb;
1921 	struct l2cap_ctrl *control;
1922 
1923 	BT_DBG("chan %p, skbs %p", chan, skbs);
1924 
1925 	if (__chan_is_moving(chan))
1926 		return;
1927 
1928 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
1929 
1930 	while (!skb_queue_empty(&chan->tx_q)) {
1931 
1932 		skb = skb_dequeue(&chan->tx_q);
1933 
1934 		bt_cb(skb)->control.retries = 1;
1935 		control = &bt_cb(skb)->control;
1936 
1937 		control->reqseq = 0;
1938 		control->txseq = chan->next_tx_seq;
1939 
1940 		__pack_control(chan, control, skb);
1941 
1942 		if (chan->fcs == L2CAP_FCS_CRC16) {
1943 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1944 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1945 		}
1946 
1947 		l2cap_do_send(chan, skb);
1948 
1949 		BT_DBG("Sent txseq %u", control->txseq);
1950 
1951 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1952 		chan->frames_sent++;
1953 	}
1954 }
1955 
1956 static int l2cap_ertm_send(struct l2cap_chan *chan)
1957 {
1958 	struct sk_buff *skb, *tx_skb;
1959 	struct l2cap_ctrl *control;
1960 	int sent = 0;
1961 
1962 	BT_DBG("chan %p", chan);
1963 
1964 	if (chan->state != BT_CONNECTED)
1965 		return -ENOTCONN;
1966 
1967 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1968 		return 0;
1969 
1970 	if (__chan_is_moving(chan))
1971 		return 0;
1972 
1973 	while (chan->tx_send_head &&
1974 	       chan->unacked_frames < chan->remote_tx_win &&
1975 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
1976 
1977 		skb = chan->tx_send_head;
1978 
1979 		bt_cb(skb)->control.retries = 1;
1980 		control = &bt_cb(skb)->control;
1981 
1982 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1983 			control->final = 1;
1984 
1985 		control->reqseq = chan->buffer_seq;
1986 		chan->last_acked_seq = chan->buffer_seq;
1987 		control->txseq = chan->next_tx_seq;
1988 
1989 		__pack_control(chan, control, skb);
1990 
1991 		if (chan->fcs == L2CAP_FCS_CRC16) {
1992 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1993 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1994 		}
1995 
1996 		/* Clone after data has been modified. Data is assumed to be
1997 		 * read-only (for locking purposes) on cloned sk_buffs.
1998 		 */
1999 		tx_skb = skb_clone(skb, GFP_KERNEL);
2000 
2001 		if (!tx_skb)
2002 			break;
2003 
2004 		__set_retrans_timer(chan);
2005 
2006 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2007 		chan->unacked_frames++;
2008 		chan->frames_sent++;
2009 		sent++;
2010 
2011 		if (skb_queue_is_last(&chan->tx_q, skb))
2012 			chan->tx_send_head = NULL;
2013 		else
2014 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2015 
2016 		l2cap_do_send(chan, tx_skb);
2017 		BT_DBG("Sent txseq %u", control->txseq);
2018 	}
2019 
2020 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2021 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
2022 
2023 	return sent;
2024 }
2025 
2026 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2027 {
2028 	struct l2cap_ctrl control;
2029 	struct sk_buff *skb;
2030 	struct sk_buff *tx_skb;
2031 	u16 seq;
2032 
2033 	BT_DBG("chan %p", chan);
2034 
2035 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2036 		return;
2037 
2038 	if (__chan_is_moving(chan))
2039 		return;
2040 
2041 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2042 		seq = l2cap_seq_list_pop(&chan->retrans_list);
2043 
2044 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2045 		if (!skb) {
2046 			BT_DBG("Error: Can't retransmit seq %u, frame missing",
2047 			       seq);
2048 			continue;
2049 		}
2050 
2051 		bt_cb(skb)->control.retries++;
2052 		control = bt_cb(skb)->control;
2053 
2054 		if (chan->max_tx != 0 &&
2055 		    bt_cb(skb)->control.retries > chan->max_tx) {
2056 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2057 			l2cap_send_disconn_req(chan, ECONNRESET);
2058 			l2cap_seq_list_clear(&chan->retrans_list);
2059 			break;
2060 		}
2061 
2062 		control.reqseq = chan->buffer_seq;
2063 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2064 			control.final = 1;
2065 		else
2066 			control.final = 0;
2067 
2068 		if (skb_cloned(skb)) {
2069 			/* Cloned sk_buffs are read-only, so we need a
2070 			 * writeable copy
2071 			 */
2072 			tx_skb = skb_copy(skb, GFP_KERNEL);
2073 		} else {
2074 			tx_skb = skb_clone(skb, GFP_KERNEL);
2075 		}
2076 
2077 		if (!tx_skb) {
2078 			l2cap_seq_list_clear(&chan->retrans_list);
2079 			break;
2080 		}
2081 
2082 		/* Update skb contents */
2083 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2084 			put_unaligned_le32(__pack_extended_control(&control),
2085 					   tx_skb->data + L2CAP_HDR_SIZE);
2086 		} else {
2087 			put_unaligned_le16(__pack_enhanced_control(&control),
2088 					   tx_skb->data + L2CAP_HDR_SIZE);
2089 		}
2090 
2091 		if (chan->fcs == L2CAP_FCS_CRC16) {
2092 			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2093 			put_unaligned_le16(fcs, skb_put(tx_skb,
2094 							L2CAP_FCS_SIZE));
2095 		}
2096 
2097 		l2cap_do_send(chan, tx_skb);
2098 
2099 		BT_DBG("Resent txseq %u", control.txseq);
2100 
2101 		chan->last_acked_seq = chan->buffer_seq;
2102 	}
2103 }
2104 
2105 static void l2cap_retransmit(struct l2cap_chan *chan,
2106 			     struct l2cap_ctrl *control)
2107 {
2108 	BT_DBG("chan %p, control %p", chan, control);
2109 
2110 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2111 	l2cap_ertm_resend(chan);
2112 }
2113 
2114 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2115 				 struct l2cap_ctrl *control)
2116 {
2117 	struct sk_buff *skb;
2118 
2119 	BT_DBG("chan %p, control %p", chan, control);
2120 
2121 	if (control->poll)
2122 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2123 
2124 	l2cap_seq_list_clear(&chan->retrans_list);
2125 
2126 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2127 		return;
2128 
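	/* Walk the TX queue to the frame the peer is asking for (reqseq),
	 * then queue every unacknowledged frame from there up to
	 * tx_send_head for retransmission.
	 */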
2129 	if (chan->unacked_frames) {
2130 		skb_queue_walk(&chan->tx_q, skb) {
2131 			if (bt_cb(skb)->control.txseq == control->reqseq ||
2132 			    skb == chan->tx_send_head)
2133 				break;
2134 		}
2135 
2136 		skb_queue_walk_from(&chan->tx_q, skb) {
2137 			if (skb == chan->tx_send_head)
2138 				break;
2139 
2140 			l2cap_seq_list_append(&chan->retrans_list,
2141 					      bt_cb(skb)->control.txseq);
2142 		}
2143 
2144 		l2cap_ertm_resend(chan);
2145 	}
2146 }
2147 
2148 static void l2cap_send_ack(struct l2cap_chan *chan)
2149 {
2150 	struct l2cap_ctrl control;
2151 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2152 					 chan->last_acked_seq);
2153 	int threshold;
2154 
2155 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2156 	       chan, chan->last_acked_seq, chan->buffer_seq);
2157 
2158 	memset(&control, 0, sizeof(control));
2159 	control.sframe = 1;
2160 
2161 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2162 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2163 		__clear_ack_timer(chan);
2164 		control.super = L2CAP_SUPER_RNR;
2165 		control.reqseq = chan->buffer_seq;
2166 		l2cap_send_sframe(chan, &control);
2167 	} else {
2168 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2169 			l2cap_ertm_send(chan);
2170 			/* If any i-frames were sent, they included an ack */
2171 			if (chan->buffer_seq == chan->last_acked_seq)
2172 				frames_to_ack = 0;
2173 		}
2174 
2175 		/* Ack now if the window is 3/4ths full.
2176 		 * Calculate without mul or div
2177 		 */
2178 		threshold = chan->ack_win;
2179 		threshold += threshold << 1;
2180 		threshold >>= 2;
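		/* For example, with ack_win = 8 this computes
		 * (8 + 16) >> 2 = 6, i.e. floor(3 * 8 / 4), so an RR is
		 * sent once 6 or more frames are awaiting acknowledgement.
		 */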
2181 
2182 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2183 		       threshold);
2184 
2185 		if (frames_to_ack >= threshold) {
2186 			__clear_ack_timer(chan);
2187 			control.super = L2CAP_SUPER_RR;
2188 			control.reqseq = chan->buffer_seq;
2189 			l2cap_send_sframe(chan, &control);
2190 			frames_to_ack = 0;
2191 		}
2192 
2193 		if (frames_to_ack)
2194 			__set_ack_timer(chan);
2195 	}
2196 }
2197 
2198 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2199 					 struct msghdr *msg, int len,
2200 					 int count, struct sk_buff *skb)
2201 {
2202 	struct l2cap_conn *conn = chan->conn;
2203 	struct sk_buff **frag;
2204 	int sent = 0;
2205 
2206 	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2207 		return -EFAULT;
2208 
2209 	sent += count;
2210 	len  -= count;
2211 
2212 	/* Continuation fragments (no L2CAP header) */
2213 	frag = &skb_shinfo(skb)->frag_list;
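	/* Each continuation fragment holds at most conn->mtu bytes; the
	 * parent skb's len and data_len are grown so the chained data is
	 * accounted for when the frame is transmitted.
	 */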
2214 	while (len) {
2215 		struct sk_buff *tmp;
2216 
2217 		count = min_t(unsigned int, conn->mtu, len);
2218 
2219 		tmp = chan->ops->alloc_skb(chan, count,
2220 					   msg->msg_flags & MSG_DONTWAIT);
2221 		if (IS_ERR(tmp))
2222 			return PTR_ERR(tmp);
2223 
2224 		*frag = tmp;
2225 
2226 		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2227 			return -EFAULT;
2228 
2229 		(*frag)->priority = skb->priority;
2230 
2231 		sent += count;
2232 		len  -= count;
2233 
2234 		skb->len += (*frag)->len;
2235 		skb->data_len += (*frag)->len;
2236 
2237 		frag = &(*frag)->next;
2238 	}
2239 
2240 	return sent;
2241 }
2242 
2243 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2244 						 struct msghdr *msg, size_t len,
2245 						 u32 priority)
2246 {
2247 	struct l2cap_conn *conn = chan->conn;
2248 	struct sk_buff *skb;
2249 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2250 	struct l2cap_hdr *lh;
2251 
2252 	BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
2253 	       __le16_to_cpu(chan->psm), len, priority);
2254 
2255 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2256 
2257 	skb = chan->ops->alloc_skb(chan, count + hlen,
2258 				   msg->msg_flags & MSG_DONTWAIT);
2259 	if (IS_ERR(skb))
2260 		return skb;
2261 
2262 	skb->priority = priority;
2263 
2264 	/* Create L2CAP header */
2265 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2266 	lh->cid = cpu_to_le16(chan->dcid);
2267 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2268 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2269 
2270 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2271 	if (unlikely(err < 0)) {
2272 		kfree_skb(skb);
2273 		return ERR_PTR(err);
2274 	}
2275 	return skb;
2276 }
2277 
2278 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2279 					      struct msghdr *msg, size_t len,
2280 					      u32 priority)
2281 {
2282 	struct l2cap_conn *conn = chan->conn;
2283 	struct sk_buff *skb;
2284 	int err, count;
2285 	struct l2cap_hdr *lh;
2286 
2287 	BT_DBG("chan %p len %zu", chan, len);
2288 
2289 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2290 
2291 	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2292 				   msg->msg_flags & MSG_DONTWAIT);
2293 	if (IS_ERR(skb))
2294 		return skb;
2295 
2296 	skb->priority = priority;
2297 
2298 	/* Create L2CAP header */
2299 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2300 	lh->cid = cpu_to_le16(chan->dcid);
2301 	lh->len = cpu_to_le16(len);
2302 
2303 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2304 	if (unlikely(err < 0)) {
2305 		kfree_skb(skb);
2306 		return ERR_PTR(err);
2307 	}
2308 	return skb;
2309 }
2310 
2311 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2312 					       struct msghdr *msg, size_t len,
2313 					       u16 sdulen)
2314 {
2315 	struct l2cap_conn *conn = chan->conn;
2316 	struct sk_buff *skb;
2317 	int err, count, hlen;
2318 	struct l2cap_hdr *lh;
2319 
2320 	BT_DBG("chan %p len %zu", chan, len);
2321 
2322 	if (!conn)
2323 		return ERR_PTR(-ENOTCONN);
2324 
2325 	hlen = __ertm_hdr_size(chan);
2326 
2327 	if (sdulen)
2328 		hlen += L2CAP_SDULEN_SIZE;
2329 
2330 	if (chan->fcs == L2CAP_FCS_CRC16)
2331 		hlen += L2CAP_FCS_SIZE;
2332 
2333 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2334 
2335 	skb = chan->ops->alloc_skb(chan, count + hlen,
2336 				   msg->msg_flags & MSG_DONTWAIT);
2337 	if (IS_ERR(skb))
2338 		return skb;
2339 
2340 	/* Create L2CAP header */
2341 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2342 	lh->cid = cpu_to_le16(chan->dcid);
2343 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2344 
2345 	/* Control header is populated later */
2346 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2347 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2348 	else
2349 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2350 
2351 	if (sdulen)
2352 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2353 
2354 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2355 	if (unlikely(err < 0)) {
2356 		kfree_skb(skb);
2357 		return ERR_PTR(err);
2358 	}
2359 
2360 	bt_cb(skb)->control.fcs = chan->fcs;
2361 	bt_cb(skb)->control.retries = 0;
2362 	return skb;
2363 }
2364 
2365 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2366 			     struct sk_buff_head *seg_queue,
2367 			     struct msghdr *msg, size_t len)
2368 {
2369 	struct sk_buff *skb;
2370 	u16 sdu_len;
2371 	size_t pdu_len;
2372 	u8 sar;
2373 
2374 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2375 
2376 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2377 	 * so fragmented skbs are not used.  The HCI layer's handling
2378 	 * of fragmented skbs is not compatible with ERTM's queueing.
2379 	 */
2380 
2381 	/* PDU size is derived from the HCI MTU */
2382 	pdu_len = chan->conn->mtu;
2383 
2384 	/* Constrain PDU size for BR/EDR connections */
2385 	if (!chan->hs_hcon)
2386 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2387 
2388 	/* Adjust for largest possible L2CAP overhead. */
2389 	if (chan->fcs)
2390 		pdu_len -= L2CAP_FCS_SIZE;
2391 
2392 	pdu_len -= __ertm_hdr_size(chan);
2393 
2394 	/* Remote device may have requested smaller PDUs */
2395 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2396 
2397 	if (len <= pdu_len) {
2398 		sar = L2CAP_SAR_UNSEGMENTED;
2399 		sdu_len = 0;
2400 		pdu_len = len;
2401 	} else {
2402 		sar = L2CAP_SAR_START;
2403 		sdu_len = len;
2404 		pdu_len -= L2CAP_SDULEN_SIZE;
2405 	}
2406 
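	/* Illustrative example (sizes are hypothetical): with an effective
	 * pdu_len of 1000 and a 2500 byte SDU, the first PDU carries the
	 * 2 byte SDU length plus 998 bytes of data (SAR start), the second
	 * carries 1000 bytes (continue) and the last 502 bytes (end).
	 */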
2407 	while (len > 0) {
2408 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2409 
2410 		if (IS_ERR(skb)) {
2411 			__skb_queue_purge(seg_queue);
2412 			return PTR_ERR(skb);
2413 		}
2414 
2415 		bt_cb(skb)->control.sar = sar;
2416 		__skb_queue_tail(seg_queue, skb);
2417 
2418 		len -= pdu_len;
2419 		if (sdu_len) {
2420 			sdu_len = 0;
2421 			pdu_len += L2CAP_SDULEN_SIZE;
2422 		}
2423 
2424 		if (len <= pdu_len) {
2425 			sar = L2CAP_SAR_END;
2426 			pdu_len = len;
2427 		} else {
2428 			sar = L2CAP_SAR_CONTINUE;
2429 		}
2430 	}
2431 
2432 	return 0;
2433 }
2434 
2435 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2436 		    u32 priority)
2437 {
2438 	struct sk_buff *skb;
2439 	int err;
2440 	struct sk_buff_head seg_queue;
2441 
2442 	if (!chan->conn)
2443 		return -ENOTCONN;
2444 
2445 	/* Connectionless channel */
2446 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2447 		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2448 		if (IS_ERR(skb))
2449 			return PTR_ERR(skb);
2450 
2451 		l2cap_do_send(chan, skb);
2452 		return len;
2453 	}
2454 
2455 	switch (chan->mode) {
2456 	case L2CAP_MODE_BASIC:
2457 		/* Check outgoing MTU */
2458 		if (len > chan->omtu)
2459 			return -EMSGSIZE;
2460 
2461 		/* Create a basic PDU */
2462 		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2463 		if (IS_ERR(skb))
2464 			return PTR_ERR(skb);
2465 
2466 		l2cap_do_send(chan, skb);
2467 		err = len;
2468 		break;
2469 
2470 	case L2CAP_MODE_ERTM:
2471 	case L2CAP_MODE_STREAMING:
2472 		/* Check outgoing MTU */
2473 		if (len > chan->omtu) {
2474 			err = -EMSGSIZE;
2475 			break;
2476 		}
2477 
2478 		__skb_queue_head_init(&seg_queue);
2479 
2480 		/* Do segmentation before calling in to the state machine,
2481 		 * since it's possible to block while waiting for memory
2482 		 * allocation.
2483 		 */
2484 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2485 
2486 		/* The channel could have been closed while segmenting,
2487 		 * check that it is still connected.
2488 		 */
2489 		if (chan->state != BT_CONNECTED) {
2490 			__skb_queue_purge(&seg_queue);
2491 			err = -ENOTCONN;
2492 		}
2493 
2494 		if (err)
2495 			break;
2496 
2497 		if (chan->mode == L2CAP_MODE_ERTM)
2498 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2499 		else
2500 			l2cap_streaming_send(chan, &seg_queue);
2501 
2502 		err = len;
2503 
2504 		/* If the skbs were not queued for sending, they'll still be in
2505 		 * seg_queue and need to be purged.
2506 		 */
2507 		__skb_queue_purge(&seg_queue);
2508 		break;
2509 
2510 	default:
2511 		BT_DBG("bad mode 0x%2.2x", chan->mode);
2512 		err = -EBADFD;
2513 	}
2514 
2515 	return err;
2516 }
2517 
2518 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2519 {
2520 	struct l2cap_ctrl control;
2521 	u16 seq;
2522 
2523 	BT_DBG("chan %p, txseq %u", chan, txseq);
2524 
2525 	memset(&control, 0, sizeof(control));
2526 	control.sframe = 1;
2527 	control.super = L2CAP_SUPER_SREJ;
2528 
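	/* Request retransmission of every frame between the next expected
	 * sequence number and txseq that has not already arrived out of
	 * order (i.e. is not sitting in srej_q).
	 */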
2529 	for (seq = chan->expected_tx_seq; seq != txseq;
2530 	     seq = __next_seq(chan, seq)) {
2531 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2532 			control.reqseq = seq;
2533 			l2cap_send_sframe(chan, &control);
2534 			l2cap_seq_list_append(&chan->srej_list, seq);
2535 		}
2536 	}
2537 
2538 	chan->expected_tx_seq = __next_seq(chan, txseq);
2539 }
2540 
2541 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2542 {
2543 	struct l2cap_ctrl control;
2544 
2545 	BT_DBG("chan %p", chan);
2546 
2547 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2548 		return;
2549 
2550 	memset(&control, 0, sizeof(control));
2551 	control.sframe = 1;
2552 	control.super = L2CAP_SUPER_SREJ;
2553 	control.reqseq = chan->srej_list.tail;
2554 	l2cap_send_sframe(chan, &control);
2555 }
2556 
2557 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2558 {
2559 	struct l2cap_ctrl control;
2560 	u16 initial_head;
2561 	u16 seq;
2562 
2563 	BT_DBG("chan %p, txseq %u", chan, txseq);
2564 
2565 	memset(&control, 0, sizeof(control));
2566 	control.sframe = 1;
2567 	control.super = L2CAP_SUPER_SREJ;
2568 
2569 	/* Capture initial list head to allow only one pass through the list. */
2570 	initial_head = chan->srej_list.head;
2571 
2572 	do {
2573 		seq = l2cap_seq_list_pop(&chan->srej_list);
2574 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2575 			break;
2576 
2577 		control.reqseq = seq;
2578 		l2cap_send_sframe(chan, &control);
2579 		l2cap_seq_list_append(&chan->srej_list, seq);
2580 	} while (chan->srej_list.head != initial_head);
2581 }
2582 
2583 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2584 {
2585 	struct sk_buff *acked_skb;
2586 	u16 ackseq;
2587 
2588 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2589 
2590 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2591 		return;
2592 
2593 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2594 	       chan->expected_ack_seq, chan->unacked_frames);
2595 
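	/* reqseq acknowledges every frame sent before it; drop those frames
	 * from the TX queue and stop the retransmission timer once nothing
	 * is left outstanding.
	 */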
2596 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2597 	     ackseq = __next_seq(chan, ackseq)) {
2598 
2599 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2600 		if (acked_skb) {
2601 			skb_unlink(acked_skb, &chan->tx_q);
2602 			kfree_skb(acked_skb);
2603 			chan->unacked_frames--;
2604 		}
2605 	}
2606 
2607 	chan->expected_ack_seq = reqseq;
2608 
2609 	if (chan->unacked_frames == 0)
2610 		__clear_retrans_timer(chan);
2611 
2612 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2613 }
2614 
2615 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2616 {
2617 	BT_DBG("chan %p", chan);
2618 
2619 	chan->expected_tx_seq = chan->buffer_seq;
2620 	l2cap_seq_list_clear(&chan->srej_list);
2621 	skb_queue_purge(&chan->srej_q);
2622 	chan->rx_state = L2CAP_RX_STATE_RECV;
2623 }
2624 
2625 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2626 				struct l2cap_ctrl *control,
2627 				struct sk_buff_head *skbs, u8 event)
2628 {
2629 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2630 	       event);
2631 
2632 	switch (event) {
2633 	case L2CAP_EV_DATA_REQUEST:
2634 		if (chan->tx_send_head == NULL)
2635 			chan->tx_send_head = skb_peek(skbs);
2636 
2637 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2638 		l2cap_ertm_send(chan);
2639 		break;
2640 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2641 		BT_DBG("Enter LOCAL_BUSY");
2642 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2643 
2644 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2645 			/* The SREJ_SENT state must be aborted if we are to
2646 			 * enter the LOCAL_BUSY state.
2647 			 */
2648 			l2cap_abort_rx_srej_sent(chan);
2649 		}
2650 
2651 		l2cap_send_ack(chan);
2652 
2653 		break;
2654 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2655 		BT_DBG("Exit LOCAL_BUSY");
2656 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2657 
2658 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2659 			struct l2cap_ctrl local_control;
2660 
2661 			memset(&local_control, 0, sizeof(local_control));
2662 			local_control.sframe = 1;
2663 			local_control.super = L2CAP_SUPER_RR;
2664 			local_control.poll = 1;
2665 			local_control.reqseq = chan->buffer_seq;
2666 			l2cap_send_sframe(chan, &local_control);
2667 
2668 			chan->retry_count = 1;
2669 			__set_monitor_timer(chan);
2670 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2671 		}
2672 		break;
2673 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2674 		l2cap_process_reqseq(chan, control->reqseq);
2675 		break;
2676 	case L2CAP_EV_EXPLICIT_POLL:
2677 		l2cap_send_rr_or_rnr(chan, 1);
2678 		chan->retry_count = 1;
2679 		__set_monitor_timer(chan);
2680 		__clear_ack_timer(chan);
2681 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2682 		break;
2683 	case L2CAP_EV_RETRANS_TO:
2684 		l2cap_send_rr_or_rnr(chan, 1);
2685 		chan->retry_count = 1;
2686 		__set_monitor_timer(chan);
2687 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2688 		break;
2689 	case L2CAP_EV_RECV_FBIT:
2690 		/* Nothing to process */
2691 		break;
2692 	default:
2693 		break;
2694 	}
2695 }
2696 
2697 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2698 				  struct l2cap_ctrl *control,
2699 				  struct sk_buff_head *skbs, u8 event)
2700 {
2701 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2702 	       event);
2703 
2704 	switch (event) {
2705 	case L2CAP_EV_DATA_REQUEST:
2706 		if (chan->tx_send_head == NULL)
2707 			chan->tx_send_head = skb_peek(skbs);
2708 		/* Queue data, but don't send. */
2709 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2710 		break;
2711 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2712 		BT_DBG("Enter LOCAL_BUSY");
2713 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2714 
2715 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2716 			/* The SREJ_SENT state must be aborted if we are to
2717 			 * enter the LOCAL_BUSY state.
2718 			 */
2719 			l2cap_abort_rx_srej_sent(chan);
2720 		}
2721 
2722 		l2cap_send_ack(chan);
2723 
2724 		break;
2725 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2726 		BT_DBG("Exit LOCAL_BUSY");
2727 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2728 
2729 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2730 			struct l2cap_ctrl local_control;
2731 			memset(&local_control, 0, sizeof(local_control));
2732 			local_control.sframe = 1;
2733 			local_control.super = L2CAP_SUPER_RR;
2734 			local_control.poll = 1;
2735 			local_control.reqseq = chan->buffer_seq;
2736 			l2cap_send_sframe(chan, &local_control);
2737 
2738 			chan->retry_count = 1;
2739 			__set_monitor_timer(chan);
2740 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2741 		}
2742 		break;
2743 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2744 		l2cap_process_reqseq(chan, control->reqseq);
2745 
2746 		/* Fall through */
2747 
2748 	case L2CAP_EV_RECV_FBIT:
2749 		if (control && control->final) {
2750 			__clear_monitor_timer(chan);
2751 			if (chan->unacked_frames > 0)
2752 				__set_retrans_timer(chan);
2753 			chan->retry_count = 0;
2754 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2755 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2756 		}
2757 		break;
2758 	case L2CAP_EV_EXPLICIT_POLL:
2759 		/* Ignore */
2760 		break;
2761 	case L2CAP_EV_MONITOR_TO:
2762 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2763 			l2cap_send_rr_or_rnr(chan, 1);
2764 			__set_monitor_timer(chan);
2765 			chan->retry_count++;
2766 		} else {
2767 			l2cap_send_disconn_req(chan, ECONNABORTED);
2768 		}
2769 		break;
2770 	default:
2771 		break;
2772 	}
2773 }
2774 
2775 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2776 		     struct sk_buff_head *skbs, u8 event)
2777 {
2778 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2779 	       chan, control, skbs, event, chan->tx_state);
2780 
2781 	switch (chan->tx_state) {
2782 	case L2CAP_TX_STATE_XMIT:
2783 		l2cap_tx_state_xmit(chan, control, skbs, event);
2784 		break;
2785 	case L2CAP_TX_STATE_WAIT_F:
2786 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2787 		break;
2788 	default:
2789 		/* Ignore event */
2790 		break;
2791 	}
2792 }
2793 
2794 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2795 			     struct l2cap_ctrl *control)
2796 {
2797 	BT_DBG("chan %p, control %p", chan, control);
2798 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2799 }
2800 
2801 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2802 				  struct l2cap_ctrl *control)
2803 {
2804 	BT_DBG("chan %p, control %p", chan, control);
2805 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2806 }
2807 
2808 /* Copy frame to all raw sockets on that connection */
2809 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2810 {
2811 	struct sk_buff *nskb;
2812 	struct l2cap_chan *chan;
2813 
2814 	BT_DBG("conn %p", conn);
2815 
2816 	mutex_lock(&conn->chan_lock);
2817 
2818 	list_for_each_entry(chan, &conn->chan_l, list) {
2819 		if (chan->chan_type != L2CAP_CHAN_RAW)
2820 			continue;
2821 
2822 		/* Don't send frame to the channel it came from */
2823 		if (bt_cb(skb)->chan == chan)
2824 			continue;
2825 
2826 		nskb = skb_clone(skb, GFP_KERNEL);
2827 		if (!nskb)
2828 			continue;
2829 		if (chan->ops->recv(chan, nskb))
2830 			kfree_skb(nskb);
2831 	}
2832 
2833 	mutex_unlock(&conn->chan_lock);
2834 }
2835 
2836 /* ---- L2CAP signalling commands ---- */
2837 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2838 				       u8 ident, u16 dlen, void *data)
2839 {
2840 	struct sk_buff *skb, **frag;
2841 	struct l2cap_cmd_hdr *cmd;
2842 	struct l2cap_hdr *lh;
2843 	int len, count;
2844 
2845 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2846 	       conn, code, ident, dlen);
2847 
2848 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2849 		return NULL;
2850 
2851 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2852 	count = min_t(unsigned int, conn->mtu, len);
2853 
2854 	skb = bt_skb_alloc(count, GFP_KERNEL);
2855 	if (!skb)
2856 		return NULL;
2857 
2858 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2859 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2860 
2861 	if (conn->hcon->type == LE_LINK)
2862 		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2863 	else
2864 		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2865 
2866 	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2867 	cmd->code  = code;
2868 	cmd->ident = ident;
2869 	cmd->len   = cpu_to_le16(dlen);
2870 
2871 	if (dlen) {
2872 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2873 		memcpy(skb_put(skb, count), data, count);
2874 		data += count;
2875 	}
2876 
2877 	len -= skb->len;
2878 
2879 	/* Continuation fragments (no L2CAP header) */
2880 	frag = &skb_shinfo(skb)->frag_list;
2881 	while (len) {
2882 		count = min_t(unsigned int, conn->mtu, len);
2883 
2884 		*frag = bt_skb_alloc(count, GFP_KERNEL);
2885 		if (!*frag)
2886 			goto fail;
2887 
2888 		memcpy(skb_put(*frag, count), data, count);
2889 
2890 		len  -= count;
2891 		data += count;
2892 
2893 		frag = &(*frag)->next;
2894 	}
2895 
2896 	return skb;
2897 
2898 fail:
2899 	kfree_skb(skb);
2900 	return NULL;
2901 }
2902 
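/* Configuration options are encoded as type/length/value triplets: a one
 * byte type (the high bit marks the option as a hint), a one byte length
 * and a variable length value.  Lengths of 1, 2 and 4 are decoded as
 * integers; any other length is passed back as a pointer into the buffer.
 */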
2903 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2904 				     unsigned long *val)
2905 {
2906 	struct l2cap_conf_opt *opt = *ptr;
2907 	int len;
2908 
2909 	len = L2CAP_CONF_OPT_SIZE + opt->len;
2910 	*ptr += len;
2911 
2912 	*type = opt->type;
2913 	*olen = opt->len;
2914 
2915 	switch (opt->len) {
2916 	case 1:
2917 		*val = *((u8 *) opt->val);
2918 		break;
2919 
2920 	case 2:
2921 		*val = get_unaligned_le16(opt->val);
2922 		break;
2923 
2924 	case 4:
2925 		*val = get_unaligned_le32(opt->val);
2926 		break;
2927 
2928 	default:
2929 		*val = (unsigned long) opt->val;
2930 		break;
2931 	}
2932 
2933 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2934 	return len;
2935 }
2936 
2937 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2938 {
2939 	struct l2cap_conf_opt *opt = *ptr;
2940 
2941 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2942 
2943 	opt->type = type;
2944 	opt->len  = len;
2945 
2946 	switch (len) {
2947 	case 1:
2948 		*((u8 *) opt->val)  = val;
2949 		break;
2950 
2951 	case 2:
2952 		put_unaligned_le16(val, opt->val);
2953 		break;
2954 
2955 	case 4:
2956 		put_unaligned_le32(val, opt->val);
2957 		break;
2958 
2959 	default:
2960 		memcpy(opt->val, (void *) val, len);
2961 		break;
2962 	}
2963 
2964 	*ptr += L2CAP_CONF_OPT_SIZE + len;
2965 }
2966 
2967 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2968 {
2969 	struct l2cap_conf_efs efs;
2970 
2971 	switch (chan->mode) {
2972 	case L2CAP_MODE_ERTM:
2973 		efs.id		= chan->local_id;
2974 		efs.stype	= chan->local_stype;
2975 		efs.msdu	= cpu_to_le16(chan->local_msdu);
2976 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
2977 		efs.acc_lat	= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2978 		efs.flush_to	= __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2979 		break;
2980 
2981 	case L2CAP_MODE_STREAMING:
2982 		efs.id		= 1;
2983 		efs.stype	= L2CAP_SERV_BESTEFFORT;
2984 		efs.msdu	= cpu_to_le16(chan->local_msdu);
2985 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
2986 		efs.acc_lat	= 0;
2987 		efs.flush_to	= 0;
2988 		break;
2989 
2990 	default:
2991 		return;
2992 	}
2993 
2994 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2995 			   (unsigned long) &efs);
2996 }
2997 
2998 static void l2cap_ack_timeout(struct work_struct *work)
2999 {
3000 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3001 					       ack_timer.work);
3002 	u16 frames_to_ack;
3003 
3004 	BT_DBG("chan %p", chan);
3005 
3006 	l2cap_chan_lock(chan);
3007 
3008 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3009 				     chan->last_acked_seq);
3010 
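	/* The delayed ack timer fired with received frames still
	 * unacknowledged; send an RR or RNR with the poll bit clear to
	 * acknowledge them.
	 */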
3011 	if (frames_to_ack)
3012 		l2cap_send_rr_or_rnr(chan, 0);
3013 
3014 	l2cap_chan_unlock(chan);
3015 	l2cap_chan_put(chan);
3016 }
3017 
3018 int l2cap_ertm_init(struct l2cap_chan *chan)
3019 {
3020 	int err;
3021 
3022 	chan->next_tx_seq = 0;
3023 	chan->expected_tx_seq = 0;
3024 	chan->expected_ack_seq = 0;
3025 	chan->unacked_frames = 0;
3026 	chan->buffer_seq = 0;
3027 	chan->frames_sent = 0;
3028 	chan->last_acked_seq = 0;
3029 	chan->sdu = NULL;
3030 	chan->sdu_last_frag = NULL;
3031 	chan->sdu_len = 0;
3032 
3033 	skb_queue_head_init(&chan->tx_q);
3034 
3035 	chan->local_amp_id = AMP_ID_BREDR;
3036 	chan->move_id = AMP_ID_BREDR;
3037 	chan->move_state = L2CAP_MOVE_STABLE;
3038 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3039 
3040 	if (chan->mode != L2CAP_MODE_ERTM)
3041 		return 0;
3042 
3043 	chan->rx_state = L2CAP_RX_STATE_RECV;
3044 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3045 
3046 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3047 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3048 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3049 
3050 	skb_queue_head_init(&chan->srej_q);
3051 
3052 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3053 	if (err < 0)
3054 		return err;
3055 
3056 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3057 	if (err < 0)
3058 		l2cap_seq_list_free(&chan->srej_list);
3059 
3060 	return err;
3061 }
3062 
3063 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3064 {
3065 	switch (mode) {
3066 	case L2CAP_MODE_STREAMING:
3067 	case L2CAP_MODE_ERTM:
3068 		if (l2cap_mode_supported(mode, remote_feat_mask))
3069 			return mode;
3070 		/* fall through */
3071 	default:
3072 		return L2CAP_MODE_BASIC;
3073 	}
3074 }
3075 
3076 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3077 {
3078 	return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3079 }
3080 
3081 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3082 {
3083 	return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3084 }
3085 
3086 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3087 				      struct l2cap_conf_rfc *rfc)
3088 {
3089 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3090 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3091 
3092 		/* Class 1 devices must have ERTM timeouts
3093 		 * exceeding the Link Supervision Timeout.  The
3094 		 * default Link Supervision Timeout for AMP
3095 		 * controllers is 10 seconds.
3096 		 *
3097 		 * Class 1 devices use 0xffffffff for their
3098 		 * best-effort flush timeout, so the clamping logic
3099 		 * will result in a timeout that meets the above
3100 		 * requirement.  ERTM timeouts are 16-bit values, so
3101 		 * the maximum timeout is 65.535 seconds.
3102 		 */
3103 
3104 		/* Convert timeout to milliseconds and round */
3105 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3106 
3107 		/* This is the recommended formula for class 2 devices
3108 		 * that start ERTM timers when packets are sent to the
3109 		 * controller.
3110 		 */
3111 		ertm_to = 3 * ertm_to + 500;
3112 
3113 		if (ertm_to > 0xffff)
3114 			ertm_to = 0xffff;
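		/* For example, a best-effort flush timeout of 0xffffffff
		 * microseconds rounds to 4294968 ms; 3 * 4294968 + 500 is
		 * far above the 16-bit limit, so the ERTM timeouts end up
		 * clamped at 65535 ms (65.535 seconds).
		 */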
3115 
3116 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3117 		rfc->monitor_timeout = rfc->retrans_timeout;
3118 	} else {
3119 		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3120 		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3121 	}
3122 }
3123 
3124 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3125 {
3126 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3127 	    __l2cap_ews_supported(chan->conn)) {
3128 		/* use extended control field */
3129 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3130 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3131 	} else {
3132 		chan->tx_win = min_t(u16, chan->tx_win,
3133 				     L2CAP_DEFAULT_TX_WINDOW);
3134 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3135 	}
3136 	chan->ack_win = chan->tx_win;
3137 }
3138 
3139 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3140 {
3141 	struct l2cap_conf_req *req = data;
3142 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3143 	void *ptr = req->data;
3144 	u16 size;
3145 
3146 	BT_DBG("chan %p", chan);
3147 
3148 	if (chan->num_conf_req || chan->num_conf_rsp)
3149 		goto done;
3150 
3151 	switch (chan->mode) {
3152 	case L2CAP_MODE_STREAMING:
3153 	case L2CAP_MODE_ERTM:
3154 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3155 			break;
3156 
3157 		if (__l2cap_efs_supported(chan->conn))
3158 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3159 
3160 		/* fall through */
3161 	default:
3162 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3163 		break;
3164 	}
3165 
3166 done:
3167 	if (chan->imtu != L2CAP_DEFAULT_MTU)
3168 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3169 
3170 	switch (chan->mode) {
3171 	case L2CAP_MODE_BASIC:
3172 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3173 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3174 			break;
3175 
3176 		rfc.mode            = L2CAP_MODE_BASIC;
3177 		rfc.txwin_size      = 0;
3178 		rfc.max_transmit    = 0;
3179 		rfc.retrans_timeout = 0;
3180 		rfc.monitor_timeout = 0;
3181 		rfc.max_pdu_size    = 0;
3182 
3183 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3184 				   (unsigned long) &rfc);
3185 		break;
3186 
3187 	case L2CAP_MODE_ERTM:
3188 		rfc.mode            = L2CAP_MODE_ERTM;
3189 		rfc.max_transmit    = chan->max_tx;
3190 
3191 		__l2cap_set_ertm_timeouts(chan, &rfc);
3192 
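		/* Advertise an MPS no larger than what still fits in one
		 * HCI fragment once the extended header, SDU length field
		 * and FCS are subtracted from the ACL MTU.
		 */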
3193 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3194 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3195 			     L2CAP_FCS_SIZE);
3196 		rfc.max_pdu_size = cpu_to_le16(size);
3197 
3198 		l2cap_txwin_setup(chan);
3199 
3200 		rfc.txwin_size = min_t(u16, chan->tx_win,
3201 				       L2CAP_DEFAULT_TX_WINDOW);
3202 
3203 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3204 				   (unsigned long) &rfc);
3205 
3206 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3207 			l2cap_add_opt_efs(&ptr, chan);
3208 
3209 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3210 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3211 					   chan->tx_win);
3212 
3213 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3214 			if (chan->fcs == L2CAP_FCS_NONE ||
3215 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3216 				chan->fcs = L2CAP_FCS_NONE;
3217 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3218 						   chan->fcs);
3219 			}
3220 		break;
3221 
3222 	case L2CAP_MODE_STREAMING:
3223 		l2cap_txwin_setup(chan);
3224 		rfc.mode            = L2CAP_MODE_STREAMING;
3225 		rfc.txwin_size      = 0;
3226 		rfc.max_transmit    = 0;
3227 		rfc.retrans_timeout = 0;
3228 		rfc.monitor_timeout = 0;
3229 
3230 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3231 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3232 			     L2CAP_FCS_SIZE);
3233 		rfc.max_pdu_size = cpu_to_le16(size);
3234 
3235 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3236 				   (unsigned long) &rfc);
3237 
3238 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3239 			l2cap_add_opt_efs(&ptr, chan);
3240 
3241 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3242 			if (chan->fcs == L2CAP_FCS_NONE ||
3243 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3244 				chan->fcs = L2CAP_FCS_NONE;
3245 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3246 						   chan->fcs);
3247 			}
3248 		break;
3249 	}
3250 
3251 	req->dcid  = cpu_to_le16(chan->dcid);
3252 	req->flags = __constant_cpu_to_le16(0);
3253 
3254 	return ptr - data;
3255 }
3256 
3257 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3258 {
3259 	struct l2cap_conf_rsp *rsp = data;
3260 	void *ptr = rsp->data;
3261 	void *req = chan->conf_req;
3262 	int len = chan->conf_len;
3263 	int type, hint, olen;
3264 	unsigned long val;
3265 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3266 	struct l2cap_conf_efs efs;
3267 	u8 remote_efs = 0;
3268 	u16 mtu = L2CAP_DEFAULT_MTU;
3269 	u16 result = L2CAP_CONF_SUCCESS;
3270 	u16 size;
3271 
3272 	BT_DBG("chan %p", chan);
3273 
3274 	while (len >= L2CAP_CONF_OPT_SIZE) {
3275 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3276 
3277 		hint  = type & L2CAP_CONF_HINT;
3278 		type &= L2CAP_CONF_MASK;
3279 
3280 		switch (type) {
3281 		case L2CAP_CONF_MTU:
3282 			mtu = val;
3283 			break;
3284 
3285 		case L2CAP_CONF_FLUSH_TO:
3286 			chan->flush_to = val;
3287 			break;
3288 
3289 		case L2CAP_CONF_QOS:
3290 			break;
3291 
3292 		case L2CAP_CONF_RFC:
3293 			if (olen == sizeof(rfc))
3294 				memcpy(&rfc, (void *) val, olen);
3295 			break;
3296 
3297 		case L2CAP_CONF_FCS:
3298 			if (val == L2CAP_FCS_NONE)
3299 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3300 			break;
3301 
3302 		case L2CAP_CONF_EFS:
3303 			remote_efs = 1;
3304 			if (olen == sizeof(efs))
3305 				memcpy(&efs, (void *) val, olen);
3306 			break;
3307 
3308 		case L2CAP_CONF_EWS:
3309 			if (!chan->conn->hs_enabled)
3310 				return -ECONNREFUSED;
3311 
3312 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3313 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3314 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3315 			chan->remote_tx_win = val;
3316 			break;
3317 
3318 		default:
3319 			if (hint)
3320 				break;
3321 
3322 			result = L2CAP_CONF_UNKNOWN;
3323 			*((u8 *) ptr++) = type;
3324 			break;
3325 		}
3326 	}
3327 
3328 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3329 		goto done;
3330 
3331 	switch (chan->mode) {
3332 	case L2CAP_MODE_STREAMING:
3333 	case L2CAP_MODE_ERTM:
3334 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3335 			chan->mode = l2cap_select_mode(rfc.mode,
3336 						       chan->conn->feat_mask);
3337 			break;
3338 		}
3339 
3340 		if (remote_efs) {
3341 			if (__l2cap_efs_supported(chan->conn))
3342 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3343 			else
3344 				return -ECONNREFUSED;
3345 		}
3346 
3347 		if (chan->mode != rfc.mode)
3348 			return -ECONNREFUSED;
3349 
3350 		break;
3351 	}
3352 
3353 done:
3354 	if (chan->mode != rfc.mode) {
3355 		result = L2CAP_CONF_UNACCEPT;
3356 		rfc.mode = chan->mode;
3357 
3358 		if (chan->num_conf_rsp == 1)
3359 			return -ECONNREFUSED;
3360 
3361 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3362 				   (unsigned long) &rfc);
3363 	}
3364 
3365 	if (result == L2CAP_CONF_SUCCESS) {
3366 		/* Configure output options and let the other side know
3367 		 * which ones we don't like. */
3368 
3369 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3370 			result = L2CAP_CONF_UNACCEPT;
3371 		else {
3372 			chan->omtu = mtu;
3373 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3374 		}
3375 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3376 
3377 		if (remote_efs) {
3378 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3379 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3380 			    efs.stype != chan->local_stype) {
3381 
3382 				result = L2CAP_CONF_UNACCEPT;
3383 
3384 				if (chan->num_conf_req >= 1)
3385 					return -ECONNREFUSED;
3386 
3387 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3388 						   sizeof(efs),
3389 						   (unsigned long) &efs);
3390 			} else {
3391 				/* Send PENDING Conf Rsp */
3392 				result = L2CAP_CONF_PENDING;
3393 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3394 			}
3395 		}
3396 
3397 		switch (rfc.mode) {
3398 		case L2CAP_MODE_BASIC:
3399 			chan->fcs = L2CAP_FCS_NONE;
3400 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3401 			break;
3402 
3403 		case L2CAP_MODE_ERTM:
3404 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3405 				chan->remote_tx_win = rfc.txwin_size;
3406 			else
3407 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3408 
3409 			chan->remote_max_tx = rfc.max_transmit;
3410 
3411 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3412 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3413 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3414 			rfc.max_pdu_size = cpu_to_le16(size);
3415 			chan->remote_mps = size;
3416 
3417 			__l2cap_set_ertm_timeouts(chan, &rfc);
3418 
3419 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3420 
3421 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3422 					   sizeof(rfc), (unsigned long) &rfc);
3423 
3424 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3425 				chan->remote_id = efs.id;
3426 				chan->remote_stype = efs.stype;
3427 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3428 				chan->remote_flush_to =
3429 					le32_to_cpu(efs.flush_to);
3430 				chan->remote_acc_lat =
3431 					le32_to_cpu(efs.acc_lat);
3432 				chan->remote_sdu_itime =
3433 					le32_to_cpu(efs.sdu_itime);
3434 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3435 						   sizeof(efs),
3436 						   (unsigned long) &efs);
3437 			}
3438 			break;
3439 
3440 		case L2CAP_MODE_STREAMING:
3441 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3442 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3443 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3444 			rfc.max_pdu_size = cpu_to_le16(size);
3445 			chan->remote_mps = size;
3446 
3447 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3448 
3449 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3450 					   (unsigned long) &rfc);
3451 
3452 			break;
3453 
3454 		default:
3455 			result = L2CAP_CONF_UNACCEPT;
3456 
3457 			memset(&rfc, 0, sizeof(rfc));
3458 			rfc.mode = chan->mode;
3459 		}
3460 
3461 		if (result == L2CAP_CONF_SUCCESS)
3462 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3463 	}
3464 	rsp->scid   = cpu_to_le16(chan->dcid);
3465 	rsp->result = cpu_to_le16(result);
3466 	rsp->flags  = __constant_cpu_to_le16(0);
3467 
3468 	return ptr - data;
3469 }
3470 
3471 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3472 				void *data, u16 *result)
3473 {
3474 	struct l2cap_conf_req *req = data;
3475 	void *ptr = req->data;
3476 	int type, olen;
3477 	unsigned long val;
3478 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3479 	struct l2cap_conf_efs efs;
3480 
3481 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3482 
3483 	while (len >= L2CAP_CONF_OPT_SIZE) {
3484 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3485 
3486 		switch (type) {
3487 		case L2CAP_CONF_MTU:
3488 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3489 				*result = L2CAP_CONF_UNACCEPT;
3490 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3491 			} else
3492 				chan->imtu = val;
3493 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3494 			break;
3495 
3496 		case L2CAP_CONF_FLUSH_TO:
3497 			chan->flush_to = val;
3498 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3499 					   2, chan->flush_to);
3500 			break;
3501 
3502 		case L2CAP_CONF_RFC:
3503 			if (olen == sizeof(rfc))
3504 				memcpy(&rfc, (void *)val, olen);
3505 
3506 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3507 			    rfc.mode != chan->mode)
3508 				return -ECONNREFUSED;
3509 
3510 			chan->fcs = 0;
3511 
3512 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3513 					   sizeof(rfc), (unsigned long) &rfc);
3514 			break;
3515 
3516 		case L2CAP_CONF_EWS:
3517 			chan->ack_win = min_t(u16, val, chan->ack_win);
3518 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3519 					   chan->tx_win);
3520 			break;
3521 
3522 		case L2CAP_CONF_EFS:
3523 			if (olen == sizeof(efs))
3524 				memcpy(&efs, (void *)val, olen);
3525 
3526 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3527 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3528 			    efs.stype != chan->local_stype)
3529 				return -ECONNREFUSED;
3530 
3531 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3532 					   (unsigned long) &efs);
3533 			break;
3534 
3535 		case L2CAP_CONF_FCS:
3536 			if (*result == L2CAP_CONF_PENDING)
3537 				if (val == L2CAP_FCS_NONE)
3538 					set_bit(CONF_RECV_NO_FCS,
3539 						&chan->conf_state);
3540 			break;
3541 		}
3542 	}
3543 
3544 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3545 		return -ECONNREFUSED;
3546 
3547 	chan->mode = rfc.mode;
3548 
3549 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3550 		switch (rfc.mode) {
3551 		case L2CAP_MODE_ERTM:
3552 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3553 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3554 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3555 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3556 				chan->ack_win = min_t(u16, chan->ack_win,
3557 						      rfc.txwin_size);
3558 
3559 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3560 				chan->local_msdu = le16_to_cpu(efs.msdu);
3561 				chan->local_sdu_itime =
3562 					le32_to_cpu(efs.sdu_itime);
3563 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3564 				chan->local_flush_to =
3565 					le32_to_cpu(efs.flush_to);
3566 			}
3567 			break;
3568 
3569 		case L2CAP_MODE_STREAMING:
3570 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3571 		}
3572 	}
3573 
3574 	req->dcid   = cpu_to_le16(chan->dcid);
3575 	req->flags  = __constant_cpu_to_le16(0);
3576 
3577 	return ptr - data;
3578 }
3579 
3580 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3581 				u16 result, u16 flags)
3582 {
3583 	struct l2cap_conf_rsp *rsp = data;
3584 	void *ptr = rsp->data;
3585 
3586 	BT_DBG("chan %p", chan);
3587 
3588 	rsp->scid   = cpu_to_le16(chan->dcid);
3589 	rsp->result = cpu_to_le16(result);
3590 	rsp->flags  = cpu_to_le16(flags);
3591 
3592 	return ptr - data;
3593 }
3594 
3595 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3596 {
3597 	struct l2cap_conn_rsp rsp;
3598 	struct l2cap_conn *conn = chan->conn;
3599 	u8 buf[128];
3600 	u8 rsp_code;
3601 
3602 	rsp.scid   = cpu_to_le16(chan->dcid);
3603 	rsp.dcid   = cpu_to_le16(chan->scid);
3604 	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3605 	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3606 
3607 	if (chan->hs_hcon)
3608 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3609 	else
3610 		rsp_code = L2CAP_CONN_RSP;
3611 
3612 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3613 
3614 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3615 
3616 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3617 		return;
3618 
3619 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3620 		       l2cap_build_conf_req(chan, buf), buf);
3621 	chan->num_conf_req++;
3622 }
3623 
3624 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3625 {
3626 	int type, olen;
3627 	unsigned long val;
3628 	/* Use sane default values in case a misbehaving remote device
3629 	 * did not send an RFC or extended window size option.
3630 	 */
3631 	u16 txwin_ext = chan->ack_win;
3632 	struct l2cap_conf_rfc rfc = {
3633 		.mode = chan->mode,
3634 		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3635 		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3636 		.max_pdu_size = cpu_to_le16(chan->imtu),
3637 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3638 	};
3639 
3640 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3641 
3642 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3643 		return;
3644 
3645 	while (len >= L2CAP_CONF_OPT_SIZE) {
3646 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3647 
3648 		switch (type) {
3649 		case L2CAP_CONF_RFC:
3650 			if (olen == sizeof(rfc))
3651 				memcpy(&rfc, (void *)val, olen);
3652 			break;
3653 		case L2CAP_CONF_EWS:
3654 			txwin_ext = val;
3655 			break;
3656 		}
3657 	}
3658 
3659 	switch (rfc.mode) {
3660 	case L2CAP_MODE_ERTM:
3661 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3662 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3663 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
3664 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3665 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3666 		else
3667 			chan->ack_win = min_t(u16, chan->ack_win,
3668 					      rfc.txwin_size);
3669 		break;
3670 	case L2CAP_MODE_STREAMING:
3671 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3672 	}
3673 }
3674 
3675 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3676 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3677 				    u8 *data)
3678 {
3679 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3680 
3681 	if (cmd_len < sizeof(*rej))
3682 		return -EPROTO;
3683 
3684 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3685 		return 0;
3686 
3687 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3688 	    cmd->ident == conn->info_ident) {
3689 		cancel_delayed_work(&conn->info_timer);
3690 
3691 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3692 		conn->info_ident = 0;
3693 
3694 		l2cap_conn_start(conn);
3695 	}
3696 
3697 	return 0;
3698 }
3699 
3700 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3701 					struct l2cap_cmd_hdr *cmd,
3702 					u8 *data, u8 rsp_code, u8 amp_id)
3703 {
3704 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3705 	struct l2cap_conn_rsp rsp;
3706 	struct l2cap_chan *chan = NULL, *pchan;
3707 	int result, status = L2CAP_CS_NO_INFO;
3708 
3709 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3710 	__le16 psm = req->psm;
3711 
3712 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3713 
3714 	/* Check if we have socket listening on psm */
3715 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3716 					 &conn->hcon->dst);
3717 	if (!pchan) {
3718 		result = L2CAP_CR_BAD_PSM;
3719 		goto sendresp;
3720 	}
3721 
3722 	mutex_lock(&conn->chan_lock);
3723 	l2cap_chan_lock(pchan);
3724 
3725 	/* Check if the ACL is secure enough (if not SDP) */
3726 	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3727 	    !hci_conn_check_link_mode(conn->hcon)) {
3728 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3729 		result = L2CAP_CR_SEC_BLOCK;
3730 		goto response;
3731 	}
3732 
3733 	result = L2CAP_CR_NO_MEM;
3734 
3735 	/* Check if we already have channel with that dcid */
3736 	if (__l2cap_get_chan_by_dcid(conn, scid))
3737 		goto response;
3738 
3739 	chan = pchan->ops->new_connection(pchan);
3740 	if (!chan)
3741 		goto response;
3742 
3743 	/* For certain devices (e.g. a HID mouse), support for authentication,
3744 	 * pairing and bonding is optional. For such devices, in order to avoid
3745 	 * keeping the ACL alive for too long after L2CAP disconnection, reset
3746 	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3747 	 */
3748 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3749 
3750 	bacpy(&chan->src, &conn->hcon->src);
3751 	bacpy(&chan->dst, &conn->hcon->dst);
3752 	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3753 	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3754 	chan->psm  = psm;
3755 	chan->dcid = scid;
3756 	chan->local_amp_id = amp_id;
3757 
3758 	__l2cap_chan_add(conn, chan);
3759 
3760 	dcid = chan->scid;
3761 
3762 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3763 
3764 	chan->ident = cmd->ident;
3765 
3766 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3767 		if (l2cap_chan_check_security(chan)) {
3768 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3769 				l2cap_state_change(chan, BT_CONNECT2);
3770 				result = L2CAP_CR_PEND;
3771 				status = L2CAP_CS_AUTHOR_PEND;
3772 				chan->ops->defer(chan);
3773 			} else {
3774 				/* Force pending result for AMP controllers.
3775 				 * The connection will succeed after the
3776 				 * physical link is up.
3777 				 */
3778 				if (amp_id == AMP_ID_BREDR) {
3779 					l2cap_state_change(chan, BT_CONFIG);
3780 					result = L2CAP_CR_SUCCESS;
3781 				} else {
3782 					l2cap_state_change(chan, BT_CONNECT2);
3783 					result = L2CAP_CR_PEND;
3784 				}
3785 				status = L2CAP_CS_NO_INFO;
3786 			}
3787 		} else {
3788 			l2cap_state_change(chan, BT_CONNECT2);
3789 			result = L2CAP_CR_PEND;
3790 			status = L2CAP_CS_AUTHEN_PEND;
3791 		}
3792 	} else {
3793 		l2cap_state_change(chan, BT_CONNECT2);
3794 		result = L2CAP_CR_PEND;
3795 		status = L2CAP_CS_NO_INFO;
3796 	}
3797 
3798 response:
3799 	l2cap_chan_unlock(pchan);
3800 	mutex_unlock(&conn->chan_lock);
3801 
3802 sendresp:
3803 	rsp.scid   = cpu_to_le16(scid);
3804 	rsp.dcid   = cpu_to_le16(dcid);
3805 	rsp.result = cpu_to_le16(result);
3806 	rsp.status = cpu_to_le16(status);
3807 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3808 
3809 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3810 		struct l2cap_info_req info;
3811 		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3812 
3813 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3814 		conn->info_ident = l2cap_get_ident(conn);
3815 
3816 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3817 
3818 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3819 			       sizeof(info), &info);
3820 	}
3821 
3822 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3823 	    result == L2CAP_CR_SUCCESS) {
3824 		u8 buf[128];
3825 		set_bit(CONF_REQ_SENT, &chan->conf_state);
3826 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3827 			       l2cap_build_conf_req(chan, buf), buf);
3828 		chan->num_conf_req++;
3829 	}
3830 
3831 	return chan;
3832 }
3833 
3834 static int l2cap_connect_req(struct l2cap_conn *conn,
3835 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3836 {
3837 	struct hci_dev *hdev = conn->hcon->hdev;
3838 	struct hci_conn *hcon = conn->hcon;
3839 
3840 	if (cmd_len < sizeof(struct l2cap_conn_req))
3841 		return -EPROTO;
3842 
3843 	hci_dev_lock(hdev);
3844 	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3845 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3846 		mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3847 				      hcon->dst_type, 0, NULL, 0,
3848 				      hcon->dev_class);
3849 	hci_dev_unlock(hdev);
3850 
3851 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3852 	return 0;
3853 }
3854 
3855 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3856 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3857 				    u8 *data)
3858 {
3859 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3860 	u16 scid, dcid, result, status;
3861 	struct l2cap_chan *chan;
3862 	u8 req[128];
3863 	int err;
3864 
3865 	if (cmd_len < sizeof(*rsp))
3866 		return -EPROTO;
3867 
3868 	scid   = __le16_to_cpu(rsp->scid);
3869 	dcid   = __le16_to_cpu(rsp->dcid);
3870 	result = __le16_to_cpu(rsp->result);
3871 	status = __le16_to_cpu(rsp->status);
3872 
3873 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3874 	       dcid, scid, result, status);
3875 
3876 	mutex_lock(&conn->chan_lock);
3877 
3878 	if (scid) {
3879 		chan = __l2cap_get_chan_by_scid(conn, scid);
3880 		if (!chan) {
3881 			err = -EBADSLT;
3882 			goto unlock;
3883 		}
3884 	} else {
3885 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3886 		if (!chan) {
3887 			err = -EBADSLT;
3888 			goto unlock;
3889 		}
3890 	}
3891 
3892 	err = 0;
3893 
3894 	l2cap_chan_lock(chan);
3895 
3896 	switch (result) {
3897 	case L2CAP_CR_SUCCESS:
3898 		l2cap_state_change(chan, BT_CONFIG);
3899 		chan->ident = 0;
3900 		chan->dcid = dcid;
3901 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3902 
3903 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3904 			break;
3905 
3906 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3907 			       l2cap_build_conf_req(chan, req), req);
3908 		chan->num_conf_req++;
3909 		break;
3910 
3911 	case L2CAP_CR_PEND:
3912 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3913 		break;
3914 
3915 	default:
3916 		l2cap_chan_del(chan, ECONNREFUSED);
3917 		break;
3918 	}
3919 
3920 	l2cap_chan_unlock(chan);
3921 
3922 unlock:
3923 	mutex_unlock(&conn->chan_lock);
3924 
3925 	return err;
3926 }
3927 
3928 static inline void set_default_fcs(struct l2cap_chan *chan)
3929 {
3930 	/* FCS is enabled only in ERTM or streaming mode, if one or both
3931 	 * sides request it.
3932 	 */
3933 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3934 		chan->fcs = L2CAP_FCS_NONE;
3935 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3936 		chan->fcs = L2CAP_FCS_CRC16;
3937 }
3938 
3939 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3940 				    u8 ident, u16 flags)
3941 {
3942 	struct l2cap_conn *conn = chan->conn;
3943 
3944 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3945 	       flags);
3946 
3947 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3948 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3949 
3950 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3951 		       l2cap_build_conf_rsp(chan, data,
3952 					    L2CAP_CONF_SUCCESS, flags), data);
3953 }
3954 
3955 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3956 				   u16 scid, u16 dcid)
3957 {
3958 	struct l2cap_cmd_rej_cid rej;
3959 
3960 	rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3961 	rej.scid = __cpu_to_le16(scid);
3962 	rej.dcid = __cpu_to_le16(dcid);
3963 
3964 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3965 }
3966 
3967 static inline int l2cap_config_req(struct l2cap_conn *conn,
3968 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3969 				   u8 *data)
3970 {
3971 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3972 	u16 dcid, flags;
3973 	u8 rsp[64];
3974 	struct l2cap_chan *chan;
3975 	int len, err = 0;
3976 
3977 	if (cmd_len < sizeof(*req))
3978 		return -EPROTO;
3979 
3980 	dcid  = __le16_to_cpu(req->dcid);
3981 	flags = __le16_to_cpu(req->flags);
3982 
3983 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3984 
3985 	chan = l2cap_get_chan_by_scid(conn, dcid);
3986 	if (!chan) {
3987 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
3988 		return 0;
3989 	}
3990 
3991 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3992 		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
3993 				       chan->dcid);
3994 		goto unlock;
3995 	}
3996 
3997 	/* Reject if config buffer is too small. */
3998 	len = cmd_len - sizeof(*req);
3999 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
4000 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4001 			       l2cap_build_conf_rsp(chan, rsp,
4002 			       L2CAP_CONF_REJECT, flags), rsp);
4003 		goto unlock;
4004 	}
4005 
4006 	/* Store config. */
4007 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
4008 	chan->conf_len += len;
4009 
4010 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4011 		/* Incomplete config. Send empty response. */
4012 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4013 			       l2cap_build_conf_rsp(chan, rsp,
4014 			       L2CAP_CONF_SUCCESS, flags), rsp);
4015 		goto unlock;
4016 	}
4017 
4018 	/* Complete config. */
4019 	len = l2cap_parse_conf_req(chan, rsp);
4020 	if (len < 0) {
4021 		l2cap_send_disconn_req(chan, ECONNRESET);
4022 		goto unlock;
4023 	}
4024 
4025 	chan->ident = cmd->ident;
4026 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4027 	chan->num_conf_rsp++;
4028 
4029 	/* Reset config buffer. */
4030 	chan->conf_len = 0;
4031 
4032 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4033 		goto unlock;
4034 
4035 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4036 		set_default_fcs(chan);
4037 
4038 		if (chan->mode == L2CAP_MODE_ERTM ||
4039 		    chan->mode == L2CAP_MODE_STREAMING)
4040 			err = l2cap_ertm_init(chan);
4041 
4042 		if (err < 0)
4043 			l2cap_send_disconn_req(chan, -err);
4044 		else
4045 			l2cap_chan_ready(chan);
4046 
4047 		goto unlock;
4048 	}
4049 
4050 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4051 		u8 buf[64];
4052 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4053 			       l2cap_build_conf_req(chan, buf), buf);
4054 		chan->num_conf_req++;
4055 	}
4056 
4057 	/* Got Conf Rsp PENDING from remote side and assume we sent
4058 	   Conf Rsp PENDING in the code above */
4059 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4060 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4061 
4062 		/* check compatibility */
4063 
4064 		/* Send rsp for BR/EDR channel */
4065 		if (!chan->hs_hcon)
4066 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4067 		else
4068 			chan->ident = cmd->ident;
4069 	}
4070 
4071 unlock:
4072 	l2cap_chan_unlock(chan);
4073 	return err;
4074 }
4075 
4076 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4077 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4078 				   u8 *data)
4079 {
4080 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4081 	u16 scid, flags, result;
4082 	struct l2cap_chan *chan;
4083 	int len = cmd_len - sizeof(*rsp);
4084 	int err = 0;
4085 
4086 	if (cmd_len < sizeof(*rsp))
4087 		return -EPROTO;
4088 
4089 	scid   = __le16_to_cpu(rsp->scid);
4090 	flags  = __le16_to_cpu(rsp->flags);
4091 	result = __le16_to_cpu(rsp->result);
4092 
4093 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4094 	       result, len);
4095 
4096 	chan = l2cap_get_chan_by_scid(conn, scid);
4097 	if (!chan)
4098 		return 0;
4099 
4100 	switch (result) {
4101 	case L2CAP_CONF_SUCCESS:
4102 		l2cap_conf_rfc_get(chan, rsp->data, len);
4103 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4104 		break;
4105 
4106 	case L2CAP_CONF_PENDING:
4107 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4108 
4109 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4110 			char buf[64];
4111 
4112 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4113 						   buf, &result);
4114 			if (len < 0) {
4115 				l2cap_send_disconn_req(chan, ECONNRESET);
4116 				goto done;
4117 			}
4118 
4119 			if (!chan->hs_hcon) {
4120 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4121 							0);
4122 			} else {
4123 				if (l2cap_check_efs(chan)) {
4124 					amp_create_logical_link(chan);
4125 					chan->ident = cmd->ident;
4126 				}
4127 			}
4128 		}
4129 		goto done;
4130 
4131 	case L2CAP_CONF_UNACCEPT:
4132 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4133 			char req[64];
4134 
4135 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4136 				l2cap_send_disconn_req(chan, ECONNRESET);
4137 				goto done;
4138 			}
4139 
4140 			/* throw out any old stored conf requests */
4141 			result = L2CAP_CONF_SUCCESS;
4142 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4143 						   req, &result);
4144 			if (len < 0) {
4145 				l2cap_send_disconn_req(chan, ECONNRESET);
4146 				goto done;
4147 			}
4148 
4149 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4150 				       L2CAP_CONF_REQ, len, req);
4151 			chan->num_conf_req++;
4152 			if (result != L2CAP_CONF_SUCCESS)
4153 				goto done;
4154 			break;
4155 		}
4156 
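		/* Fall through when chan->num_conf_rsp has exceeded
		 * L2CAP_CONF_MAX_CONF_RSP; the default case tears the
		 * channel down.
		 */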
4157 	default:
4158 		l2cap_chan_set_err(chan, ECONNRESET);
4159 
4160 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4161 		l2cap_send_disconn_req(chan, ECONNRESET);
4162 		goto done;
4163 	}
4164 
4165 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4166 		goto done;
4167 
4168 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4169 
4170 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4171 		set_default_fcs(chan);
4172 
4173 		if (chan->mode == L2CAP_MODE_ERTM ||
4174 		    chan->mode == L2CAP_MODE_STREAMING)
4175 			err = l2cap_ertm_init(chan);
4176 
4177 		if (err < 0)
4178 			l2cap_send_disconn_req(chan, -err);
4179 		else
4180 			l2cap_chan_ready(chan);
4181 	}
4182 
4183 done:
4184 	l2cap_chan_unlock(chan);
4185 	return err;
4186 }
4187 
4188 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4189 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4190 				       u8 *data)
4191 {
4192 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4193 	struct l2cap_disconn_rsp rsp;
4194 	u16 dcid, scid;
4195 	struct l2cap_chan *chan;
4196 
4197 	if (cmd_len != sizeof(*req))
4198 		return -EPROTO;
4199 
4200 	scid = __le16_to_cpu(req->scid);
4201 	dcid = __le16_to_cpu(req->dcid);
4202 
4203 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4204 
4205 	mutex_lock(&conn->chan_lock);
4206 
4207 	chan = __l2cap_get_chan_by_scid(conn, dcid);
4208 	if (!chan) {
4209 		mutex_unlock(&conn->chan_lock);
4210 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4211 		return 0;
4212 	}
4213 
4214 	l2cap_chan_lock(chan);
4215 
4216 	rsp.dcid = cpu_to_le16(chan->scid);
4217 	rsp.scid = cpu_to_le16(chan->dcid);
4218 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4219 
4220 	chan->ops->set_shutdown(chan);
4221 
4222 	l2cap_chan_hold(chan);
4223 	l2cap_chan_del(chan, ECONNRESET);
4224 
4225 	l2cap_chan_unlock(chan);
4226 
4227 	chan->ops->close(chan);
4228 	l2cap_chan_put(chan);
4229 
4230 	mutex_unlock(&conn->chan_lock);
4231 
4232 	return 0;
4233 }
4234 
4235 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4236 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4237 				       u8 *data)
4238 {
4239 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4240 	u16 dcid, scid;
4241 	struct l2cap_chan *chan;
4242 
4243 	if (cmd_len != sizeof(*rsp))
4244 		return -EPROTO;
4245 
4246 	scid = __le16_to_cpu(rsp->scid);
4247 	dcid = __le16_to_cpu(rsp->dcid);
4248 
4249 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4250 
4251 	mutex_lock(&conn->chan_lock);
4252 
4253 	chan = __l2cap_get_chan_by_scid(conn, scid);
4254 	if (!chan) {
4255 		mutex_unlock(&conn->chan_lock);
4256 		return 0;
4257 	}
4258 
4259 	l2cap_chan_lock(chan);
4260 
4261 	l2cap_chan_hold(chan);
4262 	l2cap_chan_del(chan, 0);
4263 
4264 	l2cap_chan_unlock(chan);
4265 
4266 	chan->ops->close(chan);
4267 	l2cap_chan_put(chan);
4268 
4269 	mutex_unlock(&conn->chan_lock);
4270 
4271 	return 0;
4272 }
4273 
4274 static inline int l2cap_information_req(struct l2cap_conn *conn,
4275 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4276 					u8 *data)
4277 {
4278 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4279 	u16 type;
4280 
4281 	if (cmd_len != sizeof(*req))
4282 		return -EPROTO;
4283 
4284 	type = __le16_to_cpu(req->type);
4285 
4286 	BT_DBG("type 0x%4.4x", type);
4287 
4288 	if (type == L2CAP_IT_FEAT_MASK) {
4289 		u8 buf[8];
4290 		u32 feat_mask = l2cap_feat_mask;
4291 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4292 		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4293 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4294 		if (!disable_ertm)
4295 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4296 				| L2CAP_FEAT_FCS;
4297 		if (conn->hs_enabled)
4298 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4299 				| L2CAP_FEAT_EXT_WINDOW;
4300 
4301 		put_unaligned_le32(feat_mask, rsp->data);
4302 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4303 			       buf);
4304 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4305 		u8 buf[12];
4306 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4307 
4308 		if (conn->hs_enabled)
4309 			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4310 		else
4311 			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4312 
4313 		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4314 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4315 		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4316 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4317 			       buf);
4318 	} else {
4319 		struct l2cap_info_rsp rsp;
4320 		rsp.type   = cpu_to_le16(type);
4321 		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4322 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4323 			       &rsp);
4324 	}
4325 
4326 	return 0;
4327 }
4328 
4329 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4330 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4331 					u8 *data)
4332 {
4333 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4334 	u16 type, result;
4335 
4336 	if (cmd_len < sizeof(*rsp))
4337 		return -EPROTO;
4338 
4339 	type   = __le16_to_cpu(rsp->type);
4340 	result = __le16_to_cpu(rsp->result);
4341 
4342 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4343 
4344 	/* L2CAP Info req/rsp are not bound to a channel, so add extra checks */
4345 	if (cmd->ident != conn->info_ident ||
4346 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4347 		return 0;
4348 
4349 	cancel_delayed_work(&conn->info_timer);
4350 
4351 	if (result != L2CAP_IR_SUCCESS) {
4352 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4353 		conn->info_ident = 0;
4354 
4355 		l2cap_conn_start(conn);
4356 
4357 		return 0;
4358 	}
4359 
4360 	switch (type) {
4361 	case L2CAP_IT_FEAT_MASK:
4362 		conn->feat_mask = get_unaligned_le32(rsp->data);
4363 
4364 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4365 			struct l2cap_info_req req;
4366 			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4367 
4368 			conn->info_ident = l2cap_get_ident(conn);
4369 
4370 			l2cap_send_cmd(conn, conn->info_ident,
4371 				       L2CAP_INFO_REQ, sizeof(req), &req);
4372 		} else {
4373 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4374 			conn->info_ident = 0;
4375 
4376 			l2cap_conn_start(conn);
4377 		}
4378 		break;
4379 
4380 	case L2CAP_IT_FIXED_CHAN:
4381 		conn->fixed_chan_mask = rsp->data[0];
4382 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4383 		conn->info_ident = 0;
4384 
4385 		l2cap_conn_start(conn);
4386 		break;
4387 	}
4388 
4389 	return 0;
4390 }
4391 
4392 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4393 				    struct l2cap_cmd_hdr *cmd,
4394 				    u16 cmd_len, void *data)
4395 {
4396 	struct l2cap_create_chan_req *req = data;
4397 	struct l2cap_create_chan_rsp rsp;
4398 	struct l2cap_chan *chan;
4399 	struct hci_dev *hdev;
4400 	u16 psm, scid;
4401 
4402 	if (cmd_len != sizeof(*req))
4403 		return -EPROTO;
4404 
4405 	if (!conn->hs_enabled)
4406 		return -EINVAL;
4407 
4408 	psm = le16_to_cpu(req->psm);
4409 	scid = le16_to_cpu(req->scid);
4410 
4411 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4412 
4413 	/* For controller id 0 make BR/EDR connection */
4414 	if (req->amp_id == AMP_ID_BREDR) {
4415 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4416 			      req->amp_id);
4417 		return 0;
4418 	}
4419 
4420 	/* Validate AMP controller id */
4421 	hdev = hci_dev_get(req->amp_id);
4422 	if (!hdev)
4423 		goto error;
4424 
4425 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4426 		hci_dev_put(hdev);
4427 		goto error;
4428 	}
4429 
4430 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4431 			     req->amp_id);
4432 	if (chan) {
4433 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4434 		struct hci_conn *hs_hcon;
4435 
4436 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4437 						  &conn->hcon->dst);
4438 		if (!hs_hcon) {
4439 			hci_dev_put(hdev);
4440 			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4441 					       chan->dcid);
4442 			return 0;
4443 		}
4444 
4445 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4446 
4447 		mgr->bredr_chan = chan;
4448 		chan->hs_hcon = hs_hcon;
4449 		chan->fcs = L2CAP_FCS_NONE;
4450 		conn->mtu = hdev->block_mtu;
4451 	}
4452 
4453 	hci_dev_put(hdev);
4454 
4455 	return 0;
4456 
4457 error:
4458 	rsp.dcid = 0;
4459 	rsp.scid = cpu_to_le16(scid);
4460 	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4461 	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4462 
4463 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4464 		       sizeof(rsp), &rsp);
4465 
4466 	return 0;
4467 }
4468 
4469 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4470 {
4471 	struct l2cap_move_chan_req req;
4472 	u8 ident;
4473 
4474 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4475 
4476 	ident = l2cap_get_ident(chan->conn);
4477 	chan->ident = ident;
4478 
4479 	req.icid = cpu_to_le16(chan->scid);
4480 	req.dest_amp_id = dest_amp_id;
4481 
4482 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4483 		       &req);
4484 
4485 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4486 }
4487 
4488 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4489 {
4490 	struct l2cap_move_chan_rsp rsp;
4491 
4492 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4493 
4494 	rsp.icid = cpu_to_le16(chan->dcid);
4495 	rsp.result = cpu_to_le16(result);
4496 
4497 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4498 		       sizeof(rsp), &rsp);
4499 }
4500 
4501 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4502 {
4503 	struct l2cap_move_chan_cfm cfm;
4504 
4505 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4506 
4507 	chan->ident = l2cap_get_ident(chan->conn);
4508 
4509 	cfm.icid = cpu_to_le16(chan->scid);
4510 	cfm.result = cpu_to_le16(result);
4511 
4512 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4513 		       sizeof(cfm), &cfm);
4514 
4515 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4516 }
4517 
4518 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4519 {
4520 	struct l2cap_move_chan_cfm cfm;
4521 
4522 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4523 
4524 	cfm.icid = cpu_to_le16(icid);
4525 	cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4526 
4527 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4528 		       sizeof(cfm), &cfm);
4529 }
4530 
4531 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4532 					 u16 icid)
4533 {
4534 	struct l2cap_move_chan_cfm_rsp rsp;
4535 
4536 	BT_DBG("icid 0x%4.4x", icid);
4537 
4538 	rsp.icid = cpu_to_le16(icid);
4539 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4540 }
4541 
4542 static void __release_logical_link(struct l2cap_chan *chan)
4543 {
4544 	chan->hs_hchan = NULL;
4545 	chan->hs_hcon = NULL;
4546 
4547 	/* Placeholder - release the logical link */
4548 }
4549 
4550 static void l2cap_logical_fail(struct l2cap_chan *chan)
4551 {
4552 	/* Logical link setup failed */
4553 	if (chan->state != BT_CONNECTED) {
4554 		/* Create channel failure, disconnect */
4555 		l2cap_send_disconn_req(chan, ECONNRESET);
4556 		return;
4557 	}
4558 
4559 	switch (chan->move_role) {
4560 	case L2CAP_MOVE_ROLE_RESPONDER:
4561 		l2cap_move_done(chan);
4562 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4563 		break;
4564 	case L2CAP_MOVE_ROLE_INITIATOR:
4565 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4566 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4567 			/* Remote has only sent pending or
4568 			 * success responses, clean up
4569 			 */
4570 			l2cap_move_done(chan);
4571 		}
4572 
4573 		/* Other amp move states imply that the move
4574 		 * has already aborted
4575 		 */
4576 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4577 		break;
4578 	}
4579 }
4580 
4581 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4582 					struct hci_chan *hchan)
4583 {
4584 	struct l2cap_conf_rsp rsp;
4585 
4586 	chan->hs_hchan = hchan;
4587 	chan->hs_hcon->l2cap_data = chan->conn;
4588 
4589 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4590 
4591 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4592 		int err;
4593 
4594 		set_default_fcs(chan);
4595 
4596 		err = l2cap_ertm_init(chan);
4597 		if (err < 0)
4598 			l2cap_send_disconn_req(chan, -err);
4599 		else
4600 			l2cap_chan_ready(chan);
4601 	}
4602 }
4603 
4604 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4605 				      struct hci_chan *hchan)
4606 {
4607 	chan->hs_hcon = hchan->conn;
4608 	chan->hs_hcon->l2cap_data = chan->conn;
4609 
4610 	BT_DBG("move_state %d", chan->move_state);
4611 
4612 	switch (chan->move_state) {
4613 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4614 		/* Move confirm will be sent after a success
4615 		 * response is received
4616 		 */
4617 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4618 		break;
4619 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4620 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4621 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4622 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4623 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4624 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4625 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4626 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4627 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4628 		}
4629 		break;
4630 	default:
4631 		/* Move was not in an expected state; release the logical link */
4632 		__release_logical_link(chan);
4633 
4634 		chan->move_state = L2CAP_MOVE_STABLE;
4635 	}
4636 }
4637 
4638 /* Call with chan locked */
4639 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4640 		       u8 status)
4641 {
4642 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4643 
4644 	if (status) {
4645 		l2cap_logical_fail(chan);
4646 		__release_logical_link(chan);
4647 		return;
4648 	}
4649 
4650 	if (chan->state != BT_CONNECTED) {
4651 		/* Ignore logical link if channel is on BR/EDR */
4652 		if (chan->local_amp_id != AMP_ID_BREDR)
4653 			l2cap_logical_finish_create(chan, hchan);
4654 	} else {
4655 		l2cap_logical_finish_move(chan, hchan);
4656 	}
4657 }
4658 
4659 void l2cap_move_start(struct l2cap_chan *chan)
4660 {
4661 	BT_DBG("chan %p", chan);
4662 
4663 	if (chan->local_amp_id == AMP_ID_BREDR) {
4664 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4665 			return;
4666 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4667 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4668 		/* Placeholder - start physical link setup */
4669 	} else {
4670 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4671 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4672 		chan->move_id = 0;
4673 		l2cap_move_setup(chan);
4674 		l2cap_send_move_chan_req(chan, 0);
4675 	}
4676 }
4677 
4678 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4679 			    u8 local_amp_id, u8 remote_amp_id)
4680 {
4681 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4682 	       local_amp_id, remote_amp_id);
4683 
4684 	chan->fcs = L2CAP_FCS_NONE;
4685 
4686 	/* Outgoing channel on AMP */
4687 	if (chan->state == BT_CONNECT) {
4688 		if (result == L2CAP_CR_SUCCESS) {
4689 			chan->local_amp_id = local_amp_id;
4690 			l2cap_send_create_chan_req(chan, remote_amp_id);
4691 		} else {
4692 			/* Revert to BR/EDR connect */
4693 			l2cap_send_conn_req(chan);
4694 		}
4695 
4696 		return;
4697 	}
4698 
4699 	/* Incoming channel on AMP */
4700 	if (__l2cap_no_conn_pending(chan)) {
4701 		struct l2cap_conn_rsp rsp;
4702 		char buf[128];
4703 		rsp.scid = cpu_to_le16(chan->dcid);
4704 		rsp.dcid = cpu_to_le16(chan->scid);
4705 
4706 		if (result == L2CAP_CR_SUCCESS) {
4707 			/* Send successful response */
4708 			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4709 			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4710 		} else {
4711 			/* Send negative response */
4712 			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4713 			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4714 		}
4715 
4716 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4717 			       sizeof(rsp), &rsp);
4718 
4719 		if (result == L2CAP_CR_SUCCESS) {
4720 			l2cap_state_change(chan, BT_CONFIG);
4721 			set_bit(CONF_REQ_SENT, &chan->conf_state);
4722 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4723 				       L2CAP_CONF_REQ,
4724 				       l2cap_build_conf_req(chan, buf), buf);
4725 			chan->num_conf_req++;
4726 		}
4727 	}
4728 }
4729 
4730 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4731 				   u8 remote_amp_id)
4732 {
4733 	l2cap_move_setup(chan);
4734 	chan->move_id = local_amp_id;
4735 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
4736 
4737 	l2cap_send_move_chan_req(chan, remote_amp_id);
4738 }
4739 
4740 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4741 {
4742 	struct hci_chan *hchan = NULL;
4743 
4744 	/* Placeholder - get hci_chan for logical link */
4745 
4746 	if (hchan) {
4747 		if (hchan->state == BT_CONNECTED) {
4748 			/* Logical link is ready to go */
4749 			chan->hs_hcon = hchan->conn;
4750 			chan->hs_hcon->l2cap_data = chan->conn;
4751 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4752 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4753 
4754 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4755 		} else {
4756 			/* Wait for logical link to be ready */
4757 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4758 		}
4759 	} else {
4760 		/* Logical link not available */
4761 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4762 	}
4763 }
4764 
4765 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4766 {
4767 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4768 		u8 rsp_result;
4769 		if (result == -EINVAL)
4770 			rsp_result = L2CAP_MR_BAD_ID;
4771 		else
4772 			rsp_result = L2CAP_MR_NOT_ALLOWED;
4773 
4774 		l2cap_send_move_chan_rsp(chan, rsp_result);
4775 	}
4776 
4777 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
4778 	chan->move_state = L2CAP_MOVE_STABLE;
4779 
4780 	/* Restart data transmission */
4781 	l2cap_ertm_send(chan);
4782 }
4783 
4784 /* Invoke with locked chan */
4785 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4786 {
4787 	u8 local_amp_id = chan->local_amp_id;
4788 	u8 remote_amp_id = chan->remote_amp_id;
4789 
4790 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4791 	       chan, result, local_amp_id, remote_amp_id);
4792 
4793 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4794 		l2cap_chan_unlock(chan);
4795 		return;
4796 	}
4797 
4798 	if (chan->state != BT_CONNECTED) {
4799 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4800 	} else if (result != L2CAP_MR_SUCCESS) {
4801 		l2cap_do_move_cancel(chan, result);
4802 	} else {
4803 		switch (chan->move_role) {
4804 		case L2CAP_MOVE_ROLE_INITIATOR:
4805 			l2cap_do_move_initiate(chan, local_amp_id,
4806 					       remote_amp_id);
4807 			break;
4808 		case L2CAP_MOVE_ROLE_RESPONDER:
4809 			l2cap_do_move_respond(chan, result);
4810 			break;
4811 		default:
4812 			l2cap_do_move_cancel(chan, result);
4813 			break;
4814 		}
4815 	}
4816 }
4817 
4818 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4819 					 struct l2cap_cmd_hdr *cmd,
4820 					 u16 cmd_len, void *data)
4821 {
4822 	struct l2cap_move_chan_req *req = data;
4823 	struct l2cap_move_chan_rsp rsp;
4824 	struct l2cap_chan *chan;
4825 	u16 icid = 0;
4826 	u16 result = L2CAP_MR_NOT_ALLOWED;
4827 
4828 	if (cmd_len != sizeof(*req))
4829 		return -EPROTO;
4830 
4831 	icid = le16_to_cpu(req->icid);
4832 
4833 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4834 
4835 	if (!conn->hs_enabled)
4836 		return -EINVAL;
4837 
4838 	chan = l2cap_get_chan_by_dcid(conn, icid);
4839 	if (!chan) {
4840 		rsp.icid = cpu_to_le16(icid);
4841 		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4842 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4843 			       sizeof(rsp), &rsp);
4844 		return 0;
4845 	}
4846 
4847 	chan->ident = cmd->ident;
4848 
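	/* Only dynamically allocated ERTM or streaming channels that are
	 * not restricted to BR/EDR by channel policy are eligible to move.
	 */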
4849 	if (chan->scid < L2CAP_CID_DYN_START ||
4850 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4851 	    (chan->mode != L2CAP_MODE_ERTM &&
4852 	     chan->mode != L2CAP_MODE_STREAMING)) {
4853 		result = L2CAP_MR_NOT_ALLOWED;
4854 		goto send_move_response;
4855 	}
4856 
4857 	if (chan->local_amp_id == req->dest_amp_id) {
4858 		result = L2CAP_MR_SAME_ID;
4859 		goto send_move_response;
4860 	}
4861 
4862 	if (req->dest_amp_id != AMP_ID_BREDR) {
4863 		struct hci_dev *hdev;
4864 		hdev = hci_dev_get(req->dest_amp_id);
4865 		if (!hdev || hdev->dev_type != HCI_AMP ||
4866 		    !test_bit(HCI_UP, &hdev->flags)) {
4867 			if (hdev)
4868 				hci_dev_put(hdev);
4869 
4870 			result = L2CAP_MR_BAD_ID;
4871 			goto send_move_response;
4872 		}
4873 		hci_dev_put(hdev);
4874 	}
4875 
4876 	/* Detect a move collision.  Only send a collision response
4877 	 * if this side has "lost", otherwise proceed with the move.
4878 	 * The winner has the larger bd_addr.
4879 	 */
4880 	if ((__chan_is_moving(chan) ||
4881 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4882 	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4883 		result = L2CAP_MR_COLLISION;
4884 		goto send_move_response;
4885 	}
4886 
4887 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4888 	l2cap_move_setup(chan);
4889 	chan->move_id = req->dest_amp_id;
4890 	icid = chan->dcid;
4891 
4892 	if (req->dest_amp_id == AMP_ID_BREDR) {
4893 		/* Moving to BR/EDR */
4894 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4895 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4896 			result = L2CAP_MR_PEND;
4897 		} else {
4898 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4899 			result = L2CAP_MR_SUCCESS;
4900 		}
4901 	} else {
4902 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4903 		/* Placeholder - uncomment when amp functions are available */
4904 		/*amp_accept_physical(chan, req->dest_amp_id);*/
4905 		result = L2CAP_MR_PEND;
4906 	}
4907 
4908 send_move_response:
4909 	l2cap_send_move_chan_rsp(chan, result);
4910 
4911 	l2cap_chan_unlock(chan);
4912 
4913 	return 0;
4914 }
4915 
4916 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4917 {
4918 	struct l2cap_chan *chan;
4919 	struct hci_chan *hchan = NULL;
4920 
4921 	chan = l2cap_get_chan_by_scid(conn, icid);
4922 	if (!chan) {
4923 		l2cap_send_move_chan_cfm_icid(conn, icid);
4924 		return;
4925 	}
4926 
4927 	__clear_chan_timer(chan);
4928 	if (result == L2CAP_MR_PEND)
4929 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4930 
4931 	switch (chan->move_state) {
4932 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4933 		/* Move confirm will be sent when logical link
4934 		 * is complete.
4935 		 */
4936 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4937 		break;
4938 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4939 		if (result == L2CAP_MR_PEND) {
4940 			break;
4941 		} else if (test_bit(CONN_LOCAL_BUSY,
4942 				    &chan->conn_state)) {
4943 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4944 		} else {
4945 			/* Logical link is up or moving to BR/EDR,
4946 			 * proceed with move
4947 			 */
4948 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4949 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4950 		}
4951 		break;
4952 	case L2CAP_MOVE_WAIT_RSP:
4953 		/* Moving to AMP */
4954 		if (result == L2CAP_MR_SUCCESS) {
4955 			/* Remote is ready, send confirm immediately
4956 			 * after logical link is ready
4957 			 */
4958 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4959 		} else {
4960 			/* Both logical link and move success
4961 			 * are required to confirm
4962 			 */
4963 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4964 		}
4965 
4966 		/* Placeholder - get hci_chan for logical link */
4967 		if (!hchan) {
4968 			/* Logical link not available */
4969 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4970 			break;
4971 		}
4972 
4973 		/* If the logical link is not yet connected, do not
4974 		 * send confirmation.
4975 		 */
4976 		if (hchan->state != BT_CONNECTED)
4977 			break;
4978 
4979 		/* Logical link is already ready to go */
4980 
4981 		chan->hs_hcon = hchan->conn;
4982 		chan->hs_hcon->l2cap_data = chan->conn;
4983 
4984 		if (result == L2CAP_MR_SUCCESS) {
4985 			/* Can confirm now */
4986 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4987 		} else {
4988 			/* Now only need move success
4989 			 * to confirm
4990 			 */
4991 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4992 		}
4993 
4994 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4995 		break;
4996 	default:
4997 		/* Any other amp move state means the move failed. */
4998 		chan->move_id = chan->local_amp_id;
4999 		l2cap_move_done(chan);
5000 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5001 	}
5002 
5003 	l2cap_chan_unlock(chan);
5004 }
5005 
5006 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5007 			    u16 result)
5008 {
5009 	struct l2cap_chan *chan;
5010 
5011 	chan = l2cap_get_chan_by_ident(conn, ident);
5012 	if (!chan) {
5013 		/* Could not locate channel, icid is best guess */
5014 		l2cap_send_move_chan_cfm_icid(conn, icid);
5015 		return;
5016 	}
5017 
5018 	__clear_chan_timer(chan);
5019 
5020 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5021 		if (result == L2CAP_MR_COLLISION) {
5022 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5023 		} else {
5024 			/* Cleanup - cancel move */
5025 			chan->move_id = chan->local_amp_id;
5026 			l2cap_move_done(chan);
5027 		}
5028 	}
5029 
5030 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5031 
5032 	l2cap_chan_unlock(chan);
5033 }
5034 
5035 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5036 				  struct l2cap_cmd_hdr *cmd,
5037 				  u16 cmd_len, void *data)
5038 {
5039 	struct l2cap_move_chan_rsp *rsp = data;
5040 	u16 icid, result;
5041 
5042 	if (cmd_len != sizeof(*rsp))
5043 		return -EPROTO;
5044 
5045 	icid = le16_to_cpu(rsp->icid);
5046 	result = le16_to_cpu(rsp->result);
5047 
5048 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5049 
5050 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5051 		l2cap_move_continue(conn, icid, result);
5052 	else
5053 		l2cap_move_fail(conn, cmd->ident, icid, result);
5054 
5055 	return 0;
5056 }
5057 
5058 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5059 				      struct l2cap_cmd_hdr *cmd,
5060 				      u16 cmd_len, void *data)
5061 {
5062 	struct l2cap_move_chan_cfm *cfm = data;
5063 	struct l2cap_chan *chan;
5064 	u16 icid, result;
5065 
5066 	if (cmd_len != sizeof(*cfm))
5067 		return -EPROTO;
5068 
5069 	icid = le16_to_cpu(cfm->icid);
5070 	result = le16_to_cpu(cfm->result);
5071 
5072 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5073 
5074 	chan = l2cap_get_chan_by_dcid(conn, icid);
5075 	if (!chan) {
5076 		/* Spec requires a response even if the icid was not found */
5077 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5078 		return 0;
5079 	}
5080 
5081 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5082 		if (result == L2CAP_MC_CONFIRMED) {
5083 			chan->local_amp_id = chan->move_id;
5084 			if (chan->local_amp_id == AMP_ID_BREDR)
5085 				__release_logical_link(chan);
5086 		} else {
5087 			chan->move_id = chan->local_amp_id;
5088 		}
5089 
5090 		l2cap_move_done(chan);
5091 	}
5092 
5093 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5094 
5095 	l2cap_chan_unlock(chan);
5096 
5097 	return 0;
5098 }
5099 
5100 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5101 						 struct l2cap_cmd_hdr *cmd,
5102 						 u16 cmd_len, void *data)
5103 {
5104 	struct l2cap_move_chan_cfm_rsp *rsp = data;
5105 	struct l2cap_chan *chan;
5106 	u16 icid;
5107 
5108 	if (cmd_len != sizeof(*rsp))
5109 		return -EPROTO;
5110 
5111 	icid = le16_to_cpu(rsp->icid);
5112 
5113 	BT_DBG("icid 0x%4.4x", icid);
5114 
5115 	chan = l2cap_get_chan_by_scid(conn, icid);
5116 	if (!chan)
5117 		return 0;
5118 
5119 	__clear_chan_timer(chan);
5120 
5121 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5122 		chan->local_amp_id = chan->move_id;
5123 
5124 		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5125 			__release_logical_link(chan);
5126 
5127 		l2cap_move_done(chan);
5128 	}
5129 
5130 	l2cap_chan_unlock(chan);
5131 
5132 	return 0;
5133 }
5134 
5135 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5136 					 u16 to_multiplier)
5137 {
5138 	u16 max_latency;
5139 
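	/* min/max are connection intervals in 1.25 ms units, latency is a
	 * number of connection events and to_multiplier is the supervision
	 * timeout in 10 ms units.  The checks below reject combinations
	 * where the timeout could expire within the allowed slave latency.
	 */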
5140 	if (min > max || min < 6 || max > 3200)
5141 		return -EINVAL;
5142 
5143 	if (to_multiplier < 10 || to_multiplier > 3200)
5144 		return -EINVAL;
5145 
5146 	if (max >= to_multiplier * 8)
5147 		return -EINVAL;
5148 
5149 	max_latency = (to_multiplier * 8 / max) - 1;
5150 	if (latency > 499 || latency > max_latency)
5151 		return -EINVAL;
5152 
5153 	return 0;
5154 }
5155 
5156 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5157 					      struct l2cap_cmd_hdr *cmd,
5158 					      u8 *data)
5159 {
5160 	struct hci_conn *hcon = conn->hcon;
5161 	struct l2cap_conn_param_update_req *req;
5162 	struct l2cap_conn_param_update_rsp rsp;
5163 	u16 min, max, latency, to_multiplier, cmd_len;
5164 	int err;
5165 
5166 	if (!(hcon->link_mode & HCI_LM_MASTER))
5167 		return -EINVAL;
5168 
5169 	cmd_len = __le16_to_cpu(cmd->len);
5170 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5171 		return -EPROTO;
5172 
5173 	req = (struct l2cap_conn_param_update_req *) data;
5174 	min		= __le16_to_cpu(req->min);
5175 	max		= __le16_to_cpu(req->max);
5176 	latency		= __le16_to_cpu(req->latency);
5177 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5178 
5179 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5180 	       min, max, latency, to_multiplier);
5181 
5182 	memset(&rsp, 0, sizeof(rsp));
5183 
5184 	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5185 	if (err)
5186 		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5187 	else
5188 		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5189 
5190 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5191 		       sizeof(rsp), &rsp);
5192 
5193 	if (!err)
5194 		hci_le_conn_update(hcon, min, max, latency, to_multiplier);
5195 
5196 	return 0;
5197 }
5198 
5199 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5200 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5201 				      u8 *data)
5202 {
5203 	int err = 0;
5204 
5205 	switch (cmd->code) {
5206 	case L2CAP_COMMAND_REJ:
5207 		l2cap_command_rej(conn, cmd, cmd_len, data);
5208 		break;
5209 
5210 	case L2CAP_CONN_REQ:
5211 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5212 		break;
5213 
5214 	case L2CAP_CONN_RSP:
5215 	case L2CAP_CREATE_CHAN_RSP:
5216 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5217 		break;
5218 
5219 	case L2CAP_CONF_REQ:
5220 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5221 		break;
5222 
5223 	case L2CAP_CONF_RSP:
5224 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5225 		break;
5226 
5227 	case L2CAP_DISCONN_REQ:
5228 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5229 		break;
5230 
5231 	case L2CAP_DISCONN_RSP:
5232 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5233 		break;
5234 
5235 	case L2CAP_ECHO_REQ:
5236 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5237 		break;
5238 
5239 	case L2CAP_ECHO_RSP:
5240 		break;
5241 
5242 	case L2CAP_INFO_REQ:
5243 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5244 		break;
5245 
5246 	case L2CAP_INFO_RSP:
5247 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5248 		break;
5249 
5250 	case L2CAP_CREATE_CHAN_REQ:
5251 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5252 		break;
5253 
5254 	case L2CAP_MOVE_CHAN_REQ:
5255 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5256 		break;
5257 
5258 	case L2CAP_MOVE_CHAN_RSP:
5259 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5260 		break;
5261 
5262 	case L2CAP_MOVE_CHAN_CFM:
5263 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5264 		break;
5265 
5266 	case L2CAP_MOVE_CHAN_CFM_RSP:
5267 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5268 		break;
5269 
5270 	default:
5271 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5272 		err = -EINVAL;
5273 		break;
5274 	}
5275 
5276 	return err;
5277 }
5278 
5279 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5280 				   struct l2cap_cmd_hdr *cmd, u8 *data)
5281 {
5282 	switch (cmd->code) {
5283 	case L2CAP_COMMAND_REJ:
5284 		return 0;
5285 
5286 	case L2CAP_CONN_PARAM_UPDATE_REQ:
5287 		return l2cap_conn_param_update_req(conn, cmd, data);
5288 
5289 	case L2CAP_CONN_PARAM_UPDATE_RSP:
5290 		return 0;
5291 
5292 	default:
5293 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5294 		return -EINVAL;
5295 	}
5296 }
5297 
5298 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5299 					struct sk_buff *skb)
5300 {
5301 	struct hci_conn *hcon = conn->hcon;
5302 	struct l2cap_cmd_hdr *cmd;
5303 	u16 len;
5304 	int err;
5305 
5306 	if (hcon->type != LE_LINK)
5307 		goto drop;
5308 
5309 	if (skb->len < L2CAP_CMD_HDR_SIZE)
5310 		goto drop;
5311 
5312 	cmd = (void *) skb->data;
5313 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5314 
5315 	len = le16_to_cpu(cmd->len);
5316 
5317 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5318 
5319 	if (len != skb->len || !cmd->ident) {
5320 		BT_DBG("corrupted command");
5321 		goto drop;
5322 	}
5323 
5324 	err = l2cap_le_sig_cmd(conn, cmd, skb->data);
5325 	if (err) {
5326 		struct l2cap_cmd_rej_unk rej;
5327 
5328 		BT_ERR("Wrong link type (%d)", err);
5329 
5330 		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5331 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5332 			       sizeof(rej), &rej);
5333 	}
5334 
5335 drop:
5336 	kfree_skb(skb);
5337 }
5338 
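/* A single BR/EDR signaling PDU may carry multiple commands; walk the
 * buffer one command header at a time and send a command reject for any
 * command that cannot be processed.
 */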
5339 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5340 				     struct sk_buff *skb)
5341 {
5342 	struct hci_conn *hcon = conn->hcon;
5343 	u8 *data = skb->data;
5344 	int len = skb->len;
5345 	struct l2cap_cmd_hdr cmd;
5346 	int err;
5347 
5348 	l2cap_raw_recv(conn, skb);
5349 
5350 	if (hcon->type != ACL_LINK)
5351 		goto drop;
5352 
5353 	while (len >= L2CAP_CMD_HDR_SIZE) {
5354 		u16 cmd_len;
5355 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5356 		data += L2CAP_CMD_HDR_SIZE;
5357 		len  -= L2CAP_CMD_HDR_SIZE;
5358 
5359 		cmd_len = le16_to_cpu(cmd.len);
5360 
5361 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5362 		       cmd.ident);
5363 
5364 		if (cmd_len > len || !cmd.ident) {
5365 			BT_DBG("corrupted command");
5366 			break;
5367 		}
5368 
5369 		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5370 		if (err) {
5371 			struct l2cap_cmd_rej_unk rej;
5372 
5373 			BT_ERR("Wrong link type (%d)", err);
5374 
5375 			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5376 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5377 				       sizeof(rej), &rej);
5378 		}
5379 
5380 		data += cmd_len;
5381 		len  -= cmd_len;
5382 	}
5383 
5384 drop:
5385 	kfree_skb(skb);
5386 }
5387 
5388 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5389 {
5390 	u16 our_fcs, rcv_fcs;
5391 	int hdr_size;
5392 
5393 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5394 		hdr_size = L2CAP_EXT_HDR_SIZE;
5395 	else
5396 		hdr_size = L2CAP_ENH_HDR_SIZE;
5397 
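	/* The CRC-16 FCS trails the PDU: trim it off the skb, read the
	 * received value from just beyond the trimmed data, and recompute
	 * the checksum over the L2CAP header plus payload for comparison.
	 */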
5398 	if (chan->fcs == L2CAP_FCS_CRC16) {
5399 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5400 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5401 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5402 
5403 		if (our_fcs != rcv_fcs)
5404 			return -EBADMSG;
5405 	}
5406 	return 0;
5407 }
5408 
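/* Send an RNR while the local side is busy, restart the retransmission
 * timer if the remote side has just cleared its busy condition, push any
 * pending I-frames, and finish with an RR if the F-bit has not yet been
 * carried by another frame.
 */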
5409 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5410 {
5411 	struct l2cap_ctrl control;
5412 
5413 	BT_DBG("chan %p", chan);
5414 
5415 	memset(&control, 0, sizeof(control));
5416 	control.sframe = 1;
5417 	control.final = 1;
5418 	control.reqseq = chan->buffer_seq;
5419 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
5420 
5421 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5422 		control.super = L2CAP_SUPER_RNR;
5423 		l2cap_send_sframe(chan, &control);
5424 	}
5425 
5426 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5427 	    chan->unacked_frames > 0)
5428 		__set_retrans_timer(chan);
5429 
5430 	/* Send pending iframes */
5431 	l2cap_ertm_send(chan);
5432 
5433 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5434 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5435 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
5436 		 * send it now.
5437 		 */
5438 		control.super = L2CAP_SUPER_RR;
5439 		l2cap_send_sframe(chan, &control);
5440 	}
5441 }
5442 
5443 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5444 			    struct sk_buff **last_frag)
5445 {
5446 	/* skb->len reflects data in skb as well as all fragments
5447 	 * skb->data_len reflects only data in fragments
5448 	 */
5449 	if (!skb_has_frag_list(skb))
5450 		skb_shinfo(skb)->frag_list = new_frag;
5451 
5452 	new_frag->next = NULL;
5453 
5454 	(*last_frag)->next = new_frag;
5455 	*last_frag = new_frag;
5456 
5457 	skb->len += new_frag->len;
5458 	skb->data_len += new_frag->len;
5459 	skb->truesize += new_frag->truesize;
5460 }
5461 
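/* Reassemble an SDU from incoming I-frames according to the SAR field:
 * unsegmented frames are delivered immediately, a start frame carries the
 * total SDU length, and continuation/end frames are appended until that
 * length has been received.
 */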
5462 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5463 				struct l2cap_ctrl *control)
5464 {
5465 	int err = -EINVAL;
5466 
5467 	switch (control->sar) {
5468 	case L2CAP_SAR_UNSEGMENTED:
5469 		if (chan->sdu)
5470 			break;
5471 
5472 		err = chan->ops->recv(chan, skb);
5473 		break;
5474 
5475 	case L2CAP_SAR_START:
5476 		if (chan->sdu)
5477 			break;
5478 
5479 		chan->sdu_len = get_unaligned_le16(skb->data);
5480 		skb_pull(skb, L2CAP_SDULEN_SIZE);
5481 
5482 		if (chan->sdu_len > chan->imtu) {
5483 			err = -EMSGSIZE;
5484 			break;
5485 		}
5486 
5487 		if (skb->len >= chan->sdu_len)
5488 			break;
5489 
5490 		chan->sdu = skb;
5491 		chan->sdu_last_frag = skb;
5492 
5493 		skb = NULL;
5494 		err = 0;
5495 		break;
5496 
5497 	case L2CAP_SAR_CONTINUE:
5498 		if (!chan->sdu)
5499 			break;
5500 
5501 		append_skb_frag(chan->sdu, skb,
5502 				&chan->sdu_last_frag);
5503 		skb = NULL;
5504 
5505 		if (chan->sdu->len >= chan->sdu_len)
5506 			break;
5507 
5508 		err = 0;
5509 		break;
5510 
5511 	case L2CAP_SAR_END:
5512 		if (!chan->sdu)
5513 			break;
5514 
5515 		append_skb_frag(chan->sdu, skb,
5516 				&chan->sdu_last_frag);
5517 		skb = NULL;
5518 
5519 		if (chan->sdu->len != chan->sdu_len)
5520 			break;
5521 
5522 		err = chan->ops->recv(chan, chan->sdu);
5523 
5524 		if (!err) {
5525 			/* Reassembly complete */
5526 			chan->sdu = NULL;
5527 			chan->sdu_last_frag = NULL;
5528 			chan->sdu_len = 0;
5529 		}
5530 		break;
5531 	}
5532 
5533 	if (err) {
5534 		kfree_skb(skb);
5535 		kfree_skb(chan->sdu);
5536 		chan->sdu = NULL;
5537 		chan->sdu_last_frag = NULL;
5538 		chan->sdu_len = 0;
5539 	}
5540 
5541 	return err;
5542 }
5543 
5544 static int l2cap_resegment(struct l2cap_chan *chan)
5545 {
5546 	/* Placeholder */
5547 	return 0;
5548 }
5549 
5550 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5551 {
5552 	u8 event;
5553 
5554 	if (chan->mode != L2CAP_MODE_ERTM)
5555 		return;
5556 
5557 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5558 	l2cap_tx(chan, NULL, NULL, event);
5559 }
5560 
5561 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5562 {
5563 	int err = 0;
5564 	/* Pass sequential frames to l2cap_reassemble_sdu()
5565 	 * until a gap is encountered.
5566 	 */
5567 
5568 	BT_DBG("chan %p", chan);
5569 
5570 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5571 		struct sk_buff *skb;
5572 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
5573 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
5574 
5575 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5576 
5577 		if (!skb)
5578 			break;
5579 
5580 		skb_unlink(skb, &chan->srej_q);
5581 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5582 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5583 		if (err)
5584 			break;
5585 	}
5586 
5587 	if (skb_queue_empty(&chan->srej_q)) {
5588 		chan->rx_state = L2CAP_RX_STATE_RECV;
5589 		l2cap_send_ack(chan);
5590 	}
5591 
5592 	return err;
5593 }
5594 
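/* Handle an incoming SREJ S-frame: validate reqseq, enforce the retry
 * limit, retransmit the single requested I-frame, and use the poll/final
 * bits together with srej_save_reqseq to avoid duplicate retransmissions.
 */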
5595 static void l2cap_handle_srej(struct l2cap_chan *chan,
5596 			      struct l2cap_ctrl *control)
5597 {
5598 	struct sk_buff *skb;
5599 
5600 	BT_DBG("chan %p, control %p", chan, control);
5601 
5602 	if (control->reqseq == chan->next_tx_seq) {
5603 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5604 		l2cap_send_disconn_req(chan, ECONNRESET);
5605 		return;
5606 	}
5607 
5608 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5609 
5610 	if (skb == NULL) {
5611 		BT_DBG("Seq %d not available for retransmission",
5612 		       control->reqseq);
5613 		return;
5614 	}
5615 
5616 	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5617 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5618 		l2cap_send_disconn_req(chan, ECONNRESET);
5619 		return;
5620 	}
5621 
5622 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5623 
5624 	if (control->poll) {
5625 		l2cap_pass_to_tx(chan, control);
5626 
5627 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
5628 		l2cap_retransmit(chan, control);
5629 		l2cap_ertm_send(chan);
5630 
5631 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5632 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
5633 			chan->srej_save_reqseq = control->reqseq;
5634 		}
5635 	} else {
5636 		l2cap_pass_to_tx_fbit(chan, control);
5637 
5638 		if (control->final) {
5639 			if (chan->srej_save_reqseq != control->reqseq ||
5640 			    !test_and_clear_bit(CONN_SREJ_ACT,
5641 						&chan->conn_state))
5642 				l2cap_retransmit(chan, control);
5643 		} else {
5644 			l2cap_retransmit(chan, control);
5645 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5646 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
5647 				chan->srej_save_reqseq = control->reqseq;
5648 			}
5649 		}
5650 	}
5651 }
5652 
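/* Handle an incoming REJ S-frame: validate reqseq, enforce the retry
 * limit, and retransmit all unacknowledged I-frames, using CONN_REJ_ACT
 * to avoid retransmitting twice when the final bit confirms a poll.
 */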
5653 static void l2cap_handle_rej(struct l2cap_chan *chan,
5654 			     struct l2cap_ctrl *control)
5655 {
5656 	struct sk_buff *skb;
5657 
5658 	BT_DBG("chan %p, control %p", chan, control);
5659 
5660 	if (control->reqseq == chan->next_tx_seq) {
5661 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5662 		l2cap_send_disconn_req(chan, ECONNRESET);
5663 		return;
5664 	}
5665 
5666 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5667 
5668 	if (chan->max_tx && skb &&
5669 	    bt_cb(skb)->control.retries >= chan->max_tx) {
5670 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5671 		l2cap_send_disconn_req(chan, ECONNRESET);
5672 		return;
5673 	}
5674 
5675 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5676 
5677 	l2cap_pass_to_tx(chan, control);
5678 
5679 	if (control->final) {
5680 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5681 			l2cap_retransmit_all(chan, control);
5682 	} else {
5683 		l2cap_retransmit_all(chan, control);
5684 		l2cap_ertm_send(chan);
5685 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5686 			set_bit(CONN_REJ_ACT, &chan->conn_state);
5687 	}
5688 }
5689 
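/* Classify an incoming I-frame's txseq relative to the receive window and
 * any outstanding SREJ state: expected, duplicate, unexpected (a gap), or
 * invalid/ignorable when it falls outside the tx window.
 */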
5690 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5691 {
5692 	BT_DBG("chan %p, txseq %d", chan, txseq);
5693 
5694 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5695 	       chan->expected_tx_seq);
5696 
5697 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5698 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5699 		    chan->tx_win) {
5700 			/* See notes below regarding "double poll" and
5701 			 * invalid packets.
5702 			 */
5703 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5704 				BT_DBG("Invalid/Ignore - after SREJ");
5705 				return L2CAP_TXSEQ_INVALID_IGNORE;
5706 			} else {
5707 				BT_DBG("Invalid - in window after SREJ sent");
5708 				return L2CAP_TXSEQ_INVALID;
5709 			}
5710 		}
5711 
5712 		if (chan->srej_list.head == txseq) {
5713 			BT_DBG("Expected SREJ");
5714 			return L2CAP_TXSEQ_EXPECTED_SREJ;
5715 		}
5716 
5717 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5718 			BT_DBG("Duplicate SREJ - txseq already stored");
5719 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
5720 		}
5721 
5722 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5723 			BT_DBG("Unexpected SREJ - not requested");
5724 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5725 		}
5726 	}
5727 
5728 	if (chan->expected_tx_seq == txseq) {
5729 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5730 		    chan->tx_win) {
5731 			BT_DBG("Invalid - txseq outside tx window");
5732 			return L2CAP_TXSEQ_INVALID;
5733 		} else {
5734 			BT_DBG("Expected");
5735 			return L2CAP_TXSEQ_EXPECTED;
5736 		}
5737 	}
5738 
5739 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5740 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5741 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
5742 		return L2CAP_TXSEQ_DUPLICATE;
5743 	}
5744 
5745 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5746 		/* A source of invalid packets is a "double poll" condition,
5747 		 * where delays cause us to send multiple poll packets.  If
5748 		 * the remote stack receives and processes both polls,
5749 		 * sequence numbers can wrap around in such a way that a
5750 		 * resent frame has a sequence number that looks like new data
5751 		 * with a sequence gap.  This would trigger an erroneous SREJ
5752 		 * request.
5753 		 *
5754 		 * Fortunately, this is impossible with a tx window that's
5755 		 * no larger than half of the sequence number space, which
5756 		 * allows invalid frames to be safely ignored.
5757 		 *
5758 		 * With tx window sizes greater than half of the tx window
5759 		 * maximum, the frame is invalid and cannot be ignored.  This
5760 		 * causes a disconnect.
5761 		 */
5762 
5763 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5764 			BT_DBG("Invalid/Ignore - txseq outside tx window");
5765 			return L2CAP_TXSEQ_INVALID_IGNORE;
5766 		} else {
5767 			BT_DBG("Invalid - txseq outside tx window");
5768 			return L2CAP_TXSEQ_INVALID;
5769 		}
5770 	} else {
5771 		BT_DBG("Unexpected - txseq indicates missing frames");
5772 		return L2CAP_TXSEQ_UNEXPECTED;
5773 	}
5774 }
5775 
5776 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5777 			       struct l2cap_ctrl *control,
5778 			       struct sk_buff *skb, u8 event)
5779 {
5780 	int err = 0;
5781 	bool skb_in_use = false;
5782 
5783 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5784 	       event);
5785 
5786 	switch (event) {
5787 	case L2CAP_EV_RECV_IFRAME:
5788 		switch (l2cap_classify_txseq(chan, control->txseq)) {
5789 		case L2CAP_TXSEQ_EXPECTED:
5790 			l2cap_pass_to_tx(chan, control);
5791 
5792 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5793 				BT_DBG("Busy, discarding expected seq %d",
5794 				       control->txseq);
5795 				break;
5796 			}
5797 
5798 			chan->expected_tx_seq = __next_seq(chan,
5799 							   control->txseq);
5800 
5801 			chan->buffer_seq = chan->expected_tx_seq;
5802 			skb_in_use = true;
5803 
5804 			err = l2cap_reassemble_sdu(chan, skb, control);
5805 			if (err)
5806 				break;
5807 
5808 			if (control->final) {
5809 				if (!test_and_clear_bit(CONN_REJ_ACT,
5810 							&chan->conn_state)) {
5811 					control->final = 0;
5812 					l2cap_retransmit_all(chan, control);
5813 					l2cap_ertm_send(chan);
5814 				}
5815 			}
5816 
5817 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5818 				l2cap_send_ack(chan);
5819 			break;
5820 		case L2CAP_TXSEQ_UNEXPECTED:
5821 			l2cap_pass_to_tx(chan, control);
5822 
5823 			/* Can't issue SREJ frames in the local busy state.
5824 			 * Drop this frame, it will be seen as missing
5825 			 * when local busy is exited.
5826 			 */
5827 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5828 				BT_DBG("Busy, discarding unexpected seq %d",
5829 				       control->txseq);
5830 				break;
5831 			}
5832 
5833 			/* There was a gap in the sequence, so an SREJ
5834 			 * must be sent for each missing frame.  The
5835 			 * current frame is stored for later use.
5836 			 */
5837 			skb_queue_tail(&chan->srej_q, skb);
5838 			skb_in_use = true;
5839 			BT_DBG("Queued %p (queue len %d)", skb,
5840 			       skb_queue_len(&chan->srej_q));
5841 
5842 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5843 			l2cap_seq_list_clear(&chan->srej_list);
5844 			l2cap_send_srej(chan, control->txseq);
5845 
5846 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5847 			break;
5848 		case L2CAP_TXSEQ_DUPLICATE:
5849 			l2cap_pass_to_tx(chan, control);
5850 			break;
5851 		case L2CAP_TXSEQ_INVALID_IGNORE:
5852 			break;
5853 		case L2CAP_TXSEQ_INVALID:
5854 		default:
5855 			l2cap_send_disconn_req(chan, ECONNRESET);
5856 			break;
5857 		}
5858 		break;
5859 	case L2CAP_EV_RECV_RR:
5860 		l2cap_pass_to_tx(chan, control);
5861 		if (control->final) {
5862 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5863 
5864 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5865 			    !__chan_is_moving(chan)) {
5866 				control->final = 0;
5867 				l2cap_retransmit_all(chan, control);
5868 			}
5869 
5870 			l2cap_ertm_send(chan);
5871 		} else if (control->poll) {
5872 			l2cap_send_i_or_rr_or_rnr(chan);
5873 		} else {
5874 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
5875 					       &chan->conn_state) &&
5876 			    chan->unacked_frames)
5877 				__set_retrans_timer(chan);
5878 
5879 			l2cap_ertm_send(chan);
5880 		}
5881 		break;
5882 	case L2CAP_EV_RECV_RNR:
5883 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5884 		l2cap_pass_to_tx(chan, control);
5885 		if (control && control->poll) {
5886 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
5887 			l2cap_send_rr_or_rnr(chan, 0);
5888 		}
5889 		__clear_retrans_timer(chan);
5890 		l2cap_seq_list_clear(&chan->retrans_list);
5891 		break;
5892 	case L2CAP_EV_RECV_REJ:
5893 		l2cap_handle_rej(chan, control);
5894 		break;
5895 	case L2CAP_EV_RECV_SREJ:
5896 		l2cap_handle_srej(chan, control);
5897 		break;
5898 	default:
5899 		break;
5900 	}
5901 
5902 	if (skb && !skb_in_use) {
5903 		BT_DBG("Freeing %p", skb);
5904 		kfree_skb(skb);
5905 	}
5906 
5907 	return err;
5908 }
5909 
5910 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5911 				    struct l2cap_ctrl *control,
5912 				    struct sk_buff *skb, u8 event)
5913 {
5914 	int err = 0;
5915 	u16 txseq = control->txseq;
5916 	bool skb_in_use = false;
5917 
5918 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5919 	       event);
5920 
5921 	switch (event) {
5922 	case L2CAP_EV_RECV_IFRAME:
5923 		switch (l2cap_classify_txseq(chan, txseq)) {
5924 		case L2CAP_TXSEQ_EXPECTED:
5925 			/* Keep frame for reassembly later */
5926 			l2cap_pass_to_tx(chan, control);
5927 			skb_queue_tail(&chan->srej_q, skb);
5928 			skb_in_use = true;
5929 			BT_DBG("Queued %p (queue len %d)", skb,
5930 			       skb_queue_len(&chan->srej_q));
5931 
5932 			chan->expected_tx_seq = __next_seq(chan, txseq);
5933 			break;
5934 		case L2CAP_TXSEQ_EXPECTED_SREJ:
5935 			l2cap_seq_list_pop(&chan->srej_list);
5936 
5937 			l2cap_pass_to_tx(chan, control);
5938 			skb_queue_tail(&chan->srej_q, skb);
5939 			skb_in_use = true;
5940 			BT_DBG("Queued %p (queue len %d)", skb,
5941 			       skb_queue_len(&chan->srej_q));
5942 
5943 			err = l2cap_rx_queued_iframes(chan);
5944 			break;
5948 		case L2CAP_TXSEQ_UNEXPECTED:
5949 			/* Got a frame that can't be reassembled yet.
5950 			 * Save it for later, and send SREJs to cover
5951 			 * the missing frames.
5952 			 */
5953 			skb_queue_tail(&chan->srej_q, skb);
5954 			skb_in_use = true;
5955 			BT_DBG("Queued %p (queue len %d)", skb,
5956 			       skb_queue_len(&chan->srej_q));
5957 
5958 			l2cap_pass_to_tx(chan, control);
5959 			l2cap_send_srej(chan, control->txseq);
5960 			break;
5961 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
5962 			/* This frame was requested with an SREJ, but
5963 			 * some expected retransmitted frames are
5964 			 * missing.  Request retransmission of missing
5965 			 * SREJ'd frames.
5966 			 */
5967 			skb_queue_tail(&chan->srej_q, skb);
5968 			skb_in_use = true;
5969 			BT_DBG("Queued %p (queue len %d)", skb,
5970 			       skb_queue_len(&chan->srej_q));
5971 
5972 			l2cap_pass_to_tx(chan, control);
5973 			l2cap_send_srej_list(chan, control->txseq);
5974 			break;
5975 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
5976 			/* We've already queued this frame.  Drop this copy. */
5977 			l2cap_pass_to_tx(chan, control);
5978 			break;
5979 		case L2CAP_TXSEQ_DUPLICATE:
5980 			/* Expecting a later sequence number, so this frame
5981 			 * was already received.  Ignore it completely.
5982 			 */
5983 			break;
5984 		case L2CAP_TXSEQ_INVALID_IGNORE:
5985 			break;
5986 		case L2CAP_TXSEQ_INVALID:
5987 		default:
5988 			l2cap_send_disconn_req(chan, ECONNRESET);
5989 			break;
5990 		}
5991 		break;
5992 	case L2CAP_EV_RECV_RR:
5993 		l2cap_pass_to_tx(chan, control);
5994 		if (control->final) {
5995 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5996 
5997 			if (!test_and_clear_bit(CONN_REJ_ACT,
5998 						&chan->conn_state)) {
5999 				control->final = 0;
6000 				l2cap_retransmit_all(chan, control);
6001 			}
6002 
6003 			l2cap_ertm_send(chan);
6004 		} else if (control->poll) {
6005 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6006 					       &chan->conn_state) &&
6007 			    chan->unacked_frames) {
6008 				__set_retrans_timer(chan);
6009 			}
6010 
6011 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6012 			l2cap_send_srej_tail(chan);
6013 		} else {
6014 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6015 					       &chan->conn_state) &&
6016 			    chan->unacked_frames)
6017 				__set_retrans_timer(chan);
6018 
6019 			l2cap_send_ack(chan);
6020 		}
6021 		break;
6022 	case L2CAP_EV_RECV_RNR:
6023 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6024 		l2cap_pass_to_tx(chan, control);
6025 		if (control->poll) {
6026 			l2cap_send_srej_tail(chan);
6027 		} else {
6028 			struct l2cap_ctrl rr_control;
6029 			memset(&rr_control, 0, sizeof(rr_control));
6030 			rr_control.sframe = 1;
6031 			rr_control.super = L2CAP_SUPER_RR;
6032 			rr_control.reqseq = chan->buffer_seq;
6033 			l2cap_send_sframe(chan, &rr_control);
6034 		}
6035 
6036 		break;
6037 	case L2CAP_EV_RECV_REJ:
6038 		l2cap_handle_rej(chan, control);
6039 		break;
6040 	case L2CAP_EV_RECV_SREJ:
6041 		l2cap_handle_srej(chan, control);
6042 		break;
6043 	}
6044 
6045 	if (skb && !skb_in_use) {
6046 		BT_DBG("Freeing %p", skb);
6047 		kfree_skb(skb);
6048 	}
6049 
6050 	return err;
6051 }
6052 
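/* Finish a channel move: return the receive state machine to RECV and
 * adopt the MTU of whichever controller (AMP or BR/EDR ACL) now
 * carries the channel before resegmenting queued data.
 */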
6053 static int l2cap_finish_move(struct l2cap_chan *chan)
6054 {
6055 	BT_DBG("chan %p", chan);
6056 
6057 	chan->rx_state = L2CAP_RX_STATE_RECV;
6058 
6059 	if (chan->hs_hcon)
6060 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6061 	else
6062 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6063 
6064 	return l2cap_resegment(chan);
6065 }
6066 
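/* Receive state handler used after a channel move while waiting for an
 * S-frame with the poll bit set from the remote side.
 */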
6067 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6068 				 struct l2cap_ctrl *control,
6069 				 struct sk_buff *skb, u8 event)
6070 {
6071 	int err;
6072 
6073 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6074 	       event);
6075 
6076 	if (!control->poll)
6077 		return -EPROTO;
6078 
6079 	l2cap_process_reqseq(chan, control->reqseq);
6080 
6081 	if (!skb_queue_empty(&chan->tx_q))
6082 		chan->tx_send_head = skb_peek(&chan->tx_q);
6083 	else
6084 		chan->tx_send_head = NULL;
6085 
6086 	/* Rewind next_tx_seq to the point expected
6087 	 * by the receiver.
6088 	 */
6089 	chan->next_tx_seq = control->reqseq;
6090 	chan->unacked_frames = 0;
6091 
6092 	err = l2cap_finish_move(chan);
6093 	if (err)
6094 		return err;
6095 
6096 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
6097 	l2cap_send_i_or_rr_or_rnr(chan);
6098 
6099 	if (event == L2CAP_EV_RECV_IFRAME)
6100 		return -EPROTO;
6101 
6102 	return l2cap_rx_state_recv(chan, control, NULL, event);
6103 }
6104 
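/* Receive state handler used after a channel move while waiting for a
 * frame with the final bit set in response to our poll.
 */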
6105 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6106 				 struct l2cap_ctrl *control,
6107 				 struct sk_buff *skb, u8 event)
6108 {
6109 	int err;
6110 
6111 	if (!control->final)
6112 		return -EPROTO;
6113 
6114 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6115 
6116 	chan->rx_state = L2CAP_RX_STATE_RECV;
6117 	l2cap_process_reqseq(chan, control->reqseq);
6118 
6119 	if (!skb_queue_empty(&chan->tx_q))
6120 		chan->tx_send_head = skb_peek(&chan->tx_q);
6121 	else
6122 		chan->tx_send_head = NULL;
6123 
6124 	/* Rewind next_tx_seq to the point expected
6125 	 * by the receiver.
6126 	 */
6127 	chan->next_tx_seq = control->reqseq;
6128 	chan->unacked_frames = 0;
6129 
6130 	if (chan->hs_hcon)
6131 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6132 	else
6133 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6134 
6135 	err = l2cap_resegment(chan);
6136 
6137 	if (!err)
6138 		err = l2cap_rx_state_recv(chan, control, skb, event);
6139 
6140 	return err;
6141 }
6142 
6143 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6144 {
6145 	/* Make sure reqseq is for a packet that has been sent but not acked */
6146 	u16 unacked;
6147 
6148 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6149 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6150 }
6151 
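/* Top-level ERTM receive entry point: validate reqseq and dispatch the
 * event to the handler for the current receive state.
 */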
6152 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6153 		    struct sk_buff *skb, u8 event)
6154 {
6155 	int err = 0;
6156 
6157 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6158 	       control, skb, event, chan->rx_state);
6159 
6160 	if (__valid_reqseq(chan, control->reqseq)) {
6161 		switch (chan->rx_state) {
6162 		case L2CAP_RX_STATE_RECV:
6163 			err = l2cap_rx_state_recv(chan, control, skb, event);
6164 			break;
6165 		case L2CAP_RX_STATE_SREJ_SENT:
6166 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6167 						       event);
6168 			break;
6169 		case L2CAP_RX_STATE_WAIT_P:
6170 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6171 			break;
6172 		case L2CAP_RX_STATE_WAIT_F:
6173 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6174 			break;
6175 		default:
6176 			/* shut it down */
6177 			break;
6178 		}
6179 	} else {
6180 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
6181 		       control->reqseq, chan->next_tx_seq,
6182 		       chan->expected_ack_seq);
6183 		l2cap_send_disconn_req(chan, ECONNRESET);
6184 	}
6185 
6186 	return err;
6187 }
6188 
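/* Streaming mode receive: deliver in-sequence frames and silently
 * discard everything else, since streaming mode never retransmits.
 */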
6189 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6190 			   struct sk_buff *skb)
6191 {
6192 	int err = 0;
6193 
6194 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6195 	       chan->rx_state);
6196 
6197 	if (l2cap_classify_txseq(chan, control->txseq) ==
6198 	    L2CAP_TXSEQ_EXPECTED) {
6199 		l2cap_pass_to_tx(chan, control);
6200 
6201 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6202 		       __next_seq(chan, chan->buffer_seq));
6203 
6204 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6205 
6206 		l2cap_reassemble_sdu(chan, skb, control);
6207 	} else {
6208 		if (chan->sdu) {
6209 			kfree_skb(chan->sdu);
6210 			chan->sdu = NULL;
6211 		}
6212 		chan->sdu_last_frag = NULL;
6213 		chan->sdu_len = 0;
6214 
6215 		if (skb) {
6216 			BT_DBG("Freeing %p", skb);
6217 			kfree_skb(skb);
6218 		}
6219 	}
6220 
6221 	chan->last_acked_seq = control->txseq;
6222 	chan->expected_tx_seq = __next_seq(chan, control->txseq);
6223 
6224 	return err;
6225 }
6226 
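/* Common receive path for ERTM and streaming channels: check the FCS,
 * validate the control field and hand the frame to the appropriate
 * receive state machine.
 */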
6227 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6228 {
6229 	struct l2cap_ctrl *control = &bt_cb(skb)->control;
6230 	u16 len;
6231 	u8 event;
6232 
6233 	__unpack_control(chan, skb);
6234 
6235 	len = skb->len;
6236 
6237 	/*
6238 	 * A corrupted I-frame can simply be dropped here.  The receive
6239 	 * state machine will treat it as missing, start the normal
6240 	 * recovery procedure and ask for retransmission.
6241 	 */
6242 	if (l2cap_check_fcs(chan, skb))
6243 		goto drop;
6244 
6245 	if (!control->sframe && control->sar == L2CAP_SAR_START)
6246 		len -= L2CAP_SDULEN_SIZE;
6247 
6248 	if (chan->fcs == L2CAP_FCS_CRC16)
6249 		len -= L2CAP_FCS_SIZE;
6250 
6251 	if (len > chan->mps) {
6252 		l2cap_send_disconn_req(chan, ECONNRESET);
6253 		goto drop;
6254 	}
6255 
6256 	if (!control->sframe) {
6257 		int err;
6258 
6259 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6260 		       control->sar, control->reqseq, control->final,
6261 		       control->txseq);
6262 
6263 		/* Validate F-bit - F=0 always valid, F=1 only
6264 		 * valid in TX WAIT_F
6265 		 */
6266 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6267 			goto drop;
6268 
6269 		if (chan->mode != L2CAP_MODE_STREAMING) {
6270 			event = L2CAP_EV_RECV_IFRAME;
6271 			err = l2cap_rx(chan, control, skb, event);
6272 		} else {
6273 			err = l2cap_stream_rx(chan, control, skb);
6274 		}
6275 
6276 		if (err)
6277 			l2cap_send_disconn_req(chan, ECONNRESET);
6278 	} else {
6279 		const u8 rx_func_to_event[4] = {
6280 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6281 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6282 		};
6283 
6284 		/* Only I-frames are expected in streaming mode */
6285 		if (chan->mode == L2CAP_MODE_STREAMING)
6286 			goto drop;
6287 
6288 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6289 		       control->reqseq, control->final, control->poll,
6290 		       control->super);
6291 
6292 		if (len != 0) {
6293 			BT_ERR("Trailing bytes: %d in sframe", len);
6294 			l2cap_send_disconn_req(chan, ECONNRESET);
6295 			goto drop;
6296 		}
6297 
6298 		/* Validate F and P bits */
6299 		if (control->final && (control->poll ||
6300 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6301 			goto drop;
6302 
6303 		event = rx_func_to_event[control->super];
6304 		if (l2cap_rx(chan, control, skb, event))
6305 			l2cap_send_disconn_req(chan, ECONNRESET);
6306 	}
6307 
6308 	return 0;
6309 
6310 drop:
6311 	kfree_skb(skb);
6312 	return 0;
6313 }
6314 
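/* Handle data received on a connection-oriented channel, looked up by
 * source CID, according to the channel mode.
 */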
6315 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6316 			       struct sk_buff *skb)
6317 {
6318 	struct l2cap_chan *chan;
6319 
6320 	chan = l2cap_get_chan_by_scid(conn, cid);
6321 	if (!chan) {
6322 		if (cid == L2CAP_CID_A2MP) {
6323 			chan = a2mp_channel_create(conn, skb);
6324 			if (!chan) {
6325 				kfree_skb(skb);
6326 				return;
6327 			}
6328 
6329 			l2cap_chan_lock(chan);
6330 		} else {
6331 			BT_DBG("unknown cid 0x%4.4x", cid);
6332 			/* Drop packet and return */
6333 			kfree_skb(skb);
6334 			return;
6335 		}
6336 	}
6337 
6338 	BT_DBG("chan %p, len %d", chan, skb->len);
6339 
6340 	if (chan->state != BT_CONNECTED)
6341 		goto drop;
6342 
6343 	switch (chan->mode) {
6344 	case L2CAP_MODE_BASIC:
6345 		/* If the socket recv buffer overflows we drop data here,
6346 		 * which is *bad* because L2CAP has to be reliable.
6347 		 * But we don't have any other choice: basic mode L2CAP
6348 		 * doesn't provide a flow control mechanism. */
6349 
6350 		if (chan->imtu < skb->len)
6351 			goto drop;
6352 
6353 		if (!chan->ops->recv(chan, skb))
6354 			goto done;
6355 		break;
6356 
6357 	case L2CAP_MODE_ERTM:
6358 	case L2CAP_MODE_STREAMING:
6359 		l2cap_data_rcv(chan, skb);
6360 		goto done;
6361 
6362 	default:
6363 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6364 		break;
6365 	}
6366 
6367 drop:
6368 	kfree_skb(skb);
6369 
6370 done:
6371 	l2cap_chan_unlock(chan);
6372 }
6373 
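/* Handle a packet received on the connectionless data channel by
 * delivering it to the channel bound to the given PSM, if any.
 */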
6374 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6375 				  struct sk_buff *skb)
6376 {
6377 	struct hci_conn *hcon = conn->hcon;
6378 	struct l2cap_chan *chan;
6379 
6380 	if (hcon->type != ACL_LINK)
6381 		goto drop;
6382 
6383 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst);
6384 	if (!chan)
6385 		goto drop;
6386 
6387 	BT_DBG("chan %p, len %d", chan, skb->len);
6388 
6389 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6390 		goto drop;
6391 
6392 	if (chan->imtu < skb->len)
6393 		goto drop;
6394 
6395 	/* Store remote BD_ADDR and PSM for msg_name */
6396 	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
6397 	bt_cb(skb)->psm = psm;
6398 
6399 	if (!chan->ops->recv(chan, skb))
6400 		return;
6401 
6402 drop:
6403 	kfree_skb(skb);
6404 }
6405 
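/* Handle data received on the LE ATT fixed channel and deliver it to
 * the matching channel, unless the remote device is blacklisted.
 */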
6406 static void l2cap_att_channel(struct l2cap_conn *conn,
6407 			      struct sk_buff *skb)
6408 {
6409 	struct hci_conn *hcon = conn->hcon;
6410 	struct l2cap_chan *chan;
6411 
6412 	if (hcon->type != LE_LINK)
6413 		goto drop;
6414 
6415 	chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6416 					 &hcon->src, &hcon->dst);
6417 	if (!chan)
6418 		goto drop;
6419 
6420 	BT_DBG("chan %p, len %d", chan, skb->len);
6421 
6422 	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
6423 		goto drop;
6424 
6425 	if (chan->imtu < skb->len)
6426 		goto drop;
6427 
6428 	if (!chan->ops->recv(chan, skb))
6429 		return;
6430 
6431 drop:
6432 	kfree_skb(skb);
6433 }
6434 
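/* Demultiplex a complete L2CAP frame to the signalling, fixed or
 * dynamically allocated channel identified by its CID.
 */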
6435 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6436 {
6437 	struct l2cap_hdr *lh = (void *) skb->data;
6438 	u16 cid, len;
6439 	__le16 psm;
6440 
6441 	skb_pull(skb, L2CAP_HDR_SIZE);
6442 	cid = __le16_to_cpu(lh->cid);
6443 	len = __le16_to_cpu(lh->len);
6444 
6445 	if (len != skb->len) {
6446 		kfree_skb(skb);
6447 		return;
6448 	}
6449 
6450 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
6451 
6452 	switch (cid) {
6453 	case L2CAP_CID_SIGNALING:
6454 		l2cap_sig_channel(conn, skb);
6455 		break;
6456 
6457 	case L2CAP_CID_CONN_LESS:
6458 		psm = get_unaligned((__le16 *) skb->data);
6459 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
6460 		l2cap_conless_channel(conn, psm, skb);
6461 		break;
6462 
6463 	case L2CAP_CID_ATT:
6464 		l2cap_att_channel(conn, skb);
6465 		break;
6466 
6467 	case L2CAP_CID_LE_SIGNALING:
6468 		l2cap_le_sig_channel(conn, skb);
6469 		break;
6470 
6471 	case L2CAP_CID_SMP:
6472 		if (smp_sig_channel(conn, skb))
6473 			l2cap_conn_del(conn->hcon, EACCES);
6474 		break;
6475 
6476 	default:
6477 		l2cap_data_channel(conn, cid, skb);
6478 		break;
6479 	}
6480 }
6481 
6482 /* ---- L2CAP interface with lower layer (HCI) ---- */
6483 
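/* Called by the HCI layer for an incoming ACL connection request;
 * returns the combined link mode of the matching listening channels.
 */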
6484 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6485 {
6486 	int exact = 0, lm1 = 0, lm2 = 0;
6487 	struct l2cap_chan *c;
6488 
6489 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6490 
6491 	/* Find listening sockets and check their link_mode */
6492 	read_lock(&chan_list_lock);
6493 	list_for_each_entry(c, &chan_list, global_l) {
6494 		if (c->state != BT_LISTEN)
6495 			continue;
6496 
6497 		if (!bacmp(&c->src, &hdev->bdaddr)) {
6498 			lm1 |= HCI_LM_ACCEPT;
6499 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6500 				lm1 |= HCI_LM_MASTER;
6501 			exact++;
6502 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
6503 			lm2 |= HCI_LM_ACCEPT;
6504 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6505 				lm2 |= HCI_LM_MASTER;
6506 		}
6507 	}
6508 	read_unlock(&chan_list_lock);
6509 
6510 	return exact ? lm1 : lm2;
6511 }
6512 
6513 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6514 {
6515 	struct l2cap_conn *conn;
6516 
6517 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6518 
6519 	if (!status) {
6520 		conn = l2cap_conn_add(hcon);
6521 		if (conn)
6522 			l2cap_conn_ready(conn);
6523 	} else {
6524 		l2cap_conn_del(hcon, bt_to_errno(status));
6525 	}
6526 }
6527 
6528 int l2cap_disconn_ind(struct hci_conn *hcon)
6529 {
6530 	struct l2cap_conn *conn = hcon->l2cap_data;
6531 
6532 	BT_DBG("hcon %p", hcon);
6533 
6534 	if (!conn)
6535 		return HCI_ERROR_REMOTE_USER_TERM;
6536 	return conn->disc_reason;
6537 }
6538 
6539 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6540 {
6541 	BT_DBG("hcon %p reason %d", hcon, reason);
6542 
6543 	l2cap_conn_del(hcon, bt_to_errno(reason));
6544 }
6545 
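/* React to a change of the link encryption state: when encryption is
 * lost, arm the channel timer or close the channel depending on the
 * required security level; when it is restored, clear the timer.
 */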
6546 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6547 {
6548 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6549 		return;
6550 
6551 	if (encrypt == 0x00) {
6552 		if (chan->sec_level == BT_SECURITY_MEDIUM)
6553 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6554 		else if (chan->sec_level == BT_SECURITY_HIGH)
6555 			l2cap_chan_close(chan, ECONNREFUSED);
6556 	} else {
6557 		if (chan->sec_level == BT_SECURITY_MEDIUM)
6558 			__clear_chan_timer(chan);
6559 	}
6560 }
6561 
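/* Called by the HCI layer when authentication or encryption completes;
 * walk all channels on the connection and resume, connect or reject
 * them according to the result.
 */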
6562 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6563 {
6564 	struct l2cap_conn *conn = hcon->l2cap_data;
6565 	struct l2cap_chan *chan;
6566 
6567 	if (!conn)
6568 		return 0;
6569 
6570 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
6571 
6572 	if (hcon->type == LE_LINK) {
6573 		if (!status && encrypt)
6574 			smp_distribute_keys(conn, 0);
6575 		cancel_delayed_work(&conn->security_timer);
6576 	}
6577 
6578 	mutex_lock(&conn->chan_lock);
6579 
6580 	list_for_each_entry(chan, &conn->chan_l, list) {
6581 		l2cap_chan_lock(chan);
6582 
6583 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6584 		       state_to_string(chan->state));
6585 
6586 		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6587 			l2cap_chan_unlock(chan);
6588 			continue;
6589 		}
6590 
6591 		if (chan->scid == L2CAP_CID_ATT) {
6592 			if (!status && encrypt) {
6593 				chan->sec_level = hcon->sec_level;
6594 				l2cap_chan_ready(chan);
6595 			}
6596 
6597 			l2cap_chan_unlock(chan);
6598 			continue;
6599 		}
6600 
6601 		if (!__l2cap_no_conn_pending(chan)) {
6602 			l2cap_chan_unlock(chan);
6603 			continue;
6604 		}
6605 
6606 		if (!status && (chan->state == BT_CONNECTED ||
6607 				chan->state == BT_CONFIG)) {
6608 			chan->ops->resume(chan);
6609 			l2cap_check_encryption(chan, encrypt);
6610 			l2cap_chan_unlock(chan);
6611 			continue;
6612 		}
6613 
6614 		if (chan->state == BT_CONNECT) {
6615 			if (!status) {
6616 				l2cap_start_connection(chan);
6617 			} else {
6618 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6619 			}
6620 		} else if (chan->state == BT_CONNECT2) {
6621 			struct l2cap_conn_rsp rsp;
6622 			__u16 res, stat;
6623 
6624 			if (!status) {
6625 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6626 					res = L2CAP_CR_PEND;
6627 					stat = L2CAP_CS_AUTHOR_PEND;
6628 					chan->ops->defer(chan);
6629 				} else {
6630 					l2cap_state_change(chan, BT_CONFIG);
6631 					res = L2CAP_CR_SUCCESS;
6632 					stat = L2CAP_CS_NO_INFO;
6633 				}
6634 			} else {
6635 				l2cap_state_change(chan, BT_DISCONN);
6636 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6637 				res = L2CAP_CR_SEC_BLOCK;
6638 				stat = L2CAP_CS_NO_INFO;
6639 			}
6640 
6641 			rsp.scid   = cpu_to_le16(chan->dcid);
6642 			rsp.dcid   = cpu_to_le16(chan->scid);
6643 			rsp.result = cpu_to_le16(res);
6644 			rsp.status = cpu_to_le16(stat);
6645 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
6646 				       sizeof(rsp), &rsp);
6647 
6648 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6649 			    res == L2CAP_CR_SUCCESS) {
6650 				char buf[128];
6651 				set_bit(CONF_REQ_SENT, &chan->conf_state);
6652 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
6653 					       L2CAP_CONF_REQ,
6654 					       l2cap_build_conf_req(chan, buf),
6655 					       buf);
6656 				chan->num_conf_req++;
6657 			}
6658 		}
6659 
6660 		l2cap_chan_unlock(chan);
6661 	}
6662 
6663 	mutex_unlock(&conn->chan_lock);
6664 
6665 	return 0;
6666 }
6667 
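/* Called by the HCI layer for each incoming ACL fragment; reassemble
 * fragments into a complete L2CAP frame and pass it on to
 * l2cap_recv_frame().
 */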
6668 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6669 {
6670 	struct l2cap_conn *conn = hcon->l2cap_data;
6671 	struct l2cap_hdr *hdr;
6672 	int len;
6673 
6674 	/* For an AMP controller do not create an l2cap conn */
6675 	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6676 		goto drop;
6677 
6678 	if (!conn)
6679 		conn = l2cap_conn_add(hcon);
6680 
6681 	if (!conn)
6682 		goto drop;
6683 
6684 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6685 
6686 	switch (flags) {
6687 	case ACL_START:
6688 	case ACL_START_NO_FLUSH:
6689 	case ACL_COMPLETE:
6690 		if (conn->rx_len) {
6691 			BT_ERR("Unexpected start frame (len %d)", skb->len);
6692 			kfree_skb(conn->rx_skb);
6693 			conn->rx_skb = NULL;
6694 			conn->rx_len = 0;
6695 			l2cap_conn_unreliable(conn, ECOMM);
6696 		}
6697 
6698 		/* A start fragment always begins with the Basic L2CAP header */
6699 		if (skb->len < L2CAP_HDR_SIZE) {
6700 			BT_ERR("Frame is too short (len %d)", skb->len);
6701 			l2cap_conn_unreliable(conn, ECOMM);
6702 			goto drop;
6703 		}
6704 
6705 		hdr = (struct l2cap_hdr *) skb->data;
6706 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
6707 
6708 		if (len == skb->len) {
6709 			/* Complete frame received */
6710 			l2cap_recv_frame(conn, skb);
6711 			return 0;
6712 		}
6713 
6714 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
6715 
6716 		if (skb->len > len) {
6717 			BT_ERR("Frame is too long (len %d, expected len %d)",
6718 			       skb->len, len);
6719 			l2cap_conn_unreliable(conn, ECOMM);
6720 			goto drop;
6721 		}
6722 
6723 		/* Allocate skb for the complete frame (with header) */
6724 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
6725 		if (!conn->rx_skb)
6726 			goto drop;
6727 
6728 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6729 					  skb->len);
6730 		conn->rx_len = len - skb->len;
6731 		break;
6732 
6733 	case ACL_CONT:
6734 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
6735 
6736 		if (!conn->rx_len) {
6737 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
6738 			l2cap_conn_unreliable(conn, ECOMM);
6739 			goto drop;
6740 		}
6741 
6742 		if (skb->len > conn->rx_len) {
6743 			BT_ERR("Fragment is too long (len %d, expected %d)",
6744 			       skb->len, conn->rx_len);
6745 			kfree_skb(conn->rx_skb);
6746 			conn->rx_skb = NULL;
6747 			conn->rx_len = 0;
6748 			l2cap_conn_unreliable(conn, ECOMM);
6749 			goto drop;
6750 		}
6751 
6752 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6753 					  skb->len);
6754 		conn->rx_len -= skb->len;
6755 
6756 		if (!conn->rx_len) {
6757 			/* Complete frame received. l2cap_recv_frame
6758 			 * takes ownership of the skb, so clear the
6759 			 * connection's rx_skb pointer first.
6760 			 */
6761 			struct sk_buff *rx_skb = conn->rx_skb;
6762 			conn->rx_skb = NULL;
6763 			l2cap_recv_frame(conn, rx_skb);
6764 		}
6765 		break;
6766 	}
6767 
6768 drop:
6769 	kfree_skb(skb);
6770 	return 0;
6771 }
6772 
6773 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6774 {
6775 	struct l2cap_chan *c;
6776 
6777 	read_lock(&chan_list_lock);
6778 
6779 	list_for_each_entry(c, &chan_list, global_l) {
6780 		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6781 			   &c->src, &c->dst,
6782 			   c->state, __le16_to_cpu(c->psm),
6783 			   c->scid, c->dcid, c->imtu, c->omtu,
6784 			   c->sec_level, c->mode);
6785 	}
6786 
6787 	read_unlock(&chan_list_lock);
6788 
6789 	return 0;
6790 }
6791 
6792 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
6793 {
6794 	return single_open(file, l2cap_debugfs_show, inode->i_private);
6795 }
6796 
6797 static const struct file_operations l2cap_debugfs_fops = {
6798 	.open		= l2cap_debugfs_open,
6799 	.read		= seq_read,
6800 	.llseek		= seq_lseek,
6801 	.release	= single_release,
6802 };
6803 
6804 static struct dentry *l2cap_debugfs;
6805 
6806 int __init l2cap_init(void)
6807 {
6808 	int err;
6809 
6810 	err = l2cap_init_sockets();
6811 	if (err < 0)
6812 		return err;
6813 
6814 	if (IS_ERR_OR_NULL(bt_debugfs))
6815 		return 0;
6816 
6817 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6818 					    NULL, &l2cap_debugfs_fops);
6819 
6820 	return 0;
6821 }
6822 
6823 void l2cap_exit(void)
6824 {
6825 	debugfs_remove(l2cap_debugfs);
6826 	l2cap_cleanup_sockets();
6827 }
6828 
6829 module_param(disable_ertm, bool, 0644);
6830 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
6831