xref: /openbmc/linux/net/bluetooth/l2cap_core.c (revision 82806c25)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 bool enable_ecred;
49 
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
54 
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 				       u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
58 			   void *data);
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
61 
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 		     struct sk_buff_head *skbs, u8 event);
64 
65 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
66 {
67 	if (link_type == LE_LINK) {
68 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
69 			return BDADDR_LE_PUBLIC;
70 		else
71 			return BDADDR_LE_RANDOM;
72 	}
73 
74 	return BDADDR_BREDR;
75 }
76 
/* BDADDR_* type of the local (source) address of an HCI connection. */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
81 
/* BDADDR_* type of the remote (destination) address of an HCI connection. */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
86 
87 /* ---- L2CAP channels ---- */
88 
/* Look up a channel on @conn by its destination CID (the remote side's
 * channel ID).  Caller must hold conn->chan_lock; no reference is taken
 * on the returned channel.
 */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}
100 
/* Look up a channel on @conn by its source CID (our local channel ID).
 * Caller must hold conn->chan_lock; no reference is taken on the
 * returned channel.
 */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}
112 
/* Find channel with given SCID.
 * Returns a reference locked channel: the caller is responsible for
 * calling l2cap_chan_unlock() and l2cap_chan_put() on the result.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	return c;
}
133 
/* Find channel with given DCID.
 * Returns a reference locked channel: the caller is responsible for
 * calling l2cap_chan_unlock() and l2cap_chan_put() on the result.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	return c;
}
154 
/* Look up a channel on @conn by the signalling command identifier of
 * its outstanding request.  Caller must hold conn->chan_lock; no
 * reference is taken on the returned channel.
 */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
	}
	return NULL;
}
166 
/* Find channel by signalling command identifier.
 * Returns a reference locked channel: the caller is responsible for
 * calling l2cap_chan_unlock() and l2cap_chan_put() on the result.
 */
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	return c;
}
184 
/* Find a channel in the global list bound to the given PSM and source
 * address.  BR/EDR and LE channels live in separate PSM namespaces, so
 * entries whose transport does not match @src_type are skipped.
 * Caller must hold chan_list_lock (read or write).
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
						      u8 src_type)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
			continue;

		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
			continue;

		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}
	return NULL;
}
202 
/* Bind @psm to the channel, or auto-allocate a PSM when @psm is 0.
 *
 * Auto-allocation scans the transport's dynamic range (stepping by 2 on
 * BR/EDR, by 1 on LE) for a value not already bound to @src.
 *
 * Returns 0 on success, -EADDRINUSE if @psm is taken, or -EINVAL if the
 * dynamic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
247 
/* Bind the channel to a fixed CID, turning it into a fixed channel with
 * the default MTU.  Always succeeds.
 */
int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
262 
263 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
264 {
265 	u16 cid, dyn_end;
266 
267 	if (conn->hcon->type == LE_LINK)
268 		dyn_end = L2CAP_CID_LE_DYN_END;
269 	else
270 		dyn_end = L2CAP_CID_DYN_END;
271 
272 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
273 		if (!__l2cap_get_chan_by_scid(conn, cid))
274 			return cid;
275 	}
276 
277 	return 0;
278 }
279 
/* Move the channel to @state and notify the channel ops with no error. */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
288 
/* Move the channel to @state and report @err through the state_change op. */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
295 
/* Report @err on the channel without changing its state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
300 
/* Arm the ERTM retransmission timer, but only when the monitor timer is
 * not already pending (the two timers are not run concurrently) and a
 * retransmission timeout has been configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
309 
/* Arm the ERTM monitor timer, cancelling any pending retransmission
 * timer first (monitor supersedes retrans once a poll is outstanding).
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
318 
/* Linear search of an skb queue for the frame carrying ERTM TxSeq @seq.
 * Returns the skb still on the queue (not dequeued), or NULL.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->l2cap.txseq == seq)
			return skb;
	}

	return NULL;
}
331 
332 /* ---- L2CAP sequence number lists ---- */
333 
334 /* For ERTM, ordered lists of sequence numbers must be tracked for
335  * SREJ requests that are received and for frames that are to be
336  * retransmitted. These seq_list functions implement a singly-linked
337  * list in an array, where membership in the list can also be checked
338  * in constant time. Items can also be added to the tail of the list
339  * and removed from the head in constant time, without further memory
340  * allocs or frees.
341  */
342 
/* Allocate and reset a sequence list sized for @size entries.
 * Returns 0, or -ENOMEM if the backing array cannot be allocated.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* mask doubles as both index mask and membership-check helper */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
365 
/* Release the backing array of a sequence list. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
370 
/* Test whether @seq is currently on the list (O(1) via the slot array). */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
377 
/* Remove and return the sequence number at the head of the list.
 * NOTE(review): callers are expected to ensure the list is non-empty;
 * popping an empty list would read a CLEAR head — verify at call sites.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* New head is the entry the old head linked to; clear its slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	/* Popped the last element: reset list to the empty state */
	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
393 
394 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
395 {
396 	u16 i;
397 
398 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
399 		return;
400 
401 	for (i = 0; i <= seq_list->mask; i++)
402 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
403 
404 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
405 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
406 }
407 
/* Append @seq to the tail of the list in O(1); duplicates are ignored. */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: nothing to do */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
425 
/* Delayed-work handler for chan->chan_timer: close the channel with an
 * error code derived from the state it timed out in, then drop the
 * reference that __set_chan_timer() took when scheduling this work.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	/* Release the reference taken when the timer was set */
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}
458 
/* Allocate and initialise a new channel, add it to the global channel
 * list and return it with a single reference held (kref_init), in state
 * BT_OPEN.  Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
492 
/* kref release callback: unlink the channel from the global list and
 * free it.  Runs when the last l2cap_chan_put() drops the refcount to 0.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
505 
/* Take an additional reference on the channel. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
512 
/* Take a reference only if the channel is not already being destroyed.
 * Returns the channel, or NULL if the refcount had reached zero.
 */
struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	if (!kref_get_unless_zero(&c->kref))
		return NULL;

	return c;
}
522 
/* Drop a reference; frees the channel via l2cap_chan_destroy() on last put. */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
530 
/* Reset a channel's negotiable parameters (ERTM windows, timeouts,
 * security level, FCS) to the L2CAP defaults and re-arm the
 * configuration-incomplete flag.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	chan->conf_state = 0;
	/* Cleared again in l2cap_chan_ready() once configuration is done */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
551 
/* Initialise LE credit-based flow control state: reset SDU reassembly,
 * seed the peer's TX credits, and derive MPS and local RX credits from
 * the MTUs.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	/* Give enough credits for a full packet */
	chan->rx_credits = (chan->imtu / chan->mps) + 1;

	skb_queue_head_init(&chan->tx_q);
}
565 
/* Initialise enhanced credit-based flow control state on top of the LE
 * flow-control defaults, enforcing the spec-mandated minimum MPS.
 */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		/* Recompute RX credits for the adjusted MPS */
		chan->rx_credits = (chan->imtu / chan->mps) + 1;
	}
}
576 
/* Attach @chan to @conn: assign CIDs/MTU according to the channel type,
 * set up best-effort flow-spec defaults, take a channel reference and
 * link it on the connection's channel list.
 * Caller must hold conn->chan_lock (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference dropped in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
628 
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
635 
/* Detach @chan from its connection and tear down mode-specific state.
 *
 * Stops the channel timer, notifies the owner via ops->teardown(@err),
 * unlinks the channel from the connection (dropping the reference taken
 * by __l2cap_chan_add()), releases the hci_conn reference where one was
 * held, and purges ERTM/streaming/flow-control queues unless
 * configuration never completed.
 * Caller must hold conn->chan_lock (when conn is set) and the chan lock.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	/* Tear down any AMP logical link still attached to the channel */
	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	/* Mode-specific queues were never set up if config didn't finish */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
704 
705 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
706 			      void *data)
707 {
708 	struct l2cap_chan *chan;
709 
710 	list_for_each_entry(chan, &conn->chan_l, list) {
711 		func(chan, data);
712 	}
713 }
714 
/* Locked iteration over all channels of @conn, invoking func(chan, data)
 * on each.  A NULL @conn is tolerated as a no-op.
 */
void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
		     void *data)
{
	if (!conn)
		return;

	mutex_lock(&conn->chan_lock);
	__l2cap_chan_list(conn, func, data);
	mutex_unlock(&conn->chan_lock);
}

EXPORT_SYMBOL_GPL(l2cap_chan_list);
727 
/* Work handler: propagate the hci_conn's (possibly re-resolved) remote
 * identity address to every channel on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_update_work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
746 
/* Reject a pending LE credit-based connection request, using the ident
 * saved from the incoming request and moving the channel to BT_DISCONN.
 * Deferred-setup channels report an authorization failure; otherwise
 * the PSM is reported as bad.
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_LE_AUTHORIZATION;
	else
		result = L2CAP_CR_LE_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid    = cpu_to_le16(chan->scid);
	rsp.mtu     = cpu_to_le16(chan->imtu);
	rsp.mps     = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
769 
770 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
771 {
772 	struct l2cap_conn *conn = chan->conn;
773 	struct l2cap_ecred_conn_rsp rsp;
774 	u16 result;
775 
776 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
777 		result = L2CAP_CR_LE_AUTHORIZATION;
778 	else
779 		result = L2CAP_CR_LE_BAD_PSM;
780 
781 	l2cap_state_change(chan, BT_DISCONN);
782 
783 	memset(&rsp, 0, sizeof(rsp));
784 
785 	rsp.result  = cpu_to_le16(result);
786 
787 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
788 		       &rsp);
789 }
790 
/* Reject a pending BR/EDR connection request, echoing back the CID pair
 * and moving the channel to BT_DISCONN.  Deferred-setup channels are
 * rejected for security reasons; otherwise the PSM is reported as bad.
 */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	/* In the response, scid/dcid are from the remote's point of view */
	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
811 
/* Close the channel according to its current state: send a disconnect
 * request for established connection-oriented channels, reject
 * half-open incoming connections with a transport-appropriate PDU, or
 * simply tear down/delete the channel otherwise.
 * Caller must hold the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Give the peer time to answer before the channel
			 * timer forces the teardown.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming connection not yet accepted: reject it with the
		 * response PDU matching the link and channel mode.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					break;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
862 
/* Translate the channel type + security level into the HCI
 * authentication requirement used for pairing.  As a side effect, SDP
 * and 3DSP channels at BT_SECURITY_LOW are bumped to BT_SECURITY_SDP.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* Non-SDP conn-oriented channels use the default mapping */
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
914 
/* Service level security */
/* Enforce the channel's security level on the underlying link: SMP for
 * LE links, link-level authentication/encryption for BR/EDR.
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
929 
/* Allocate the next signalling command identifier for @conn,
 * cycling through 1-128 under conn->ident_lock.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}
951 
/* Build and transmit a signalling command PDU on the connection's
 * signalling channel at maximum HCI priority.  Silently drops the
 * command if the skb cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
976 
977 static bool __chan_is_moving(struct l2cap_chan *chan)
978 {
979 	return chan->move_state != L2CAP_MOVE_STABLE &&
980 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
981 }
982 
/* Transmit a data skb on the channel's ACL link, routing it over the
 * AMP logical link when one is active and not mid-move, and choosing
 * the flush semantics from the link type and channel flags.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			/* AMP link gone: drop rather than misroute */
			kfree_skb(skb);

		return;
	}

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
1014 
/* Decode a 16-bit ERTM enhanced control field into @control.  Fields
 * that do not exist for the detected frame type are zeroed.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
1038 
/* Decode a 32-bit ERTM extended control field into @control.  Fields
 * that do not exist for the detected frame type are zeroed.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
1062 
/* Pull and decode the control field at the head of @skb into the skb's
 * control block, using the extended or enhanced format depending on the
 * channel's FLAG_EXT_CTRL setting.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
1076 
/* Encode @control into a 32-bit ERTM extended control field. */
static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
1095 
/* Encode @control into a 16-bit ERTM enhanced control field. */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
1114 
1115 static inline void __pack_control(struct l2cap_chan *chan,
1116 				  struct l2cap_ctrl *control,
1117 				  struct sk_buff *skb)
1118 {
1119 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1120 		put_unaligned_le32(__pack_extended_control(control),
1121 				   skb->data + L2CAP_HDR_SIZE);
1122 	} else {
1123 		put_unaligned_le16(__pack_enhanced_control(control),
1124 				   skb->data + L2CAP_HDR_SIZE);
1125 	}
1126 }
1127 
1128 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1129 {
1130 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1131 		return L2CAP_EXT_HDR_SIZE;
1132 	else
1133 		return L2CAP_ENH_HDR_SIZE;
1134 }
1135 
/* Allocate and build a supervisory (S-frame) PDU carrying the packed
 * @control field.  The control field width (16 vs 32 bit) follows the
 * channel's FLAG_EXT_CTRL flag; when CRC16 FCS is configured, the FCS
 * is computed over the basic header plus the control field and
 * appended last.
 *
 * Returns the skb ready for transmission, or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic L2CAP header: payload length excludes the header itself */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS must be computed after header and control are in place */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	/* S-frames carry flow-control state; send them ahead of data */
	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1168 
/* Build and transmit an S-frame described by @control, updating the
 * channel's ERTM bookkeeping (F-bit, RNR-sent state, last acked
 * sequence and the ack timer) as a side effect.  @control may be
 * modified (final bit set).  No-op for I-frame controls or while the
 * channel is moving between controllers.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is carried on the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether our last supervisory state was RNR */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* RR/RNR acknowledge reqseq, so the ack timer can be stopped;
	 * SREJ does not acknowledge anything.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1209 
1210 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1211 {
1212 	struct l2cap_ctrl control;
1213 
1214 	BT_DBG("chan %p, poll %d", chan, poll);
1215 
1216 	memset(&control, 0, sizeof(control));
1217 	control.sframe = 1;
1218 	control.poll = poll;
1219 
1220 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1221 		control.super = L2CAP_SUPER_RNR;
1222 	else
1223 		control.super = L2CAP_SUPER_RR;
1224 
1225 	control.reqseq = chan->buffer_seq;
1226 	l2cap_send_sframe(chan, &control);
1227 }
1228 
1229 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1230 {
1231 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1232 		return true;
1233 
1234 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1235 }
1236 
1237 static bool __amp_capable(struct l2cap_chan *chan)
1238 {
1239 	struct l2cap_conn *conn = chan->conn;
1240 	struct hci_dev *hdev;
1241 	bool amp_available = false;
1242 
1243 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1244 		return false;
1245 
1246 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1247 		return false;
1248 
1249 	read_lock(&hci_dev_list_lock);
1250 	list_for_each_entry(hdev, &hci_dev_list, list) {
1251 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1252 		    test_bit(HCI_UP, &hdev->flags)) {
1253 			amp_available = true;
1254 			break;
1255 		}
1256 	}
1257 	read_unlock(&hci_dev_list_lock);
1258 
1259 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1260 		return amp_available;
1261 
1262 	return false;
1263 }
1264 
/* Validate Extended Flow Specification parameters for @chan.
 * Currently a stub that accepts everything; kept so callers have a
 * single place to hook real EFS validation into.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1270 
/* Send an L2CAP Connection Request for @chan over its BR/EDR link.
 * Allocates a fresh command ident (stored in chan->ident so the
 * response can be matched) and marks the connect as pending.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1285 
/* Send a Create Channel Request asking for the channel to be created
 * on the controller identified by @amp_id.  The fresh ident is kept in
 * chan->ident for matching the response.
 */
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}
1298 
/* Prepare an ERTM channel for a move to another controller: stop all
 * ERTM timers, reset retransmission state so queued frames will be
 * resent on the new link, and park both state machines.  No-op for
 * non-ERTM channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Reset retry counters on already-sent frames (retries != 0);
	 * the first unsent frame marks the end of that region.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->l2cap.retries)
			bt_cb(skb)->l2cap.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmissions until the move completes */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1333 
/* Finish a channel move: return the move state machine to STABLE and,
 * for ERTM channels, resynchronize with the peer — the initiator polls
 * and waits for the F-bit, the responder waits for the peer's poll.
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1355 
/* Transition @chan to BT_CONNECTED and notify its owner via ops->ready.
 * Credit-based channels with no TX credits are suspended first so the
 * owner does not start sending immediately.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* No credits yet: block sends until the peer grants some */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1381 
/* Send an LE Credit Based Connection Request for @chan.  Guarded by
 * FLAG_LE_CONN_REQ_SENT so it is only ever sent once per channel.
 * Initializes LE flow control state and advertises our MTU/MPS and
 * initial RX credits.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Fall back to the link MTU when the owner did not set one */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1407 
/* Scratch state used to batch deferred channels into one Enhanced
 * Credit Based (ECRED) connection request PDU.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req req;
		__le16 scid[5];	/* source CIDs appended after the request */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel that initiated the request */
	struct pid *pid;		/* peer PID deferred channels must match */
	int count;			/* number of scid[] slots filled */
};
1417 
/* __l2cap_chan_list() callback: fold a deferred channel into the ECRED
 * connection request being assembled in @data, provided it matches the
 * initiating channel's PID/PSM, is in EXT_FLOWCTL mode and still in
 * BT_CONNECT with no outstanding ident.
 */
static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_conn_data *conn = data;
	struct pid *pid;

	/* The initiator is already in the PDU as scid[0] */
	if (chan == conn->chan)
		return;

	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only add deferred channels with the same PID/PSM */
	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	/* Set the same ident so we can match on the rsp */
	chan->ident = conn->chan->ident;

	/* Include all channels deferred */
	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);

	conn->count++;
}
1449 
/* Send an Enhanced Credit Based Connection Request for @chan, pulling
 * in any matching deferred channels on the same connection so they
 * share one request PDU (see l2cap_ecred_defer_connect).  Guarded by
 * FLAG_ECRED_CONN_REQ_SENT so the request is only sent once.
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	/* Deferred channels are picked up by an initiating sibling */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm     = chan->psm;
	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.req.mps     = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0]     = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	/* Append matching deferred channels to data.pdu.scid[] */
	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	/* PDU size is the fixed request plus one scid per channel */
	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1482 
/* Drive connection setup for a channel on an LE link.  Requires link
 * security at the channel's level first; fixed channels (no PSM) are
 * ready immediately, otherwise the appropriate credit-based connect
 * request is issued.
 */
static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	/* No PSM means a fixed channel: nothing to negotiate */
	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT) {
		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
			l2cap_ecred_connect(chan);
		else
			l2cap_le_connect(chan);
	}
}
1502 
1503 static void l2cap_start_connection(struct l2cap_chan *chan)
1504 {
1505 	if (__amp_capable(chan)) {
1506 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1507 		a2mp_discover_amp(chan);
1508 	} else if (chan->conn->hcon->type == LE_LINK) {
1509 		l2cap_le_start(chan);
1510 	} else {
1511 		l2cap_send_conn_req(chan);
1512 	}
1513 }
1514 
/* Send an Information Request for the peer's feature mask, once per
 * connection (guarded by L2CAP_INFO_FEAT_MASK_REQ_SENT).  Arms the
 * info timer so a missing response does not stall connection setup
 * forever.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1532 
1533 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1534 {
1535 	/* The minimum encryption key size needs to be enforced by the
1536 	 * host stack before establishing any L2CAP connections. The
1537 	 * specification in theory allows a minimum of 1, but to align
1538 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1539 	 *
1540 	 * This check might also be called for unencrypted connections
1541 	 * that have no key size requirements. Ensure that the link is
1542 	 * actually encrypted before enforcing a key size.
1543 	 */
1544 	int min_key_size = hcon->hdev->min_enc_key_size;
1545 
1546 	/* On FIPS security level, key size must be 16 bytes */
1547 	if (hcon->sec_level == BT_SECURITY_FIPS)
1548 		min_key_size = 16;
1549 
1550 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1551 		hcon->enc_key_size >= min_key_size);
1552 }
1553 
/* Start channel establishment once the link permits it: LE links go
 * straight to the LE flow; on BR/EDR we first need the peer's feature
 * mask (requesting it if necessary), channel security, no pending
 * connect, and an acceptable encryption key size.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* Feature mask exchange not started yet: kick it off and wait
	 * for l2cap_conn_start() to retry us.
	 */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	if (l2cap_check_enc_key_size(conn->hcon))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1580 
1581 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1582 {
1583 	u32 local_feat_mask = l2cap_feat_mask;
1584 	if (!disable_ertm)
1585 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1586 
1587 	switch (mode) {
1588 	case L2CAP_MODE_ERTM:
1589 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1590 	case L2CAP_MODE_STREAMING:
1591 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1592 	default:
1593 		return 0x00;
1594 	}
1595 }
1596 
/* Send a Disconnection Request for @chan and move it to BT_DISCONN
 * with @err recorded.  ERTM timers are stopped first; A2MP channels
 * have no disconnect PDU and only change state.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	/* A2MP fixed channel: no disconnect request on the wire */
	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1623 
1624 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its setup now that the
 * feature mask exchange has completed: connectionless channels become
 * ready, BT_CONNECT channels (re)issue connect requests, and
 * BT_CONNECT2 channels answer the pending incoming connect.  Runs
 * under conn->chan_lock with each channel individually locked.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* A state-2 device cannot fall back from an
			 * unsupported mode; close instead.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect waiting for our response */
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Owner must authorize first */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Success: immediately start configuration */
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1704 
/* LE-specific post-connect handling: trigger security for outgoing
 * pairing, and as peripheral request a connection parameter update if
 * the negotiated interval falls outside our configured bounds.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1737 
/* Called when the underlying link comes up.  Kicks off the feature
 * mask exchange on ACL links, advances every channel appropriate to
 * its type and state, runs LE-specific setup, and releases any RX
 * packets that were queued while the connection was pending.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels are managed by their own state machine */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless channels need the feature
			 * exchange done before they are usable.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process any frames received before the conn became ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1778 
/* Notify sockets that we cannot guarantee reliability anymore */
1780 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1781 {
1782 	struct l2cap_chan *chan;
1783 
1784 	BT_DBG("conn %p", conn);
1785 
1786 	mutex_lock(&conn->chan_lock);
1787 
1788 	list_for_each_entry(chan, &conn->chan_l, list) {
1789 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1790 			l2cap_chan_set_err(chan, err);
1791 	}
1792 
1793 	mutex_unlock(&conn->chan_lock);
1794 }
1795 
/* Info request timer expired without a response: give up on the
 * feature mask exchange, mark it done anyway and let the pending
 * channels proceed with defaults.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1806 
1807 /*
1808  * l2cap_user
1809  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1810  * callback is called during registration. The ->remove callback is called
1811  * during unregistration.
 * An l2cap_user object can either be explicitly unregistered or implicitly
 * unregistered when the underlying l2cap_conn object is deleted. This
 * guarantees that l2cap->hcon,
1814  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1815  * External modules must own a reference to the l2cap_conn object if they intend
1816  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1817  * any time if they don't.
1818  */
1819 
/* Register @user on @conn, invoking user->probe() under the hci_dev
 * lock.  Fails with -EINVAL if the user is already registered and
 * -ENODEV if the connection has already been torn down.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-empty list means the user is already registered somewhere */
	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
1856 EXPORT_SYMBOL(l2cap_register_user);
1857 
1858 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1859 {
1860 	struct hci_dev *hdev = conn->hcon->hdev;
1861 
1862 	hci_dev_lock(hdev);
1863 
1864 	if (list_empty(&user->list))
1865 		goto out_unlock;
1866 
1867 	list_del_init(&user->list);
1868 	user->remove(conn, user);
1869 
1870 out_unlock:
1871 	hci_dev_unlock(hdev);
1872 }
1873 EXPORT_SYMBOL(l2cap_unregister_user);
1874 
1875 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1876 {
1877 	struct l2cap_user *user;
1878 
1879 	while (!list_empty(&conn->users)) {
1880 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1881 		list_del_init(&user->list);
1882 		user->remove(conn, user);
1883 	}
1884 }
1885 
/* Tear down the L2CAP connection attached to @hcon: cancel pending
 * work, unregister users, close every channel with @err, detach the
 * HCI channel and drop the conn reference.  Safe to call when no
 * l2cap_data is attached.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop a partially reassembled frame, if any */
	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so it survives l2cap_chan_del() */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* Detach from the hci_conn; conn->hchan == NULL signals
	 * l2cap_register_user() that the conn is gone.
	 */
	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1941 
/* kref release callback: drop the hci_conn reference held by the
 * l2cap_conn and free it.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1949 
/* Take a reference on @conn; returns @conn for call chaining. */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
1956 
/* Drop a reference on @conn; frees it via l2cap_conn_free() when the
 * last reference goes away.
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
1962 
1963 /* ---- Socket interface ---- */
1964 
1965 /* Find socket with psm and source / destination bdaddr.
1966  * Returns closest match.
1967  */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		/* state == 0 matches any state */
		if (state && c->state != state)
			continue;

		/* Channel's source address type must suit the link type */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Take a ref unless the chan is dying */
				c = l2cap_chan_hold_unless_zero(c);
				if (c) {
					read_unlock(&chan_list_lock);
					return c;
				}
			}

			/* Closest match: wildcard (BDADDR_ANY) on either
			 * or both addresses; remembered in case no exact
			 * match is found later in the list.
			 */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
2018 
/* ERTM monitor timer work: feed a MONITOR_TO event into the TX state
 * machine.  The timer held a channel reference, dropped here; bail out
 * if the channel has already been detached from its connection.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
2039 
/* ERTM retransmission timer work: feed a RETRANS_TO event into the TX
 * state machine.  Mirrors l2cap_monitor_timeout(), including the
 * reference drop and the detached-channel check.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
2059 
/* Transmit @skbs in streaming mode: append them to the TX queue and
 * drain it immediately, stamping each frame with the next TX sequence
 * number and (if configured) a CRC16 FCS.  Streaming mode has no
 * retransmission, so frames leave the queue as they are sent.
 * Deferred while the channel is moving between controllers.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming frames never acknowledge anything */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		/* FCS covers everything up to (not including) itself */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
2098 
/* Transmit queued I-frames in ERTM mode, up to the remote TX window.
 * Each frame gets current reqseq/txseq, optional FCS, and is sent as a
 * clone so the original stays queued for possible retransmission.
 * Returns the number of frames sent, 0 when sending is blocked, or
 * -ENOTCONN if the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggy-back a pending F-bit on this I-frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* I-frames acknowledge everything up to buffer_seq */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance the send pointer; NULL means queue exhausted */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2168 
/* Retransmit every sequence number queued on chan->retrans_list.  The
 * stored frame's control field is refreshed (current reqseq, possible
 * F-bit) and the FCS recomputed before sending a clone or, for cloned
 * originals, a writable copy.  Exceeding max_tx disconnects the
 * channel.  Deferred while the remote is busy or the channel is
 * moving.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		/* max_tx == 0 means unlimited retransmissions */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2249 
/* Queue a single I-frame (identified by control->reqseq) for
 * retransmission and kick the resend machinery.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2258 
/* Retransmit all unacked I-frames starting from control->reqseq.
 *
 * The first walk positions @skb at the frame with txseq == reqseq (or
 * at tx_send_head, whichever comes first); the second walk queues every
 * txseq from there up to, but not including, tx_send_head - frames at
 * or beyond tx_send_head have not been sent yet.
 *
 * NOTE(review): assumes tx_q is non-empty whenever unacked_frames != 0,
 * since skb_queue_walk_from() requires a valid starting skb - verify.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll from the peer must be answered with the F-bit set */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2292 
/* Acknowledge received I-frames, either with an RNR (when locally
 * busy), by letting outgoing I-frames carry the ack implicitly, or
 * with an explicit RR once 3/4 of the ack window is outstanding.
 * Otherwise the ack timer is (re)armed to send a delayed ack.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	/* Number of frames received but not yet acknowledged */
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Locally busy: tell the peer to stop sending */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Below threshold: defer the ack via the ack timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2342 
/* Copy up to @len bytes of user data from @msg into @skb.
 *
 * The first @count bytes go into the linear area of @skb (the caller
 * has already written the L2CAP header there); the remainder is split
 * into continuation skbs of at most conn->mtu bytes each and chained
 * onto skb_shinfo(skb)->frag_list.
 *
 * Returns the number of bytes consumed, or -EFAULT on a failed copy
 * from the iterator.  On error the caller frees @skb, which also
 * releases any fragments already linked into the frag_list.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link before copying so a later -EFAULT still lets the
		 * caller free this fragment via kfree_skb(skb).
		 */
		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
				   &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Account fragment bytes in the head skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2386 
/* Build a connectionless (G-frame) PDU: L2CAP header, 16-bit PSM,
 * then @len bytes of user data from @msg.  Overflow beyond the first
 * HCI fragment is chained on by l2cap_skbuff_fromiovec().
 *
 * Returns the skb, or an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Payload that fits in the first HCI fragment with the header */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* The length field covers the PSM as well as the payload */
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2418 
2419 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2420 					      struct msghdr *msg, size_t len)
2421 {
2422 	struct l2cap_conn *conn = chan->conn;
2423 	struct sk_buff *skb;
2424 	int err, count;
2425 	struct l2cap_hdr *lh;
2426 
2427 	BT_DBG("chan %p len %zu", chan, len);
2428 
2429 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2430 
2431 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2432 				   msg->msg_flags & MSG_DONTWAIT);
2433 	if (IS_ERR(skb))
2434 		return skb;
2435 
2436 	/* Create L2CAP header */
2437 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2438 	lh->cid = cpu_to_le16(chan->dcid);
2439 	lh->len = cpu_to_le16(len);
2440 
2441 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2442 	if (unlikely(err < 0)) {
2443 		kfree_skb(skb);
2444 		return ERR_PTR(err);
2445 	}
2446 	return skb;
2447 }
2448 
/* Build one ERTM/streaming I-frame PDU.
 *
 * Layout: L2CAP header, control field (16- or 32-bit, zeroed here and
 * filled in at transmit time), optional SDU length (@sdulen != 0 marks
 * the start fragment of a segmented SDU), payload, and - when CRC16 is
 * configured - room for the FCS, which is also written at send time.
 *
 * Returns the skb, or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Control-field size depends on enhanced vs extended mode */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* Length covers control field, SDU length and FCS, not the header */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2502 
/* Segment an SDU into ERTM/streaming I-frame PDUs on @seg_queue.
 *
 * PDU size is bounded by the HCI MTU (minus L2CAP overhead) and the
 * remote MPS.  SAR marking: a single-PDU SDU is UNSEGMENTED; otherwise
 * the first PDU is START (and carries the total SDU length), middle
 * PDUs are CONTINUE, and the last is END.
 *
 * Returns 0 on success or a negative errno; on failure any PDUs
 * already built are purged from @seg_queue.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the START PDU carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2569 
/* Build one LE (or enhanced) credit-based flow-control PDU.
 *
 * Layout: L2CAP header, optional 16-bit SDU length (@sdulen != 0 marks
 * the first PDU of an SDU), then @len payload bytes from @msg.
 *
 * Returns the skb, or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* Length field includes the SDU-length field when present */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2612 
/* Segment an SDU into LE credit-based flow-control PDUs on @seg_queue.
 *
 * The first PDU carries the 16-bit SDU length and therefore has
 * L2CAP_SDULEN_SIZE fewer payload bytes than the remote MPS allows;
 * subsequent PDUs use the full MPS.
 *
 * Returns 0 on success or a negative errno; on failure any PDUs
 * already built are purged from @seg_queue.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		/* After the first PDU the SDU-length field is gone, so
		 * later PDUs can carry L2CAP_SDULEN_SIZE more payload.
		 */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2648 
2649 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2650 {
2651 	int sent = 0;
2652 
2653 	BT_DBG("chan %p", chan);
2654 
2655 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2656 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2657 		chan->tx_credits--;
2658 		sent++;
2659 	}
2660 
2661 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2662 	       skb_queue_len(&chan->tx_q));
2663 }
2664 
/* Send an SDU on @chan according to the channel type and mode.
 *
 * Called with the channel locked.  Returns the number of bytes queued
 * or sent, or a negative errno.  The channel lock is dropped and
 * reacquired inside the skb allocation callbacks, so channel state is
 * rechecked after every allocation/segmentation step.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Segmenting may have released the lock; recheck state */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* Send as many PDUs as current credits allow */
		l2cap_le_flowctl_send(chan);

		/* Out of credits: stop the caller feeding more data
		 * until the peer returns credits.
		 */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2792 
/* Send an SREJ S-frame for every missing sequence number between
 * expected_tx_seq and @txseq (exclusive), skipping frames already
 * buffered out-of-order in srej_q.  Each requested seq is recorded on
 * srej_list, and expected_tx_seq is advanced past @txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Only request frames we have not already received */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2815 
2816 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2817 {
2818 	struct l2cap_ctrl control;
2819 
2820 	BT_DBG("chan %p", chan);
2821 
2822 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2823 		return;
2824 
2825 	memset(&control, 0, sizeof(control));
2826 	control.sframe = 1;
2827 	control.super = L2CAP_SUPER_SREJ;
2828 	control.reqseq = chan->srej_list.tail;
2829 	l2cap_send_sframe(chan, &control);
2830 }
2831 
/* Resend SREJ S-frames for every outstanding sequence number on
 * srej_list, stopping early if @txseq is reached.  Each entry is
 * popped, re-requested, and appended back, so the list contents are
 * preserved; the initial head is captured to make exactly one pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* Stop at the requested seq, or when the list empties */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Keep the seq outstanding until the frame arrives */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2857 
/* Process an incoming acknowledgment: drop every tx-queue frame with a
 * sequence number before @reqseq (the peer's next expected seq), and
 * stop the retransmission timer once nothing remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack is a duplicate */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	/* All frames acked: no retransmission needed */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2889 
/* Abandon selective-reject recovery: forget all outstanding SREJs,
 * drop frames buffered out of order, and return the receive state
 * machine to plain RECV, resynchronising expected_tx_seq to the last
 * in-order frame (buffer_seq).
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2899 
/* Handle tx-path events while in the normal XMIT state.  New data is
 * queued and transmitted immediately; poll-type events (explicit poll,
 * retransmission timeout) send an RR/RNR with the P-bit and move the
 * state machine to WAIT_F until the peer answers with the F-bit.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		/* Queue the new frames and start sending right away */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Sends an RNR since we are now locally busy */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the peer to stop; poll it with RR so it
			 * knows it may resume sending.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer expired: poll the peer */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2971 
2972 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2973 				  struct l2cap_ctrl *control,
2974 				  struct sk_buff_head *skbs, u8 event)
2975 {
2976 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2977 	       event);
2978 
2979 	switch (event) {
2980 	case L2CAP_EV_DATA_REQUEST:
2981 		if (chan->tx_send_head == NULL)
2982 			chan->tx_send_head = skb_peek(skbs);
2983 		/* Queue data, but don't send. */
2984 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2985 		break;
2986 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2987 		BT_DBG("Enter LOCAL_BUSY");
2988 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2989 
2990 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2991 			/* The SREJ_SENT state must be aborted if we are to
2992 			 * enter the LOCAL_BUSY state.
2993 			 */
2994 			l2cap_abort_rx_srej_sent(chan);
2995 		}
2996 
2997 		l2cap_send_ack(chan);
2998 
2999 		break;
3000 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
3001 		BT_DBG("Exit LOCAL_BUSY");
3002 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3003 
3004 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
3005 			struct l2cap_ctrl local_control;
3006 			memset(&local_control, 0, sizeof(local_control));
3007 			local_control.sframe = 1;
3008 			local_control.super = L2CAP_SUPER_RR;
3009 			local_control.poll = 1;
3010 			local_control.reqseq = chan->buffer_seq;
3011 			l2cap_send_sframe(chan, &local_control);
3012 
3013 			chan->retry_count = 1;
3014 			__set_monitor_timer(chan);
3015 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
3016 		}
3017 		break;
3018 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
3019 		l2cap_process_reqseq(chan, control->reqseq);
3020 		fallthrough;
3021 
3022 	case L2CAP_EV_RECV_FBIT:
3023 		if (control && control->final) {
3024 			__clear_monitor_timer(chan);
3025 			if (chan->unacked_frames > 0)
3026 				__set_retrans_timer(chan);
3027 			chan->retry_count = 0;
3028 			chan->tx_state = L2CAP_TX_STATE_XMIT;
3029 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
3030 		}
3031 		break;
3032 	case L2CAP_EV_EXPLICIT_POLL:
3033 		/* Ignore */
3034 		break;
3035 	case L2CAP_EV_MONITOR_TO:
3036 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3037 			l2cap_send_rr_or_rnr(chan, 1);
3038 			__set_monitor_timer(chan);
3039 			chan->retry_count++;
3040 		} else {
3041 			l2cap_send_disconn_req(chan, ECONNABORTED);
3042 		}
3043 		break;
3044 	default:
3045 		break;
3046 	}
3047 }
3048 
3049 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3050 		     struct sk_buff_head *skbs, u8 event)
3051 {
3052 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3053 	       chan, control, skbs, event, chan->tx_state);
3054 
3055 	switch (chan->tx_state) {
3056 	case L2CAP_TX_STATE_XMIT:
3057 		l2cap_tx_state_xmit(chan, control, skbs, event);
3058 		break;
3059 	case L2CAP_TX_STATE_WAIT_F:
3060 		l2cap_tx_state_wait_f(chan, control, skbs, event);
3061 		break;
3062 	default:
3063 		/* Ignore event */
3064 		break;
3065 	}
3066 }
3067 
/* Feed a received reqseq (and F-bit, if set) into the tx state machine
 * so acknowledged frames are released from the tx queue.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
3074 
/* Feed only the F-bit of a received frame into the tx state machine
 * (reqseq processing is handled elsewhere for this frame).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
3081 
3082 /* Copy frame to all raw sockets on that connection */
3083 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3084 {
3085 	struct sk_buff *nskb;
3086 	struct l2cap_chan *chan;
3087 
3088 	BT_DBG("conn %p", conn);
3089 
3090 	mutex_lock(&conn->chan_lock);
3091 
3092 	list_for_each_entry(chan, &conn->chan_l, list) {
3093 		if (chan->chan_type != L2CAP_CHAN_RAW)
3094 			continue;
3095 
3096 		/* Don't send frame to the channel it came from */
3097 		if (bt_cb(skb)->l2cap.chan == chan)
3098 			continue;
3099 
3100 		nskb = skb_clone(skb, GFP_KERNEL);
3101 		if (!nskb)
3102 			continue;
3103 		if (chan->ops->recv(chan, nskb))
3104 			kfree_skb(nskb);
3105 	}
3106 
3107 	mutex_unlock(&conn->chan_lock);
3108 }
3109 
3110 /* ---- L2CAP signalling commands ---- */
3111 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3112 				       u8 ident, u16 dlen, void *data)
3113 {
3114 	struct sk_buff *skb, **frag;
3115 	struct l2cap_cmd_hdr *cmd;
3116 	struct l2cap_hdr *lh;
3117 	int len, count;
3118 
3119 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3120 	       conn, code, ident, dlen);
3121 
3122 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3123 		return NULL;
3124 
3125 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3126 	count = min_t(unsigned int, conn->mtu, len);
3127 
3128 	skb = bt_skb_alloc(count, GFP_KERNEL);
3129 	if (!skb)
3130 		return NULL;
3131 
3132 	lh = skb_put(skb, L2CAP_HDR_SIZE);
3133 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
3134 
3135 	if (conn->hcon->type == LE_LINK)
3136 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3137 	else
3138 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
3139 
3140 	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
3141 	cmd->code  = code;
3142 	cmd->ident = ident;
3143 	cmd->len   = cpu_to_le16(dlen);
3144 
3145 	if (dlen) {
3146 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3147 		skb_put_data(skb, data, count);
3148 		data += count;
3149 	}
3150 
3151 	len -= skb->len;
3152 
3153 	/* Continuation fragments (no L2CAP header) */
3154 	frag = &skb_shinfo(skb)->frag_list;
3155 	while (len) {
3156 		count = min_t(unsigned int, conn->mtu, len);
3157 
3158 		*frag = bt_skb_alloc(count, GFP_KERNEL);
3159 		if (!*frag)
3160 			goto fail;
3161 
3162 		skb_put_data(*frag, data, count);
3163 
3164 		len  -= count;
3165 		data += count;
3166 
3167 		frag = &(*frag)->next;
3168 	}
3169 
3170 	return skb;
3171 
3172 fail:
3173 	kfree_skb(skb);
3174 	return NULL;
3175 }
3176 
/* Parse one configuration option at *ptr, returning its type, length
 * and value, and advance *ptr past it.  1/2/4-byte values are returned
 * by value; any other length returns a pointer to the raw option data
 * in *val.  Returns the total size consumed.
 *
 * NOTE(review): opt->len is taken from the wire without a bounds check
 * here; callers are assumed to validate that the option fits inside
 * the received buffer - verify at each call site.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer instead */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3210 
/* Append one configuration option to the buffer at *ptr and advance
 * *ptr past it.  1/2/4-byte values are stored little-endian; any other
 * length treats @val as a pointer to @len bytes to copy.  Silently
 * does nothing if fewer than @size bytes of buffer space remain.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Refuse to overflow the remaining buffer space */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: val is a pointer to the data */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3243 
/* Append an Extended Flow Specification option for @chan to the
 * configuration buffer at *ptr.  ERTM channels use the locally
 * configured service parameters; streaming channels use best-effort
 * defaults.  Other modes add nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3274 
/* Delayed-ack timer work: if frames have been received since the last
 * acknowledgment, send an RR/RNR now.
 *
 * NOTE(review): the trailing l2cap_chan_put() presumably balances a
 * reference taken when this delayed work was scheduled - confirm
 * against the timer-arming helpers.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3294 
/* Initialise per-channel sequencing, queues and (for ERTM mode) the
 * timers and sequence lists used by the retransmission machinery.
 *
 * Returns 0 on success or a negative errno if a sequence list cannot
 * be allocated; on partial failure the srej list is freed again.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP move state starts out stable on the BR/EDR controller */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming mode needs none of the ERTM machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Don't leak the first list on partial failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3339 
3340 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3341 {
3342 	switch (mode) {
3343 	case L2CAP_MODE_STREAMING:
3344 	case L2CAP_MODE_ERTM:
3345 		if (l2cap_mode_supported(mode, remote_feat_mask))
3346 			return mode;
3347 		fallthrough;
3348 	default:
3349 		return L2CAP_MODE_BASIC;
3350 	}
3351 }
3352 
3353 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3354 {
3355 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3356 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3357 }
3358 
3359 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3360 {
3361 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3362 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3363 }
3364 
/* Fill in the RFC option's retransmission and monitor timeouts.  On an
 * AMP (high-speed) link they are derived from the controller's
 * best-effort flush timeout; on plain BR/EDR the spec defaults apply.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range of the RFC option fields */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3402 
3403 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3404 {
3405 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3406 	    __l2cap_ews_supported(chan->conn)) {
3407 		/* use extended control field */
3408 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3409 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3410 	} else {
3411 		chan->tx_win = min_t(u16, chan->tx_win,
3412 				     L2CAP_DEFAULT_TX_WINDOW);
3413 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3414 	}
3415 	chan->ack_win = chan->tx_win;
3416 }
3417 
3418 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3419 {
3420 	struct hci_conn *conn = chan->conn->hcon;
3421 
3422 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3423 
3424 	/* The 2-DH1 packet has between 2 and 56 information bytes
3425 	 * (including the 2-byte payload header)
3426 	 */
3427 	if (!(conn->pkt_type & HCI_2DH1))
3428 		chan->imtu = 54;
3429 
3430 	/* The 3-DH1 packet has between 2 and 85 information bytes
3431 	 * (including the 2-byte payload header)
3432 	 */
3433 	if (!(conn->pkt_type & HCI_3DH1))
3434 		chan->imtu = 83;
3435 
3436 	/* The 2-DH3 packet has between 2 and 369 information bytes
3437 	 * (including the 2-byte payload header)
3438 	 */
3439 	if (!(conn->pkt_type & HCI_2DH3))
3440 		chan->imtu = 367;
3441 
3442 	/* The 3-DH3 packet has between 2 and 554 information bytes
3443 	 * (including the 2-byte payload header)
3444 	 */
3445 	if (!(conn->pkt_type & HCI_3DH3))
3446 		chan->imtu = 552;
3447 
3448 	/* The 2-DH5 packet has between 2 and 681 information bytes
3449 	 * (including the 2-byte payload header)
3450 	 */
3451 	if (!(conn->pkt_type & HCI_2DH5))
3452 		chan->imtu = 679;
3453 
3454 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3455 	 * (including the 2-byte payload header)
3456 	 */
3457 	if (!(conn->pkt_type & HCI_3DH5))
3458 		chan->imtu = 1021;
3459 }
3460 
/* Build an L2CAP Configuration Request for @chan into @data (at most
 * @data_size bytes).  On the very first request the channel mode is
 * settled: ERTM/streaming are kept for "state 2" devices, otherwise
 * downgraded via l2cap_select_mode() based on the remote feature mask.
 * MTU, RFC, EFS, EWS and FCS options are then appended as applicable.
 *
 * Returns the number of bytes written, used as the command length.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only once, on the first request */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A "state 2" device insists on its configured mode */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only propose an MTU when it differs from the spec default;
	 * an unset (zero) MTU is auto-sized from the link packet types.
	 */
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only send an explicit basic-mode RFC when the remote
		 * supports some non-basic mode at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* PDU size is bounded by the link MTU minus worst-case
		 * L2CAP framing (extended header, SDU length, FCS).
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		/* The legacy RFC field can only carry the classic window;
		 * larger windows go in the EWS option below.
		 */
		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3586 
/* Parse the accumulated Configuration Request (chan->conf_req,
 * chan->conf_len bytes) and build the matching Configuration Response
 * into @data (at most @data_size bytes).
 *
 * Options with an unexpected length are silently ignored; unknown
 * non-hint options turn the result into L2CAP_CONF_UNKNOWN and are
 * echoed back.  Returns the response length, or -ECONNREFUSED when no
 * acceptable configuration can be negotiated.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: collect the option values the peer sent */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* EWS requires A2MP support on our side */
			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
				return -ECONNREFUSED;
			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	/* Mode is only (re)negotiated on the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State 2 device: the peer must match our mode exactly */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Give up after the second unacceptable mode proposal */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			/* A conflicting service type is only acceptable
			 * when one side is NO_TRAFFIC.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* The RFC window is only authoritative when no
			 * EWS option was received.
			 */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's PDU size to what the link MTU
			 * can carry after worst-case L2CAP framing.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3811 
/* Process a Configuration Response (@rsp, @len bytes) and build the
 * follow-up Configuration Request into @data (at most @size bytes),
 * adopting the values the peer offered.  *result carries the response's
 * result code and may be read under PENDING handling.
 *
 * Returns the length of the new request, or -ECONNREFUSED when the
 * offered settings conflict with our channel (mode or service type).
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Ignore malformed option lengths (same below) */
			if (olen != 2)
				break;
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* A state 2 device never accepts a mode change */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Conflicting service types are only tolerated
			 * when one side is NO_TRAFFIC.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* We cannot be upgraded out of basic mode by the peer */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			/* Without extended control the RFC window bounds
			 * the ack window as well.
			 */
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3929 
3930 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3931 				u16 result, u16 flags)
3932 {
3933 	struct l2cap_conf_rsp *rsp = data;
3934 	void *ptr = rsp->data;
3935 
3936 	BT_DBG("chan %p", chan);
3937 
3938 	rsp->scid   = cpu_to_le16(chan->dcid);
3939 	rsp->result = cpu_to_le16(result);
3940 	rsp->flags  = cpu_to_le16(flags);
3941 
3942 	return ptr - data;
3943 }
3944 
3945 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3946 {
3947 	struct l2cap_le_conn_rsp rsp;
3948 	struct l2cap_conn *conn = chan->conn;
3949 
3950 	BT_DBG("chan %p", chan);
3951 
3952 	rsp.dcid    = cpu_to_le16(chan->scid);
3953 	rsp.mtu     = cpu_to_le16(chan->imtu);
3954 	rsp.mps     = cpu_to_le16(chan->mps);
3955 	rsp.credits = cpu_to_le16(chan->rx_credits);
3956 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3957 
3958 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3959 		       &rsp);
3960 }
3961 
/* Send the deferred Enhanced Credit Based Connection Response covering
 * every channel created by the same ECRED Connection Request (all such
 * channels share chan->ident).  Each matching channel has its ident
 * cleared so the response is sent exactly once, and its source CID is
 * appended to the response's DCID list.
 *
 * NOTE(review): pdu.dcid[] has room for 5 entries; this presumably
 * relies on an ECRED request carrying at most 5 source CIDs, so no more
 * than 5 channels can share one ident -- confirm against the request
 * handler.
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct {
		struct l2cap_ecred_conn_rsp rsp;
		__le16 dcid[5];
	} __packed pdu;
	struct l2cap_conn *conn = chan->conn;
	u16 ident = chan->ident;
	int i = 0;

	/* Response already sent (ident consumed earlier) */
	if (!ident)
		return;

	BT_DBG("chan %p ident %d", chan, ident);

	pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
	pdu.rsp.mps     = cpu_to_le16(chan->mps);
	pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->ident != ident)
			continue;

		/* Reset ident so only one response is sent */
		chan->ident = 0;

		/* Include all channels pending with the same ident */
		pdu.dcid[i++] = cpu_to_le16(chan->scid);
	}

	mutex_unlock(&conn->chan_lock);

	l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
			sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
}
4000 
4001 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
4002 {
4003 	struct l2cap_conn_rsp rsp;
4004 	struct l2cap_conn *conn = chan->conn;
4005 	u8 buf[128];
4006 	u8 rsp_code;
4007 
4008 	rsp.scid   = cpu_to_le16(chan->dcid);
4009 	rsp.dcid   = cpu_to_le16(chan->scid);
4010 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4011 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4012 
4013 	if (chan->hs_hcon)
4014 		rsp_code = L2CAP_CREATE_CHAN_RSP;
4015 	else
4016 		rsp_code = L2CAP_CONN_RSP;
4017 
4018 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
4019 
4020 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
4021 
4022 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4023 		return;
4024 
4025 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4026 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4027 	chan->num_conf_req++;
4028 }
4029 
/* Extract the final RFC (and extended window) settings from a remote
 * Configuration Response @rsp of @len bytes and apply them to @chan.
 * Used when no further Configuration Request of ours will carry them.
 * Basic-mode channels are left untouched.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			/* Malformed lengths are ignored, keeping defaults */
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control the EWS value wins over the
		 * (clamped) RFC window field.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
4085 
/* Handle an incoming Command Reject.  If it rejects our outstanding
 * Information Request (matching ident), treat the feature-mask exchange
 * as complete and proceed with connection establishment anyway.
 * Returns 0, or -EPROTO when the command payload is truncated.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	/* Only "command not understood" rejects are of interest here */
	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
	    cmd->ident == conn->info_ident) {
		cancel_delayed_work(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
4110 
/* Handle an incoming Connection Request (or, when @amp_id is not
 * AMP_ID_BREDR, an AMP Create Channel Request): look up a listening
 * channel for the requested PSM, run security and source-CID validity
 * checks, create and register the new channel, and send the response
 * using @rsp_code.  May also kick off the information (feature mask)
 * exchange and the first Configuration Request.
 *
 * Returns the newly created channel, or NULL if the request was
 * refused before a channel existed.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Lock order: conn->chan_lock, then the channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	/* The peer's source CID is our destination CID */
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange still pending; request it below */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* NOTE(review): chan is dereferenced here after chan_lock and
	 * conn->chan_lock were released at "response:" -- presumably safe
	 * in the current call context, but verify against later upstream
	 * use-after-free fixes in this function.
	 */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	return chan;
}
4253 
/* Signaling handler for a BR/EDR Connection Request: validate the
 * command length, notify the management interface of the (first)
 * connection, then delegate to l2cap_connect() with the plain
 * Connection Response opcode.  Returns 0 or -EPROTO.
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	hci_dev_lock(hdev);
	/* Only report the device as connected once per ACL link */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, hcon, NULL, 0);
	hci_dev_unlock(hdev);

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
4272 
/* Handle a Connection Response or Create Channel Response.  The channel
 * is looked up under conn->chan_lock by our source CID when the peer
 * echoed it, otherwise by the command ident still pending on the
 * channel.  On success the channel moves to BT_CONFIG and the first
 * Configuration Request is sent; on PEND we just record the state; any
 * other result tears the channel down.
 *
 * Returns 0 on success, -EPROTO on a truncated command, or -EBADSLT if
 * no matching channel exists.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		/* No CID echoed (e.g. refused request): match by ident */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the config request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result code means the peer refused */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4345 
4346 static inline void set_default_fcs(struct l2cap_chan *chan)
4347 {
4348 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4349 	 * sides request it.
4350 	 */
4351 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4352 		chan->fcs = L2CAP_FCS_NONE;
4353 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4354 		chan->fcs = L2CAP_FCS_CRC16;
4355 }
4356 
/* Send the final (success) Configuration Response that was held back
 * while an EFS negotiation was pending, clearing the local pending
 * flag and marking output configuration as done.  @data is scratch
 * space for building the response.
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
		       l2cap_build_conf_rsp(chan, data,
					    L2CAP_CONF_SUCCESS, flags), data);
}
4372 
4373 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4374 				   u16 scid, u16 dcid)
4375 {
4376 	struct l2cap_cmd_rej_cid rej;
4377 
4378 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4379 	rej.scid = __cpu_to_le16(scid);
4380 	rej.dcid = __cpu_to_le16(dcid);
4381 
4382 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4383 }
4384 
/* Handle an incoming L2CAP Configure Request on a BR/EDR channel.
 *
 * Accumulates (possibly fragmented) configuration options into
 * chan->conf_req, and once the final fragment arrives parses them and
 * sends the Configure Response.  When both directions of configuration
 * are done the channel is brought up (ERTM state initialised if needed).
 * Returns 0, or a negative error for a malformed command.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];		/* scratch buffer for building responses */
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* On success chan is returned locked and with a reference held;
	 * both are released at the unlock label below.
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Config is only valid while connecting/configuring (or for
	 * reconfiguration on an established channel).
	 */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both sides configured: finish channel setup. */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* We have not sent our own Configure Request yet; do it now. */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4495 
/* Handle an incoming L2CAP Configure Response.
 *
 * On success our outgoing options are accepted; on UNKNOWN/UNACCEPT we
 * retry with adjusted options up to L2CAP_CONF_MAX_CONF_RSP times; any
 * other result tears the channel down.  Brings the channel up once both
 * configuration directions are complete.  Returns 0 or -EPROTO.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	/* Only meaningful once cmd_len has been validated just below */
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returned locked and with a reference; released at done */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* BR/EDR channels answer directly; AMP channels
			 * wait for the logical link and stash the ident.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		/* Too many retries or a fatal result: give up */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4610 
/* Handle an incoming L2CAP Disconnection Request: acknowledge it with a
 * Disconnection Response and tear the channel down.
 *
 * Lock order is conn->chan_lock, then the channel lock; an extra channel
 * reference is held across l2cap_chan_del()/close() so the final put
 * here may free the channel.  Returns 0 or -EPROTO.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_hold(chan);
	l2cap_chan_lock(chan);

	/* Response echoes the CIDs from our own point of view */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4657 
/* Handle an incoming L2CAP Disconnection Response: finish tearing down a
 * channel we previously asked to disconnect.
 *
 * Responses for unknown channels or channels not in BT_DISCONN are
 * silently ignored.  Same lock order and channel refcount discipline as
 * l2cap_disconnect_req().  Returns 0 or -EPROTO.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_hold(chan);
	l2cap_chan_lock(chan);

	/* Only act if we are actually waiting for this response */
	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_del(chan, 0);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4703 
/* Handle an incoming L2CAP Information Request and answer with our
 * feature mask, fixed-channel bitmap, or "not supported".
 * Returns 0 or -EPROTO for a malformed command.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4-byte rsp header + 4-byte feature mask */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 4-byte rsp header + 8-byte fixed channel bitmap */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4754 
/* Handle an incoming L2CAP Information Response to a request we sent.
 *
 * Feature-mask responses may chain into a fixed-channel request; once
 * the exchange is finished (or fails) the pending-connection machinery
 * is kicked via l2cap_conn_start().  Returns 0 or -EPROTO.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Remote refused; proceed without extended feature info */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* Follow up with a fixed-channel query if supported */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4817 
/* Handle an incoming L2CAP Create Channel Request (AMP).
 *
 * amp_id AMP_ID_BREDR degenerates into a normal BR/EDR connect; any
 * other amp_id must name a powered-up AMP controller, and the new
 * channel is tied to the existing AMP link.  A bad controller id is
 * answered with L2CAP_CR_BAD_AMP.  Returns 0, -EPROTO, or -EINVAL.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP physical link to the same peer must already
		 * exist for the channel to be moved onto it.
		 */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4894 
4895 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4896 {
4897 	struct l2cap_move_chan_req req;
4898 	u8 ident;
4899 
4900 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4901 
4902 	ident = l2cap_get_ident(chan->conn);
4903 	chan->ident = ident;
4904 
4905 	req.icid = cpu_to_le16(chan->scid);
4906 	req.dest_amp_id = dest_amp_id;
4907 
4908 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4909 		       &req);
4910 
4911 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4912 }
4913 
4914 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4915 {
4916 	struct l2cap_move_chan_rsp rsp;
4917 
4918 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4919 
4920 	rsp.icid = cpu_to_le16(chan->dcid);
4921 	rsp.result = cpu_to_le16(result);
4922 
4923 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4924 		       sizeof(rsp), &rsp);
4925 }
4926 
4927 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4928 {
4929 	struct l2cap_move_chan_cfm cfm;
4930 
4931 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4932 
4933 	chan->ident = l2cap_get_ident(chan->conn);
4934 
4935 	cfm.icid = cpu_to_le16(chan->scid);
4936 	cfm.result = cpu_to_le16(result);
4937 
4938 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4939 		       sizeof(cfm), &cfm);
4940 
4941 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4942 }
4943 
4944 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4945 {
4946 	struct l2cap_move_chan_cfm cfm;
4947 
4948 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4949 
4950 	cfm.icid = cpu_to_le16(icid);
4951 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4952 
4953 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4954 		       sizeof(cfm), &cfm);
4955 }
4956 
4957 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4958 					 u16 icid)
4959 {
4960 	struct l2cap_move_chan_cfm_rsp rsp;
4961 
4962 	BT_DBG("icid 0x%4.4x", icid);
4963 
4964 	rsp.icid = cpu_to_le16(icid);
4965 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4966 }
4967 
4968 static void __release_logical_link(struct l2cap_chan *chan)
4969 {
4970 	chan->hs_hchan = NULL;
4971 	chan->hs_hcon = NULL;
4972 
4973 	/* Placeholder - release the logical link */
4974 }
4975 
/* React to a failed AMP logical link setup.
 *
 * Before the channel is connected this means channel creation failed,
 * so disconnect.  For an established channel the in-progress move is
 * aborted according to our role in it.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
5006 
/* Complete channel creation over an AMP link once the logical link is
 * up: attach the hci_chan, answer the pending Configure Request, and
 * bring the channel up if configuration already finished.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	/* Scratch buffer for l2cap_send_efs_conf_rsp() to build into */
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* chan->ident was stashed when the config exchange went pending */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
5029 
/* Advance an in-progress channel move now that the AMP logical link is
 * ready, sending the confirm or response our move state calls for.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		/* Defer while the local receiver is busy */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
5063 
5064 /* Call with chan locked */
5065 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
5066 		       u8 status)
5067 {
5068 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
5069 
5070 	if (status) {
5071 		l2cap_logical_fail(chan);
5072 		__release_logical_link(chan);
5073 		return;
5074 	}
5075 
5076 	if (chan->state != BT_CONNECTED) {
5077 		/* Ignore logical link if channel is on BR/EDR */
5078 		if (chan->local_amp_id != AMP_ID_BREDR)
5079 			l2cap_logical_finish_create(chan, hchan);
5080 	} else {
5081 		l2cap_logical_finish_move(chan, hchan);
5082 	}
5083 }
5084 
/* Begin moving this channel between BR/EDR and an AMP controller as the
 * move initiator.  From BR/EDR the move only proceeds when the channel
 * policy prefers AMP; from AMP, move_id 0 targets BR/EDR.
 */
void l2cap_move_start(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	if (chan->local_amp_id == AMP_ID_BREDR) {
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
			return;
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
	} else {
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		/* move_id 0 = AMP_ID_BREDR, i.e. moving back to BR/EDR */
		chan->move_id = 0;
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
	}
}
5103 
/* Continue channel creation after physical link setup, for a channel
 * being created on an AMP controller.
 *
 * Outgoing (BT_CONNECT): send a Create Channel Request on success, or
 * fall back to a plain BR/EDR Connection Request.  Incoming: answer the
 * stored Create Channel Request and start configuration on success.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* No FCS on AMP-created channels */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Kick off configuration immediately */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}
	}
}
5155 
5156 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5157 				   u8 remote_amp_id)
5158 {
5159 	l2cap_move_setup(chan);
5160 	chan->move_id = local_amp_id;
5161 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
5162 
5163 	l2cap_send_move_chan_req(chan, remote_amp_id);
5164 }
5165 
/* Responder side of a move after physical link setup: answer the Move
 * Channel Request based on logical link availability.
 *
 * NOTE: hchan is currently always NULL (the lookup is a placeholder),
 * so this always answers L2CAP_MR_NOT_ALLOWED until AMP support lands.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
5190 
5191 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5192 {
5193 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5194 		u8 rsp_result;
5195 		if (result == -EINVAL)
5196 			rsp_result = L2CAP_MR_BAD_ID;
5197 		else
5198 			rsp_result = L2CAP_MR_NOT_ALLOWED;
5199 
5200 		l2cap_send_move_chan_rsp(chan, rsp_result);
5201 	}
5202 
5203 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
5204 	chan->move_state = L2CAP_MOVE_STABLE;
5205 
5206 	/* Restart data transmission */
5207 	l2cap_ertm_send(chan);
5208 }
5209 
5210 /* Invoke with locked chan */
5211 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5212 {
5213 	u8 local_amp_id = chan->local_amp_id;
5214 	u8 remote_amp_id = chan->remote_amp_id;
5215 
5216 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5217 	       chan, result, local_amp_id, remote_amp_id);
5218 
5219 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
5220 		return;
5221 
5222 	if (chan->state != BT_CONNECTED) {
5223 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5224 	} else if (result != L2CAP_MR_SUCCESS) {
5225 		l2cap_do_move_cancel(chan, result);
5226 	} else {
5227 		switch (chan->move_role) {
5228 		case L2CAP_MOVE_ROLE_INITIATOR:
5229 			l2cap_do_move_initiate(chan, local_amp_id,
5230 					       remote_amp_id);
5231 			break;
5232 		case L2CAP_MOVE_ROLE_RESPONDER:
5233 			l2cap_do_move_respond(chan, result);
5234 			break;
5235 		default:
5236 			l2cap_do_move_cancel(chan, result);
5237 			break;
5238 		}
5239 	}
5240 }
5241 
/* Handle an incoming Move Channel Request (AMP).
 *
 * Validates that the channel is dynamic, policy-eligible, in ERTM or
 * streaming mode, and that the destination controller differs and (if
 * an AMP) is present and up; also resolves move collisions by bd_addr
 * comparison.  Always answers with a Move Channel Response.
 * Returns 0, -EPROTO, or -EINVAL.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	/* Returned locked with a reference; released before return */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	/* Remember the ident so the response can be matched */
	chan->ident = cmd->ident;

	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision.  Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5339 
/* Advance the initiator's move state machine after a success or pending
 * Move Channel Response.  An unknown icid is answered with an
 * unconfirmed confirmation; any unexpected move state cancels the move.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending result re-arms the extended move timer */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
5430 
/* Handle a failed Move Channel Response.  On a collision the loser
 * switches to the responder role; otherwise the move is cancelled.  An
 * unconfirmed confirmation is always sent back.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
5460 
5461 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5462 				  struct l2cap_cmd_hdr *cmd,
5463 				  u16 cmd_len, void *data)
5464 {
5465 	struct l2cap_move_chan_rsp *rsp = data;
5466 	u16 icid, result;
5467 
5468 	if (cmd_len != sizeof(*rsp))
5469 		return -EPROTO;
5470 
5471 	icid = le16_to_cpu(rsp->icid);
5472 	result = le16_to_cpu(rsp->result);
5473 
5474 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5475 
5476 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5477 		l2cap_move_continue(conn, icid, result);
5478 	else
5479 		l2cap_move_fail(conn, cmd->ident, icid, result);
5480 
5481 	return 0;
5482 }
5483 
/* Handle an incoming Move Channel Confirmation (responder side).
 *
 * Commits or rolls back the move depending on the result, and always
 * acknowledges with a confirmation response - even for an unknown icid,
 * as the spec requires.  Returns 0 or -EPROTO.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Commit: adopt the new controller; drop the
			 * logical link when landing back on BR/EDR.
			 */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Roll back to the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5526 
/* Handle a Move Channel Confirmation Response — the final PDU of the
 * AMP channel-move exchange.
 *
 * The channel is matched by source CID.  If it was waiting for this
 * response, the move is committed: local_amp_id takes the moved-to
 * controller id, the logical link is dropped when the channel returned
 * to BR/EDR (and one exists), and l2cap_move_done() finishes the state
 * machine.  The scid lookup returns the channel locked and referenced.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5562 
/* Handle an LE Connection Parameter Update Request.
 *
 * Only the central (HCI_ROLE_MASTER) may process this request; when we
 * are peripheral the -EINVAL return makes the caller issue a Command
 * Reject.  The parameters are validated by hci_check_conn_params() and
 * the accept/reject response is sent first; only after that, and only
 * on acceptance, is the controller asked to apply the update and the
 * result reported to the management interface.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
5612 
/* Handle an LE Credit Based Connection Response.
 *
 * The pending channel is matched (under conn->chan_lock) by the
 * signaling identifier of our earlier Connect Request.  On success the
 * peer's dcid/mtu/mps/initial credits are recorded and the channel is
 * made ready.  An authentication/encryption failure triggers a
 * security-level upgrade and a later retry (the LE_CONN_REQ_SENT flag
 * is cleared so a fresh Connect Request can go out once security
 * completes) — unless we are already above BT_SECURITY_MEDIUM, in
 * which case nothing more can be done.  Any other result tears the
 * channel down.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* A successful response must carry sane MTU/MPS values and a
	 * dcid inside the LE dynamic CID range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* The peer must not assign a dcid already in use */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5699 
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Note that only some handlers have their return value assigned to
 * err; for the rest (mostly response PDUs) the result is ignored.  A
 * non-zero return makes the caller (l2cap_sig_channel) send a Command
 * Reject, which is appropriate for malformed requests but not for
 * responses.  An unknown opcode is always rejected.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5779 
/* Handle an incoming LE Credit Based Connection Request.
 *
 * Validates the PDU, finds a listening channel for the PSM, checks the
 * link security level and the requested source CID, then creates and
 * registers the new channel and initializes its flow control state.
 * Unless the listener deferred the setup (FLAG_DEFER_SETUP), an LE
 * Connection Response carrying the local channel parameters is sent.
 *
 * Two exit paths exist: "response_unlock" (locks held) and "response"
 * (no locks, used before any lock was taken).  L2CAP_CR_PEND is reused
 * here as an internal marker for the deferred case — see the inline
 * comment below.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 is the minimum MTU/MPS for LE credit based channels */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5905 
5906 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5907 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5908 				   u8 *data)
5909 {
5910 	struct l2cap_le_credits *pkt;
5911 	struct l2cap_chan *chan;
5912 	u16 cid, credits, max_credits;
5913 
5914 	if (cmd_len != sizeof(*pkt))
5915 		return -EPROTO;
5916 
5917 	pkt = (struct l2cap_le_credits *) data;
5918 	cid	= __le16_to_cpu(pkt->cid);
5919 	credits	= __le16_to_cpu(pkt->credits);
5920 
5921 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5922 
5923 	chan = l2cap_get_chan_by_dcid(conn, cid);
5924 	if (!chan)
5925 		return -EBADSLT;
5926 
5927 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5928 	if (credits > max_credits) {
5929 		BT_ERR("LE credits overflow");
5930 		l2cap_send_disconn_req(chan, ECONNRESET);
5931 
5932 		/* Return 0 so that we don't trigger an unnecessary
5933 		 * command reject packet.
5934 		 */
5935 		goto unlock;
5936 	}
5937 
5938 	chan->tx_credits += credits;
5939 
5940 	/* Resume sending */
5941 	l2cap_le_flowctl_send(chan);
5942 
5943 	if (chan->tx_credits)
5944 		chan->ops->resume(chan);
5945 
5946 unlock:
5947 	l2cap_chan_unlock(chan);
5948 	l2cap_chan_put(chan);
5949 
5950 	return 0;
5951 }
5952 
/* Handle an Enhanced Credit Based (ECRED) Connection Request, which
 * may open up to L2CAP_ECRED_MAX_CID channels at once.
 *
 * After PDU/MTU/MPS validation and the PSM/security checks, each
 * requested scid is processed independently: an invalid or in-use scid
 * or an allocation failure sets the (single, shared) result code and
 * leaves that slot's dcid as 0, but processing of the remaining scids
 * continues.  The response therefore carries one dcid per requested
 * scid, zeroed for the ones that failed.  If the listener defers setup
 * the response is postponed (defer == true) and sent later.
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	struct {
		struct l2cap_ecred_conn_rsp rsp;
		__le16 dcid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, len = 0;
	int i, num_scid;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	/* Payload must be the fixed header plus a whole number of
	 * 16-bit scids.
	 */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > ARRAY_SIZE(pdu.dcid)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto response;
	}

	psm  = req->psm;

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	memset(&pdu, 0, sizeof(pdu));

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		/* Default to "refused" (dcid 0) for this slot */
		pdu.dcid[i] = 0x0000;
		len += sizeof(*pdu.dcid);

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm  = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response (once, from the first accepted channel) */
		if (!pdu.rsp.credits) {
			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
			pdu.rsp.mps = cpu_to_le16(chan->mps);
			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
		}

		pdu.dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

response:
	pdu.rsp.result = cpu_to_le16(result);

	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(pdu.rsp) + len, &pdu);

	return 0;
}
6095 
/* Handle an Enhanced Credit Based (ECRED) Connection Response.
 *
 * Every channel still tagged with the request's signaling identifier
 * (in EXT_FLOWCTL mode and not yet connected) consumes one dcid from
 * the response, in list order.  A short response (fewer dcids than
 * pending channels) or a dcid collision tears the affected channels
 * down; a security-related failure schedules an SMP upgrade and a
 * retry; a zero dcid means that particular channel was refused.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	mutex_lock(&conn->chan_lock);

	/* cmd_len now tracks the remaining (unconsumed) dcid bytes */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return err;
}
6209 
6210 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6211 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6212 					 u8 *data)
6213 {
6214 	struct l2cap_ecred_reconf_req *req = (void *) data;
6215 	struct l2cap_ecred_reconf_rsp rsp;
6216 	u16 mtu, mps, result;
6217 	struct l2cap_chan *chan;
6218 	int i, num_scid;
6219 
6220 	if (!enable_ecred)
6221 		return -EINVAL;
6222 
6223 	if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) {
6224 		result = L2CAP_CR_LE_INVALID_PARAMS;
6225 		goto respond;
6226 	}
6227 
6228 	mtu = __le16_to_cpu(req->mtu);
6229 	mps = __le16_to_cpu(req->mps);
6230 
6231 	BT_DBG("mtu %u mps %u", mtu, mps);
6232 
6233 	if (mtu < L2CAP_ECRED_MIN_MTU) {
6234 		result = L2CAP_RECONF_INVALID_MTU;
6235 		goto respond;
6236 	}
6237 
6238 	if (mps < L2CAP_ECRED_MIN_MPS) {
6239 		result = L2CAP_RECONF_INVALID_MPS;
6240 		goto respond;
6241 	}
6242 
6243 	cmd_len -= sizeof(*req);
6244 	num_scid = cmd_len / sizeof(u16);
6245 	result = L2CAP_RECONF_SUCCESS;
6246 
6247 	for (i = 0; i < num_scid; i++) {
6248 		u16 scid;
6249 
6250 		scid = __le16_to_cpu(req->scid[i]);
6251 		if (!scid)
6252 			return -EPROTO;
6253 
6254 		chan = __l2cap_get_chan_by_dcid(conn, scid);
6255 		if (!chan)
6256 			continue;
6257 
6258 		/* If the MTU value is decreased for any of the included
6259 		 * channels, then the receiver shall disconnect all
6260 		 * included channels.
6261 		 */
6262 		if (chan->omtu > mtu) {
6263 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
6264 			       chan->omtu, mtu);
6265 			result = L2CAP_RECONF_INVALID_MTU;
6266 		}
6267 
6268 		chan->omtu = mtu;
6269 		chan->remote_mps = mps;
6270 	}
6271 
6272 respond:
6273 	rsp.result = cpu_to_le16(result);
6274 
6275 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
6276 		       &rsp);
6277 
6278 	return 0;
6279 }
6280 
6281 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6282 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6283 					 u8 *data)
6284 {
6285 	struct l2cap_chan *chan, *tmp;
6286 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6287 	u16 result;
6288 
6289 	if (cmd_len < sizeof(*rsp))
6290 		return -EPROTO;
6291 
6292 	result = __le16_to_cpu(rsp->result);
6293 
6294 	BT_DBG("result 0x%4.4x", rsp->result);
6295 
6296 	if (!result)
6297 		return 0;
6298 
6299 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6300 		if (chan->ident != cmd->ident)
6301 			continue;
6302 
6303 		l2cap_chan_del(chan, ECONNRESET);
6304 	}
6305 
6306 	return 0;
6307 }
6308 
6309 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6310 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6311 				       u8 *data)
6312 {
6313 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6314 	struct l2cap_chan *chan;
6315 
6316 	if (cmd_len < sizeof(*rej))
6317 		return -EPROTO;
6318 
6319 	mutex_lock(&conn->chan_lock);
6320 
6321 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6322 	if (!chan)
6323 		goto done;
6324 
6325 	l2cap_chan_lock(chan);
6326 	l2cap_chan_del(chan, ECONNREFUSED);
6327 	l2cap_chan_unlock(chan);
6328 
6329 done:
6330 	mutex_unlock(&conn->chan_lock);
6331 	return 0;
6332 }
6333 
/* Dispatch one LE signaling command to its handler.
 *
 * As with the BR/EDR dispatcher, only some handlers propagate their
 * return value into err; a non-zero err makes the caller
 * (l2cap_le_sig_channel) send a Command Reject.  Unknown opcodes are
 * always rejected.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_REQ:
		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_RSP:
		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_REQ:
		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_RSP:
		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
6396 
/* Process an skb received on the LE signaling channel.
 *
 * An LE signaling frame carries exactly one command: the header length
 * must match the remaining payload exactly and the identifier must be
 * non-zero, otherwise the frame is silently dropped.  When the handler
 * reports an error, a Command Reject (NOT_UNDERSTOOD) is returned to
 * the peer.  Consumes the skb in all cases.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* err comes from the command handler here, not from a
		 * link-type check, despite the message text.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
6437 
/* Process an skb received on the BR/EDR signaling channel.
 *
 * Unlike LE, a BR/EDR signaling frame may contain several commands
 * back to back, so the buffer is walked command by command.  A command
 * whose declared length exceeds the remaining data, or whose
 * identifier is zero, ends the walk.  Handler errors produce a Command
 * Reject (NOT_UNDERSTOOD) for that command but do not stop processing
 * of subsequent ones.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	/* Give raw (SOCK_RAW) listeners a copy of the frame first */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			/* err comes from the command handler here, not
			 * from a link-type check, despite the message.
			 */
			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance to the next command in the frame */
		skb_pull(skb, len);
	}

drop:
	kfree_skb(skb);
}
6483 
6484 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
6485 {
6486 	u16 our_fcs, rcv_fcs;
6487 	int hdr_size;
6488 
6489 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6490 		hdr_size = L2CAP_EXT_HDR_SIZE;
6491 	else
6492 		hdr_size = L2CAP_ENH_HDR_SIZE;
6493 
6494 	if (chan->fcs == L2CAP_FCS_CRC16) {
6495 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6496 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6497 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6498 
6499 		if (our_fcs != rcv_fcs)
6500 			return -EBADMSG;
6501 	}
6502 	return 0;
6503 }
6504 
/* Acknowledge the peer with the F-bit set, using whichever frame type
 * fits the current state: RNR when locally busy, otherwise pending
 * I-frames, and an RR only if no I- or S-frame carried the F-bit.
 *
 * CONN_SEND_FBIT is set up front; l2cap_send_sframe()/l2cap_ertm_send()
 * consume it when they emit a frame, which is why it is re-tested at
 * the end to decide whether a bare RR is still needed.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
6538 
/* Append new_frag to skb's fragment list, keeping *last_frag pointing
 * at the tail so appends stay O(1), and updating skb's byte accounting.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
6557 
/* Reassemble an SDU from segmented I-frames according to the frame's
 * SAR (Segmentation And Reassembly) field.
 *
 * Ownership: once a segment is attached to chan->sdu (or delivered via
 * ops->recv), the local skb pointer is set to NULL so the error path
 * below frees only what is still owned here (kfree_skb(NULL) is a
 * no-op).
 *
 * err stays -EINVAL for SAR sequencing violations (e.g. START while a
 * reassembly is in progress, CONTINUE/END without one) and becomes
 * -EMSGSIZE when the announced SDU length exceeds our MTU; any error
 * discards the partial SDU and is reported to the caller.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A complete SDU in one frame; must not interrupt an
		 * in-progress reassembly.
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		/* First segment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A START segment carrying >= sdu_len bytes is invalid
		 * (err remains -EINVAL).
		 */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Reaching sdu_len on a CONTINUE is invalid — only an
		 * END segment may complete the SDU.
		 */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The total must match the announced length exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
6642 
/* Re-segment queued data after an MPS change.  Currently a placeholder
 * that always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
6648 
6649 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6650 {
6651 	u8 event;
6652 
6653 	if (chan->mode != L2CAP_MODE_ERTM)
6654 		return;
6655 
6656 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6657 	l2cap_tx(chan, NULL, NULL, event);
6658 }
6659 
/* Drain the SREJ queue: deliver buffered I-frames to
 * l2cap_reassemble_sdu() in sequence order until a gap (missing txseq)
 * is hit, we become locally busy, or reassembly fails.  Once the queue
 * is empty the channel returns to the normal RECV state and the peer
 * is acked.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
6693 
/* Handle a received SREJ (Selective Reject) S-frame: the peer requests
 * retransmission of the single I-frame with txseq == control->reqseq.
 * An SREJ for a frame never sent, or for one that has already hit the
 * max_tx retransmission limit, is treated as a protocol error and the
 * channel is disconnected.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would reject a frame we never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* P=1: peer is polling, so our retransmission must carry
		 * the F-bit in response.
		 */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this SREJ (F=1)
			 * answers the SREJ we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6751 
/* Handle a received REJ (Reject) S-frame: the peer requests
 * retransmission of all unacked I-frames starting at
 * control->reqseq.  A REJ for a frame never sent, or one whose
 * retransmission count already reached max_tx, disconnects the
 * channel.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would reject a frame we never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions; skb may be NULL
	 * if the frame was already acked and freed.
	 */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* F=1 answering our poll: retransmit only if we have not
		 * already acted on a REJ for this poll cycle.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6788 
/* Classify a received I-frame's TxSeq relative to the receive window
 * and any outstanding SREJ state.
 *
 * Returns one of the L2CAP_TXSEQ_* values that drive the ERTM RX state
 * machines: EXPECTED (in order), UNEXPECTED (sequence gap - frames
 * missing), DUPLICATE (already received), the *_SREJ variants used
 * while selective rejects are outstanding, INVALID (outside the TX
 * window - forces a disconnect) or INVALID_IGNORE (outside the window
 * but provably a stale retransmission that can be dropped silently).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6874 
/* ERTM RX state machine handler for the normal RECV state.
 *
 * Processes one receive event (I-frame or S-frame).  An in-order
 * I-frame is reassembled and acked; an out-of-order one is stored on
 * the SREJ queue, SREJs are sent for the gap, and the channel moves to
 * the SREJ_SENT state.  RR/RNR/REJ/SREJ S-frames update the TX side
 * accordingly.
 *
 * The skb is consumed on all paths: either handed on (skb_in_use set)
 * or freed before returning.  Returns 0 or a negative error from SDU
 * delivery.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* While locally busy the frame is dropped; the
			 * peer will retransmit once busy clears.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu() owns the skb from here */
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Acks in the duplicate frame are still valid */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote just cleared its busy state: restart the
			 * retransmission timer if frames are still unacked.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Free the skb unless it was queued or consumed above */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
7008 
/* ERTM RX state machine handler for the SREJ_SENT state, entered after
 * a sequence gap was detected and SREJs were issued.
 *
 * Received I-frames are stored on the SREJ queue; when the head of the
 * outstanding SREJ list arrives, queued in-order frames are flushed to
 * reassembly via l2cap_rx_queued_iframes() (which also returns the
 * channel to the RECV state once the queue drains).  Additional gaps
 * trigger further SREJs.  S-frames update the TX side as in the RECV
 * state.
 *
 * The skb is consumed on all paths: queued (skb_in_use set) or freed
 * before returning.  Returns 0 or a negative error from SDU delivery.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* Head of the SREJ list arrived: it is no longer
			 * outstanding, so drop it from the list and try to
			 * flush the now-contiguous run of queued frames.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by re-sending the last SREJ
			 * with the F-bit set.
			 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Free the skb unless it was queued above */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
7151 
7152 static int l2cap_finish_move(struct l2cap_chan *chan)
7153 {
7154 	BT_DBG("chan %p", chan);
7155 
7156 	chan->rx_state = L2CAP_RX_STATE_RECV;
7157 
7158 	if (chan->hs_hcon)
7159 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7160 	else
7161 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7162 
7163 	return l2cap_resegment(chan);
7164 }
7165 
/* ERTM RX state machine handler for the WAIT_P state (waiting for a
 * poll from the peer during a channel move).
 *
 * Only an S-frame with P=1 is accepted; anything else is a protocol
 * error.  The TX side is rewound to the receiver's expected sequence
 * point, the move is finished (new MTU, resegmentation), and a frame
 * with the F-bit set is sent in response.  The remaining event is then
 * processed by the normal RECV-state handler.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames are not expected while waiting for a poll */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
7203 
/* ERTM RX state machine handler for the WAIT_F state (waiting for the
 * peer's final response during a channel move).
 *
 * Only a frame with F=1 is accepted; anything else is a protocol
 * error.  The TX side is rewound to the receiver's expected sequence
 * point, the connection MTU is updated for the link now in use, pending
 * data is resegmented, and the frame is then handed to the RECV-state
 * handler for normal processing.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Use the block MTU of the high-speed link if one carries the
	 * channel, otherwise the ACL MTU.
	 */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
7241 
7242 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7243 {
7244 	/* Make sure reqseq is for a packet that has been sent but not acked */
7245 	u16 unacked;
7246 
7247 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7248 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
7249 }
7250 
7251 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7252 		    struct sk_buff *skb, u8 event)
7253 {
7254 	int err = 0;
7255 
7256 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7257 	       control, skb, event, chan->rx_state);
7258 
7259 	if (__valid_reqseq(chan, control->reqseq)) {
7260 		switch (chan->rx_state) {
7261 		case L2CAP_RX_STATE_RECV:
7262 			err = l2cap_rx_state_recv(chan, control, skb, event);
7263 			break;
7264 		case L2CAP_RX_STATE_SREJ_SENT:
7265 			err = l2cap_rx_state_srej_sent(chan, control, skb,
7266 						       event);
7267 			break;
7268 		case L2CAP_RX_STATE_WAIT_P:
7269 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
7270 			break;
7271 		case L2CAP_RX_STATE_WAIT_F:
7272 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
7273 			break;
7274 		default:
7275 			/* shut it down */
7276 			break;
7277 		}
7278 	} else {
7279 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
7280 		       control->reqseq, chan->next_tx_seq,
7281 		       chan->expected_ack_seq);
7282 		l2cap_send_disconn_req(chan, ECONNRESET);
7283 	}
7284 
7285 	return err;
7286 }
7287 
/* Receive an I-frame on a streaming-mode channel.  Streaming mode has
 * no retransmission: an in-order frame is reassembled, anything else
 * (a gap or a duplicate) causes any partial SDU and the frame itself
 * to be discarded.  The sequence state always advances past the
 * received txseq.  Always returns 0; the skb is consumed either way.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out of sequence: drop any partially reassembled SDU
		 * along with this frame.
		 */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return 0;
}
7323 
/* Entry point for data received on an ERTM or streaming-mode channel.
 *
 * Unpacks and validates the frame (FCS, payload length vs MPS, F/P bit
 * consistency, optional socket filter), then routes I-frames to
 * l2cap_rx()/l2cap_stream_rx() and S-frames to l2cap_rx() with the
 * event matching their supervisory function.  Invalid frames are
 * dropped; protocol errors additionally disconnect the channel.  The
 * skb is always consumed.  Always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Exclude the SDU-length field and FCS trailer from the
	 * payload length checked against the negotiated MPS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the 2-bit supervisory function to an RX event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
7416 
7417 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7418 {
7419 	struct l2cap_conn *conn = chan->conn;
7420 	struct l2cap_le_credits pkt;
7421 	u16 return_credits;
7422 
7423 	return_credits = (chan->imtu / chan->mps) + 1;
7424 
7425 	if (chan->rx_credits >= return_credits)
7426 		return;
7427 
7428 	return_credits -= chan->rx_credits;
7429 
7430 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7431 
7432 	chan->rx_credits += return_credits;
7433 
7434 	pkt.cid     = cpu_to_le16(chan->scid);
7435 	pkt.credits = cpu_to_le16(return_credits);
7436 
7437 	chan->ident = l2cap_get_ident(conn);
7438 
7439 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
7440 }
7441 
7442 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7443 {
7444 	int err;
7445 
7446 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7447 
7448 	/* Wait recv to confirm reception before updating the credits */
7449 	err = chan->ops->recv(chan, skb);
7450 
7451 	/* Update credits whenever an SDU is received */
7452 	l2cap_chan_le_send_credits(chan);
7453 
7454 	return err;
7455 }
7456 
/* Receive one PDU on an LE/enhanced credit-based channel.
 *
 * Enforces the credit count and the negotiated MTU/MPS limits, then
 * either starts a new SDU (first PDU carries a 2-byte SDU length),
 * appends a fragment to the SDU under reassembly, or delivers a
 * complete SDU via l2cap_ecred_recv().  On any error the partial SDU
 * and the skb are freed here, so 0 is returned to the caller to avoid
 * a double free; a non-zero return means the caller still owns the
 * skb (credit exhaustion or oversized PDU).
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU: starts with the total SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* SDU fits in a single PDU: deliver immediately */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* append_skb_frag() takes ownership of the skb */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
7560 
/* Route an incoming data frame to the channel identified by @cid.
 *
 * Unknown CIDs (other than A2MP channel creation) cause the frame to
 * be dropped.  Otherwise the frame is dispatched by channel mode:
 * credit-based flow control, basic mode, or ERTM/streaming.  The skb
 * is always consumed, and the channel lock/reference taken by
 * l2cap_get_chan_by_scid() is released before returning.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	/* Returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* recv callback consumed the skb on success */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
7636 
/* Deliver a connectionless (unicast connectionless data, CID 0x0002)
 * frame to a channel bound to @psm on this ACL link.  Frames on
 * non-ACL links, for unbound PSMs, on channels in the wrong state, or
 * exceeding the channel MTU are dropped.  The skb is always consumed
 * and the channel reference from the global lookup is released.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	/* Returns the channel with a reference held */
	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* recv callback consumed the skb on success */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
7673 
/* Top-level dispatcher for a complete L2CAP frame received on @conn.
 *
 * Frames arriving before the HCI connection is fully up are queued on
 * pending_rx for later processing.  Otherwise the basic header is
 * stripped and validated, data from reject-listed LE peers is
 * discarded, and the frame is routed by CID: signaling,
 * connectionless, LE signaling, or a data channel.  The skb is always
 * consumed.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	/* Header fields are read via lh before the pull below */
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload length */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
7728 
7729 static void process_pending_rx(struct work_struct *work)
7730 {
7731 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7732 					       pending_rx_work);
7733 	struct sk_buff *skb;
7734 
7735 	BT_DBG("");
7736 
7737 	while ((skb = skb_dequeue(&conn->pending_rx)))
7738 		l2cap_recv_frame(conn, skb);
7739 }
7740 
/* Create (or return the existing) L2CAP connection object for an HCI
 * connection.
 *
 * Allocates the l2cap_conn, attaches it to the hcon, creates the
 * underlying HCI channel, selects the connection MTU from the link
 * type (LE MTU when set, otherwise ACL MTU), advertises the fixed
 * channels supported locally, and initializes locks, lists, timers and
 * work items.  Returns the connection or NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up: reuse the existing connection object */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		fallthrough;
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	if (hcon->type == ACL_LINK &&
	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	/* Advertise BR/EDR SMP only when Secure Connections is usable */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7807 
7808 static bool is_valid_psm(u16 psm, u8 dst_type)
7809 {
7810 	if (!psm)
7811 		return false;
7812 
7813 	if (bdaddr_type_is_le(dst_type))
7814 		return (psm <= 0x00ff);
7815 
7816 	/* PSM must be odd and lsb of upper byte must be 0 */
7817 	return ((psm & 0x0101) == 0x0001);
7818 }
7819 
/* Context passed to l2cap_chan_by_pid() via l2cap_chan_list() */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel doing the scan; excluded from count */
	struct pid *pid;		/* owning process to match against */
	int count;			/* number of matching channels found */
};
7825 
7826 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7827 {
7828 	struct l2cap_chan_data *d = data;
7829 	struct pid *pid;
7830 
7831 	if (chan == d->chan)
7832 		return;
7833 
7834 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7835 		return;
7836 
7837 	pid = chan->ops->get_peer_pid(chan);
7838 
7839 	/* Only count deferred channels with the same PID/PSM */
7840 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7841 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7842 		return;
7843 
7844 	d->count++;
7845 }
7846 
/* Initiate an outgoing connection on @chan to @dst (of @dst_type),
 * addressed by @psm (connection-oriented channels) and/or @cid (fixed
 * channels).  Acquires or creates the underlying ACL/LE link, attaches
 * the channel to its l2cap_conn and starts the channel state machine.
 *
 * Returns 0 on success (or when a connection attempt is already in
 * progress), a negative errno otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	/* Pick the adapter that routes to @dst; takes a reference on hdev
	 * which is dropped on every exit path below.
	 */
	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Either a valid PSM, a fixed CID, or a raw channel is required */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels must be addressed by PSM */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	/* Fixed channels must be addressed by CID */
	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes disabled via module parameters */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising, connect directly; otherwise use the
		 * connect-by-scanning path.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level,
						   HCI_LE_CONN_TIMEOUT,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check that there aren't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* A fixed CID must not already be in use on this connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, kick the state machine immediately */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
8033 
/* Send an ECRED reconfigure request announcing the channel's current
 * MTU/MPS for a single SCID.  The allocated command ident is stored in
 * chan->ident so the response can be matched later.
 */
static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	/* Request header followed by the one SCID being reconfigured */
	struct {
		struct l2cap_ecred_reconf_req req;
		__le16 scid;
	} pdu;

	pdu.req.mtu = cpu_to_le16(chan->imtu);
	pdu.req.mps = cpu_to_le16(chan->mps);
	pdu.scid    = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
		       sizeof(pdu), &pdu);
}
8051 
8052 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8053 {
8054 	if (chan->imtu > mtu)
8055 		return -EINVAL;
8056 
8057 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
8058 
8059 	chan->imtu = mtu;
8060 
8061 	l2cap_ecred_reconfigure(chan);
8062 
8063 	return 0;
8064 }
8065 
8066 /* ---- L2CAP interface with lower layer (HCI) ---- */
8067 
8068 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8069 {
8070 	int exact = 0, lm1 = 0, lm2 = 0;
8071 	struct l2cap_chan *c;
8072 
8073 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8074 
8075 	/* Find listening sockets and check their link_mode */
8076 	read_lock(&chan_list_lock);
8077 	list_for_each_entry(c, &chan_list, global_l) {
8078 		if (c->state != BT_LISTEN)
8079 			continue;
8080 
8081 		if (!bacmp(&c->src, &hdev->bdaddr)) {
8082 			lm1 |= HCI_LM_ACCEPT;
8083 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8084 				lm1 |= HCI_LM_MASTER;
8085 			exact++;
8086 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
8087 			lm2 |= HCI_LM_ACCEPT;
8088 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8089 				lm2 |= HCI_LM_MASTER;
8090 		}
8091 	}
8092 	read_unlock(&chan_list_lock);
8093 
8094 	return exact ? lm1 : lm2;
8095 }
8096 
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * The returned channel has its reference count held (via
 * l2cap_chan_hold_unless_zero()); the caller must release it with
 * l2cap_chan_put().  Returns NULL when the list is exhausted.
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume after @c, or start from the head of the global list */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Accept channels bound to our address or to the wildcard */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* May yield NULL if the channel is concurrently going away */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
8132 
/* HCI connect-complete callback.  On failure tears down the L2CAP
 * connection state; on success creates the l2cap_conn and offers every
 * listening fixed channel the chance to attach to the new link.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	/* Only ACL and LE links carry L2CAP */
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping our reference on pchan */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
8193 
8194 int l2cap_disconn_ind(struct hci_conn *hcon)
8195 {
8196 	struct l2cap_conn *conn = hcon->l2cap_data;
8197 
8198 	BT_DBG("hcon %p", hcon);
8199 
8200 	if (!conn)
8201 		return HCI_ERROR_REMOTE_USER_TERM;
8202 	return conn->disc_reason;
8203 }
8204 
8205 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8206 {
8207 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8208 		return;
8209 
8210 	BT_DBG("hcon %p reason %d", hcon, reason);
8211 
8212 	l2cap_conn_del(hcon, bt_to_errno(reason));
8213 }
8214 
8215 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8216 {
8217 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8218 		return;
8219 
8220 	if (encrypt == 0x00) {
8221 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
8222 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8223 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
8224 			   chan->sec_level == BT_SECURITY_FIPS)
8225 			l2cap_chan_close(chan, ECONNREFUSED);
8226 	} else {
8227 		if (chan->sec_level == BT_SECURITY_MEDIUM)
8228 			__clear_chan_timer(chan);
8229 	}
8230 }
8231 
/* HCI security callback: invoked when authentication/encryption of the
 * underlying link changes (@status is the HCI status, @encrypt the new
 * encryption state).  Walks every channel on the connection and drives
 * its state machine accordingly.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP channels are not affected by link security */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Successful encryption raises the channel security level */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Established channels just resume and re-check encryption */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Proceed only when security succeeded and the
			 * encryption key is long enough; otherwise arm the
			 * disconnect timer.
			 */
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Follow a successful response with our own
			 * configuration request, exactly once.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
8323 
/* Append fragment into frame respecting the maximum len of rx_skb.
 *
 * On first use allocates conn->rx_skb (sized @len, intended to cover
 * the complete frame including header) and initializes conn->rx_len to
 * the expected total.  Copies up to @len bytes from @skb into rx_skb,
 * consuming them from @skb and decrementing the outstanding rx_len.
 *
 * Returns the number of bytes copied, or -ENOMEM on allocation failure.
 */
static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
			   u16 len)
{
	if (!conn->rx_skb) {
		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			return -ENOMEM;
		/* Init rx_len */
		conn->rx_len = len;
	}

	/* Copy as much as the rx_skb can hold */
	len = min_t(u16, len, skb->len);
	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
	skb_pull(skb, len);
	conn->rx_len -= len;

	return len;
}
8345 
/* Complete the 2-byte L2CAP length field of the frame being reassembled
 * in conn->rx_skb using bytes from @skb, then ensure rx_skb can hold
 * the full frame - reallocating it when it was provisionally sized
 * from conn->mtu.
 *
 * Returns the number of bytes consumed from @skb, or a negative error.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to receive all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length; the bytes
	 * gathered so far (held in the old rx_skb) are copied over.
	 */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
8380 
8381 static void l2cap_recv_reset(struct l2cap_conn *conn)
8382 {
8383 	kfree_skb(conn->rx_skb);
8384 	conn->rx_skb = NULL;
8385 	conn->rx_len = 0;
8386 }
8387 
8388 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
8389 {
8390 	struct l2cap_conn *conn = hcon->l2cap_data;
8391 	int len;
8392 
8393 	/* For AMP controller do not create l2cap conn */
8394 	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
8395 		goto drop;
8396 
8397 	if (!conn)
8398 		conn = l2cap_conn_add(hcon);
8399 
8400 	if (!conn)
8401 		goto drop;
8402 
8403 	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);
8404 
8405 	switch (flags) {
8406 	case ACL_START:
8407 	case ACL_START_NO_FLUSH:
8408 	case ACL_COMPLETE:
8409 		if (conn->rx_skb) {
8410 			BT_ERR("Unexpected start frame (len %d)", skb->len);
8411 			l2cap_recv_reset(conn);
8412 			l2cap_conn_unreliable(conn, ECOMM);
8413 		}
8414 
8415 		/* Start fragment may not contain the L2CAP length so just
8416 		 * copy the initial byte when that happens and use conn->mtu as
8417 		 * expected length.
8418 		 */
8419 		if (skb->len < L2CAP_LEN_SIZE) {
8420 			if (l2cap_recv_frag(conn, skb, conn->mtu) < 0)
8421 				goto drop;
8422 			return;
8423 		}
8424 
8425 		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;
8426 
8427 		if (len == skb->len) {
8428 			/* Complete frame received */
8429 			l2cap_recv_frame(conn, skb);
8430 			return;
8431 		}
8432 
8433 		BT_DBG("Start: total len %d, frag len %u", len, skb->len);
8434 
8435 		if (skb->len > len) {
8436 			BT_ERR("Frame is too long (len %u, expected len %d)",
8437 			       skb->len, len);
8438 			l2cap_conn_unreliable(conn, ECOMM);
8439 			goto drop;
8440 		}
8441 
8442 		/* Append fragment into frame (with header) */
8443 		if (l2cap_recv_frag(conn, skb, len) < 0)
8444 			goto drop;
8445 
8446 		break;
8447 
8448 	case ACL_CONT:
8449 		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);
8450 
8451 		if (!conn->rx_skb) {
8452 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
8453 			l2cap_conn_unreliable(conn, ECOMM);
8454 			goto drop;
8455 		}
8456 
8457 		/* Complete the L2CAP length if it has not been read */
8458 		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
8459 			if (l2cap_recv_len(conn, skb) < 0) {
8460 				l2cap_conn_unreliable(conn, ECOMM);
8461 				goto drop;
8462 			}
8463 
8464 			/* Header still could not be read just continue */
8465 			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
8466 				return;
8467 		}
8468 
8469 		if (skb->len > conn->rx_len) {
8470 			BT_ERR("Fragment is too long (len %u, expected %u)",
8471 			       skb->len, conn->rx_len);
8472 			l2cap_recv_reset(conn);
8473 			l2cap_conn_unreliable(conn, ECOMM);
8474 			goto drop;
8475 		}
8476 
8477 		/* Append fragment into frame (with header) */
8478 		l2cap_recv_frag(conn, skb, skb->len);
8479 
8480 		if (!conn->rx_len) {
8481 			/* Complete frame received. l2cap_recv_frame
8482 			 * takes ownership of the skb so set the global
8483 			 * rx_skb pointer to NULL first.
8484 			 */
8485 			struct sk_buff *rx_skb = conn->rx_skb;
8486 			conn->rx_skb = NULL;
8487 			l2cap_recv_frame(conn, rx_skb);
8488 		}
8489 		break;
8490 	}
8491 
8492 drop:
8493 	kfree_skb(skb);
8494 }
8495 
/* Callbacks registered with the HCI core so L2CAP is notified of
 * link-level connect, disconnect and security events.
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
8502 
8503 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8504 {
8505 	struct l2cap_chan *c;
8506 
8507 	read_lock(&chan_list_lock);
8508 
8509 	list_for_each_entry(c, &chan_list, global_l) {
8510 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8511 			   &c->src, c->src_type, &c->dst, c->dst_type,
8512 			   c->state, __le16_to_cpu(c->psm),
8513 			   c->scid, c->dcid, c->imtu, c->omtu,
8514 			   c->sec_level, c->mode);
8515 	}
8516 
8517 	read_unlock(&chan_list_lock);
8518 
8519 	return 0;
8520 }
8521 
/* Generates l2cap_debugfs_fops around l2cap_debugfs_show() */
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* debugfs entry created in l2cap_init(), removed in l2cap_exit() */
static struct dentry *l2cap_debugfs;
8525 
8526 int __init l2cap_init(void)
8527 {
8528 	int err;
8529 
8530 	err = l2cap_init_sockets();
8531 	if (err < 0)
8532 		return err;
8533 
8534 	hci_register_cb(&l2cap_cb);
8535 
8536 	if (IS_ERR_OR_NULL(bt_debugfs))
8537 		return 0;
8538 
8539 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8540 					    NULL, &l2cap_debugfs_fops);
8541 
8542 	return 0;
8543 }
8544 
/* Subsystem teardown: undo l2cap_init() in reverse order */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
8551 
/* Runtime-tunable module parameters (also settable via sysfs, mode 0644) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
8557