xref: /openbmc/linux/net/bluetooth/l2cap_core.c (revision cbadaf71)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
/* Upper bound on credits a peer may hold on an LE credit-based channel */
#define LE_FLOWCTL_MAX_CREDITS 65535

/* Runtime feature switches (referenced from elsewhere in the stack) */
bool disable_ertm;
bool enable_ecred;

/* Feature mask advertised in information responses */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;

/* All registered channels, protected by chan_list_lock */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for helpers defined later in this file */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);
static void l2cap_retrans_timeout(struct work_struct *work);
static void l2cap_monitor_timeout(struct work_struct *work);
static void l2cap_ack_timeout(struct work_struct *work);
67 
68 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
69 {
70 	if (link_type == LE_LINK) {
71 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
72 			return BDADDR_LE_PUBLIC;
73 		else
74 			return BDADDR_LE_RANDOM;
75 	}
76 
77 	return BDADDR_BREDR;
78 }
79 
/* Socket-level address type of the local (source) side of @hcon */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}

/* Socket-level address type of the remote (destination) side of @hcon */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
89 
90 /* ---- L2CAP channels ---- */
91 
92 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
93 						   u16 cid)
94 {
95 	struct l2cap_chan *c;
96 
97 	list_for_each_entry(c, &conn->chan_l, list) {
98 		if (c->dcid == cid)
99 			return c;
100 	}
101 	return NULL;
102 }
103 
104 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 						   u16 cid)
106 {
107 	struct l2cap_chan *c;
108 
109 	list_for_each_entry(c, &conn->chan_l, list) {
110 		if (c->scid == cid)
111 			return c;
112 	}
113 	return NULL;
114 }
115 
116 /* Find channel with given SCID.
117  * Returns a reference locked channel.
118  */
119 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
120 						 u16 cid)
121 {
122 	struct l2cap_chan *c;
123 
124 	mutex_lock(&conn->chan_lock);
125 	c = __l2cap_get_chan_by_scid(conn, cid);
126 	if (c) {
127 		/* Only lock if chan reference is not 0 */
128 		c = l2cap_chan_hold_unless_zero(c);
129 		if (c)
130 			l2cap_chan_lock(c);
131 	}
132 	mutex_unlock(&conn->chan_lock);
133 
134 	return c;
135 }
136 
137 /* Find channel with given DCID.
138  * Returns a reference locked channel.
139  */
140 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
141 						 u16 cid)
142 {
143 	struct l2cap_chan *c;
144 
145 	mutex_lock(&conn->chan_lock);
146 	c = __l2cap_get_chan_by_dcid(conn, cid);
147 	if (c) {
148 		/* Only lock if chan reference is not 0 */
149 		c = l2cap_chan_hold_unless_zero(c);
150 		if (c)
151 			l2cap_chan_lock(c);
152 	}
153 	mutex_unlock(&conn->chan_lock);
154 
155 	return c;
156 }
157 
158 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
159 						    u8 ident)
160 {
161 	struct l2cap_chan *c;
162 
163 	list_for_each_entry(c, &conn->chan_l, list) {
164 		if (c->ident == ident)
165 			return c;
166 	}
167 	return NULL;
168 }
169 
170 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
171 						  u8 ident)
172 {
173 	struct l2cap_chan *c;
174 
175 	mutex_lock(&conn->chan_lock);
176 	c = __l2cap_get_chan_by_ident(conn, ident);
177 	if (c) {
178 		/* Only lock if chan reference is not 0 */
179 		c = l2cap_chan_hold_unless_zero(c);
180 		if (c)
181 			l2cap_chan_lock(c);
182 	}
183 	mutex_unlock(&conn->chan_lock);
184 
185 	return c;
186 }
187 
188 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
189 						      u8 src_type)
190 {
191 	struct l2cap_chan *c;
192 
193 	list_for_each_entry(c, &chan_list, global_l) {
194 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
195 			continue;
196 
197 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
198 			continue;
199 
200 		if (c->sport == psm && !bacmp(&c->src, src))
201 			return c;
202 	}
203 	return NULL;
204 }
205 
/* Bind @chan to a PSM on source address @src.
 *
 * A non-zero @psm is used as-is after checking that no other channel
 * of the same address family has already claimed it. A zero @psm
 * requests automatic allocation: the first free PSM in the dynamic
 * range is picked, stepping by 2 for BR/EDR (dynamic BR/EDR PSMs are
 * odd) and by 1 for LE.
 *
 * Returns 0 on success, -EADDRINUSE if the requested PSM is taken, or
 * -EINVAL if the dynamic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
250 
251 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
252 {
253 	write_lock(&chan_list_lock);
254 
255 	/* Override the defaults (which are for conn-oriented) */
256 	chan->omtu = L2CAP_DEFAULT_MTU;
257 	chan->chan_type = L2CAP_CHAN_FIXED;
258 
259 	chan->scid = scid;
260 
261 	write_unlock(&chan_list_lock);
262 
263 	return 0;
264 }
265 
266 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
267 {
268 	u16 cid, dyn_end;
269 
270 	if (conn->hcon->type == LE_LINK)
271 		dyn_end = L2CAP_CID_LE_DYN_END;
272 	else
273 		dyn_end = L2CAP_CID_DYN_END;
274 
275 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
276 		if (!__l2cap_get_chan_by_scid(conn, cid))
277 			return cid;
278 	}
279 
280 	return 0;
281 }
282 
/* Move @chan to @state and notify its owner (no error reported) */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}

/* Move @chan to @state and deliver @err with the notification */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}

/* Report @err to the channel's owner without changing state */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
303 
/* Arm the ERTM retransmission timer, unless the monitor timer is
 * already pending (only one of the two runs at a time) or no
 * retransmission timeout has been configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}

/* Arm the ERTM monitor timer, stopping the retransmission timer first
 * since the two are mutually exclusive.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
321 
322 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
323 					       u16 seq)
324 {
325 	struct sk_buff *skb;
326 
327 	skb_queue_walk(head, skb) {
328 		if (bt_cb(skb)->l2cap.txseq == seq)
329 			return skb;
330 	}
331 
332 	return NULL;
333 }
334 
335 /* ---- L2CAP sequence number lists ---- */
336 
337 /* For ERTM, ordered lists of sequence numbers must be tracked for
338  * SREJ requests that are received and for frames that are to be
339  * retransmitted. These seq_list functions implement a singly-linked
340  * list in an array, where membership in the list can also be checked
341  * in constant time. Items can also be added to the tail of the list
342  * and removed from the head in constant time, without further memory
343  * allocs or frees.
344  */
345 
346 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
347 {
348 	size_t alloc_size, i;
349 
350 	/* Allocated size is a power of 2 to map sequence numbers
351 	 * (which may be up to 14 bits) in to a smaller array that is
352 	 * sized for the negotiated ERTM transmit windows.
353 	 */
354 	alloc_size = roundup_pow_of_two(size);
355 
356 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
357 	if (!seq_list->list)
358 		return -ENOMEM;
359 
360 	seq_list->mask = alloc_size - 1;
361 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
363 	for (i = 0; i < alloc_size; i++)
364 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
365 
366 	return 0;
367 }
368 
/* Release the backing array; safe on a list whose init failed (kfree
 * of NULL is a no-op).
 */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}

static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}

/* Remove and return the sequence number at the head of the list.
 * NOTE(review): assumes the list is non-empty — popping an empty list
 * would index the array with the CLEAR sentinel; callers must check
 * head != L2CAP_SEQ_LIST_CLEAR first. Confirm against call sites.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* The slot holds the next element, or the TAIL sentinel */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		/* Last element removed: mark the list empty */
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
396 
/* Empty the list; clears every slot, so O(array size), but a no-op
 * when the list is already empty.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}

/* Append @seq at the tail of the list; duplicates are ignored */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: nothing to do */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		/* List was empty: @seq becomes the head too */
		seq_list->head = seq;
	else
		/* Link the old tail to @seq */
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
428 
/* Delayed work run when the channel timer armed via __set_chan_timer()
 * expires: close the channel with an error derived from the state it
 * got stuck in and notify the owner.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock first, then the channel lock */
	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	/* A channel stalled mid-connection reads as a refused
	 * connection; anything else is a plain timeout.
	 */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	/* Drop the reference taken when the timer was scheduled */
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}
461 
/* Allocate a new channel in BT_OPEN state and register it on the
 * global channel list. The caller owns the initial kref. Returns NULL
 * on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	/* NOTE(review): GFP_ATOMIC suggests callers may run in atomic
	 * context — confirm against call sites before relaxing.
	 */
	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
498 
/* kref release callback: unlink the channel from the global list and
 * free it. Runs when the last l2cap_chan_put() drops the refcount.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
511 
/* Take a channel reference; the caller must already hold one */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}

/* Take a reference only if the channel is still alive (refcount > 0).
 * Returns @c on success, or NULL if the channel is already on its way
 * to destruction.
 */
struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	if (!kref_get_unless_zero(&c->kref))
		return NULL;

	return c;
}

/* Drop a channel reference; the channel is freed via
 * l2cap_chan_destroy() when the last reference goes away.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
536 
/* Reset the negotiable channel parameters to their defaults, mark the
 * configuration as not yet complete and the link as force-active.
 * Called before a channel (re)enters configuration.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	/* Until negotiated, assume the remote mirrors our defaults */
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	chan->conf_state = 0;
	/* Cleared again in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
557 
/* Reset LE credit-based flow-control state for a (re)connect.
 * @tx_credits: initial credits granted to us by the peer.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	/* Give enough credits for a full packet */
	chan->rx_credits = (chan->imtu / chan->mps) + 1;

	skb_queue_head_init(&chan->tx_q);
}

/* Same as l2cap_le_flowctl_init() for enhanced credit-based channels,
 * which must additionally honour the spec-mandated minimum MPS.
 */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		/* Recompute credits against the adjusted MPS */
		chan->rx_credits = (chan->imtu / chan->mps) + 1;
	}
}
582 
/* Attach @chan to @conn: assign CIDs and default MTU according to the
 * channel type, take a channel reference for the connection's list and
 * link the channel in. Caller must hold conn->chan_lock (see
 * l2cap_chan_add() for the locked wrapper).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* Default disconnect reason until something more specific is known */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default extended-flow-spec (EFS) parameters */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference owned by conn->chan_l; dropped in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
634 
/* Locked wrapper for __l2cap_chan_add(): attach @chan to @conn while
 * holding conn->chan_lock.
 */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
641 
/* Detach @chan from its connection and tear down per-mode transmit
 * state; @err is delivered to the channel's owner via teardown().
 * NOTE(review): appears to rely on the caller holding conn->chan_lock
 * and the channel lock — confirm against call sites.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		/* Pairs with the hold taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	/* Nothing below was ever set up if configuration never completed */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
710 
711 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
712 			      void *data)
713 {
714 	struct l2cap_chan *chan;
715 
716 	list_for_each_entry(chan, &conn->chan_l, list) {
717 		func(chan, data);
718 	}
719 }
720 
721 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
722 		     void *data)
723 {
724 	if (!conn)
725 		return;
726 
727 	mutex_lock(&conn->chan_lock);
728 	__l2cap_chan_list(conn, func, data);
729 	mutex_unlock(&conn->chan_lock);
730 }
731 
732 EXPORT_SYMBOL_GPL(l2cap_chan_list);
733 
734 static void l2cap_conn_update_id_addr(struct work_struct *work)
735 {
736 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
737 					       id_addr_update_work);
738 	struct hci_conn *hcon = conn->hcon;
739 	struct l2cap_chan *chan;
740 
741 	mutex_lock(&conn->chan_lock);
742 
743 	list_for_each_entry(chan, &conn->chan_l, list) {
744 		l2cap_chan_lock(chan);
745 		bacpy(&chan->dst, &hcon->dst);
746 		chan->dst_type = bdaddr_dst_type(hcon);
747 		l2cap_chan_unlock(chan);
748 	}
749 
750 	mutex_unlock(&conn->chan_lock);
751 }
752 
753 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
754 {
755 	struct l2cap_conn *conn = chan->conn;
756 	struct l2cap_le_conn_rsp rsp;
757 	u16 result;
758 
759 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
760 		result = L2CAP_CR_LE_AUTHORIZATION;
761 	else
762 		result = L2CAP_CR_LE_BAD_PSM;
763 
764 	l2cap_state_change(chan, BT_DISCONN);
765 
766 	rsp.dcid    = cpu_to_le16(chan->scid);
767 	rsp.mtu     = cpu_to_le16(chan->imtu);
768 	rsp.mps     = cpu_to_le16(chan->mps);
769 	rsp.credits = cpu_to_le16(chan->rx_credits);
770 	rsp.result  = cpu_to_le16(result);
771 
772 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
773 		       &rsp);
774 }
775 
776 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
777 {
778 	struct l2cap_conn *conn = chan->conn;
779 	struct l2cap_ecred_conn_rsp rsp;
780 	u16 result;
781 
782 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
783 		result = L2CAP_CR_LE_AUTHORIZATION;
784 	else
785 		result = L2CAP_CR_LE_BAD_PSM;
786 
787 	l2cap_state_change(chan, BT_DISCONN);
788 
789 	memset(&rsp, 0, sizeof(rsp));
790 
791 	rsp.result  = cpu_to_le16(result);
792 
793 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
794 		       &rsp);
795 }
796 
797 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
798 {
799 	struct l2cap_conn *conn = chan->conn;
800 	struct l2cap_conn_rsp rsp;
801 	u16 result;
802 
803 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
804 		result = L2CAP_CR_SEC_BLOCK;
805 	else
806 		result = L2CAP_CR_BAD_PSM;
807 
808 	l2cap_state_change(chan, BT_DISCONN);
809 
810 	rsp.scid   = cpu_to_le16(chan->dcid);
811 	rsp.dcid   = cpu_to_le16(chan->scid);
812 	rsp.result = cpu_to_le16(result);
813 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
814 
815 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
816 }
817 
/* Close @chan for @reason, driving the shutdown appropriate for the
 * state the channel is currently in: tear down listeners, send a
 * disconnect request for established conn-oriented channels, reject a
 * pending incoming request, or simply delete the channel.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Give the peer a bounded time to answer the
			 * disconnect request before forcing teardown.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming request pending: send the reject matching the
		 * transport and channel mode before deleting.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					break;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
868 
/* Translate the channel type and requested security level into the
 * HCI authentication requirement (HCI_AT_*) for the link. As a side
 * effect, SDP (conn-oriented) and 3DSP (connectionless) channels at
 * BT_SECURITY_LOW are raised to BT_SECURITY_SDP.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		/* Raw (signalling-only) channels use dedicated bonding */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		/* SDP never requires bonding, only (optionally) MITM */
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		fallthrough;

	default:
		/* Everything else uses general bonding */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
920 
921 /* Service level security */
922 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
923 {
924 	struct l2cap_conn *conn = chan->conn;
925 	__u8 auth_type;
926 
927 	if (conn->hcon->type == LE_LINK)
928 		return smp_conn_security(conn->hcon, chan->sec_level);
929 
930 	auth_type = l2cap_get_auth_type(chan);
931 
932 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
933 				 initiator);
934 }
935 
936 static u8 l2cap_get_ident(struct l2cap_conn *conn)
937 {
938 	u8 id;
939 
940 	/* Get next available identificator.
941 	 *    1 - 128 are used by kernel.
942 	 *  129 - 199 are reserved.
943 	 *  200 - 254 are used by utilities like l2ping, etc.
944 	 */
945 
946 	mutex_lock(&conn->ident_lock);
947 
948 	if (++conn->tx_ident > 128)
949 		conn->tx_ident = 1;
950 
951 	id = conn->tx_ident;
952 
953 	mutex_unlock(&conn->ident_lock);
954 
955 	return id;
956 }
957 
958 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
959 			   void *data)
960 {
961 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
962 	u8 flags;
963 
964 	BT_DBG("code 0x%2.2x", code);
965 
966 	if (!skb)
967 		return;
968 
969 	/* Use NO_FLUSH if supported or we have an LE link (which does
970 	 * not support auto-flushing packets) */
971 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
972 	    conn->hcon->type == LE_LINK)
973 		flags = ACL_START_NO_FLUSH;
974 	else
975 		flags = ACL_START;
976 
977 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
978 	skb->priority = HCI_PRIO_MAX;
979 
980 	hci_send_acl(conn->hchan, skb, flags);
981 }
982 
983 static bool __chan_is_moving(struct l2cap_chan *chan)
984 {
985 	return chan->move_state != L2CAP_MOVE_STABLE &&
986 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
987 }
988 
/* Hand an outgoing data frame to HCI, choosing the transport (AMP
 * logical link vs. primary ACL link) and the flush behaviour.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Channel lives on an AMP controller and no move is in progress:
	 * send over the high-speed logical link, dropping the frame if
	 * that link is gone.
	 */
	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
1020 
1021 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
1022 {
1023 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
1024 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
1025 
1026 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
1027 		/* S-Frame */
1028 		control->sframe = 1;
1029 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1030 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1031 
1032 		control->sar = 0;
1033 		control->txseq = 0;
1034 	} else {
1035 		/* I-Frame */
1036 		control->sframe = 0;
1037 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1038 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1039 
1040 		control->poll = 0;
1041 		control->super = 0;
1042 	}
1043 }
1044 
1045 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1046 {
1047 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1048 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1049 
1050 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1051 		/* S-Frame */
1052 		control->sframe = 1;
1053 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1054 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1055 
1056 		control->sar = 0;
1057 		control->txseq = 0;
1058 	} else {
1059 		/* I-Frame */
1060 		control->sframe = 0;
1061 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1062 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1063 
1064 		control->poll = 0;
1065 		control->super = 0;
1066 	}
1067 }
1068 
1069 static inline void __unpack_control(struct l2cap_chan *chan,
1070 				    struct sk_buff *skb)
1071 {
1072 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1073 		__unpack_extended_control(get_unaligned_le32(skb->data),
1074 					  &bt_cb(skb)->l2cap);
1075 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1076 	} else {
1077 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1078 					  &bt_cb(skb)->l2cap);
1079 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1080 	}
1081 }
1082 
1083 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1084 {
1085 	u32 packed;
1086 
1087 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1088 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1089 
1090 	if (control->sframe) {
1091 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1092 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1093 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1094 	} else {
1095 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1096 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1097 	}
1098 
1099 	return packed;
1100 }
1101 
1102 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1103 {
1104 	u16 packed;
1105 
1106 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1107 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1108 
1109 	if (control->sframe) {
1110 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1111 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1112 		packed |= L2CAP_CTRL_FRAME_TYPE;
1113 	} else {
1114 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1115 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1116 	}
1117 
1118 	return packed;
1119 }
1120 
1121 static inline void __pack_control(struct l2cap_chan *chan,
1122 				  struct l2cap_ctrl *control,
1123 				  struct sk_buff *skb)
1124 {
1125 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1126 		put_unaligned_le32(__pack_extended_control(control),
1127 				   skb->data + L2CAP_HDR_SIZE);
1128 	} else {
1129 		put_unaligned_le16(__pack_enhanced_control(control),
1130 				   skb->data + L2CAP_HDR_SIZE);
1131 	}
1132 }
1133 
1134 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1135 {
1136 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1137 		return L2CAP_EXT_HDR_SIZE;
1138 	else
1139 		return L2CAP_ENH_HDR_SIZE;
1140 }
1141 
/* Allocate and build an S-frame PDU carrying the given (already packed)
 * control field.
 *
 * The skb contains the basic L2CAP header, the 16- or 32-bit control
 * field (depending on FLAG_EXT_CTRL) and, when CRC16 FCS is enabled,
 * the FCS computed over everything before it. Returns
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	/* S-frames carry no payload: length is control field (+ FCS) only */
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers what was queued so far: header + control */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	/* S-frames are flow-control signalling: send at top priority */
	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1174 
/* Build and transmit a single S-frame described by @control.
 *
 * Has channel-state side effects: a pending F-bit is consumed and set
 * on the frame (unless this is a poll), CONN_RNR_SENT tracks RR/RNR
 * frames, and for anything but an SREJ the frame acknowledges up to
 * control->reqseq so the ack timer is cleared.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* Don't send while the channel is moving between controllers */
	if (__chan_is_moving(chan))
		return;

	/* Consume a pending F-bit, except on polls */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* SREJ frames do not acknowledge reqseq */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1215 
1216 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1217 {
1218 	struct l2cap_ctrl control;
1219 
1220 	BT_DBG("chan %p, poll %d", chan, poll);
1221 
1222 	memset(&control, 0, sizeof(control));
1223 	control.sframe = 1;
1224 	control.poll = poll;
1225 
1226 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1227 		control.super = L2CAP_SUPER_RNR;
1228 	else
1229 		control.super = L2CAP_SUPER_RR;
1230 
1231 	control.reqseq = chan->buffer_seq;
1232 	l2cap_send_sframe(chan, &control);
1233 }
1234 
1235 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1236 {
1237 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1238 		return true;
1239 
1240 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1241 }
1242 
/* Decide whether @chan may be created on (or moved to) an AMP
 * controller.
 *
 * Requires both sides to advertise A2MP on their fixed channel masks,
 * at least one non-BR/EDR (i.e. AMP) controller to be powered up, and
 * the channel policy to prefer AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return false;

	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
		return false;

	/* Look for any powered-up AMP controller */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1270 
/* Validate the channel's Extended Flow Specification parameters.
 * Currently a stub that accepts any parameters.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1276 
1277 void l2cap_send_conn_req(struct l2cap_chan *chan)
1278 {
1279 	struct l2cap_conn *conn = chan->conn;
1280 	struct l2cap_conn_req req;
1281 
1282 	req.scid = cpu_to_le16(chan->scid);
1283 	req.psm  = chan->psm;
1284 
1285 	chan->ident = l2cap_get_ident(conn);
1286 
1287 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1288 
1289 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1290 }
1291 
1292 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1293 {
1294 	struct l2cap_create_chan_req req;
1295 	req.scid = cpu_to_le16(chan->scid);
1296 	req.psm  = chan->psm;
1297 	req.amp_id = amp_id;
1298 
1299 	chan->ident = l2cap_get_ident(chan->conn);
1300 
1301 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1302 		       sizeof(req), &req);
1303 }
1304 
/* Prepare an ERTM channel for a controller move.
 *
 * Stops all ERTM timers, rewinds retry bookkeeping on already-sent
 * frames, clears retransmit/SREJ state and parks the rx/tx state
 * machines until the move completes. No-op for non-ERTM modes.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Reset retry counts on frames that were already sent; stop at
	 * the first never-sent frame (retries == 0).
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->l2cap.retries)
			bt_cb(skb)->l2cap.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmission until the move finishes */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1339 
1340 static void l2cap_move_done(struct l2cap_chan *chan)
1341 {
1342 	u8 move_role = chan->move_role;
1343 	BT_DBG("chan %p", chan);
1344 
1345 	chan->move_state = L2CAP_MOVE_STABLE;
1346 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1347 
1348 	if (chan->mode != L2CAP_MODE_ERTM)
1349 		return;
1350 
1351 	switch (move_role) {
1352 	case L2CAP_MOVE_ROLE_INITIATOR:
1353 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1354 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1355 		break;
1356 	case L2CAP_MOVE_ROLE_RESPONDER:
1357 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1358 		break;
1359 	}
1360 }
1361 
1362 static void l2cap_chan_ready(struct l2cap_chan *chan)
1363 {
1364 	/* The channel may have already been flagged as connected in
1365 	 * case of receiving data before the L2CAP info req/rsp
1366 	 * procedure is complete.
1367 	 */
1368 	if (chan->state == BT_CONNECTED)
1369 		return;
1370 
1371 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1372 	chan->conf_state = 0;
1373 	__clear_chan_timer(chan);
1374 
1375 	switch (chan->mode) {
1376 	case L2CAP_MODE_LE_FLOWCTL:
1377 	case L2CAP_MODE_EXT_FLOWCTL:
1378 		if (!chan->tx_credits)
1379 			chan->ops->suspend(chan);
1380 		break;
1381 	}
1382 
1383 	chan->state = BT_CONNECTED;
1384 
1385 	chan->ops->ready(chan);
1386 }
1387 
/* Send an LE Credit Based Connection Request for @chan, at most once.
 *
 * Initializes LE flow control state and advertises our MTU, MPS and
 * initial rx credits to the peer.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Default to the link MTU if the owner didn't pick one */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1413 
/* Scratch data used while building an Enhanced Credit Based
 * Connection Request that aggregates multiple deferred channels.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req req;
		__le16 scid[5];		/* source CIDs carried in the PDU */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel initiating the request */
	struct pid *pid;		/* peer PID the channels must match */
	int count;			/* number of scid[] slots filled */
};
1423 
1424 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1425 {
1426 	struct l2cap_ecred_conn_data *conn = data;
1427 	struct pid *pid;
1428 
1429 	if (chan == conn->chan)
1430 		return;
1431 
1432 	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1433 		return;
1434 
1435 	pid = chan->ops->get_peer_pid(chan);
1436 
1437 	/* Only add deferred channels with the same PID/PSM */
1438 	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1439 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1440 		return;
1441 
1442 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1443 		return;
1444 
1445 	l2cap_ecred_init(chan, 0);
1446 
1447 	/* Set the same ident so we can match on the rsp */
1448 	chan->ident = conn->chan->ident;
1449 
1450 	/* Include all channels deferred */
1451 	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1452 
1453 	conn->count++;
1454 }
1455 
/* Send an Enhanced Credit Based Connection Request for @chan.
 *
 * Other channels on the connection whose setup was deferred and which
 * match this channel (see l2cap_ecred_defer_connect) are folded into
 * the same request PDU. Sent at most once per channel.
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	/* Deferred channels are picked up by an initiating sibling */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm     = chan->psm;
	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.req.mps     = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0]     = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	/* Collect matching deferred channels into data.pdu.scid[] */
	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1488 
1489 static void l2cap_le_start(struct l2cap_chan *chan)
1490 {
1491 	struct l2cap_conn *conn = chan->conn;
1492 
1493 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1494 		return;
1495 
1496 	if (!chan->psm) {
1497 		l2cap_chan_ready(chan);
1498 		return;
1499 	}
1500 
1501 	if (chan->state == BT_CONNECT) {
1502 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1503 			l2cap_ecred_connect(chan);
1504 		else
1505 			l2cap_le_connect(chan);
1506 	}
1507 }
1508 
1509 static void l2cap_start_connection(struct l2cap_chan *chan)
1510 {
1511 	if (__amp_capable(chan)) {
1512 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1513 		a2mp_discover_amp(chan);
1514 	} else if (chan->conn->hcon->type == LE_LINK) {
1515 		l2cap_le_start(chan);
1516 	} else {
1517 		l2cap_send_conn_req(chan);
1518 	}
1519 }
1520 
1521 static void l2cap_request_info(struct l2cap_conn *conn)
1522 {
1523 	struct l2cap_info_req req;
1524 
1525 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1526 		return;
1527 
1528 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1529 
1530 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1531 	conn->info_ident = l2cap_get_ident(conn);
1532 
1533 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1534 
1535 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1536 		       sizeof(req), &req);
1537 }
1538 
1539 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1540 {
1541 	/* The minimum encryption key size needs to be enforced by the
1542 	 * host stack before establishing any L2CAP connections. The
1543 	 * specification in theory allows a minimum of 1, but to align
1544 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1545 	 *
1546 	 * This check might also be called for unencrypted connections
1547 	 * that have no key size requirements. Ensure that the link is
1548 	 * actually encrypted before enforcing a key size.
1549 	 */
1550 	int min_key_size = hcon->hdev->min_enc_key_size;
1551 
1552 	/* On FIPS security level, key size must be 16 bytes */
1553 	if (hcon->sec_level == BT_SECURITY_FIPS)
1554 		min_key_size = 16;
1555 
1556 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1557 		hcon->enc_key_size >= min_key_size);
1558 }
1559 
/* Advance connection setup for @chan as far as the current connection
 * state allows.
 *
 * LE links go straight to the LE path. For BR/EDR the feature mask
 * exchange must have completed and security must be satisfied before
 * the connect (or AMP create channel) request is sent.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		/* Setup resumes from l2cap_conn_start() once the info
		 * response arrives or the exchange times out.
		 */
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	if (l2cap_check_enc_key_size(conn->hcon))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1586 
1587 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1588 {
1589 	u32 local_feat_mask = l2cap_feat_mask;
1590 	if (!disable_ertm)
1591 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1592 
1593 	switch (mode) {
1594 	case L2CAP_MODE_ERTM:
1595 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1596 	case L2CAP_MODE_STREAMING:
1597 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1598 	default:
1599 		return 0x00;
1600 	}
1601 }
1602 
/* Send a Disconnection Request for @chan and move it to BT_DISCONN
 * with @err as the channel error.
 *
 * A2MP channels have no disconnect PDU; they only change state.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* Stop ERTM timers; no further data will flow on this channel */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1629 
1630 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its connection setup.
 *
 * Called once the feature mask exchange has finished (or timed out).
 * Outgoing channels (BT_CONNECT) get their connect request sent;
 * incoming channels (BT_CONNECT2) get their pending connect response,
 * and configuration is started for the accepted ones.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close the channel if its required mode can't be
			 * used on this link.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Wait for the owner to accept */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Accepted: kick off configuration */
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1710 
/* LE link is ready: kick pending SMP security for outgoing pairing
 * and, when acting as peripheral, correct an out-of-range connection
 * interval via a Connection Parameter Update Request.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1743 
/* The underlying HCI connection is ready for L2CAP traffic.
 *
 * Starts the feature exchange on ACL links, advances every existing
 * channel, and finally schedules processing of RX frames that arrived
 * before the connection was ready.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels need no connection setup */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1784 
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* Only channels that demanded a reliable link are affected */
	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1801 
/* Info request timed out: treat the feature exchange as done so that
 * pending channels can continue connection setup.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1812 
1813 /*
1814  * l2cap_user
1815  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1816  * callback is called during registration. The ->remove callback is called
1817  * during unregistration.
 * An l2cap_user object can either be explicitly unregistered or is
 * implicitly unregistered when the underlying l2cap_conn object is deleted.
1820  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1821  * External modules must own a reference to the l2cap_conn object if they intend
1822  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1823  * any time if they don't.
1824  */
1825 
/* Register @user on @conn (see the l2cap_user comment above for the
 * lifetime rules).
 *
 * Returns 0 on success, -EINVAL if @user is already registered,
 * -ENODEV if the connection is being torn down, or the error returned
 * by the user's ->probe() callback.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1863 
1864 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1865 {
1866 	struct hci_dev *hdev = conn->hcon->hdev;
1867 
1868 	hci_dev_lock(hdev);
1869 
1870 	if (list_empty(&user->list))
1871 		goto out_unlock;
1872 
1873 	list_del_init(&user->list);
1874 	user->remove(conn, user);
1875 
1876 out_unlock:
1877 	hci_dev_unlock(hdev);
1878 }
1879 EXPORT_SYMBOL(l2cap_unregister_user);
1880 
1881 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1882 {
1883 	struct l2cap_user *user;
1884 
1885 	while (!list_empty(&conn->users)) {
1886 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1887 		list_del_init(&user->list);
1888 		user->remove(conn, user);
1889 	}
1890 }
1891 
/* Tear down the L2CAP layer of @hcon.
 *
 * Flushes pending RX work, unregisters users, closes every channel
 * with @err and finally drops the connection's own reference. Safe to
 * call when no l2cap_data is attached.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so ->close() can't free it under us */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1947 
/* kref release callback: drop the hci_conn reference and free @conn. */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1955 
/* Take a reference on @conn; returns @conn for call chaining. */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1962 
/* Drop a reference on @conn; frees it when the count reaches zero. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1968 
1969 /* ---- Socket interface ---- */
1970 
/* Find a channel in @state bound to @psm for the given source /
 * destination bdaddr pair on @link_type.
 *
 * An exact address match wins immediately; otherwise the closest
 * wildcard (BDADDR_ANY) match found is returned. The returned channel
 * has its refcount raised (hold_unless_zero), so the caller must drop
 * it with l2cap_chan_put(). Returns NULL if nothing matches.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* Address type must match the link's transport */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Skip channels already being torn down */
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
2024 
2025 static void l2cap_monitor_timeout(struct work_struct *work)
2026 {
2027 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2028 					       monitor_timer.work);
2029 
2030 	BT_DBG("chan %p", chan);
2031 
2032 	l2cap_chan_lock(chan);
2033 
2034 	if (!chan->conn) {
2035 		l2cap_chan_unlock(chan);
2036 		l2cap_chan_put(chan);
2037 		return;
2038 	}
2039 
2040 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2041 
2042 	l2cap_chan_unlock(chan);
2043 	l2cap_chan_put(chan);
2044 }
2045 
2046 static void l2cap_retrans_timeout(struct work_struct *work)
2047 {
2048 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2049 					       retrans_timer.work);
2050 
2051 	BT_DBG("chan %p", chan);
2052 
2053 	l2cap_chan_lock(chan);
2054 
2055 	if (!chan->conn) {
2056 		l2cap_chan_unlock(chan);
2057 		l2cap_chan_put(chan);
2058 		return;
2059 	}
2060 
2061 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2062 	l2cap_chan_unlock(chan);
2063 	l2cap_chan_put(chan);
2064 }
2065 
/* Transmit SDU fragments in streaming mode.
 *
 * Streaming mode has no acknowledgement or retransmission: each frame
 * is stamped with the next tx sequence number, gets its control field
 * (and FCS, if enabled) written, and is sent immediately.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Hold off while the channel moves between controllers */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming frames carry no acknowledgement */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
2104 
/* Transmit as many queued I-frames as the ERTM tx window allows.
 *
 * Frames are sent from tx_send_head while the channel is connected,
 * the peer is not busy and unacked_frames stays below the remote tx
 * window. Each frame is cloned for transmission so the original stays
 * in tx_q for possible retransmission. Returns the number of frames
 * sent, or a negative error if the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggyback a pending F-bit on this frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; the frame itself stays in tx_q */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2174 
/* Retransmit every frame queued on the channel's retrans_list.
 *
 * Each sequence number popped from the list is looked up in tx_q,
 * gets a refreshed reqseq/F-bit and FCS, and is re-sent. Exceeding
 * max_tx retransmissions disconnects the channel.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Bring the ack up to date and carry a pending F-bit */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2255 
/* Retransmit the single I-frame identified by control->reqseq
 * (typically in response to an SREJ from the peer).
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2264 
/* Retransmit all unacked I-frames starting at control->reqseq (typically in
 * response to a REJ or a poll).  Builds a fresh retrans_list from the tx
 * queue and kicks off the resend, unless the peer is busy.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll requires the F-bit to be set on our response */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* First walk: find the frame with txseq == reqseq (or stop
		 * at tx_send_head, the first never-sent frame).
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Second walk: queue every already-sent frame from that
		 * point up to (not including) tx_send_head for resend.
		 */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2298 
/* Acknowledge received I-frames.  Sends RNR when locally busy; otherwise
 * tries to piggyback the ack on pending I-frames, sends an explicit RR once
 * the ack backlog reaches 3/4 of the window, or arms the ack timer to
 * acknowledge later.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	/* Number of received frames not yet acknowledged to the peer */
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Locally busy: tell the peer to stop with an RNR frame */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div: w + (w << 1) == 3w,
		 * then >> 2 divides by 4.
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Backlog below threshold: defer the ack via the timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2348 
/* Copy @len bytes of user data from @msg into @skb: the first @count bytes
 * go into skb's linear area (headers were already reserved by the caller),
 * the remainder is split into MTU-sized continuation skbs chained on
 * skb's frag_list.  Returns bytes copied or a negative error.  On error the
 * caller frees @skb, which also releases any fragments already chained.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link before copying so a failed copy still leaves the
		 * fragment reachable from skb for cleanup by the caller.
		 */
		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
				   &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Account fragment bytes in the head skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2392 
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the 2-byte
 * PSM, followed by @len bytes of user data from @msg.  Returns the skb or
 * an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Linear part is capped by the HCI MTU; the rest gets fragmented */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* PDU length covers the PSM field as well as the payload */
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2424 
/* Build a basic-mode (B-frame) PDU: L2CAP header followed by @len bytes of
 * user data from @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	/* Linear part is capped by the HCI MTU; the rest gets fragmented */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2454 
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control field
 * (filled in later at send time), an optional SDU length field (@sdulen
 * non-zero for the first fragment of a segmented SDU), then @len bytes of
 * user data.  Room for the FCS is reserved via the header length but the
 * FCS itself is appended when the frame is transmitted.  Returns the skb or
 * an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Overhead: basic header + control field size for this mode... */
	hlen = __ertm_hdr_size(chan);

	/* ...plus SDU length on SAR-start frames... */
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	/* ...plus trailing FCS if configured */
	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Stash per-frame state used by the ERTM transmit machinery */
	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2508 
/* Segment an SDU from @msg into one or more I-frame PDUs queued on
 * @seg_queue, tagging each with the appropriate SAR value (UNSEGMENTED,
 * or START/CONTINUE/END).  Returns 0 on success or a negative error, in
 * which case @seg_queue is purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SDU length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First fragment carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the START fragment carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2575 
/* Build an LE flow-control (K-frame) PDU: L2CAP header, an optional SDU
 * length field (@sdulen non-zero on the first fragment of an SDU), then
 * @len bytes of user data from @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	/* Only the first PDU of an SDU carries the SDU length */
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2618 
/* Segment an SDU from @msg into LE flow-control PDUs queued on @seg_queue.
 * The first PDU carries the total SDU length and therefore has
 * L2CAP_SDULEN_SIZE less payload room than the rest.  Returns 0 on success
 * or a negative error, in which case @seg_queue is purged.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	/* First PDU's payload budget leaves room for the SDU length field */
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		/* After the first PDU: no SDU length field, so the payload
		 * budget grows back by L2CAP_SDULEN_SIZE.
		 */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2654 
2655 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2656 {
2657 	int sent = 0;
2658 
2659 	BT_DBG("chan %p", chan);
2660 
2661 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2662 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2663 		chan->tx_credits--;
2664 		sent++;
2665 	}
2666 
2667 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2668 	       skb_queue_len(&chan->tx_q));
2669 }
2670 
/* Send @len bytes of user data from @msg on @chan, dispatching on channel
 * type and mode (connectionless, LE/extended flow control, basic, ERTM,
 * streaming).  Returns the number of bytes sent or a negative error.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Segmentation may have dropped the channel lock; recheck
		 * state and discard the segments if it closed meanwhile.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: tell the socket layer to stop the writer */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2797 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2798 
/* Send an SREJ S-frame for every sequence number between the expected
 * txseq and the received (out-of-order) @txseq that is not already sitting
 * in the srej_q, remembering each on srej_list so the retransmissions can
 * be matched up later.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Skip frames already received and parked in srej_q */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	/* Frames up to and including @txseq are now accounted for */
	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2821 
2822 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2823 {
2824 	struct l2cap_ctrl control;
2825 
2826 	BT_DBG("chan %p", chan);
2827 
2828 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2829 		return;
2830 
2831 	memset(&control, 0, sizeof(control));
2832 	control.sframe = 1;
2833 	control.super = L2CAP_SUPER_SREJ;
2834 	control.reqseq = chan->srej_list.tail;
2835 	l2cap_send_sframe(chan, &control);
2836 }
2837 
/* Re-send SREJ frames for all outstanding missing sequence numbers except
 * @txseq, rotating each popped entry back onto srej_list.  The saved
 * initial head bounds the loop to a single pass despite the re-appends.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* Stop at the just-received frame or when the list empties */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Still missing: keep it on the list for future matching */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2863 
/* Process an incoming acknowledgment (@reqseq acks everything before it):
 * free each newly acked frame from the tx queue, advance expected_ack_seq,
 * and stop the retransmission timer once nothing is left unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack is a duplicate */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	/* All frames acked: no retransmission pending */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2895 
/* Abandon the SREJ_SENT receive state: forget outstanding selective-reject
 * requests, drop any out-of-order frames held in srej_q, and fall back to
 * plain RECV with expected_tx_seq rewound to the last in-order point.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2905 
/* ERTM transmit-side state machine handler for the XMIT state: queue and
 * send new data, enter/exit local-busy, process incoming acks, and move to
 * WAIT_F after sending a poll.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		/* Append the new segments and transmit what the window allows */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* l2cap_send_ack() sends RNR while LOCAL_BUSY is set */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the peer we were busy; poll (RR with P=1)
			 * to resynchronize and wait for the F-bit response.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer and await F-bit */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2977 
2978 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2979 				  struct l2cap_ctrl *control,
2980 				  struct sk_buff_head *skbs, u8 event)
2981 {
2982 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2983 	       event);
2984 
2985 	switch (event) {
2986 	case L2CAP_EV_DATA_REQUEST:
2987 		if (chan->tx_send_head == NULL)
2988 			chan->tx_send_head = skb_peek(skbs);
2989 		/* Queue data, but don't send. */
2990 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2991 		break;
2992 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2993 		BT_DBG("Enter LOCAL_BUSY");
2994 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2995 
2996 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2997 			/* The SREJ_SENT state must be aborted if we are to
2998 			 * enter the LOCAL_BUSY state.
2999 			 */
3000 			l2cap_abort_rx_srej_sent(chan);
3001 		}
3002 
3003 		l2cap_send_ack(chan);
3004 
3005 		break;
3006 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
3007 		BT_DBG("Exit LOCAL_BUSY");
3008 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3009 
3010 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
3011 			struct l2cap_ctrl local_control;
3012 			memset(&local_control, 0, sizeof(local_control));
3013 			local_control.sframe = 1;
3014 			local_control.super = L2CAP_SUPER_RR;
3015 			local_control.poll = 1;
3016 			local_control.reqseq = chan->buffer_seq;
3017 			l2cap_send_sframe(chan, &local_control);
3018 
3019 			chan->retry_count = 1;
3020 			__set_monitor_timer(chan);
3021 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
3022 		}
3023 		break;
3024 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
3025 		l2cap_process_reqseq(chan, control->reqseq);
3026 		fallthrough;
3027 
3028 	case L2CAP_EV_RECV_FBIT:
3029 		if (control && control->final) {
3030 			__clear_monitor_timer(chan);
3031 			if (chan->unacked_frames > 0)
3032 				__set_retrans_timer(chan);
3033 			chan->retry_count = 0;
3034 			chan->tx_state = L2CAP_TX_STATE_XMIT;
3035 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
3036 		}
3037 		break;
3038 	case L2CAP_EV_EXPLICIT_POLL:
3039 		/* Ignore */
3040 		break;
3041 	case L2CAP_EV_MONITOR_TO:
3042 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3043 			l2cap_send_rr_or_rnr(chan, 1);
3044 			__set_monitor_timer(chan);
3045 			chan->retry_count++;
3046 		} else {
3047 			l2cap_send_disconn_req(chan, ECONNABORTED);
3048 		}
3049 		break;
3050 	default:
3051 		break;
3052 	}
3053 }
3054 
3055 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3056 		     struct sk_buff_head *skbs, u8 event)
3057 {
3058 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3059 	       chan, control, skbs, event, chan->tx_state);
3060 
3061 	switch (chan->tx_state) {
3062 	case L2CAP_TX_STATE_XMIT:
3063 		l2cap_tx_state_xmit(chan, control, skbs, event);
3064 		break;
3065 	case L2CAP_TX_STATE_WAIT_F:
3066 		l2cap_tx_state_wait_f(chan, control, skbs, event);
3067 		break;
3068 	default:
3069 		/* Ignore event */
3070 		break;
3071 	}
3072 }
3073 
/* Feed a received frame's reqseq/F-bit into the transmit state machine. */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
3080 
/* Feed only a received frame's F-bit into the transmit state machine. */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
3087 
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->l2cap.chan == chan)
			continue;

		/* Each raw channel gets its own clone; skip this channel
		 * (best effort) if the clone cannot be allocated.
		 */
		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		/* ops->recv takes ownership on success; free on failure */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
3115 
3116 /* ---- L2CAP signalling commands ---- */
3117 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3118 				       u8 ident, u16 dlen, void *data)
3119 {
3120 	struct sk_buff *skb, **frag;
3121 	struct l2cap_cmd_hdr *cmd;
3122 	struct l2cap_hdr *lh;
3123 	int len, count;
3124 
3125 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3126 	       conn, code, ident, dlen);
3127 
3128 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3129 		return NULL;
3130 
3131 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3132 	count = min_t(unsigned int, conn->mtu, len);
3133 
3134 	skb = bt_skb_alloc(count, GFP_KERNEL);
3135 	if (!skb)
3136 		return NULL;
3137 
3138 	lh = skb_put(skb, L2CAP_HDR_SIZE);
3139 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
3140 
3141 	if (conn->hcon->type == LE_LINK)
3142 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3143 	else
3144 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
3145 
3146 	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
3147 	cmd->code  = code;
3148 	cmd->ident = ident;
3149 	cmd->len   = cpu_to_le16(dlen);
3150 
3151 	if (dlen) {
3152 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3153 		skb_put_data(skb, data, count);
3154 		data += count;
3155 	}
3156 
3157 	len -= skb->len;
3158 
3159 	/* Continuation fragments (no L2CAP header) */
3160 	frag = &skb_shinfo(skb)->frag_list;
3161 	while (len) {
3162 		count = min_t(unsigned int, conn->mtu, len);
3163 
3164 		*frag = bt_skb_alloc(count, GFP_KERNEL);
3165 		if (!*frag)
3166 			goto fail;
3167 
3168 		skb_put_data(*frag, data, count);
3169 
3170 		len  -= count;
3171 		data += count;
3172 
3173 		frag = &(*frag)->next;
3174 	}
3175 
3176 	return skb;
3177 
3178 fail:
3179 	kfree_skb(skb);
3180 	return NULL;
3181 }
3182 
/* Parse one configuration option at *ptr, returning its type, length and
 * value and advancing *ptr past it.  1/2/4-byte values are returned
 * inline; any other length returns a pointer to the raw value bytes.
 * Returns the total bytes consumed.
 *
 * NOTE(review): this helper does not bounds-check opt->len against the
 * remaining buffer; callers appear responsible for validating the option
 * fits before calling — confirm at each call site.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer to the data */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3216 
/* Append one configuration option (@type, @len, @val) at *ptr and advance
 * *ptr past it.  1/2/4-byte values are stored little-endian; other lengths
 * treat @val as a pointer to raw bytes.  Silently skipped if fewer than
 * @size bytes remain in the output buffer.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Not enough room left in the response buffer: drop the option */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: @val is a pointer to the bytes */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3249 
/* Append an Extended Flow Specification option for ERTM or streaming mode
 * channels; other modes get no EFS option.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		/* Streaming mode uses fixed best-effort parameters */
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3280 
/* Deferred-ack timer worker: if received frames are still unacknowledged
 * when the timer fires, send an explicit RR/RNR.  Drops the channel
 * reference that was taken when the timer was armed.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* Balances the reference held while the timer was pending */
	l2cap_chan_put(chan);
}
3300 
/* Reset sequence numbers, queues and AMP move state for a newly configured
 * channel; additionally set up the SREJ/retransmission machinery when the
 * channel is in ERTM mode.  Returns 0 or a negative error from sequence
 * list allocation.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming and basic modes need none of the ERTM state below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	/* Sequence lists are sized by the respective transmit windows */
	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Unwind the first allocation on failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3341 
3342 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3343 {
3344 	switch (mode) {
3345 	case L2CAP_MODE_STREAMING:
3346 	case L2CAP_MODE_ERTM:
3347 		if (l2cap_mode_supported(mode, remote_feat_mask))
3348 			return mode;
3349 		fallthrough;
3350 	default:
3351 		return L2CAP_MODE_BASIC;
3352 	}
3353 }
3354 
3355 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3356 {
3357 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3358 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3359 }
3360 
3361 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3362 {
3363 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3364 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3365 }
3366 
/* Fill in the retransmission and monitor timeouts of an RFC option.
 *
 * For a channel on an AMP controller the timeouts are derived from the
 * controller's best-effort flush timeout; on BR/EDR the specification
 * defaults are used.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range of the RFC option fields */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3404 
3405 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3406 {
3407 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3408 	    __l2cap_ews_supported(chan->conn)) {
3409 		/* use extended control field */
3410 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3411 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3412 	} else {
3413 		chan->tx_win = min_t(u16, chan->tx_win,
3414 				     L2CAP_DEFAULT_TX_WINDOW);
3415 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3416 	}
3417 	chan->ack_win = chan->tx_win;
3418 }
3419 
3420 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3421 {
3422 	struct hci_conn *conn = chan->conn->hcon;
3423 
3424 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3425 
3426 	/* The 2-DH1 packet has between 2 and 56 information bytes
3427 	 * (including the 2-byte payload header)
3428 	 */
3429 	if (!(conn->pkt_type & HCI_2DH1))
3430 		chan->imtu = 54;
3431 
3432 	/* The 3-DH1 packet has between 2 and 85 information bytes
3433 	 * (including the 2-byte payload header)
3434 	 */
3435 	if (!(conn->pkt_type & HCI_3DH1))
3436 		chan->imtu = 83;
3437 
3438 	/* The 2-DH3 packet has between 2 and 369 information bytes
3439 	 * (including the 2-byte payload header)
3440 	 */
3441 	if (!(conn->pkt_type & HCI_2DH3))
3442 		chan->imtu = 367;
3443 
3444 	/* The 3-DH3 packet has between 2 and 554 information bytes
3445 	 * (including the 2-byte payload header)
3446 	 */
3447 	if (!(conn->pkt_type & HCI_3DH3))
3448 		chan->imtu = 552;
3449 
3450 	/* The 2-DH5 packet has between 2 and 681 information bytes
3451 	 * (including the 2-byte payload header)
3452 	 */
3453 	if (!(conn->pkt_type & HCI_2DH5))
3454 		chan->imtu = 679;
3455 
3456 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3457 	 * (including the 2-byte payload header)
3458 	 */
3459 	if (!(conn->pkt_type & HCI_3DH5))
3460 		chan->imtu = 1021;
3461 }
3462 
/* Build an outgoing Configure Request for @chan into @data (a buffer
 * of @data_size bytes): the l2cap_conf_req header followed by the
 * MTU, RFC, EFS, EWS and FCS options appropriate for the channel
 * mode.  Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection is only done once, on the first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* "State 2" devices keep their configured mode as-is */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		/* Fall back to a mode the remote's feature mask allows */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		/* imtu == 0 means "derive it from the ACL packet types" */
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only send an explicit basic-mode RFC option when the
		 * remote advertises ERTM or streaming support.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Cap the PDU size so a full frame (extended header,
		 * SDU length and FCS included) fits in the ACL MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* Window sizes above the classic maximum go in EWS */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3588 
/* Parse the remote's Configure Request (buffered in chan->conf_req)
 * and build our Configure Response into @data (at most @data_size
 * bytes).
 *
 * Returns the number of response bytes written, or -ECONNREFUSED when
 * the requested configuration cannot be negotiated at all.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: collect the remote's options.  Options with a
	 * wrong length are silently skipped.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended window size requires A2MP support */
			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
				return -ECONNREFUSED;
			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			/* Hint options may be ignored; unknown non-hint
			 * options are echoed back with CONF_UNKNOWN.
			 */
			if (hint)
				break;
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	/* Mode negotiation only happens on the first round */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* A "state 2" device cannot change its mode */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Give the remote exactly one chance to retry */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			/* Reject an incompatible remote service type */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* An EWS option overrides the RFC window field */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Cap the PDU size to what fits in the ACL MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (remote_efs &&
			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3814 
/* Parse the remote's Configure Response (@rsp, @len bytes) and build a
 * follow-up Configure Request into @data (at most @size bytes),
 * adopting the values the remote proposed where acceptable.
 *
 * *@result carries the response's result code in and may be changed to
 * L2CAP_CONF_UNACCEPT here (MTU too small).  Returns the number of
 * request bytes written, or -ECONNREFUSED on an unacceptable response.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			/* Never accept an MTU below the spec minimum */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* A "state 2" device cannot change its mode */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Reject an incompatible service type */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Basic mode cannot be renegotiated to anything else */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			/* Without extended control, the RFC window caps
			 * the ack window.
			 */
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3932 
3933 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3934 				u16 result, u16 flags)
3935 {
3936 	struct l2cap_conf_rsp *rsp = data;
3937 	void *ptr = rsp->data;
3938 
3939 	BT_DBG("chan %p", chan);
3940 
3941 	rsp->scid   = cpu_to_le16(chan->dcid);
3942 	rsp->result = cpu_to_le16(result);
3943 	rsp->flags  = cpu_to_le16(flags);
3944 
3945 	return ptr - data;
3946 }
3947 
3948 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3949 {
3950 	struct l2cap_le_conn_rsp rsp;
3951 	struct l2cap_conn *conn = chan->conn;
3952 
3953 	BT_DBG("chan %p", chan);
3954 
3955 	rsp.dcid    = cpu_to_le16(chan->scid);
3956 	rsp.mtu     = cpu_to_le16(chan->imtu);
3957 	rsp.mps     = cpu_to_le16(chan->mps);
3958 	rsp.credits = cpu_to_le16(chan->rx_credits);
3959 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3960 
3961 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3962 		       &rsp);
3963 }
3964 
3965 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3966 {
3967 	struct {
3968 		struct l2cap_ecred_conn_rsp rsp;
3969 		__le16 dcid[5];
3970 	} __packed pdu;
3971 	struct l2cap_conn *conn = chan->conn;
3972 	u16 ident = chan->ident;
3973 	int i = 0;
3974 
3975 	if (!ident)
3976 		return;
3977 
3978 	BT_DBG("chan %p ident %d", chan, ident);
3979 
3980 	pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
3981 	pdu.rsp.mps     = cpu_to_le16(chan->mps);
3982 	pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
3983 	pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3984 
3985 	mutex_lock(&conn->chan_lock);
3986 
3987 	list_for_each_entry(chan, &conn->chan_l, list) {
3988 		if (chan->ident != ident)
3989 			continue;
3990 
3991 		/* Reset ident so only one response is sent */
3992 		chan->ident = 0;
3993 
3994 		/* Include all channels pending with the same ident */
3995 		pdu.dcid[i++] = cpu_to_le16(chan->scid);
3996 	}
3997 
3998 	mutex_unlock(&conn->chan_lock);
3999 
4000 	l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
4001 			sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
4002 }
4003 
4004 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
4005 {
4006 	struct l2cap_conn_rsp rsp;
4007 	struct l2cap_conn *conn = chan->conn;
4008 	u8 buf[128];
4009 	u8 rsp_code;
4010 
4011 	rsp.scid   = cpu_to_le16(chan->dcid);
4012 	rsp.dcid   = cpu_to_le16(chan->scid);
4013 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4014 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4015 
4016 	if (chan->hs_hcon)
4017 		rsp_code = L2CAP_CREATE_CHAN_RSP;
4018 	else
4019 		rsp_code = L2CAP_CONN_RSP;
4020 
4021 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
4022 
4023 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
4024 
4025 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4026 		return;
4027 
4028 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4029 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4030 	chan->num_conf_req++;
4031 }
4032 
/* Extract the final ERTM/streaming parameters from a (success or
 * pending) Configure Response @rsp of @len bytes and store them in
 * @chan.  Does nothing for channels in basic mode.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Pick up the RFC and EWS options, if present */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* The ack window comes from EWS when extended control
		 * is in use, otherwise from the RFC window field.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
4088 
4089 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4090 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4091 				    u8 *data)
4092 {
4093 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
4094 
4095 	if (cmd_len < sizeof(*rej))
4096 		return -EPROTO;
4097 
4098 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4099 		return 0;
4100 
4101 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4102 	    cmd->ident == conn->info_ident) {
4103 		cancel_delayed_work(&conn->info_timer);
4104 
4105 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4106 		conn->info_ident = 0;
4107 
4108 		l2cap_conn_start(conn);
4109 	}
4110 
4111 	return 0;
4112 }
4113 
/* Handle an incoming Connection Request (or AMP Create Channel
 * Request) and send the matching response.
 *
 * @rsp_code selects the response PDU type and @amp_id the controller
 * the channel should live on (AMP_ID_BREDR for plain BR/EDR).
 * Returns the newly created channel, or NULL if the request was
 * rejected.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Lock order: conn->chan_lock before the channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			/* Defer to userspace when the socket asked for it */
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still running */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature exchange not finished yet */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Kick off the feature-mask exchange if we haven't done it yet */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	return chan;
}
4256 
4257 static int l2cap_connect_req(struct l2cap_conn *conn,
4258 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4259 {
4260 	struct hci_dev *hdev = conn->hcon->hdev;
4261 	struct hci_conn *hcon = conn->hcon;
4262 
4263 	if (cmd_len < sizeof(struct l2cap_conn_req))
4264 		return -EPROTO;
4265 
4266 	hci_dev_lock(hdev);
4267 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4268 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4269 		mgmt_device_connected(hdev, hcon, NULL, 0);
4270 	hci_dev_unlock(hdev);
4271 
4272 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4273 	return 0;
4274 }
4275 
/* Handle a Connection Response / Create Channel Response from the
 * remote: on success move the channel to BT_CONFIG and start
 * configuration, on pending just mark it, otherwise tear the channel
 * down.  Returns 0 or -EBADSLT when no matching channel is found.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* Find the channel either by our source CID or, when the remote
	 * did not echo one, by the command ident.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	/* Take a reference so the channel survives until we are done;
	 * bail out if it is already being freed.
	 */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the Configure Request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a refusal */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4355 
4356 static inline void set_default_fcs(struct l2cap_chan *chan)
4357 {
4358 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4359 	 * sides request it.
4360 	 */
4361 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4362 		chan->fcs = L2CAP_FCS_NONE;
4363 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4364 		chan->fcs = L2CAP_FCS_CRC16;
4365 }
4366 
/* Send the final (success) Configure Response for a channel whose
 * response was held back pending extended flow spec negotiation, and
 * update the config state bits accordingly.  @data is scratch space
 * for building the response.
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	/* The pending phase is over; our output config is now final */
	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
		       l2cap_build_conf_rsp(chan, data,
					    L2CAP_CONF_SUCCESS, flags), data);
}
4382 
4383 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4384 				   u16 scid, u16 dcid)
4385 {
4386 	struct l2cap_cmd_rej_cid rej;
4387 
4388 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4389 	rej.scid = __cpu_to_le16(scid);
4390 	rej.dcid = __cpu_to_le16(dcid);
4391 
4392 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4393 }
4394 
/* Handle an incoming L2CAP Configure Request.
 *
 * Accumulates (possibly fragmented, via the continuation flag) config
 * options into chan->conf_req, then parses the complete request and
 * answers with a Configure Response.  Depending on configuration state
 * this may also send our own Configure Request, mark the channel ready,
 * or disconnect on a parse failure.
 *
 * Returns 0 on success or -EPROTO for a malformed command.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* On success the channel is returned locked with a reference
	 * held; released at the unlock label below.
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only valid while the channel is being set up */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup, starting
	 * ERTM state if the negotiated mode requires it.
	 */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Kick off our own Configure Request if we have not sent one yet */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4505 
/* Handle an incoming L2CAP Configure Response.
 *
 * Dispatches on the response result: SUCCESS completes our side of the
 * negotiation, PENDING defers it (with EFS/AMP handling), UNKNOWN and
 * UNACCEPT trigger a renegotiation bounded by L2CAP_CONF_MAX_CONF_RSP,
 * and anything else tears the channel down.
 *
 * Returns 0 on success or -EPROTO for a malformed command.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	/* Length of the option payload following the response header;
	 * only meaningful after the cmd_len check below.
	 */
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* On success the channel is returned locked with a reference
	 * held; released at the done label below.
	 */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* BR/EDR channels answer immediately; AMP channels
			 * wait for the logical link to be created first.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		/* Retry with adjusted options, up to the negotiation limit */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		/* Unrecoverable result: fail the channel and disconnect */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4620 
/* Handle an incoming L2CAP Disconnection Request.
 *
 * Acknowledges with a Disconnection Response and tears the channel
 * down with ECONNRESET.  Unknown CIDs are answered with a Command
 * Reject rather than an error.
 *
 * Returns 0 on success or -EPROTO for a malformed command.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* chan_lock must be held across lookup and l2cap_chan_del */
	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	/* Take a reference and the channel lock before deleting, so the
	 * channel cannot disappear under us.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_lock(chan);

	/* Echo the CIDs back from our perspective */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4667 
/* Handle an incoming L2CAP Disconnection Response.
 *
 * Completes a disconnect we initiated: the channel is deleted and
 * closed only if it is still in BT_DISCONN; late or unexpected
 * responses are silently ignored.
 *
 * Returns 0 on success or -EPROTO for a malformed command.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* chan_lock must be held across lookup and l2cap_chan_del */
	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	/* Take a reference and the channel lock before deleting, so the
	 * channel cannot disappear under us.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_lock(chan);

	/* Only act if we are actually waiting for this response */
	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_del(chan, 0);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4713 
/* Handle an incoming L2CAP Information Request.
 *
 * Answers feature-mask and fixed-channel queries with success
 * responses built from the local capabilities; any other type gets a
 * "not supported" response.
 *
 * Returns 0 on success or -EPROTO for a malformed command.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4-byte rsp header + 4-byte feature mask */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/streaming/FCS unless disabled by the
		 * disable_ertm module parameter.
		 */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 4-byte rsp header + 8-byte fixed channel bitmap */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4764 
/* Handle an incoming L2CAP Information Response.
 *
 * Completes our own feature-mask / fixed-channel query: stores the
 * peer's capabilities and, once the exchange is finished (or fails),
 * marks the info sequence done and kicks off pending connections via
 * l2cap_conn_start().
 *
 * Returns 0 on success or -EPROTO for a malformed command.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* If the peer supports fixed channels, follow up with a
		 * fixed-channel query before declaring the exchange done.
		 */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4827 
/* Handle an incoming L2CAP Create Channel Request (AMP).
 *
 * For controller id 0 this degenerates into a normal BR/EDR connect.
 * Otherwise the AMP controller id is validated and, if a channel is
 * created, it is associated with the AMP manager and the high-speed
 * hci_conn.  Invalid controller ids are answered with L2CAP_CR_BAD_AMP.
 *
 * Returns 0 on success, -EPROTO for a malformed command, or -EINVAL if
 * A2MP is not enabled locally.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP link to the same peer must already exist */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* No FCS on AMP channels; MTU follows the AMP block size */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	/* Drop the reference taken by hci_dev_get() above */
	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4904 
4905 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4906 {
4907 	struct l2cap_move_chan_req req;
4908 	u8 ident;
4909 
4910 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4911 
4912 	ident = l2cap_get_ident(chan->conn);
4913 	chan->ident = ident;
4914 
4915 	req.icid = cpu_to_le16(chan->scid);
4916 	req.dest_amp_id = dest_amp_id;
4917 
4918 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4919 		       &req);
4920 
4921 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4922 }
4923 
4924 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4925 {
4926 	struct l2cap_move_chan_rsp rsp;
4927 
4928 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4929 
4930 	rsp.icid = cpu_to_le16(chan->dcid);
4931 	rsp.result = cpu_to_le16(result);
4932 
4933 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4934 		       sizeof(rsp), &rsp);
4935 }
4936 
4937 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4938 {
4939 	struct l2cap_move_chan_cfm cfm;
4940 
4941 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4942 
4943 	chan->ident = l2cap_get_ident(chan->conn);
4944 
4945 	cfm.icid = cpu_to_le16(chan->scid);
4946 	cfm.result = cpu_to_le16(result);
4947 
4948 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4949 		       sizeof(cfm), &cfm);
4950 
4951 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4952 }
4953 
4954 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4955 {
4956 	struct l2cap_move_chan_cfm cfm;
4957 
4958 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4959 
4960 	cfm.icid = cpu_to_le16(icid);
4961 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4962 
4963 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4964 		       sizeof(cfm), &cfm);
4965 }
4966 
4967 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4968 					 u16 icid)
4969 {
4970 	struct l2cap_move_chan_cfm_rsp rsp;
4971 
4972 	BT_DBG("icid 0x%4.4x", icid);
4973 
4974 	rsp.icid = cpu_to_le16(icid);
4975 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4976 }
4977 
4978 static void __release_logical_link(struct l2cap_chan *chan)
4979 {
4980 	chan->hs_hchan = NULL;
4981 	chan->hs_hcon = NULL;
4982 
4983 	/* Placeholder - release the logical link */
4984 }
4985 
/* Clean up after a failed logical link setup.
 *
 * If the channel never reached BT_CONNECTED the whole channel is torn
 * down; otherwise only the in-progress move is aborted according to
 * this side's role in it.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
5016 
/* Complete channel creation once the AMP logical link is up.
 *
 * Attaches the hci_chan to the channel, sends the deferred EFS
 * Configure Response (rsp is only a scratch buffer for building it),
 * and finishes setup if the peer's configuration is already done.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* Answer the Configure Request that was held back waiting for
	 * this logical link (ident saved in chan->ident).
	 */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
5039 
/* Advance the channel-move state machine once the AMP logical link
 * for a move is up.  The next step depends on the current move state
 * and on whether this side initiated or is responding to the move.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		/* Defer while the local receiver is busy; otherwise the
		 * initiator confirms and the responder answers the peer.
		 */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
5073 
/* Logical link confirmation callback: routes success to either channel
 * creation or move completion, and failure to cleanup.
 *
 * Call with chan locked
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
5094 
5095 void l2cap_move_start(struct l2cap_chan *chan)
5096 {
5097 	BT_DBG("chan %p", chan);
5098 
5099 	if (chan->local_amp_id == AMP_ID_BREDR) {
5100 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
5101 			return;
5102 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5103 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5104 		/* Placeholder - start physical link setup */
5105 	} else {
5106 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5107 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5108 		chan->move_id = 0;
5109 		l2cap_move_setup(chan);
5110 		l2cap_send_move_chan_req(chan, 0);
5111 	}
5112 }
5113 
/* Continue channel creation after a physical link attempt.
 *
 * For an outgoing channel (BT_CONNECT): on success send a Create
 * Channel Request on the AMP, otherwise fall back to a plain BR/EDR
 * Connection Request.  For an incoming channel, answer the pending
 * Create Channel Request and, on success, start configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* AMP channels do not use an FCS */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Move straight into configuration */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}
	}
}
5165 
/* Initiator side: physical link is ready, so suspend data flow, record
 * the destination controller, and send the Move Channel Request.
 */
static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
				   u8 remote_amp_id)
{
	l2cap_move_setup(chan);
	chan->move_id = local_amp_id;
	chan->move_state = L2CAP_MOVE_WAIT_RSP;

	l2cap_send_move_chan_req(chan, remote_amp_id);
}
5175 
5176 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
5177 {
5178 	struct hci_chan *hchan = NULL;
5179 
5180 	/* Placeholder - get hci_chan for logical link */
5181 
5182 	if (hchan) {
5183 		if (hchan->state == BT_CONNECTED) {
5184 			/* Logical link is ready to go */
5185 			chan->hs_hcon = hchan->conn;
5186 			chan->hs_hcon->l2cap_data = chan->conn;
5187 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5188 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5189 
5190 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5191 		} else {
5192 			/* Wait for logical link to be ready */
5193 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5194 		}
5195 	} else {
5196 		/* Logical link not available */
5197 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
5198 	}
5199 }
5200 
5201 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5202 {
5203 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5204 		u8 rsp_result;
5205 		if (result == -EINVAL)
5206 			rsp_result = L2CAP_MR_BAD_ID;
5207 		else
5208 			rsp_result = L2CAP_MR_NOT_ALLOWED;
5209 
5210 		l2cap_send_move_chan_rsp(chan, rsp_result);
5211 	}
5212 
5213 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
5214 	chan->move_state = L2CAP_MOVE_STABLE;
5215 
5216 	/* Restart data transmission */
5217 	l2cap_ertm_send(chan);
5218 }
5219 
/* Physical link confirmation callback: dispatches to channel creation,
 * move initiation/response, or move cancellation depending on channel
 * state, result, and this side's role in the move.
 *
 * Invoke with locked chan
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* Channel already going away: nothing to do */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
		return;

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
5251 
/* Handle an incoming L2CAP Move Channel Request (AMP).
 *
 * Validates the channel and destination controller, detects move
 * collisions, and either rejects the move or becomes the responder and
 * starts moving to BR/EDR or to the requested AMP controller.  Always
 * answers with a Move Channel Response.
 *
 * Returns 0 on success, -EPROTO for a malformed command, or -EINVAL if
 * A2MP is not enabled locally.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	/* On success the channel is returned locked with a reference
	 * held; released after send_move_response below.
	 */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	/* Save ident so the response is sent for the right request */
	chan->ident = cmd->ident;

	/* Only dynamic ERTM/streaming channels without a BR/EDR-only
	 * policy may be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* Destination must be a usable, powered-up AMP controller */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision.  Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5349 
/* Continue an in-progress move after a SUCCESS or PEND Move Channel
 * Response.  Drives the initiator's move state machine; if no channel
 * matches the icid, an unconfirmed Move Channel Confirm is sent.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	/* On success the channel is returned locked with a reference
	 * held; released at the end of this function.
	 */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending result extends the move timeout */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
5440 
/* Handle a failed Move Channel Response.
 *
 * On a collision this side switches to the responder role; any other
 * failure cancels the move.  In all cases an unconfirmed Move Channel
 * Confirm is sent (using the icid as best guess when the channel
 * cannot be found by ident).
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	/* On success the channel is returned locked with a reference
	 * held; released at the end of this function.
	 */
	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
5470 
5471 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5472 				  struct l2cap_cmd_hdr *cmd,
5473 				  u16 cmd_len, void *data)
5474 {
5475 	struct l2cap_move_chan_rsp *rsp = data;
5476 	u16 icid, result;
5477 
5478 	if (cmd_len != sizeof(*rsp))
5479 		return -EPROTO;
5480 
5481 	icid = le16_to_cpu(rsp->icid);
5482 	result = le16_to_cpu(rsp->result);
5483 
5484 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5485 
5486 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5487 		l2cap_move_continue(conn, icid, result);
5488 	else
5489 		l2cap_move_fail(conn, cmd->ident, icid, result);
5490 
5491 	return 0;
5492 }
5493 
/* Handle an incoming Move Channel Confirm.
 *
 * The confirm carries the initiator's final verdict: on
 * L2CAP_MC_CONFIRMED the move to chan->move_id takes effect (releasing
 * the logical link when the channel ends up on BR/EDR); any other
 * result reverts move_id to the current controller.  A confirm
 * response is always sent, even when no channel matches the icid.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	/* Returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5536 
/* Handle a Move Channel Confirm Response, the final PDU of a channel
 * move.  The peer has acknowledged our confirm, so commit the
 * controller switch; when the channel moved (back) to BR/EDR and a
 * high-speed logical link is still attached, release it.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	/* Returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5572 
/* Handle an LE Connection Parameter Update Request.
 *
 * Only accepted when we are the master of the link; otherwise -EINVAL
 * is returned, which makes the caller send a Command Reject.  The
 * requested parameters are validated with hci_check_conn_params() and
 * the verdict is reported in the response.  On acceptance the
 * controller is asked to apply the update and the parameters are
 * forwarded to the management interface together with the store hint.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
5622 
/* Handle an LE Credit Based Connection Response for a connection
 * request we sent earlier (matched by signalling ident).
 *
 * On success the peer's dcid/mtu/mps/initial credits are recorded and
 * the channel becomes ready.  On an authentication or encryption
 * failure the channel's security level is raised (unless MITM-level
 * security is already active, in which case nothing more can be done)
 * and SMP is asked to elevate link security; FLAG_LE_CONN_REQ_SENT is
 * cleared so a new Connect Request goes out afterwards.  Any other
 * result refuses the channel.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* A successful response must carry sane mtu/mps values and a
	 * dcid within the LE dynamic CID range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* The dcid must not collide with an existing channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5709 
5710 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5711 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5712 				      u8 *data)
5713 {
5714 	int err = 0;
5715 
5716 	switch (cmd->code) {
5717 	case L2CAP_COMMAND_REJ:
5718 		l2cap_command_rej(conn, cmd, cmd_len, data);
5719 		break;
5720 
5721 	case L2CAP_CONN_REQ:
5722 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5723 		break;
5724 
5725 	case L2CAP_CONN_RSP:
5726 	case L2CAP_CREATE_CHAN_RSP:
5727 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5728 		break;
5729 
5730 	case L2CAP_CONF_REQ:
5731 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5732 		break;
5733 
5734 	case L2CAP_CONF_RSP:
5735 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5736 		break;
5737 
5738 	case L2CAP_DISCONN_REQ:
5739 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5740 		break;
5741 
5742 	case L2CAP_DISCONN_RSP:
5743 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5744 		break;
5745 
5746 	case L2CAP_ECHO_REQ:
5747 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5748 		break;
5749 
5750 	case L2CAP_ECHO_RSP:
5751 		break;
5752 
5753 	case L2CAP_INFO_REQ:
5754 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5755 		break;
5756 
5757 	case L2CAP_INFO_RSP:
5758 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5759 		break;
5760 
5761 	case L2CAP_CREATE_CHAN_REQ:
5762 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5763 		break;
5764 
5765 	case L2CAP_MOVE_CHAN_REQ:
5766 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5767 		break;
5768 
5769 	case L2CAP_MOVE_CHAN_RSP:
5770 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5771 		break;
5772 
5773 	case L2CAP_MOVE_CHAN_CFM:
5774 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5775 		break;
5776 
5777 	case L2CAP_MOVE_CHAN_CFM_RSP:
5778 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5779 		break;
5780 
5781 	default:
5782 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5783 		err = -EINVAL;
5784 		break;
5785 	}
5786 
5787 	return err;
5788 }
5789 
/* Handle an LE Credit Based Connection Request from the peer.
 *
 * Validates the PDU (fixed size, minimum mtu/mps of 23, SPSM in the
 * valid range, scid in the LE dynamic CID range and not already in
 * use), finds a channel listening on the PSM, verifies the link's
 * security satisfies the listener, and creates the new channel.  With
 * FLAG_DEFER_SETUP the accept decision is handed to userspace and no
 * response is sent here - the L2CAP_CR_PEND result is only used
 * internally to mark that case; otherwise the channel is made ready
 * and a response carrying our dcid/mtu/mps/credits is sent.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	/* Defaults reported in a reject response */
	dcid = 0;
	credits = 0;

	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Remember the request ident so the deferred accept path can
	 * address its response later.
	 */
	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5928 
/* Handle an LE Flow Control Credit packet.
 *
 * Adds the peer's credit grant to the channel's tx_credits and resumes
 * transmission that was stalled waiting for credits.  A grant that
 * would push the total above LE_FLOWCTL_MAX_CREDITS is a protocol
 * violation and disconnects the channel (still returning 0 so no
 * Command Reject is sent on top of the disconnect).
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid	= __le16_to_cpu(pkt->cid);
	credits	= __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	/* Returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		goto unlock;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5975 
5976 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
5977 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5978 				       u8 *data)
5979 {
5980 	struct l2cap_ecred_conn_req *req = (void *) data;
5981 	struct {
5982 		struct l2cap_ecred_conn_rsp rsp;
5983 		__le16 dcid[L2CAP_ECRED_MAX_CID];
5984 	} __packed pdu;
5985 	struct l2cap_chan *chan, *pchan;
5986 	u16 mtu, mps;
5987 	__le16 psm;
5988 	u8 result, len = 0;
5989 	int i, num_scid;
5990 	bool defer = false;
5991 
5992 	if (!enable_ecred)
5993 		return -EINVAL;
5994 
5995 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
5996 		result = L2CAP_CR_LE_INVALID_PARAMS;
5997 		goto response;
5998 	}
5999 
6000 	cmd_len -= sizeof(*req);
6001 	num_scid = cmd_len / sizeof(u16);
6002 
6003 	if (num_scid > ARRAY_SIZE(pdu.dcid)) {
6004 		result = L2CAP_CR_LE_INVALID_PARAMS;
6005 		goto response;
6006 	}
6007 
6008 	mtu  = __le16_to_cpu(req->mtu);
6009 	mps  = __le16_to_cpu(req->mps);
6010 
6011 	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
6012 		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
6013 		goto response;
6014 	}
6015 
6016 	psm  = req->psm;
6017 
6018 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
6019 	 * page 1059:
6020 	 *
6021 	 * Valid range: 0x0001-0x00ff
6022 	 *
6023 	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
6024 	 */
6025 	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
6026 		result = L2CAP_CR_LE_BAD_PSM;
6027 		goto response;
6028 	}
6029 
6030 	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
6031 
6032 	memset(&pdu, 0, sizeof(pdu));
6033 
6034 	/* Check if we have socket listening on psm */
6035 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
6036 					 &conn->hcon->dst, LE_LINK);
6037 	if (!pchan) {
6038 		result = L2CAP_CR_LE_BAD_PSM;
6039 		goto response;
6040 	}
6041 
6042 	mutex_lock(&conn->chan_lock);
6043 	l2cap_chan_lock(pchan);
6044 
6045 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
6046 				     SMP_ALLOW_STK)) {
6047 		result = L2CAP_CR_LE_AUTHENTICATION;
6048 		goto unlock;
6049 	}
6050 
6051 	result = L2CAP_CR_LE_SUCCESS;
6052 
6053 	for (i = 0; i < num_scid; i++) {
6054 		u16 scid = __le16_to_cpu(req->scid[i]);
6055 
6056 		BT_DBG("scid[%d] 0x%4.4x", i, scid);
6057 
6058 		pdu.dcid[i] = 0x0000;
6059 		len += sizeof(*pdu.dcid);
6060 
6061 		/* Check for valid dynamic CID range */
6062 		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
6063 			result = L2CAP_CR_LE_INVALID_SCID;
6064 			continue;
6065 		}
6066 
6067 		/* Check if we already have channel with that dcid */
6068 		if (__l2cap_get_chan_by_dcid(conn, scid)) {
6069 			result = L2CAP_CR_LE_SCID_IN_USE;
6070 			continue;
6071 		}
6072 
6073 		chan = pchan->ops->new_connection(pchan);
6074 		if (!chan) {
6075 			result = L2CAP_CR_LE_NO_MEM;
6076 			continue;
6077 		}
6078 
6079 		bacpy(&chan->src, &conn->hcon->src);
6080 		bacpy(&chan->dst, &conn->hcon->dst);
6081 		chan->src_type = bdaddr_src_type(conn->hcon);
6082 		chan->dst_type = bdaddr_dst_type(conn->hcon);
6083 		chan->psm  = psm;
6084 		chan->dcid = scid;
6085 		chan->omtu = mtu;
6086 		chan->remote_mps = mps;
6087 
6088 		__l2cap_chan_add(conn, chan);
6089 
6090 		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
6091 
6092 		/* Init response */
6093 		if (!pdu.rsp.credits) {
6094 			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
6095 			pdu.rsp.mps = cpu_to_le16(chan->mps);
6096 			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
6097 		}
6098 
6099 		pdu.dcid[i] = cpu_to_le16(chan->scid);
6100 
6101 		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
6102 
6103 		chan->ident = cmd->ident;
6104 
6105 		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6106 			l2cap_state_change(chan, BT_CONNECT2);
6107 			defer = true;
6108 			chan->ops->defer(chan);
6109 		} else {
6110 			l2cap_chan_ready(chan);
6111 		}
6112 	}
6113 
6114 unlock:
6115 	l2cap_chan_unlock(pchan);
6116 	mutex_unlock(&conn->chan_lock);
6117 	l2cap_chan_put(pchan);
6118 
6119 response:
6120 	pdu.rsp.result = cpu_to_le16(result);
6121 
6122 	if (defer)
6123 		return 0;
6124 
6125 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
6126 		       sizeof(pdu.rsp) + len, &pdu);
6127 
6128 	return 0;
6129 }
6130 
/* Handle an Enhanced Credit Based Connection Response.
 *
 * Walks every channel still pending on the signalling ident of our
 * request; the i-th pending channel consumes the i-th dcid of the
 * response.  Channels with no corresponding dcid, with a dcid already
 * in use, or refused by the result code are deleted.  On an
 * authentication/encryption failure the security level is raised and
 * SMP re-engaged so the request can be resent; accepted channels
 * record the peer's dcid/mtu/mps/credits and become ready.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	mutex_lock(&conn->chan_lock);

	/* cmd_len now tracks the remaining dcid bytes in the response */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return err;
}
6244 
6245 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6246 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6247 					 u8 *data)
6248 {
6249 	struct l2cap_ecred_reconf_req *req = (void *) data;
6250 	struct l2cap_ecred_reconf_rsp rsp;
6251 	u16 mtu, mps, result;
6252 	struct l2cap_chan *chan;
6253 	int i, num_scid;
6254 
6255 	if (!enable_ecred)
6256 		return -EINVAL;
6257 
6258 	if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) {
6259 		result = L2CAP_CR_LE_INVALID_PARAMS;
6260 		goto respond;
6261 	}
6262 
6263 	mtu = __le16_to_cpu(req->mtu);
6264 	mps = __le16_to_cpu(req->mps);
6265 
6266 	BT_DBG("mtu %u mps %u", mtu, mps);
6267 
6268 	if (mtu < L2CAP_ECRED_MIN_MTU) {
6269 		result = L2CAP_RECONF_INVALID_MTU;
6270 		goto respond;
6271 	}
6272 
6273 	if (mps < L2CAP_ECRED_MIN_MPS) {
6274 		result = L2CAP_RECONF_INVALID_MPS;
6275 		goto respond;
6276 	}
6277 
6278 	cmd_len -= sizeof(*req);
6279 	num_scid = cmd_len / sizeof(u16);
6280 	result = L2CAP_RECONF_SUCCESS;
6281 
6282 	for (i = 0; i < num_scid; i++) {
6283 		u16 scid;
6284 
6285 		scid = __le16_to_cpu(req->scid[i]);
6286 		if (!scid)
6287 			return -EPROTO;
6288 
6289 		chan = __l2cap_get_chan_by_dcid(conn, scid);
6290 		if (!chan)
6291 			continue;
6292 
6293 		/* If the MTU value is decreased for any of the included
6294 		 * channels, then the receiver shall disconnect all
6295 		 * included channels.
6296 		 */
6297 		if (chan->omtu > mtu) {
6298 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
6299 			       chan->omtu, mtu);
6300 			result = L2CAP_RECONF_INVALID_MTU;
6301 		}
6302 
6303 		chan->omtu = mtu;
6304 		chan->remote_mps = mps;
6305 	}
6306 
6307 respond:
6308 	rsp.result = cpu_to_le16(result);
6309 
6310 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
6311 		       &rsp);
6312 
6313 	return 0;
6314 }
6315 
6316 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6317 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6318 					 u8 *data)
6319 {
6320 	struct l2cap_chan *chan, *tmp;
6321 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6322 	u16 result;
6323 
6324 	if (cmd_len < sizeof(*rsp))
6325 		return -EPROTO;
6326 
6327 	result = __le16_to_cpu(rsp->result);
6328 
6329 	BT_DBG("result 0x%4.4x", rsp->result);
6330 
6331 	if (!result)
6332 		return 0;
6333 
6334 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6335 		if (chan->ident != cmd->ident)
6336 			continue;
6337 
6338 		l2cap_chan_del(chan, ECONNRESET);
6339 	}
6340 
6341 	return 0;
6342 }
6343 
6344 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6345 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6346 				       u8 *data)
6347 {
6348 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6349 	struct l2cap_chan *chan;
6350 
6351 	if (cmd_len < sizeof(*rej))
6352 		return -EPROTO;
6353 
6354 	mutex_lock(&conn->chan_lock);
6355 
6356 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6357 	if (!chan)
6358 		goto done;
6359 
6360 	l2cap_chan_lock(chan);
6361 	l2cap_chan_del(chan, ECONNREFUSED);
6362 	l2cap_chan_unlock(chan);
6363 
6364 done:
6365 	mutex_unlock(&conn->chan_lock);
6366 	return 0;
6367 }
6368 
6369 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
6370 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6371 				   u8 *data)
6372 {
6373 	int err = 0;
6374 
6375 	switch (cmd->code) {
6376 	case L2CAP_COMMAND_REJ:
6377 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
6378 		break;
6379 
6380 	case L2CAP_CONN_PARAM_UPDATE_REQ:
6381 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
6382 		break;
6383 
6384 	case L2CAP_CONN_PARAM_UPDATE_RSP:
6385 		break;
6386 
6387 	case L2CAP_LE_CONN_RSP:
6388 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
6389 		break;
6390 
6391 	case L2CAP_LE_CONN_REQ:
6392 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
6393 		break;
6394 
6395 	case L2CAP_LE_CREDITS:
6396 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
6397 		break;
6398 
6399 	case L2CAP_ECRED_CONN_REQ:
6400 		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
6401 		break;
6402 
6403 	case L2CAP_ECRED_CONN_RSP:
6404 		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
6405 		break;
6406 
6407 	case L2CAP_ECRED_RECONF_REQ:
6408 		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
6409 		break;
6410 
6411 	case L2CAP_ECRED_RECONF_RSP:
6412 		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
6413 		break;
6414 
6415 	case L2CAP_DISCONN_REQ:
6416 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
6417 		break;
6418 
6419 	case L2CAP_DISCONN_RSP:
6420 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
6421 		break;
6422 
6423 	default:
6424 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
6425 		err = -EINVAL;
6426 		break;
6427 	}
6428 
6429 	return err;
6430 }
6431 
/* Receive one PDU on the LE signaling channel.
 *
 * An LE signaling PDU carries exactly one command, so the length in
 * the command header must match the remaining skb exactly (compare
 * with l2cap_sig_channel(), where several commands may be packed into
 * one PDU).  If the handler fails, a Command Reject (not understood)
 * is sent back.  Always consumes the skb.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
6472 
/* Receive a PDU on the BR/EDR signaling channel.
 *
 * The skb is first handed to l2cap_raw_recv(), then each embedded
 * command is dispatched in turn - a single BR/EDR signaling PDU may
 * carry several commands back to back.  A failing handler triggers a
 * Command Reject (not understood) for that command; a command whose
 * declared length overruns the skb stops the loop.  Always consumes
 * the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance to the next embedded command */
		skb_pull(skb, len);
	}

drop:
	kfree_skb(skb);
}
6518 
6519 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
6520 {
6521 	u16 our_fcs, rcv_fcs;
6522 	int hdr_size;
6523 
6524 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6525 		hdr_size = L2CAP_EXT_HDR_SIZE;
6526 	else
6527 		hdr_size = L2CAP_ENH_HDR_SIZE;
6528 
6529 	if (chan->fcs == L2CAP_FCS_CRC16) {
6530 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6531 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6532 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6533 
6534 		if (our_fcs != rcv_fcs)
6535 			return -EBADMSG;
6536 	}
6537 	return 0;
6538 }
6539 
/* Answer a poll from the peer with a frame carrying the F-bit.
 *
 * When locally busy an RNR is sent immediately.  Clearing a
 * remote-busy condition restarts the retransmission timer if there
 * are unacked frames.  Pending I-frames are then (re)sent - one of
 * them may carry the F-bit - and if the F-bit is still owed after
 * that (CONN_SEND_FBIT still set), a plain RR delivers it.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
6573 
/* Link new_frag onto skb's fragment list and update the length
 * accounting.
 *
 * *last_frag tracks the tail of the chain; on the very first fragment
 * it points at skb itself (see l2cap_reassemble_sdu), so the
 * (*last_frag)->next assignment is what actually links new_frag in,
 * while skb_shinfo(skb)->frag_list is set up on that same first call.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
6592 
/* Reassemble an SDU from I-frames according to the SAR bits in
 * control.
 *
 * Unsegmented frames go straight to the channel's recv callback.  A
 * start frame carries the total SDU length and opens chan->sdu;
 * continuation and end fragments are chained onto it with
 * append_skb_frag().  When the accumulated length matches sdu_len at
 * the end fragment, the complete SDU is delivered.
 *
 * Setting skb to NULL marks that buffer ownership was transferred
 * (to the recv callback or to chan->sdu); on error the remaining skb
 * and any partial SDU are freed and the reassembly state is reset.
 *
 * Returns 0 on success or while more fragments are expected, -EINVAL
 * on SAR sequencing violations, -EMSGSIZE when the announced SDU
 * length exceeds our MTU, or an error from the recv callback.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A stale partial SDU means the peer violated SAR order */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* First fragment must be shorter than the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* Ownership moved to chan->sdu */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation may not complete (or overrun) the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end fragment must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
6677 
/* Resegment queued data after the channel MTU changed (e.g. following
 * a channel move).  Not implemented yet; returns 0 so callers treat
 * the (no-op) resegmentation as successful.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
6683 
6684 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6685 {
6686 	u8 event;
6687 
6688 	if (chan->mode != L2CAP_MODE_ERTM)
6689 		return;
6690 
6691 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6692 	l2cap_tx(chan, NULL, NULL, event);
6693 }
6694 
/* Drain in-sequence I-frames buffered in srej_q into SDU reassembly.
 *
 * Frames are pulled in buffer_seq order until a gap (missing txseq) is
 * hit, the channel enters local-busy, or reassembly fails.  Once the
 * SREJ queue is fully drained, the channel leaves the SREJ_SENT state
 * and acknowledges the received frames.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap found -- stay in SREJ recovery for the rest */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	/* All outstanding SREJ'd frames arrived: back to normal receive */
	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
6728 
/* Handle a received SREJ (Selective Reject) S-frame: retransmit the
 * single I-frame identified by @control->reqseq.
 *
 * Disconnects on protocol violations: a reqseq equal to next_tx_seq
 * (nothing with that number was sent yet) or a frame that has already
 * hit the max_tx retransmission limit.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* Frame already acked and released -- nothing to resend */
	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* P-bit set: respond with F-bit and retransmit */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		/* Remember the SREJ so a duplicate with F-bit is ignored */
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this is the F-bit
			 * answer to the SREJ we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6786 
/* Handle a received REJ (Reject) S-frame: retransmit all unacked
 * I-frames starting at @control->reqseq.
 *
 * Disconnects on an invalid reqseq (equal to next_tx_seq) or when the
 * rejected frame already reached the max_tx retransmission limit.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions; a missing skb
	 * means the frame was already acked, so no limit to enforce.
	 */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* F-bit answer to our poll: retransmit only if a REJ was
		 * not already acted upon.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6823 
/* Classify an incoming I-frame's txseq relative to the receive window.
 *
 * Returns one of the L2CAP_TXSEQ_* classifications: EXPECTED (next in
 * sequence), DUPLICATE (already received), UNEXPECTED (a gap precedes
 * it), the *_SREJ variants while SREJ recovery is active, or
 * INVALID/INVALID_IGNORE for frames outside the tx window (see the
 * "double poll" discussion below for when ignoring is safe).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		/* Head of the SREJ list is the retransmission we expect */
		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		/* Requested via SREJ, but out of the order we asked for */
		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq earlier than expected_tx_seq means a retransmission of
	 * something already received.
	 */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6909 
/* ERTM receive state machine handler for the normal RECV state.
 *
 * Processes one event (received I-frame or S-frame) for @chan.
 * Ownership of @skb transfers here: the frame is delivered to
 * reassembly, queued on srej_q, or freed at the end when unused
 * (tracked by skb_in_use).  @skb may be NULL for S-frame events.
 *
 * Returns 0 or a negative errno propagated from SDU reassembly.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 *   l2cap_reassemble_sdu
			 *     chan->ops->recv == l2cap_sock_recv_cb
			 *       __sock_queue_rcv_skb
			 * Another thread calls:
			 *   bt_sock_recvmsg
			 *     skb_recv_datagram
			 *     skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; still process its ack info */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote recovered from busy: restart the
			 * retransmission timer if frames are pending.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		/* Remote is busy: stop retransmitting until it recovers */
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Free the frame unless a branch above took ownership of it */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
7061 
/* ERTM receive state machine handler for the SREJ_SENT state, i.e.
 * while waiting for retransmission of selectively-rejected frames.
 *
 * Incoming I-frames are buffered on srej_q (in-order delivery happens
 * via l2cap_rx_queued_iframes once gaps close).  Ownership of @skb
 * transfers here; it is queued or freed before returning (tracked by
 * skb_in_use).  Returns 0 or a negative errno from reassembly.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The retransmission we asked for first arrived;
			 * drop it from the pending-SREJ list.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			/* Try to flush now-sequential frames upward */
			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Retransmit unless a REJ was already acted on */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Acknowledge without polling: plain RR with the
			 * current buffer_seq.
			 */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Free the frame unless a branch above took ownership of it */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
7204 
7205 static int l2cap_finish_move(struct l2cap_chan *chan)
7206 {
7207 	BT_DBG("chan %p", chan);
7208 
7209 	chan->rx_state = L2CAP_RX_STATE_RECV;
7210 
7211 	if (chan->hs_hcon)
7212 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7213 	else
7214 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7215 
7216 	return l2cap_resegment(chan);
7217 }
7218 
/* Receive handler for the WAIT_P state (channel move in progress,
 * waiting for a frame with the P-bit set from the remote).
 *
 * On receiving the poll, rewinds the transmit side to the remote's
 * reqseq, finalizes the move, answers with the F-bit and then lets the
 * normal RECV handler process the event.  Returns -EPROTO for frames
 * that are invalid in this state.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	/* Only a poll (P=1) is acceptable while waiting for one */
	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames are not expected while waiting for the poll */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
7256 
/* Receive handler for the WAIT_F state (channel move in progress,
 * waiting for a frame with the F-bit set answering our poll).
 *
 * On receiving the final bit, rewinds the transmit side to the
 * remote's reqseq, switches the MTU to the active link, resegments,
 * and re-dispatches the event through the normal RECV handler.
 * Returns -EPROTO for frames without the F-bit.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* MTU follows the newly active link (AMP block MTU or ACL MTU) */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
7294 
7295 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7296 {
7297 	/* Make sure reqseq is for a packet that has been sent but not acked */
7298 	u16 unacked;
7299 
7300 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7301 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
7302 }
7303 
7304 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7305 		    struct sk_buff *skb, u8 event)
7306 {
7307 	int err = 0;
7308 
7309 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7310 	       control, skb, event, chan->rx_state);
7311 
7312 	if (__valid_reqseq(chan, control->reqseq)) {
7313 		switch (chan->rx_state) {
7314 		case L2CAP_RX_STATE_RECV:
7315 			err = l2cap_rx_state_recv(chan, control, skb, event);
7316 			break;
7317 		case L2CAP_RX_STATE_SREJ_SENT:
7318 			err = l2cap_rx_state_srej_sent(chan, control, skb,
7319 						       event);
7320 			break;
7321 		case L2CAP_RX_STATE_WAIT_P:
7322 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
7323 			break;
7324 		case L2CAP_RX_STATE_WAIT_F:
7325 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
7326 			break;
7327 		default:
7328 			/* shut it down */
7329 			break;
7330 		}
7331 	} else {
7332 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
7333 		       control->reqseq, chan->next_tx_seq,
7334 		       chan->expected_ack_seq);
7335 		l2cap_send_disconn_req(chan, ECONNRESET);
7336 	}
7337 
7338 	return err;
7339 }
7340 
/* Streaming-mode receive path: deliver the expected in-sequence frame
 * to reassembly; on any sequence gap, drop the frame and reset the
 * partial SDU (streaming mode has no retransmission).  Always returns
 * 0; the frame is consumed either way.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 *   l2cap_reassemble_sdu
	 *     chan->ops->recv == l2cap_sock_recv_cb
	 *       __sock_queue_rcv_skb
	 * Another thread calls:
	 *   bt_sock_recvmsg
	 *     skb_recv_datagram
	 *     skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out of sequence: abandon any partial SDU and drop the
		 * frame -- streaming mode cannot recover missing data.
		 */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize expectations on the frame just seen */
	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
7392 
/* Validate and dispatch one ERTM/streaming-mode PDU.
 *
 * Checks the FCS, payload length against MPS, and F/P bit validity,
 * then routes I-frames to l2cap_rx()/l2cap_stream_rx() and S-frames
 * (mapped through rx_func_to_event) to l2cap_rx().  Consumes @skb on
 * all paths and always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Exclude the SDU-length header and FCS trailer from the
	 * payload length compared against the MPS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	/* Give the channel owner a chance to reject the frame */
	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps the S-frame "super" field to a receive event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
7485 
7486 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7487 {
7488 	struct l2cap_conn *conn = chan->conn;
7489 	struct l2cap_le_credits pkt;
7490 	u16 return_credits;
7491 
7492 	return_credits = (chan->imtu / chan->mps) + 1;
7493 
7494 	if (chan->rx_credits >= return_credits)
7495 		return;
7496 
7497 	return_credits -= chan->rx_credits;
7498 
7499 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7500 
7501 	chan->rx_credits += return_credits;
7502 
7503 	pkt.cid     = cpu_to_le16(chan->scid);
7504 	pkt.credits = cpu_to_le16(return_credits);
7505 
7506 	chan->ident = l2cap_get_ident(conn);
7507 
7508 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
7509 }
7510 
/* Deliver a fully reassembled LE/enhanced-credit SDU upward and then
 * replenish the sender's credits.  Returns the result of
 * chan->ops->recv(); @skb ownership passes to the recv callback.
 */
static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);

	/* Wait recv to confirm reception before updating the credits */
	err = chan->ops->recv(chan, skb);

	/* Update credits whenever an SDU is received */
	l2cap_chan_le_send_credits(chan);

	return err;
}
7525 
/* Receive one LE-flow-control / enhanced-credit PDU.
 *
 * Accounts for a credit per PDU, starts or continues SDU reassembly
 * (first PDU of an SDU carries a 2-byte SDU length), and delivers the
 * complete SDU via l2cap_ecred_recv().
 *
 * Returns a negative errno only for pre-consumption failures (no
 * credits, PDU larger than the MTU), in which case the caller still
 * owns and frees @skb.  Once processing starts, the skb is consumed
 * internally and 0 is returned even on reassembly errors (see the
 * comment before the final return).
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU: starts with the total SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Entire SDU fits in one PDU: deliver immediately */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* skb is now owned by the partial SDU */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			/* Reassembly complete; reset state */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		/* kfree_skb tolerates NULL for both pointers */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
7629 
/* Route an incoming data PDU to the channel identified by @cid and
 * dispatch it according to the channel mode.
 *
 * The channel returned by l2cap_get_chan_by_scid() (or created for
 * A2MP, which is explicitly held and locked here) is unlocked and its
 * reference dropped at done:, so every path must fall through to it.
 * Consumes @skb on all paths.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			/* Match the hold+lock that get_chan_by_scid does */
			l2cap_chan_hold(chan);
			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* A negative return means the skb was NOT consumed */
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
7706 
/* Deliver a connectionless (UCD) PDU to the global channel bound to
 * @psm, if any.  Only ACL links carry connectionless data.
 *
 * l2cap_global_chan_by_psm() returns a referenced channel, released on
 * every exit path; @skb is consumed (delivered or freed) on all paths.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* recv returning 0 means it took ownership of the skb */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
7743 
/* Parse the basic L2CAP header of a received frame and dispatch it to
 * the signaling, connectionless, LE-signaling, or data-channel
 * handler based on the CID.
 *
 * Frames arriving before the HCI connection reaches BT_CONNECTED are
 * queued on conn->pending_rx and replayed by process_pending_rx().
 * Consumes @skb on all paths.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* The header's length field must match the actual payload */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
7798 
7799 static void process_pending_rx(struct work_struct *work)
7800 {
7801 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7802 					       pending_rx_work);
7803 	struct sk_buff *skb;
7804 
7805 	BT_DBG("");
7806 
7807 	while ((skb = skb_dequeue(&conn->pending_rx)))
7808 		l2cap_recv_frame(conn, skb);
7809 }
7810 
/* Get or create the L2CAP connection object for an HCI connection.
 *
 * Returns the existing hcon->l2cap_data when one is already attached,
 * otherwise allocates a new l2cap_conn, binds it to a fresh HCI
 * channel and initialises its locks, lists and work items.  Returns
 * NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	/* Hold a reference on the underlying HCI connection */
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* Outgoing MTU follows the controller's buffer size */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* Controllers without a dedicated LE buffer fall back to
		 * the ACL MTU.
		 */
		fallthrough;
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	/* Advertise the fixed channels supported on this link */
	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	if (hcon->type == ACL_LINK &&
	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	/* Default disconnect reason until something more specific is known */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7877 
7878 static bool is_valid_psm(u16 psm, u8 dst_type)
7879 {
7880 	if (!psm)
7881 		return false;
7882 
7883 	if (bdaddr_type_is_le(dst_type))
7884 		return (psm <= 0x00ff);
7885 
7886 	/* PSM must be odd and lsb of upper byte must be 0 */
7887 	return ((psm & 0x0101) == 0x0001);
7888 }
7889 
/* Context passed to l2cap_chan_by_pid() when counting how many
 * deferred ECRED channels a single process is connecting.
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel being connected; excluded from the count */
	struct pid *pid;		/* owner PID to match against */
	int count;			/* number of matching channels found */
};
7895 
7896 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7897 {
7898 	struct l2cap_chan_data *d = data;
7899 	struct pid *pid;
7900 
7901 	if (chan == d->chan)
7902 		return;
7903 
7904 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7905 		return;
7906 
7907 	pid = chan->ops->get_peer_pid(chan);
7908 
7909 	/* Only count deferred channels with the same PID/PSM */
7910 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7911 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7912 		return;
7913 
7914 	d->count++;
7915 }
7916 
/* Initiate an outgoing L2CAP channel connection.
 *
 * Validates @psm/@cid against the channel type and mode, resolves a
 * route to @dst, creates (or reuses) the underlying HCI and L2CAP
 * connections and starts the channel state machine.  Returns 0 on
 * success (including when a connection attempt is already in
 * progress) or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* RAW channels may connect without any PSM/CID at all */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels require a PSM */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	/* Fixed channels require a CID */
	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes disabled by the disable_ertm/enable_ecred module
	 * parameters.
	 */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we connect directly; otherwise go
		 * through the passive-scan connect path.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level,
						   HCI_LE_CONN_TIMEOUT,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		/* Count ourselves as the first channel */
		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* A fixed CID must not collide with an existing channel */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
8102 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
8103 
8104 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
8105 {
8106 	struct l2cap_conn *conn = chan->conn;
8107 	struct {
8108 		struct l2cap_ecred_reconf_req req;
8109 		__le16 scid;
8110 	} pdu;
8111 
8112 	pdu.req.mtu = cpu_to_le16(chan->imtu);
8113 	pdu.req.mps = cpu_to_le16(chan->mps);
8114 	pdu.scid    = cpu_to_le16(chan->scid);
8115 
8116 	chan->ident = l2cap_get_ident(conn);
8117 
8118 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
8119 		       sizeof(pdu), &pdu);
8120 }
8121 
/* Raise the incoming MTU of an ECRED channel and notify the peer.
 *
 * Returns -EINVAL when @mtu is smaller than the current imtu (the MTU
 * may only grow here); otherwise stores the new value, sends an ECRED
 * reconfigure request and returns 0.
 */
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
{
	if (chan->imtu > mtu)
		return -EINVAL;

	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);

	chan->imtu = mtu;

	l2cap_ecred_reconfigure(chan);

	return 0;
}
8135 
8136 /* ---- L2CAP interface with lower layer (HCI) ---- */
8137 
8138 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8139 {
8140 	int exact = 0, lm1 = 0, lm2 = 0;
8141 	struct l2cap_chan *c;
8142 
8143 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8144 
8145 	/* Find listening sockets and check their link_mode */
8146 	read_lock(&chan_list_lock);
8147 	list_for_each_entry(c, &chan_list, global_l) {
8148 		if (c->state != BT_LISTEN)
8149 			continue;
8150 
8151 		if (!bacmp(&c->src, &hdev->bdaddr)) {
8152 			lm1 |= HCI_LM_ACCEPT;
8153 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8154 				lm1 |= HCI_LM_MASTER;
8155 			exact++;
8156 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
8157 			lm2 |= HCI_LM_ACCEPT;
8158 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8159 				lm2 |= HCI_LM_MASTER;
8160 		}
8161 	}
8162 	read_unlock(&chan_list_lock);
8163 
8164 	return exact ? lm1 : lm2;
8165 }
8166 
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * The returned channel carries a reference taken with
 * l2cap_chan_hold_unless_zero(); the caller must drop it with
 * l2cap_chan_put().  Returns NULL at the end of the list or when the
 * matching channel's refcount already reached zero.
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Match an exact source address or a wildcard binding */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* hold_unless_zero() returns NULL if the channel is
		 * already being freed.
		 */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
8202 
/* HCI callback: an ACL or LE connection attempt has completed.
 *
 * On failure the L2CAP connection state is torn down.  On success the
 * L2CAP connection is created (if needed) and every listening fixed
 * channel matching this link is offered a new connection.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping our reference on pchan */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
8263 
8264 int l2cap_disconn_ind(struct hci_conn *hcon)
8265 {
8266 	struct l2cap_conn *conn = hcon->l2cap_data;
8267 
8268 	BT_DBG("hcon %p", hcon);
8269 
8270 	if (!conn)
8271 		return HCI_ERROR_REMOTE_USER_TERM;
8272 	return conn->disc_reason;
8273 }
8274 
8275 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8276 {
8277 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8278 		return;
8279 
8280 	BT_DBG("hcon %p reason %d", hcon, reason);
8281 
8282 	l2cap_conn_del(hcon, bt_to_errno(reason));
8283 }
8284 
8285 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8286 {
8287 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8288 		return;
8289 
8290 	if (encrypt == 0x00) {
8291 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
8292 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8293 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
8294 			   chan->sec_level == BT_SECURITY_FIPS)
8295 			l2cap_chan_close(chan, ECONNREFUSED);
8296 	} else {
8297 		if (chan->sec_level == BT_SECURITY_MEDIUM)
8298 			__clear_chan_timer(chan);
8299 	}
8300 }
8301 
/* HCI callback: authentication/encryption completed on the link.
 *
 * Walks every channel of the connection and advances its state
 * machine: already-connected channels resume traffic, channels in
 * BT_CONNECT are started (or timed out on failure), and deferred
 * incoming requests in BT_CONNECT2 are answered with success or a
 * security block.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* Leave A2MP channels untouched */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Security succeeded: adopt the link's security level */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed or the encryption key is
				 * too short: refuse the connection.
				 */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Kick off configuration if we accepted and have
			 * not sent our config request yet.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
8393 
/* Append fragment into frame respecting the maximum len of rx_skb.
 *
 * When no reassembly buffer exists yet, one of @len bytes is
 * allocated and conn->rx_len primed with the expected total.  Returns
 * the number of bytes copied from @skb, or -ENOMEM on allocation
 * failure.  The copied bytes are consumed from @skb.
 */
static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
			   u16 len)
{
	if (!conn->rx_skb) {
		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			return -ENOMEM;
		/* Init rx_len */
		conn->rx_len = len;
	}

	/* Copy as much as the rx_skb can hold */
	len = min_t(u16, len, skb->len);
	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
	/* Consume the copied bytes and track what is still expected */
	skb_pull(skb, len);
	conn->rx_len -= len;

	return len;
}
8415 
/* Complete the 2-byte L2CAP length field of a frame being reassembled.
 *
 * Called for continuation fragments while rx_skb holds less than a
 * full length field.  Once the length is known, rx_skb is reallocated
 * if the current buffer cannot hold the whole frame.  Returns a
 * negative errno on failure, otherwise the number of bytes consumed.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
8450 
/* Throw away any partially reassembled frame */
static void l2cap_recv_reset(struct l2cap_conn *conn)
{
	kfree_skb(conn->rx_skb);
	conn->rx_skb = NULL;
	conn->rx_len = 0;
}
8457 
/* Entry point for ACL data delivered by the HCI core.
 *
 * Reassembles L2CAP frames that arrive split over several ACL
 * fragments: start fragments (re)initialise conn->rx_skb,
 * continuation fragments are appended until conn->rx_len reaches
 * zero, and the complete frame is then dispatched via
 * l2cap_recv_frame().  @skb is freed here unless it is a complete
 * unfragmented frame handed directly to l2cap_recv_frame().
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start fragment while reassembly is in progress means
		 * the previous frame was never completed.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

	/* Fragments were copied into rx_skb, so the original skb is
	 * always freed here.
	 */
drop:
	kfree_skb(skb);
}
8564 
/* HCI callbacks hooking L2CAP into connection and security events */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
8571 
8572 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8573 {
8574 	struct l2cap_chan *c;
8575 
8576 	read_lock(&chan_list_lock);
8577 
8578 	list_for_each_entry(c, &chan_list, global_l) {
8579 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8580 			   &c->src, c->src_type, &c->dst, c->dst_type,
8581 			   c->state, __le16_to_cpu(c->psm),
8582 			   c->scid, c->dcid, c->imtu, c->omtu,
8583 			   c->sec_level, c->mode);
8584 	}
8585 
8586 	read_unlock(&chan_list_lock);
8587 
8588 	return 0;
8589 }
8590 
/* Generates l2cap_debugfs_fops wrapping l2cap_debugfs_show() */
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

static struct dentry *l2cap_debugfs;
8594 
8595 int __init l2cap_init(void)
8596 {
8597 	int err;
8598 
8599 	err = l2cap_init_sockets();
8600 	if (err < 0)
8601 		return err;
8602 
8603 	hci_register_cb(&l2cap_cb);
8604 
8605 	if (IS_ERR_OR_NULL(bt_debugfs))
8606 		return 0;
8607 
8608 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8609 					    NULL, &l2cap_debugfs_fops);
8610 
8611 	return 0;
8612 }
8613 
/* Module exit: tear down in reverse order of l2cap_init() */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
8620 
/* Module parameters backing the globals declared at the top of the file */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
8626