xref: /openbmc/linux/net/bluetooth/l2cap_core.c (revision 81035e12)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 bool enable_ecred;
49 
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
54 
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 				       u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
58 			   void *data);
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
61 
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 		     struct sk_buff_head *skbs, u8 event);
64 static void l2cap_retrans_timeout(struct work_struct *work);
65 static void l2cap_monitor_timeout(struct work_struct *work);
66 static void l2cap_ack_timeout(struct work_struct *work);
67 
68 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
69 {
70 	if (link_type == LE_LINK) {
71 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
72 			return BDADDR_LE_PUBLIC;
73 		else
74 			return BDADDR_LE_RANDOM;
75 	}
76 
77 	return BDADDR_BREDR;
78 }
79 
80 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
81 {
82 	return bdaddr_type(hcon->type, hcon->src_type);
83 }
84 
85 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
86 {
87 	return bdaddr_type(hcon->type, hcon->dst_type);
88 }
89 
90 /* ---- L2CAP channels ---- */
91 
92 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
93 						   u16 cid)
94 {
95 	struct l2cap_chan *c;
96 
97 	list_for_each_entry(c, &conn->chan_l, list) {
98 		if (c->dcid == cid)
99 			return c;
100 	}
101 	return NULL;
102 }
103 
104 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 						   u16 cid)
106 {
107 	struct l2cap_chan *c;
108 
109 	list_for_each_entry(c, &conn->chan_l, list) {
110 		if (c->scid == cid)
111 			return c;
112 	}
113 	return NULL;
114 }
115 
116 /* Find channel with given SCID.
117  * Returns a reference locked channel.
118  */
119 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
120 						 u16 cid)
121 {
122 	struct l2cap_chan *c;
123 
124 	mutex_lock(&conn->chan_lock);
125 	c = __l2cap_get_chan_by_scid(conn, cid);
126 	if (c) {
127 		/* Only lock if chan reference is not 0 */
128 		c = l2cap_chan_hold_unless_zero(c);
129 		if (c)
130 			l2cap_chan_lock(c);
131 	}
132 	mutex_unlock(&conn->chan_lock);
133 
134 	return c;
135 }
136 
137 /* Find channel with given DCID.
138  * Returns a reference locked channel.
139  */
140 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
141 						 u16 cid)
142 {
143 	struct l2cap_chan *c;
144 
145 	mutex_lock(&conn->chan_lock);
146 	c = __l2cap_get_chan_by_dcid(conn, cid);
147 	if (c) {
148 		/* Only lock if chan reference is not 0 */
149 		c = l2cap_chan_hold_unless_zero(c);
150 		if (c)
151 			l2cap_chan_lock(c);
152 	}
153 	mutex_unlock(&conn->chan_lock);
154 
155 	return c;
156 }
157 
158 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
159 						    u8 ident)
160 {
161 	struct l2cap_chan *c;
162 
163 	list_for_each_entry(c, &conn->chan_l, list) {
164 		if (c->ident == ident)
165 			return c;
166 	}
167 	return NULL;
168 }
169 
170 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
171 						  u8 ident)
172 {
173 	struct l2cap_chan *c;
174 
175 	mutex_lock(&conn->chan_lock);
176 	c = __l2cap_get_chan_by_ident(conn, ident);
177 	if (c) {
178 		/* Only lock if chan reference is not 0 */
179 		c = l2cap_chan_hold_unless_zero(c);
180 		if (c)
181 			l2cap_chan_lock(c);
182 	}
183 	mutex_unlock(&conn->chan_lock);
184 
185 	return c;
186 }
187 
188 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
189 						      u8 src_type)
190 {
191 	struct l2cap_chan *c;
192 
193 	list_for_each_entry(c, &chan_list, global_l) {
194 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
195 			continue;
196 
197 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
198 			continue;
199 
200 		if (c->sport == psm && !bacmp(&c->src, src))
201 			return c;
202 	}
203 	return NULL;
204 }
205 
/* Bind @chan to PSM @psm on source address @src, or, when @psm is 0,
 * allocate a free PSM from the dynamic range for the channel's
 * transport. Returns 0 on success, -EADDRINUSE if the requested PSM
 * is already bound, or -EINVAL if the dynamic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	/* Explicitly requested PSM: fail if already taken on @src */
	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			/* Step by 2 keeps the least significant octet odd,
			 * as required for valid BR/EDR PSMs */
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		err = -EINVAL;
		/* Scan the dynamic range for the first unused PSM */
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
250 
251 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
252 {
253 	write_lock(&chan_list_lock);
254 
255 	/* Override the defaults (which are for conn-oriented) */
256 	chan->omtu = L2CAP_DEFAULT_MTU;
257 	chan->chan_type = L2CAP_CHAN_FIXED;
258 
259 	chan->scid = scid;
260 
261 	write_unlock(&chan_list_lock);
262 
263 	return 0;
264 }
265 
266 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
267 {
268 	u16 cid, dyn_end;
269 
270 	if (conn->hcon->type == LE_LINK)
271 		dyn_end = L2CAP_CID_LE_DYN_END;
272 	else
273 		dyn_end = L2CAP_CID_DYN_END;
274 
275 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
276 		if (!__l2cap_get_chan_by_scid(conn, cid))
277 			return cid;
278 	}
279 
280 	return 0;
281 }
282 
283 static void l2cap_state_change(struct l2cap_chan *chan, int state)
284 {
285 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
286 	       state_to_string(state));
287 
288 	chan->state = state;
289 	chan->ops->state_change(chan, state, 0);
290 }
291 
292 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
293 						int state, int err)
294 {
295 	chan->state = state;
296 	chan->ops->state_change(chan, chan->state, err);
297 }
298 
299 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
300 {
301 	chan->ops->state_change(chan, chan->state, err);
302 }
303 
/* Arm the ERTM retransmission timer, unless the monitor timer is
 * already pending (the monitor timer supersedes retransmission) or no
 * retransmission timeout has been configured for the channel.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}

/* Arm the ERTM monitor timer, cancelling any pending retransmission
 * timer first. Does nothing if no monitor timeout is configured.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
321 
322 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
323 					       u16 seq)
324 {
325 	struct sk_buff *skb;
326 
327 	skb_queue_walk(head, skb) {
328 		if (bt_cb(skb)->l2cap.txseq == seq)
329 			return skb;
330 	}
331 
332 	return NULL;
333 }
334 
335 /* ---- L2CAP sequence number lists ---- */
336 
337 /* For ERTM, ordered lists of sequence numbers must be tracked for
338  * SREJ requests that are received and for frames that are to be
339  * retransmitted. These seq_list functions implement a singly-linked
340  * list in an array, where membership in the list can also be checked
341  * in constant time. Items can also be added to the tail of the list
342  * and removed from the head in constant time, without further memory
343  * allocs or frees.
344  */
345 
346 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
347 {
348 	size_t alloc_size, i;
349 
350 	/* Allocated size is a power of 2 to map sequence numbers
351 	 * (which may be up to 14 bits) in to a smaller array that is
352 	 * sized for the negotiated ERTM transmit windows.
353 	 */
354 	alloc_size = roundup_pow_of_two(size);
355 
356 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
357 	if (!seq_list->list)
358 		return -ENOMEM;
359 
360 	seq_list->mask = alloc_size - 1;
361 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
363 	for (i = 0; i < alloc_size; i++)
364 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
365 
366 	return 0;
367 }
368 
369 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
370 {
371 	kfree(seq_list->list);
372 }
373 
374 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
375 					   u16 seq)
376 {
377 	/* Constant-time check for list membership */
378 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
379 }
380 
/* Remove and return the sequence number at the head of the list.
 * Assumes the list is non-empty; popping the last entry (marked with
 * the TAIL sentinel) resets the list to the empty (CLEAR) state.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry and clear the old slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		/* We just popped the final entry; mark the list empty */
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}

/* Empty the list, clearing every slot in the backing array.
 * No-op if the list is already empty.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}

/* Append @seq to the tail of the list. Duplicate appends are ignored
 * (membership is tracked in the slot itself).
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
428 
/* Delayed work run when a channel's chan_timer expires. Closes the
 * channel with an error reason derived from its current state and
 * drops the reference taken when the timer was armed.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	/* Drop the reference taken when this work was scheduled */
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}
461 
/* Allocate and initialise a new channel object holding one reference,
 * register it on the global channel list and leave it in BT_OPEN
 * state. Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	/* Initial reference; released via l2cap_chan_put() */
	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
498 
499 static void l2cap_chan_destroy(struct kref *kref)
500 {
501 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
502 
503 	BT_DBG("chan %p", chan);
504 
505 	write_lock(&chan_list_lock);
506 	list_del(&chan->global_l);
507 	write_unlock(&chan_list_lock);
508 
509 	kfree(chan);
510 }
511 
512 void l2cap_chan_hold(struct l2cap_chan *c)
513 {
514 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
515 
516 	kref_get(&c->kref);
517 }
518 
519 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
520 {
521 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
522 
523 	if (!kref_get_unless_zero(&c->kref))
524 		return NULL;
525 
526 	return c;
527 }
528 
529 void l2cap_chan_put(struct l2cap_chan *c)
530 {
531 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
532 
533 	kref_put(&c->kref, l2cap_chan_destroy);
534 }
535 EXPORT_SYMBOL_GPL(l2cap_chan_put);
536 
/* Reset @chan's ERTM, security and flush parameters to their default
 * values; called before (re)configuring a channel.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	/* Until negotiation completes, mirror local values remotely */
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	chan->conf_state = 0;
	/* Cleared in l2cap_chan_ready() once configuration is done */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
557 
/* Initialise LE credit-based flow control state on @chan: reset SDU
 * reassembly, derive MPS from the connection MTU, and grant the peer
 * enough RX credits for one full SDU.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	/* Give enough credits for a full packet */
	chan->rx_credits = (chan->imtu / chan->mps) + 1;

	skb_queue_head_init(&chan->tx_q);
}

/* Initialise enhanced credit-based (ECRED) flow control state,
 * enforcing the spec-mandated minimum MPS on top of the LE defaults.
 */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		/* Recompute RX credits for the adjusted MPS */
		chan->rx_credits = (chan->imtu / chan->mps) + 1;
	}
}
582 
/* Attach @chan to @conn: assign CIDs according to the channel type,
 * set default extended flow spec (EFS) parameters, take a channel
 * reference and add the channel to the connection's channel list.
 * Caller must hold conn->chan_lock.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* Default disconnect reason until we learn a more specific one */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default extended flow spec parameters */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference dropped again in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}

/* Locked wrapper around __l2cap_chan_add() */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
641 
/* Detach @chan from its connection (if any), tear it down with @err,
 * and release mode-specific resources (ERTM timers/queues, flow
 * control queues). Caller must hold the channel lock.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	/* No mode-specific state to free if configuration never completed */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
710 
711 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
712 			      void *data)
713 {
714 	struct l2cap_chan *chan;
715 
716 	list_for_each_entry(chan, &conn->chan_l, list) {
717 		func(chan, data);
718 	}
719 }
720 
721 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
722 		     void *data)
723 {
724 	if (!conn)
725 		return;
726 
727 	mutex_lock(&conn->chan_lock);
728 	__l2cap_chan_list(conn, func, data);
729 	mutex_unlock(&conn->chan_lock);
730 }
731 
732 EXPORT_SYMBOL_GPL(l2cap_chan_list);
733 
/* Work item run when the identity address of the underlying HCI
 * connection changes; propagates the new destination address and
 * address type to every channel on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_update_work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
752 
/* Reject a pending LE credit-based connection request on @chan,
 * replying with "authorization" failure if the channel had deferred
 * setup pending, or "bad PSM" otherwise, then move to BT_DISCONN.
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_LE_AUTHORIZATION;
	else
		result = L2CAP_CR_LE_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid    = cpu_to_le16(chan->scid);
	rsp.mtu     = cpu_to_le16(chan->imtu);
	rsp.mps     = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
775 
776 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
777 {
778 	struct l2cap_conn *conn = chan->conn;
779 	struct l2cap_ecred_conn_rsp rsp;
780 	u16 result;
781 
782 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
783 		result = L2CAP_CR_LE_AUTHORIZATION;
784 	else
785 		result = L2CAP_CR_LE_BAD_PSM;
786 
787 	l2cap_state_change(chan, BT_DISCONN);
788 
789 	memset(&rsp, 0, sizeof(rsp));
790 
791 	rsp.result  = cpu_to_le16(result);
792 
793 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
794 		       &rsp);
795 }
796 
/* Reject a pending BR/EDR connection request on @chan, replying with
 * "security block" if the channel had deferred setup pending, or
 * "bad PSM" otherwise, then move the channel to BT_DISCONN.
 */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	/* scid/dcid are from the remote's point of view */
	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
817 
/* Close @chan for @reason, taking the action appropriate for its
 * current state: send a disconnect request, reject a not-yet-accepted
 * incoming connection, or simply tear the channel down.
 * Caller must hold the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Wait for the peer's disconnect response before
			 * the final teardown */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming connection not yet accepted: reject with a PDU
		 * matching the transport and channel mode */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					break;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
868 
/* Map the channel type, PSM and requested security level onto an HCI
 * authentication requirement. May raise a BT_SECURITY_LOW channel to
 * BT_SECURITY_SDP for SDP/3DSP PSMs as a side effect.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		/* 3DSP: treat default (LOW) security as SDP level */
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		/* SDP: treat default (LOW) security as SDP level */
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
920 
/* Service level security */

/* Request the security level configured on @chan from the underlying
 * link: SMP for LE links, HCI authentication for BR/EDR links.
 * Returns the result of the underlying security request.
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	/* LE links use SMP rather than BR/EDR authentication */
	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
935 
936 static u8 l2cap_get_ident(struct l2cap_conn *conn)
937 {
938 	u8 id;
939 
940 	/* Get next available identificator.
941 	 *    1 - 128 are used by kernel.
942 	 *  129 - 199 are reserved.
943 	 *  200 - 254 are used by utilities like l2ping, etc.
944 	 */
945 
946 	mutex_lock(&conn->ident_lock);
947 
948 	if (++conn->tx_ident > 128)
949 		conn->tx_ident = 1;
950 
951 	id = conn->tx_ident;
952 
953 	mutex_unlock(&conn->ident_lock);
954 
955 	return id;
956 }
957 
/* Build and transmit a signalling command PDU on @conn. The command
 * is silently dropped if the skb allocation fails.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Signalling traffic is high priority and must wake the radio */
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
982 
/* True while an AMP channel move is in progress on @chan */
static bool __chan_is_moving(struct l2cap_chan *chan)
{
	return chan->move_state != L2CAP_MOVE_STABLE &&
	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
}

/* Transmit @skb on @chan, routing it over the high-speed (AMP) link
 * when one is active and choosing ACL flush semantics based on the
 * link type and channel flags. Consumes @skb.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		/* High-speed path: send on the AMP logical link if one is
		 * set up, otherwise the frame cannot be delivered */
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
1020 
1021 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
1022 {
1023 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
1024 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
1025 
1026 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
1027 		/* S-Frame */
1028 		control->sframe = 1;
1029 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1030 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1031 
1032 		control->sar = 0;
1033 		control->txseq = 0;
1034 	} else {
1035 		/* I-Frame */
1036 		control->sframe = 0;
1037 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1038 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1039 
1040 		control->poll = 0;
1041 		control->super = 0;
1042 	}
1043 }
1044 
1045 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1046 {
1047 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1048 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1049 
1050 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1051 		/* S-Frame */
1052 		control->sframe = 1;
1053 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1054 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1055 
1056 		control->sar = 0;
1057 		control->txseq = 0;
1058 	} else {
1059 		/* I-Frame */
1060 		control->sframe = 0;
1061 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1062 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1063 
1064 		control->poll = 0;
1065 		control->super = 0;
1066 	}
1067 }
1068 
1069 static inline void __unpack_control(struct l2cap_chan *chan,
1070 				    struct sk_buff *skb)
1071 {
1072 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1073 		__unpack_extended_control(get_unaligned_le32(skb->data),
1074 					  &bt_cb(skb)->l2cap);
1075 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1076 	} else {
1077 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1078 					  &bt_cb(skb)->l2cap);
1079 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1080 	}
1081 }
1082 
1083 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1084 {
1085 	u32 packed;
1086 
1087 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1088 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1089 
1090 	if (control->sframe) {
1091 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1092 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1093 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1094 	} else {
1095 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1096 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1097 	}
1098 
1099 	return packed;
1100 }
1101 
1102 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1103 {
1104 	u16 packed;
1105 
1106 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1107 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1108 
1109 	if (control->sframe) {
1110 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1111 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1112 		packed |= L2CAP_CTRL_FRAME_TYPE;
1113 	} else {
1114 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1115 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1116 	}
1117 
1118 	return packed;
1119 }
1120 
1121 static inline void __pack_control(struct l2cap_chan *chan,
1122 				  struct l2cap_ctrl *control,
1123 				  struct sk_buff *skb)
1124 {
1125 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1126 		put_unaligned_le32(__pack_extended_control(control),
1127 				   skb->data + L2CAP_HDR_SIZE);
1128 	} else {
1129 		put_unaligned_le16(__pack_enhanced_control(control),
1130 				   skb->data + L2CAP_HDR_SIZE);
1131 	}
1132 }
1133 
1134 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1135 {
1136 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1137 		return L2CAP_EXT_HDR_SIZE;
1138 	else
1139 		return L2CAP_ENH_HDR_SIZE;
1140 }
1141 
/* Allocate and build a complete S-frame PDU (basic header, pre-packed
 * control field and optional FCS).  Returns ERR_PTR(-ENOMEM) on
 * allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	/* S-frames carry no payload: length is control field (+ FCS) only */
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers everything built so far (header + control) */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	/* S-frames are control traffic: queue them ahead of data */
	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1174 
/* Build and transmit the supervisory frame described by @control.
 * Also updates ERTM bookkeeping (pending F-bit, RNR-sent flag,
 * last acked sequence) as a side effect before sending.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* Suppress new frames while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is piggybacked on any non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* All S-frames except SREJ acknowledge reqseq, so a pending
	 * delayed ack becomes unnecessary.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1215 
1216 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1217 {
1218 	struct l2cap_ctrl control;
1219 
1220 	BT_DBG("chan %p, poll %d", chan, poll);
1221 
1222 	memset(&control, 0, sizeof(control));
1223 	control.sframe = 1;
1224 	control.poll = poll;
1225 
1226 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1227 		control.super = L2CAP_SUPER_RNR;
1228 	else
1229 		control.super = L2CAP_SUPER_RR;
1230 
1231 	control.reqseq = chan->buffer_seq;
1232 	l2cap_send_sframe(chan, &control);
1233 }
1234 
1235 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1236 {
1237 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1238 		return true;
1239 
1240 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1241 }
1242 
/* Return true if this channel may be created on / moved to an AMP
 * controller: both sides must advertise A2MP support, at least one
 * powered-up AMP (non-BR/EDR) controller must exist locally, and the
 * channel policy must prefer AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return false;

	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
		return false;

	/* Scan the controller list for any powered AMP controller */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1270 
/* Validate a channel's extended flow specification.  Currently a stub
 * that accepts any EFS parameters.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1276 
/* Send an L2CAP Connection Request for @chan.  Stores the allocated
 * command ident in chan->ident so the response can be matched, and
 * marks the connect as pending via CONF_CONNECT_PEND.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1291 
/* Send a Create Channel Request (the AMP variant of a Connection
 * Request) for @chan on the controller identified by @amp_id.
 */
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;
	req.amp_id = amp_id;

	/* Remember the ident so the response can be matched */
	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}
1304 
/* Prepare an ERTM channel for an AMP channel move: stop all timers,
 * rewind retransmission state, and park the TX/RX state machines in
 * their "move" states until the move completes.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Reset the retry count of every frame that was already sent at
	 * least once; the walk stops at the first never-sent frame
	 * (retries == 0), after which all frames are unsent.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->l2cap.retries)
			bt_cb(skb)->l2cap.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmissions until the move has completed */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1339 
/* Finish an AMP channel move: return the channel to the stable state
 * and, for ERTM channels, resynchronize with the peer - the move
 * initiator sends an explicit poll and waits for F=1, the responder
 * waits for that poll.
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1361 
/* Transition a channel to BT_CONNECTED and notify its owner via the
 * ->ready() callback.  Credit-based channels that currently hold no
 * TX credits are suspended until credits arrive.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* No credits from the peer yet: block sending for now */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1387 
/* Send an LE Credit Based Connection Request for @chan.  Guarded by
 * FLAG_LE_CONN_REQ_SENT so the request goes out at most once per
 * channel.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Default the incoming MTU to the link MTU if unset */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	/* Remember the ident so the response can be matched */
	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1412 
/* Scratch state for building an Enhanced Credit Based Connection
 * Request that batches the initiating channel plus deferred channels
 * (same PID and PSM) into a single PDU.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req req;
		__le16 scid[5];		/* up to 5 source CIDs per request */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel that initiated the request */
	struct pid *pid;		/* peer PID of the initiating channel */
	int count;			/* number of scid[] entries filled */
};
1422 
1423 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1424 {
1425 	struct l2cap_ecred_conn_data *conn = data;
1426 	struct pid *pid;
1427 
1428 	if (chan == conn->chan)
1429 		return;
1430 
1431 	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1432 		return;
1433 
1434 	pid = chan->ops->get_peer_pid(chan);
1435 
1436 	/* Only add deferred channels with the same PID/PSM */
1437 	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1438 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1439 		return;
1440 
1441 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1442 		return;
1443 
1444 	l2cap_ecred_init(chan, 0);
1445 
1446 	/* Set the same ident so we can match on the rsp */
1447 	chan->ident = conn->chan->ident;
1448 
1449 	/* Include all channels deferred */
1450 	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1451 
1452 	conn->count++;
1453 }
1454 
1455 static void l2cap_ecred_connect(struct l2cap_chan *chan)
1456 {
1457 	struct l2cap_conn *conn = chan->conn;
1458 	struct l2cap_ecred_conn_data data;
1459 
1460 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
1461 		return;
1462 
1463 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1464 		return;
1465 
1466 	l2cap_ecred_init(chan, 0);
1467 
1468 	memset(&data, 0, sizeof(data));
1469 	data.pdu.req.psm     = chan->psm;
1470 	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
1471 	data.pdu.req.mps     = cpu_to_le16(chan->mps);
1472 	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
1473 	data.pdu.scid[0]     = cpu_to_le16(chan->scid);
1474 
1475 	chan->ident = l2cap_get_ident(conn);
1476 	data.pid = chan->ops->get_peer_pid(chan);
1477 
1478 	data.count = 1;
1479 	data.chan = chan;
1480 	data.pid = chan->ops->get_peer_pid(chan);
1481 
1482 	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
1483 
1484 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
1485 		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
1486 		       &data.pdu);
1487 }
1488 
1489 static void l2cap_le_start(struct l2cap_chan *chan)
1490 {
1491 	struct l2cap_conn *conn = chan->conn;
1492 
1493 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1494 		return;
1495 
1496 	if (!chan->psm) {
1497 		l2cap_chan_ready(chan);
1498 		return;
1499 	}
1500 
1501 	if (chan->state == BT_CONNECT) {
1502 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1503 			l2cap_ecred_connect(chan);
1504 		else
1505 			l2cap_le_connect(chan);
1506 	}
1507 }
1508 
1509 static void l2cap_start_connection(struct l2cap_chan *chan)
1510 {
1511 	if (__amp_capable(chan)) {
1512 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1513 		a2mp_discover_amp(chan);
1514 	} else if (chan->conn->hcon->type == LE_LINK) {
1515 		l2cap_le_start(chan);
1516 	} else {
1517 		l2cap_send_conn_req(chan);
1518 	}
1519 }
1520 
/* Start the information request procedure (feature mask query) on a
 * BR/EDR connection.  Runs at most once per connection; a timer limits
 * how long we wait for the response.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	/* Give up waiting for a response after L2CAP_INFO_TIMEOUT */
	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1538 
1539 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1540 {
1541 	/* The minimum encryption key size needs to be enforced by the
1542 	 * host stack before establishing any L2CAP connections. The
1543 	 * specification in theory allows a minimum of 1, but to align
1544 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1545 	 *
1546 	 * This check might also be called for unencrypted connections
1547 	 * that have no key size requirements. Ensure that the link is
1548 	 * actually encrypted before enforcing a key size.
1549 	 */
1550 	int min_key_size = hcon->hdev->min_enc_key_size;
1551 
1552 	/* On FIPS security level, key size must be 16 bytes */
1553 	if (hcon->sec_level == BT_SECURITY_FIPS)
1554 		min_key_size = 16;
1555 
1556 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1557 		hcon->enc_key_size >= min_key_size);
1558 }
1559 
/* Progress an outgoing connection on @chan as far as the current link
 * state allows: LE links go straight to the LE path, BR/EDR links
 * first complete the info req/rsp procedure and then the security and
 * encryption key size checks.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	/* Info procedure still running; l2cap_conn_start() retries the
	 * pending channels when it completes or times out.
	 */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	if (l2cap_check_enc_key_size(conn->hcon))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1586 
1587 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1588 {
1589 	u32 local_feat_mask = l2cap_feat_mask;
1590 	if (!disable_ertm)
1591 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1592 
1593 	switch (mode) {
1594 	case L2CAP_MODE_ERTM:
1595 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1596 	case L2CAP_MODE_STREAMING:
1597 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1598 	default:
1599 		return 0x00;
1600 	}
1601 }
1602 
/* Send a Disconnection Request for @chan and move it to BT_DISCONN
 * with @err as the channel error.  The A2MP fixed channel has no
 * disconnect PDU and only changes state.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* ERTM timers are meaningless once disconnect starts */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1629 
1630 /* ---- L2CAP connections ---- */
/* Walk all channels on @conn and push each one forward: connectionless
 * channels become ready, BT_CONNECT channels (re)try the connect
 * procedure, and BT_CONNECT2 channels answer a pending incoming
 * Connection Request.  Called when the info procedure or a security
 * procedure completes.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe variant: channels may be closed (removed) during the walk */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* State 2 devices cannot fall back to basic mode,
			 * so close the channel if the negotiated mode is
			 * unavailable.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only a successful response proceeds to config */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1710 
/* LE link just came up: trigger pending outgoing security and, when
 * acting as peripheral, request a connection parameter update if the
 * current interval is outside the configured range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1743 
/* The underlying link is established: start the info procedure on
 * BR/EDR, progress every channel on the connection, then run LE
 * post-connect handling and release any queued inbound frames.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* The A2MP fixed channel is managed separately */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process frames that arrived before the conn was ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1784 
/* Notify sockets that we cannot guarantee reliability anymore */
/* Set @err on every channel of @conn that demanded reliable delivery
 * (FLAG_FORCE_RELIABLE), typically after transmit failures on the
 * underlying link.
 */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1801 
/* Info request timer expired: treat the feature mask procedure as done
 * (without any remote info) and let pending channels proceed.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1812 
1813 /*
1814  * l2cap_user
1815  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1816  * callback is called during registration. The ->remove callback is called
1817  * during unregistration.
 * An l2cap_user object can either be unregistered explicitly, or it is
 * unregistered implicitly when the underlying l2cap_conn object is
 * deleted. This guarantees that l2cap->hcon,
1820  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1821  * External modules must own a reference to the l2cap_conn object if they intend
1822  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1823  * any time if they don't.
1824  */
1825 
/* Register @user on @conn and invoke its ->probe callback.  Returns 0
 * on success, -EINVAL if the user is already registered, -ENODEV if
 * the connection has already been torn down, or the ->probe error.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-empty list node means the user is already registered */
	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1863 
/* Unregister @user from @conn and invoke its ->remove callback.  Safe
 * to call for a user that was never registered or was already removed
 * (the empty list node makes this a no-op).
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (list_empty(&user->list))
		goto out_unlock;

	list_del_init(&user->list);
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
1880 
/* Remove every registered l2cap_user from @conn, calling each one's
 * ->remove callback.  Caller must hold the locks described in
 * l2cap_register_user().
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del_init(&user->list);
		user->remove(conn, user);
	}
}
1891 
/* Tear down the L2CAP state attached to @hcon: cancel pending work,
 * unregister users, close every channel with @err, and drop the
 * l2cap_conn reference held by the hci_conn.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so ->close() can't be the last put */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1947 
/* kref release callback for l2cap_conn: drop the hci_conn reference
 * and free the connection object itself.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1955 
/* Take a reference on @conn; pair each call with l2cap_conn_put(). */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1962 
/* Drop a reference on @conn; frees it via l2cap_conn_free() when the
 * last reference goes away.
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1968 
1969 /* ---- Socket interface ---- */
1970 
1971 /* Find socket with psm and source / destination bdaddr.
1972  * Returns closest match.
1973  */
/* Look up a global (listening/connected) channel by PSM and address
 * pair, restricted to channels compatible with @link_type.  An exact
 * src/dst match wins; otherwise the best wildcard (BDADDR_ANY) match
 * is returned.  The returned channel carries a reference the caller
 * must drop.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* BR/EDR links only match BR/EDR sources, LE links
		 * only match LE sources.
		 */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Skip channels already being freed */
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
2024 
/* ERTM monitor timer: fires while waiting for the peer to answer a
 * poll.  Feeds the TX state machine unless the channel has already
 * lost its connection.  The timer holds a channel reference which is
 * dropped before returning.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
2045 
/* ERTM retransmission timer: fires when sent I-frames stay unacked
 * too long.  Feeds the TX state machine unless the channel has already
 * lost its connection.  The timer holds a channel reference which is
 * dropped before returning.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
2065 
/* Streaming mode transmit: append @skbs to the TX queue and send every
 * queued frame immediately, stamping each with the next TxSeq and an
 * optional FCS.  No acknowledgements or retransmissions in this mode.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Suppress sending while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
2104 
/* ERTM transmit: send I-frames from tx_send_head while the remote TX
 * window has room and the TX state machine permits transmission.
 * Each frame gets the current reqseq/txseq, an optional FCS, and is
 * sent as a clone so the original stays queued for retransmission.
 * Returns the number of frames sent, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	/* No new frames while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggyback a pending F-bit on the outgoing I-frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head to the next unsent frame */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2174 
/* Retransmit every sequence number queued on chan->retrans_list.
 * Each frame is re-stamped with the current reqseq and F-bit, its FCS
 * recalculated, and sent again.  Exceeding max_tx retries triggers a
 * disconnect.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	/* No retransmissions while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh ack info and piggyback a pending F-bit */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2255 
/* Queue a single sequence number for retransmission (e.g. in response
 * to an SREJ from the peer) and run the resend machinery.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2264 
/* Queue every sent-but-unacknowledged I-frame, starting at
 * control->reqseq, for retransmission and run the resend machinery.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A received poll (P=1) must be answered with the F-bit set. */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Start from a clean list; everything is re-added below. */
	l2cap_seq_list_clear(&chan->retrans_list);

	/* Peer signalled Receiver-Not-Ready; hold retransmissions. */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Locate the frame with txseq == reqseq, stopping early
		 * at tx_send_head (frames from there on were never sent).
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue every already-sent frame from that point on. */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2298 
/* Acknowledge received I-frames.  When locally busy in the RECV state,
 * send an RNR.  Otherwise try to piggy-back the ack on pending
 * I-frames; if frames remain unacked, send an explicit RR once 3/4 of
 * the ack window is outstanding, else (re)arm the ack timer so further
 * acks can be coalesced.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2348 
/* Copy len bytes of user data from msg into skb: the first count bytes
 * go into skb's linear area, the remainder is chained as additional
 * sk_buffs (each up to conn->mtu bytes, no L2CAP header) on skb's
 * frag_list, with skb->len/data_len updated to cover them.
 *
 * Returns the number of bytes consumed, -EFAULT if copying from the
 * iterator fails, or the alloc_skb error.  On failure the callers free
 * skb, which also releases any fragments already chained to it.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
				   &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Account fragment bytes on the head skb */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2392 
/* Build a connectionless (G-frame) PDU: L2CAP header followed by the
 * 2-byte PSM, then len bytes of user data from msg.  Returns the skb
 * or an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Only what fits in one HCI fragment goes in the linear area */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2424 
2425 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2426 					      struct msghdr *msg, size_t len)
2427 {
2428 	struct l2cap_conn *conn = chan->conn;
2429 	struct sk_buff *skb;
2430 	int err, count;
2431 	struct l2cap_hdr *lh;
2432 
2433 	BT_DBG("chan %p len %zu", chan, len);
2434 
2435 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2436 
2437 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2438 				   msg->msg_flags & MSG_DONTWAIT);
2439 	if (IS_ERR(skb))
2440 		return skb;
2441 
2442 	/* Create L2CAP header */
2443 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2444 	lh->cid = cpu_to_le16(chan->dcid);
2445 	lh->len = cpu_to_le16(len);
2446 
2447 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2448 	if (unlikely(err < 0)) {
2449 		kfree_skb(skb);
2450 		return ERR_PTR(err);
2451 	}
2452 	return skb;
2453 }
2454 
/* Build one ERTM/streaming I-frame PDU: L2CAP header, zeroed control
 * field (filled in at transmit time), optional SDU length (first PDU of
 * a segmented SDU only), then user data.  Space for the FCS is
 * accounted in the header length but appended later.  Returns the skb
 * or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Enhanced or extended control field size */
	hlen = __ertm_hdr_size(chan);

	/* sdulen != 0 only on the first (SAR start) PDU of an SDU */
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2508 
/* Segment an outgoing SDU into I-frame PDUs appended to seg_queue,
 * tagging each with its SAR value (unsegmented, start, continue, end).
 * PDU payload size is bounded by the HCI MTU, the remote MPS, and the
 * ERTM header/FCS overhead.  Returns 0, or the PDU-creation error
 * after purging seg_queue.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU; no SDU length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the first PDU carries the total SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2575 
/* Build one LE flow control (K-frame) PDU: L2CAP header, optional SDU
 * length (first PDU of a segmented SDU only), then user data from msg.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	/* sdulen != 0 only on the first PDU of a segmented SDU */
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2618 
/* Segment an outgoing SDU for LE flow control mode into PDUs appended
 * to seg_queue.  The first PDU carries the total SDU length, so its
 * payload is L2CAP_SDULEN_SIZE smaller than the remote MPS; subsequent
 * PDUs use the full MPS.  Returns 0, or the PDU-creation error after
 * purging seg_queue.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		/* After the first PDU, drop the SDU length field and
		 * reclaim its space for payload.
		 */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2654 
2655 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2656 {
2657 	int sent = 0;
2658 
2659 	BT_DBG("chan %p", chan);
2660 
2661 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2662 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2663 		chan->tx_credits--;
2664 		sent++;
2665 	}
2666 
2667 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2668 	       skb_queue_len(&chan->tx_q));
2669 }
2670 
/* Entry point for sending an SDU on a channel.  Dispatches on channel
 * type and mode: connectionless, LE/extended flow control (credit
 * based), basic, or ERTM/streaming (segmented).  Returns the number of
 * bytes accepted or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel state may have changed while segmenting (see
		 * the connectionless case above for why).
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: ask the socket layer to stop accepting
		 * data until more credits arrive.
		 */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2798 
/* Send an SREJ S-frame for every missing sequence number between
 * expected_tx_seq and txseq (exclusive), skipping frames already held
 * in the srej_q, and remember each requested seq on srej_list.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	/* txseq itself has just been received */
	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2821 
2822 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2823 {
2824 	struct l2cap_ctrl control;
2825 
2826 	BT_DBG("chan %p", chan);
2827 
2828 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2829 		return;
2830 
2831 	memset(&control, 0, sizeof(control));
2832 	control.sframe = 1;
2833 	control.super = L2CAP_SUPER_SREJ;
2834 	control.reqseq = chan->srej_list.tail;
2835 	l2cap_send_sframe(chan, &control);
2836 }
2837 
/* Re-send an SREJ for every sequence number still on srej_list except
 * txseq.  Each entry is popped, re-requested, and appended back, so
 * the list is rotated in place; initial_head bounds the walk to a
 * single pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2863 
/* Process an acknowledgement (reqseq) from the peer: free every frame
 * in tx_q with a sequence number before reqseq, update the expected
 * ack sequence, and stop the retransmission timer once nothing remains
 * unacknowledged.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack brings no new information */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2895 
2896 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2897 {
2898 	BT_DBG("chan %p", chan);
2899 
2900 	chan->expected_tx_seq = chan->buffer_seq;
2901 	l2cap_seq_list_clear(&chan->srej_list);
2902 	skb_queue_purge(&chan->srej_q);
2903 	chan->rx_state = L2CAP_RX_STATE_RECV;
2904 }
2905 
/* ERTM transmitter state machine, XMIT state: the channel is free to
 * send new data.  Poll-type events (explicit poll, retransmission
 * timeout) move the channel to the WAIT_F state.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Queue the new PDUs and transmit as allowed */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* An RNR was sent while busy; poll the peer with
			 * RR so it resumes transmission.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Send a poll and wait for the F-bit in WAIT_F state */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer expired: poll for the peer's state */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2977 
2978 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2979 				  struct l2cap_ctrl *control,
2980 				  struct sk_buff_head *skbs, u8 event)
2981 {
2982 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2983 	       event);
2984 
2985 	switch (event) {
2986 	case L2CAP_EV_DATA_REQUEST:
2987 		if (chan->tx_send_head == NULL)
2988 			chan->tx_send_head = skb_peek(skbs);
2989 		/* Queue data, but don't send. */
2990 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2991 		break;
2992 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2993 		BT_DBG("Enter LOCAL_BUSY");
2994 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2995 
2996 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2997 			/* The SREJ_SENT state must be aborted if we are to
2998 			 * enter the LOCAL_BUSY state.
2999 			 */
3000 			l2cap_abort_rx_srej_sent(chan);
3001 		}
3002 
3003 		l2cap_send_ack(chan);
3004 
3005 		break;
3006 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
3007 		BT_DBG("Exit LOCAL_BUSY");
3008 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3009 
3010 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
3011 			struct l2cap_ctrl local_control;
3012 			memset(&local_control, 0, sizeof(local_control));
3013 			local_control.sframe = 1;
3014 			local_control.super = L2CAP_SUPER_RR;
3015 			local_control.poll = 1;
3016 			local_control.reqseq = chan->buffer_seq;
3017 			l2cap_send_sframe(chan, &local_control);
3018 
3019 			chan->retry_count = 1;
3020 			__set_monitor_timer(chan);
3021 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
3022 		}
3023 		break;
3024 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
3025 		l2cap_process_reqseq(chan, control->reqseq);
3026 		fallthrough;
3027 
3028 	case L2CAP_EV_RECV_FBIT:
3029 		if (control && control->final) {
3030 			__clear_monitor_timer(chan);
3031 			if (chan->unacked_frames > 0)
3032 				__set_retrans_timer(chan);
3033 			chan->retry_count = 0;
3034 			chan->tx_state = L2CAP_TX_STATE_XMIT;
3035 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
3036 		}
3037 		break;
3038 	case L2CAP_EV_EXPLICIT_POLL:
3039 		/* Ignore */
3040 		break;
3041 	case L2CAP_EV_MONITOR_TO:
3042 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3043 			l2cap_send_rr_or_rnr(chan, 1);
3044 			__set_monitor_timer(chan);
3045 			chan->retry_count++;
3046 		} else {
3047 			l2cap_send_disconn_req(chan, ECONNABORTED);
3048 		}
3049 		break;
3050 	default:
3051 		break;
3052 	}
3053 }
3054 
3055 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3056 		     struct sk_buff_head *skbs, u8 event)
3057 {
3058 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3059 	       chan, control, skbs, event, chan->tx_state);
3060 
3061 	switch (chan->tx_state) {
3062 	case L2CAP_TX_STATE_XMIT:
3063 		l2cap_tx_state_xmit(chan, control, skbs, event);
3064 		break;
3065 	case L2CAP_TX_STATE_WAIT_F:
3066 		l2cap_tx_state_wait_f(chan, control, skbs, event);
3067 		break;
3068 	default:
3069 		/* Ignore event */
3070 		break;
3071 	}
3072 }
3073 
/* Feed a received reqseq (and F-bit) into the TX state machine. */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
3080 
/* Feed a received F-bit (without a new reqseq) into the TX state
 * machine.
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
3087 
3088 /* Copy frame to all raw sockets on that connection */
3089 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3090 {
3091 	struct sk_buff *nskb;
3092 	struct l2cap_chan *chan;
3093 
3094 	BT_DBG("conn %p", conn);
3095 
3096 	mutex_lock(&conn->chan_lock);
3097 
3098 	list_for_each_entry(chan, &conn->chan_l, list) {
3099 		if (chan->chan_type != L2CAP_CHAN_RAW)
3100 			continue;
3101 
3102 		/* Don't send frame to the channel it came from */
3103 		if (bt_cb(skb)->l2cap.chan == chan)
3104 			continue;
3105 
3106 		nskb = skb_clone(skb, GFP_KERNEL);
3107 		if (!nskb)
3108 			continue;
3109 		if (chan->ops->recv(chan, nskb))
3110 			kfree_skb(nskb);
3111 	}
3112 
3113 	mutex_unlock(&conn->chan_lock);
3114 }
3115 
3116 /* ---- L2CAP signalling commands ---- */
/* Allocate and build an L2CAP signalling command skb: L2CAP header,
 * command header, then dlen bytes of payload.  Payload beyond the
 * connection MTU is chained as continuation fragments on frag_list.
 * Returns NULL if the MTU cannot hold the headers or on allocation
 * failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID depends on the transport */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* count becomes the payload room left in the first skb */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the chained fragments as well */
	kfree_skb(skb);
	return NULL;
}
3182 
/* Parse one configuration option at *ptr, advancing *ptr past it.
 * Option values of length 1/2/4 are returned by value in *val; any
 * other length returns a pointer to the (unaligned) value bytes
 * instead.  Returns the total number of bytes consumed.
 *
 * NOTE(review): opt->len is not validated here against the remaining
 * buffer length - callers must ensure the option fits before calling.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3216 
/* Append one configuration option at *ptr, advancing *ptr past it.
 * For len 1/2/4 the value is written inline (little endian); other
 * lengths treat val as a pointer to len bytes to copy.  The option is
 * silently dropped if it does not fit in the remaining size bytes.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: val points at the data */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3249 
3250 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3251 {
3252 	struct l2cap_conf_efs efs;
3253 
3254 	switch (chan->mode) {
3255 	case L2CAP_MODE_ERTM:
3256 		efs.id		= chan->local_id;
3257 		efs.stype	= chan->local_stype;
3258 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3259 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3260 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3261 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3262 		break;
3263 
3264 	case L2CAP_MODE_STREAMING:
3265 		efs.id		= 1;
3266 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3267 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3268 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3269 		efs.acc_lat	= 0;
3270 		efs.flush_to	= 0;
3271 		break;
3272 
3273 	default:
3274 		return;
3275 	}
3276 
3277 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3278 			   (unsigned long) &efs, size);
3279 }
3280 
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * are still unacknowledged, send an RR/RNR S-frame carrying the ack.
 *
 * NOTE(review): the l2cap_chan_put() pairs with a reference taken when
 * the ack timer was scheduled - confirm against __set_ack_timer.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3300 
/* Reset all ERTM/streaming sequence-number, SDU-reassembly and queue
 * state for a freshly configured channel.  For ERTM mode, also
 * initialize the RX/TX state machines and allocate the SREJ and
 * retransmission sequence lists.  Returns 0 on success or a negative
 * error from l2cap_seq_list_init.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP channel-move state starts out stable on BR/EDR */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming mode needs none of the ERTM machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Unwind the first allocation on failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3341 
3342 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3343 {
3344 	switch (mode) {
3345 	case L2CAP_MODE_STREAMING:
3346 	case L2CAP_MODE_ERTM:
3347 		if (l2cap_mode_supported(mode, remote_feat_mask))
3348 			return mode;
3349 		fallthrough;
3350 	default:
3351 		return L2CAP_MODE_BASIC;
3352 	}
3353 }
3354 
3355 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3356 {
3357 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3358 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3359 }
3360 
3361 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3362 {
3363 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3364 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3365 }
3366 
/* Fill in the retransmission and monitor timeouts of an RFC option.
 *
 * On an AMP link the timeouts are derived from the controller's
 * best-effort flush timeout; on BR/EDR the spec defaults are used.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		/* Flush timeout is in microseconds here */
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range of the RFC option fields */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3404 
3405 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3406 {
3407 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3408 	    __l2cap_ews_supported(chan->conn)) {
3409 		/* use extended control field */
3410 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3411 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3412 	} else {
3413 		chan->tx_win = min_t(u16, chan->tx_win,
3414 				     L2CAP_DEFAULT_TX_WINDOW);
3415 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3416 	}
3417 	chan->ack_win = chan->tx_win;
3418 }
3419 
3420 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3421 {
3422 	struct hci_conn *conn = chan->conn->hcon;
3423 
3424 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3425 
3426 	/* The 2-DH1 packet has between 2 and 56 information bytes
3427 	 * (including the 2-byte payload header)
3428 	 */
3429 	if (!(conn->pkt_type & HCI_2DH1))
3430 		chan->imtu = 54;
3431 
3432 	/* The 3-DH1 packet has between 2 and 85 information bytes
3433 	 * (including the 2-byte payload header)
3434 	 */
3435 	if (!(conn->pkt_type & HCI_3DH1))
3436 		chan->imtu = 83;
3437 
3438 	/* The 2-DH3 packet has between 2 and 369 information bytes
3439 	 * (including the 2-byte payload header)
3440 	 */
3441 	if (!(conn->pkt_type & HCI_2DH3))
3442 		chan->imtu = 367;
3443 
3444 	/* The 3-DH3 packet has between 2 and 554 information bytes
3445 	 * (including the 2-byte payload header)
3446 	 */
3447 	if (!(conn->pkt_type & HCI_3DH3))
3448 		chan->imtu = 552;
3449 
3450 	/* The 2-DH5 packet has between 2 and 681 information bytes
3451 	 * (including the 2-byte payload header)
3452 	 */
3453 	if (!(conn->pkt_type & HCI_2DH5))
3454 		chan->imtu = 679;
3455 
3456 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3457 	 * (including the 2-byte payload header)
3458 	 */
3459 	if (!(conn->pkt_type & HCI_3DH5))
3460 		chan->imtu = 1021;
3461 }
3462 
/* Build an outgoing L2CAP Configuration Request for @chan into @data
 * (at most @data_size bytes).
 *
 * On the first request/response exchange the channel mode may still be
 * downgraded based on the remote feature mask; afterwards the already
 * negotiated mode is kept.  Options emitted depend on the final mode:
 * MTU (when non-default), RFC, and for ERTM/streaming optionally EFS,
 * EWS and FCS.
 *
 * Returns the total length of the request written to @data.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only once, before any exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* State-2 devices insist on their configured mode */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		/* imtu == 0 means "pick one from the packet types" */
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only send an explicit basic-mode RFC when the remote
		 * could have negotiated something else.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* PDU size is bounded by the connection MTU minus the
		 * largest possible per-PDU overhead.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		/* RFC carries only the standard window; an extended
		 * window goes in the separate EWS option below.
		 */
		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3588 
/* Parse the accumulated Configuration Request (chan->conf_req,
 * chan->conf_len) from the remote and build the matching Configuration
 * Response into @data (at most @data_size bytes).
 *
 * First pass collects all options; options with a wrong length are
 * silently skipped (their olen check just breaks).  Then the channel
 * mode is reconciled with the requested RFC mode, and accepted/adjusted
 * values are echoed back.
 *
 * Returns the response length, or -ECONNREFUSED when the request is
 * fundamentally unacceptable (mode conflict that cannot be re-proposed,
 * EWS/EFS without the required support, ...).
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		/* Hint options may be ignored; others must be known */
		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended windows require A2MP support */
			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
				return -ECONNREFUSED;
			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;
			/* Unknown non-hint option: list it in the rsp */
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	/* Mode reconciliation only on the first full exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State-2 device: the requested mode must match ours */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		/* Propose our mode instead; give up after one retry */
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* EWS already delivered the remote window */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote PDU size to what fits in our
			 * connection MTU including worst-case overhead.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3813 
/* Parse an incoming Configuration Response (@rsp, @len bytes) and build
 * the follow-up Configuration Request into @data (at most @size bytes),
 * echoing back the values we accept.
 *
 * *result carries the response's result code in and may be consulted
 * (e.g. only latch CONF_RECV_NO_FCS while still PENDING).  On success
 * the channel's negotiated parameters (imtu, flush_to, mode, timeouts,
 * mps, ack_win, EFS values) are updated from the response.
 *
 * Returns the request length, or -ECONNREFUSED when the response is
 * incompatible with our configuration.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			/* Never accept an MTU below the spec minimum */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* State-2 devices cannot change mode */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Service type must be compatible with ours */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* A basic-mode channel cannot be upgraded by the response */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			/* Without extended control, the RFC txwin caps
			 * the ack window.
			 */
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3931 
3932 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3933 				u16 result, u16 flags)
3934 {
3935 	struct l2cap_conf_rsp *rsp = data;
3936 	void *ptr = rsp->data;
3937 
3938 	BT_DBG("chan %p", chan);
3939 
3940 	rsp->scid   = cpu_to_le16(chan->dcid);
3941 	rsp->result = cpu_to_le16(result);
3942 	rsp->flags  = cpu_to_le16(flags);
3943 
3944 	return ptr - data;
3945 }
3946 
3947 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3948 {
3949 	struct l2cap_le_conn_rsp rsp;
3950 	struct l2cap_conn *conn = chan->conn;
3951 
3952 	BT_DBG("chan %p", chan);
3953 
3954 	rsp.dcid    = cpu_to_le16(chan->scid);
3955 	rsp.mtu     = cpu_to_le16(chan->imtu);
3956 	rsp.mps     = cpu_to_le16(chan->mps);
3957 	rsp.credits = cpu_to_le16(chan->rx_credits);
3958 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3959 
3960 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3961 		       &rsp);
3962 }
3963 
/* Send the deferred Enhanced Credit Based connection response.
 *
 * A single ECRED connect request can create several channels sharing
 * one command ident; the response must list all of their DCIDs, so we
 * walk the connection's channel list and collect every channel still
 * carrying this ident, clearing the ident as we go so the response is
 * only sent once.
 *
 * NOTE(review): pdu.dcid has room for 5 entries — presumably matching
 * the maximum number of channels a single ECRED request may create, so
 * no more than 5 channels can share one ident; TODO confirm against the
 * connect-request handler.
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct {
		struct l2cap_ecred_conn_rsp rsp;
		__le16 dcid[5];
	} __packed pdu;
	struct l2cap_conn *conn = chan->conn;
	u16 ident = chan->ident;
	int i = 0;

	/* Already answered (or never pending) */
	if (!ident)
		return;

	BT_DBG("chan %p ident %d", chan, ident);

	pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
	pdu.rsp.mps     = cpu_to_le16(chan->mps);
	pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->ident != ident)
			continue;

		/* Reset ident so only one response is sent */
		chan->ident = 0;

		/* Include all channels pending with the same ident */
		pdu.dcid[i++] = cpu_to_le16(chan->scid);
	}

	mutex_unlock(&conn->chan_lock);

	l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
			sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
}
4002 
/* Send the deferred BR/EDR connection response for @chan and, if not
 * already done, kick off configuration by sending the first
 * Configuration Request.
 *
 * Uses L2CAP_CREATE_CHAN_RSP when the channel rides on an AMP
 * high-speed link, otherwise the regular L2CAP_CONN_RSP.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	if (chan->hs_hcon)
		rsp_code = L2CAP_CREATE_CHAN_RSP;
	else
		rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* Only send the initial config request once per channel */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
	chan->num_conf_req++;
}
4031 
/* Extract final RFC (and extended window) parameters from a remote
 * Configuration Response and apply them to @chan.
 *
 * Only meaningful for ERTM/streaming channels; for other modes this is
 * a no-op.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			/* Ignore malformed (wrong-length) options */
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* The ack window source depends on whether the extended
		 * control field (and thus EWS) is in use.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
4087 
4088 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4089 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4090 				    u8 *data)
4091 {
4092 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
4093 
4094 	if (cmd_len < sizeof(*rej))
4095 		return -EPROTO;
4096 
4097 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4098 		return 0;
4099 
4100 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4101 	    cmd->ident == conn->info_ident) {
4102 		cancel_delayed_work(&conn->info_timer);
4103 
4104 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4105 		conn->info_ident = 0;
4106 
4107 		l2cap_conn_start(conn);
4108 	}
4109 
4110 	return 0;
4111 }
4112 
/* Handle an incoming BR/EDR Connection Request (or AMP Create Channel
 * Request, selected via @rsp_code/@amp_id).
 *
 * Looks up a listening channel for the requested PSM, validates
 * security and the source CID, creates the new channel and always sends
 * a response carrying the outcome.  When the feature exchange with the
 * remote has not happened yet, a feature-mask information request is
 * kicked off and the result stays pending.
 *
 * Returns the newly created channel, or NULL when the request was
 * rejected before a channel could be created.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Lock order: connection channel list, then the parent channel */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Remember the request ident for the deferred response path */
	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* No feature exchange yet: request the remote feature mask now */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* Channel accepted immediately: start configuration */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	return chan;
}
4255 
/* Signaling handler for an L2CAP Connection Request.
 *
 * Notifies the management interface of the (now established) device
 * connection on first use, then delegates the actual channel setup to
 * l2cap_connect() with a plain connection response code and the BR/EDR
 * controller id (0).
 *
 * Returns 0, or -EPROTO when the command payload is too short.
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	hci_dev_lock(hdev);
	/* Report the connection to mgmt only once per hci_conn */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, hcon, NULL, 0);
	hci_dev_unlock(hdev);

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
4274 
/* Handle a Connection Response (or AMP Create Channel Response).
 *
 * The channel is found by destination CID when the remote supplied one,
 * otherwise by the pending command ident.  On success the channel moves
 * to BT_CONFIG and the first Configuration Request is sent; a pending
 * result just marks the channel, and anything else tears it down.
 *
 * Returns 0 on success, -EPROTO for a short command, or -EBADSLT when
 * no matching channel exists (or it is already going away).
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	/* Take a reference unless the channel is already being freed */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Start configuration only once per channel */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4354 
4355 static inline void set_default_fcs(struct l2cap_chan *chan)
4356 {
4357 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4358 	 * sides request it.
4359 	 */
4360 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4361 		chan->fcs = L2CAP_FCS_NONE;
4362 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4363 		chan->fcs = L2CAP_FCS_CRC16;
4364 }
4365 
/* Send the final (successful) Configuration Response that was held back
 * while an EFS negotiation was pending, and update the channel's
 * configuration state accordingly.
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	/* Local configuration is no longer pending: it is done now */
	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
		       l2cap_build_conf_rsp(chan, data,
					    L2CAP_CONF_SUCCESS, flags), data);
}
4381 
4382 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4383 				   u16 scid, u16 dcid)
4384 {
4385 	struct l2cap_cmd_rej_cid rej;
4386 
4387 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4388 	rej.scid = __cpu_to_le16(scid);
4389 	rej.dcid = __cpu_to_le16(dcid);
4390 
4391 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4392 }
4393 
/* Handle an incoming L2CAP Configure Request.
 *
 * Options may be split across several requests (continuation flag set);
 * fragments are accumulated in chan->conf_req and only parsed once the
 * final fragment arrives.  When both directions are configured the
 * channel is made operational.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* On success the channel is returned locked with a reference
	 * held; both are released at "unlock" below.
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal while the channel is being set up
	 * or is already connected (reconfiguration); reject otherwise.
	 */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	/* Bounded by L2CAP_CONF_MAX_CONF_RSP (checked in the response
	 * handler) so negotiation cannot loop forever.
	 */
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: set FCS policy, init
		 * ERTM/streaming state if needed, then go operational.
		 */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own Configure Request has not gone out yet; send it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4504 
/* Handle an incoming L2CAP Configure Response to one of our requests.
 *
 * SUCCESS finalizes the remote side's view; PENDING defers it; UNKNOWN
 * and UNACCEPT trigger a bounded renegotiation; anything else tears the
 * channel down.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* On success the channel is returned locked with a reference
	 * held; both are released at "done" below.
	 */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* BR/EDR channels can answer right away; over a
			 * high-speed (AMP) link the response waits for
			 * the logical link to be created.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		/* Renegotiate, but only a bounded number of times */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More option fragments follow; wait for the final response */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions configured; bring the channel up */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4619 
/* Handle an incoming L2CAP Disconnection Request: acknowledge it with a
 * Disconnection Response and tear down the local channel.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* Lock order: conn->chan_lock first, then the channel lock */
	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	/* Extra reference keeps the channel alive across del/close */
	l2cap_chan_hold(chan);
	l2cap_chan_lock(chan);

	/* The response echoes the CIDs back from our perspective */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4666 
/* Handle an incoming L2CAP Disconnection Response: finish tearing down a
 * channel for which we previously sent a Disconnection Request.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Lock order: conn->chan_lock first, then the channel lock */
	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	/* Extra reference keeps the channel alive across del/close */
	l2cap_chan_hold(chan);
	l2cap_chan_lock(chan);

	/* Only act if we are actually waiting for this response */
	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_del(chan, 0);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4712 
/* Handle an incoming L2CAP Information Request.  Answers feature-mask
 * and fixed-channel queries; any other type gets a "not supported"
 * response.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* Response header plus a 32-bit feature mask payload */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise optional features based on module config */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* Response header plus an 8-byte fixed-channel bitmap,
		 * of which only the first byte is populated.
		 */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4763 
/* Handle an incoming L2CAP Information Response.
 *
 * Drives the two-step discovery pipeline: the feature-mask response may
 * trigger a fixed-channel query, and once discovery completes (or
 * fails) pending connections are started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	/* On failure, give up on discovery and start connections anyway */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* If the peer supports fixed channels, query them next;
		 * otherwise discovery is complete.
		 */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4826 
/* Handle an incoming L2CAP Create Channel Request (A2MP).
 *
 * Controller id AMP_ID_BREDR falls back to a plain BR/EDR connect; any
 * other id must name a powered-up AMP controller, in which case the
 * channel is created and bound to the high-speed link.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	/* Only valid if we advertise A2MP support */
	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			/* No AMP physical link to the peer; drop the
			 * hdev reference before bailing out.
			 */
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4903 
4904 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4905 {
4906 	struct l2cap_move_chan_req req;
4907 	u8 ident;
4908 
4909 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4910 
4911 	ident = l2cap_get_ident(chan->conn);
4912 	chan->ident = ident;
4913 
4914 	req.icid = cpu_to_le16(chan->scid);
4915 	req.dest_amp_id = dest_amp_id;
4916 
4917 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4918 		       &req);
4919 
4920 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4921 }
4922 
4923 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4924 {
4925 	struct l2cap_move_chan_rsp rsp;
4926 
4927 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4928 
4929 	rsp.icid = cpu_to_le16(chan->dcid);
4930 	rsp.result = cpu_to_le16(result);
4931 
4932 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4933 		       sizeof(rsp), &rsp);
4934 }
4935 
4936 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4937 {
4938 	struct l2cap_move_chan_cfm cfm;
4939 
4940 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4941 
4942 	chan->ident = l2cap_get_ident(chan->conn);
4943 
4944 	cfm.icid = cpu_to_le16(chan->scid);
4945 	cfm.result = cpu_to_le16(result);
4946 
4947 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4948 		       sizeof(cfm), &cfm);
4949 
4950 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4951 }
4952 
4953 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4954 {
4955 	struct l2cap_move_chan_cfm cfm;
4956 
4957 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4958 
4959 	cfm.icid = cpu_to_le16(icid);
4960 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4961 
4962 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4963 		       sizeof(cfm), &cfm);
4964 }
4965 
4966 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4967 					 u16 icid)
4968 {
4969 	struct l2cap_move_chan_cfm_rsp rsp;
4970 
4971 	BT_DBG("icid 0x%4.4x", icid);
4972 
4973 	rsp.icid = cpu_to_le16(icid);
4974 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4975 }
4976 
4977 static void __release_logical_link(struct l2cap_chan *chan)
4978 {
4979 	chan->hs_hchan = NULL;
4980 	chan->hs_hcon = NULL;
4981 
4982 	/* Placeholder - release the logical link */
4983 }
4984 
/* Clean up after a failed logical link setup.  A channel that never
 * reached BT_CONNECTED is simply disconnected; an established channel
 * aborts its in-progress move according to its move role.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
5015 
/* Complete channel creation over AMP once the logical link is up: send
 * the deferred configure response and, if configuration has already
 * finished in both directions, bring the channel operational.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	/* Scratch buffer for building the configure response */
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* chan->ident was saved when the response was deferred */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
5038 
/* Advance an in-progress channel move now that the logical link on the
 * new controller is ready.  The next step depends on the channel's move
 * state and role.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
5072 
/* Logical link confirmation callback.  A non-zero status means setup
 * failed and the link is released; otherwise the event is routed to
 * either channel creation or channel move completion depending on the
 * channel's state.
 *
 * Call with chan locked.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
5093 
/* Start moving a channel to the other controller type.  A BR/EDR
 * channel only moves if its policy prefers AMP; a channel already on
 * AMP moves back to BR/EDR (controller id 0) immediately.
 */
void l2cap_move_start(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	if (chan->local_amp_id == AMP_ID_BREDR) {
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
			return;
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
	} else {
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		/* Target is BR/EDR (controller id 0) */
		chan->move_id = 0;
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
	}
}
5112 
/* Continue channel creation after the AMP physical link attempt has
 * resolved.  Outgoing channels either proceed on AMP or fall back to a
 * BR/EDR connect; incoming channels get a Create Channel Response and,
 * on success, start configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* No FCS over the high-speed link */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		/* chan->ident was saved from the original request */
		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}
	}
}
5164 
5165 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5166 				   u8 remote_amp_id)
5167 {
5168 	l2cap_move_setup(chan);
5169 	chan->move_id = local_amp_id;
5170 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
5171 
5172 	l2cap_send_move_chan_req(chan, remote_amp_id);
5173 }
5174 
/* Respond to a move request targeting this controller.  The logical
 * link lookup is still a placeholder, so hchan is currently always NULL
 * and the move is answered with "not allowed".
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
5199 
5200 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5201 {
5202 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5203 		u8 rsp_result;
5204 		if (result == -EINVAL)
5205 			rsp_result = L2CAP_MR_BAD_ID;
5206 		else
5207 			rsp_result = L2CAP_MR_NOT_ALLOWED;
5208 
5209 		l2cap_send_move_chan_rsp(chan, rsp_result);
5210 	}
5211 
5212 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
5213 	chan->move_state = L2CAP_MOVE_STABLE;
5214 
5215 	/* Restart data transmission */
5216 	l2cap_ertm_send(chan);
5217 }
5218 
/* Physical link confirmation callback: route the result of an AMP
 * physical link attempt to channel creation, move continuation, or
 * move cancellation depending on channel state and move role.
 *
 * Invoke with locked chan.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* Channel already going away; nothing to do */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
		return;

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
5250 
/* Handle an incoming L2CAP Move Channel Request.
 *
 * Validates the channel, target controller and possible move collision,
 * then either rejects the move or becomes the move responder and starts
 * preparing the new link.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	/* Only valid if we advertise A2MP support */
	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	/* On success the channel is returned locked with a reference
	 * held; both are released after the response below.
	 */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Only dynamically allocated, AMP-capable ERTM/streaming
	 * channels may be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* Target must be a powered-up AMP controller */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision.  Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5348 
/* Continue a channel move after a success or pending Move Channel
 * Response, advancing the move state machine toward the final confirm.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	/* Placeholder only - stays NULL until logical link lookup exists */
	struct hci_chan *hchan = NULL;

	/* On success the channel is returned locked with a reference
	 * held; both are released at the end of this function.
	 */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* Restart the move timer only for a pending result */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
5439 
/* Handle a failed Move Channel Response: cancel (or, on collision,
 * switch roles for) the in-progress move and send an unconfirmed
 * confirm to close out the exchange.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	/* On success the channel is returned locked with a reference
	 * held; both are released at the end of this function.
	 */
	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			/* We lost the collision; let the peer drive */
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
5469 
5470 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5471 				  struct l2cap_cmd_hdr *cmd,
5472 				  u16 cmd_len, void *data)
5473 {
5474 	struct l2cap_move_chan_rsp *rsp = data;
5475 	u16 icid, result;
5476 
5477 	if (cmd_len != sizeof(*rsp))
5478 		return -EPROTO;
5479 
5480 	icid = le16_to_cpu(rsp->icid);
5481 	result = le16_to_cpu(rsp->result);
5482 
5483 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5484 
5485 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5486 		l2cap_move_continue(conn, icid, result);
5487 	else
5488 		l2cap_move_fail(conn, cmd->ident, icid, result);
5489 
5490 	return 0;
5491 }
5492 
/* Handle an incoming Move Channel Confirm (AMP channel move).
 *
 * The icid in the confirm is the peer's channel id, i.e. our dcid.
 * A Confirm Response is always sent, even when no matching channel
 * exists.  When the confirm arrives in L2CAP_MOVE_WAIT_CONFIRM, a
 * confirmed result commits the move target; anything else reverts to
 * the current controller.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	/* Channel comes back locked and referenced; released below */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Commit the move to the new controller */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				/* Back on BR/EDR: the AMP logical link
				 * is no longer needed.
				 */
				__release_logical_link(chan);
		} else {
			/* Not confirmed: stay on the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5535 
/* Handle a Move Channel Confirm Response, the final PDU of an AMP
 * channel move.  Here icid is our scid.  If we were waiting for this
 * response, commit the move target as the active controller and drop
 * the AMP logical link when the channel ends up back on BR/EDR.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	/* Channel comes back locked and referenced; released below */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			/* Moved back to BR/EDR; release the AMP link */
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5571 
/* Handle an LE Connection Parameter Update Request.
 *
 * Only the central (master) may act on this request; otherwise the
 * command is rejected via the -EINVAL return.  The requested interval,
 * latency and supervision timeout are validated with
 * hci_check_conn_params() and accepted or rejected in the response.
 * On acceptance the controller is asked to update the connection and
 * the new parameters are reported to the management interface.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		/* Apply the parameters and let userspace decide whether
		 * to store them, based on the returned hint.
		 */
		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
5621 
/* Handle an LE Credit Based Connection Response.
 *
 * The pending channel is found via the request's ident.  On success it
 * is completed with the peer's dcid, mtu, mps and initial tx credits.
 * Security-related results raise the channel's required security level
 * and retrigger SMP so the connect request can be retried; any other
 * result refuses the channel.
 *
 * Lock order: conn->chan_lock, then the channel lock.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* A successful response must carry at least the LE CoC minimum
	 * MTU/MPS (23) and a dcid inside the LE dynamic CID range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* The peer must not hand out a dcid we already use */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5708 
/* Dispatch a single BR/EDR signaling command to its handler.
 *
 * A nonzero return makes the caller (l2cap_sig_channel) answer the
 * command with an L2CAP Command Reject (Not Understood).
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo is answered in place with the request payload */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5788 
/* Handle an LE Credit Based Connection Request: create a new LE CoC
 * channel from a listening parent channel.
 *
 * The PSM and the requested scid are validated, link security is
 * checked against the listener's requirements, and a child channel is
 * created via the listener's new_connection callback.  When the
 * listener uses FLAG_DEFER_SETUP the response is postponed until
 * userspace accepts; otherwise the channel is made ready and the
 * response is sent immediately.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 is the minimum LE CoC MTU/MPS */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	/* Our scid/rx_credits go back to the peer in the response */
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	/* Deferred setup: response is sent later on accept */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5927 
5928 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5929 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5930 				   u8 *data)
5931 {
5932 	struct l2cap_le_credits *pkt;
5933 	struct l2cap_chan *chan;
5934 	u16 cid, credits, max_credits;
5935 
5936 	if (cmd_len != sizeof(*pkt))
5937 		return -EPROTO;
5938 
5939 	pkt = (struct l2cap_le_credits *) data;
5940 	cid	= __le16_to_cpu(pkt->cid);
5941 	credits	= __le16_to_cpu(pkt->credits);
5942 
5943 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5944 
5945 	chan = l2cap_get_chan_by_dcid(conn, cid);
5946 	if (!chan)
5947 		return -EBADSLT;
5948 
5949 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5950 	if (credits > max_credits) {
5951 		BT_ERR("LE credits overflow");
5952 		l2cap_send_disconn_req(chan, ECONNRESET);
5953 
5954 		/* Return 0 so that we don't trigger an unnecessary
5955 		 * command reject packet.
5956 		 */
5957 		goto unlock;
5958 	}
5959 
5960 	chan->tx_credits += credits;
5961 
5962 	/* Resume sending */
5963 	l2cap_le_flowctl_send(chan);
5964 
5965 	if (chan->tx_credits)
5966 		chan->ops->resume(chan);
5967 
5968 unlock:
5969 	l2cap_chan_unlock(chan);
5970 	l2cap_chan_put(chan);
5971 
5972 	return 0;
5973 }
5974 
/* Handle an Enhanced Credit Based Connection Request (up to
 * L2CAP_ECRED_MAX_CID channels in one command).
 *
 * Each requested scid is validated independently: a failing scid gets
 * dcid 0 in the response while the remaining scids are still
 * processed.  The single result field of the response reflects the
 * last failure seen (or success).  With FLAG_DEFER_SETUP on the
 * listener the response is deferred until userspace accepts.
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	struct {
		struct l2cap_ecred_conn_rsp rsp;
		__le16 dcid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, len = 0;
	int i, num_scid;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	/* Payload must be the fixed header plus a whole number of scids */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > ARRAY_SIZE(pdu.dcid)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto response;
	}

	psm  = req->psm;

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	memset(&pdu, 0, sizeof(pdu));

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		/* dcid 0 marks this scid as refused in the response */
		pdu.dcid[i] = 0x0000;
		len += sizeof(*pdu.dcid);

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm  = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response */
		if (!pdu.rsp.credits) {
			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
			pdu.rsp.mps = cpu_to_le16(chan->mps);
			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
		}

		pdu.dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

response:
	pdu.rsp.result = cpu_to_le16(result);

	/* Deferred setup: response is sent later on accept */
	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(pdu.rsp) + len, &pdu);

	return 0;
}
6129 
/* Handle an Enhanced Credit Based Connection Response.
 *
 * All channels matching the request ident are walked; each pending
 * channel consumes one dcid from the response in order.  Channels for
 * which the response carries no dcid are refused.  Security-related
 * results raise the channel's security level and retrigger SMP so the
 * connect request can be retried.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	mutex_lock(&conn->chan_lock);

	/* cmd_len now tracks the remaining dcid payload */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		/* Only channels still pending on this request */
		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			/* Re-lookup is non-NULL: the condition above just
			 * found a channel with this dcid.
			 */
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return err;
}
6243 
6244 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6245 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6246 					 u8 *data)
6247 {
6248 	struct l2cap_ecred_reconf_req *req = (void *) data;
6249 	struct l2cap_ecred_reconf_rsp rsp;
6250 	u16 mtu, mps, result;
6251 	struct l2cap_chan *chan;
6252 	int i, num_scid;
6253 
6254 	if (!enable_ecred)
6255 		return -EINVAL;
6256 
6257 	if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) {
6258 		result = L2CAP_CR_LE_INVALID_PARAMS;
6259 		goto respond;
6260 	}
6261 
6262 	mtu = __le16_to_cpu(req->mtu);
6263 	mps = __le16_to_cpu(req->mps);
6264 
6265 	BT_DBG("mtu %u mps %u", mtu, mps);
6266 
6267 	if (mtu < L2CAP_ECRED_MIN_MTU) {
6268 		result = L2CAP_RECONF_INVALID_MTU;
6269 		goto respond;
6270 	}
6271 
6272 	if (mps < L2CAP_ECRED_MIN_MPS) {
6273 		result = L2CAP_RECONF_INVALID_MPS;
6274 		goto respond;
6275 	}
6276 
6277 	cmd_len -= sizeof(*req);
6278 	num_scid = cmd_len / sizeof(u16);
6279 	result = L2CAP_RECONF_SUCCESS;
6280 
6281 	for (i = 0; i < num_scid; i++) {
6282 		u16 scid;
6283 
6284 		scid = __le16_to_cpu(req->scid[i]);
6285 		if (!scid)
6286 			return -EPROTO;
6287 
6288 		chan = __l2cap_get_chan_by_dcid(conn, scid);
6289 		if (!chan)
6290 			continue;
6291 
6292 		/* If the MTU value is decreased for any of the included
6293 		 * channels, then the receiver shall disconnect all
6294 		 * included channels.
6295 		 */
6296 		if (chan->omtu > mtu) {
6297 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
6298 			       chan->omtu, mtu);
6299 			result = L2CAP_RECONF_INVALID_MTU;
6300 		}
6301 
6302 		chan->omtu = mtu;
6303 		chan->remote_mps = mps;
6304 	}
6305 
6306 respond:
6307 	rsp.result = cpu_to_le16(result);
6308 
6309 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
6310 		       &rsp);
6311 
6312 	return 0;
6313 }
6314 
6315 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6316 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6317 					 u8 *data)
6318 {
6319 	struct l2cap_chan *chan, *tmp;
6320 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6321 	u16 result;
6322 
6323 	if (cmd_len < sizeof(*rsp))
6324 		return -EPROTO;
6325 
6326 	result = __le16_to_cpu(rsp->result);
6327 
6328 	BT_DBG("result 0x%4.4x", rsp->result);
6329 
6330 	if (!result)
6331 		return 0;
6332 
6333 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6334 		if (chan->ident != cmd->ident)
6335 			continue;
6336 
6337 		l2cap_chan_del(chan, ECONNRESET);
6338 	}
6339 
6340 	return 0;
6341 }
6342 
6343 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6344 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6345 				       u8 *data)
6346 {
6347 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6348 	struct l2cap_chan *chan;
6349 
6350 	if (cmd_len < sizeof(*rej))
6351 		return -EPROTO;
6352 
6353 	mutex_lock(&conn->chan_lock);
6354 
6355 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6356 	if (!chan)
6357 		goto done;
6358 
6359 	l2cap_chan_lock(chan);
6360 	l2cap_chan_del(chan, ECONNREFUSED);
6361 	l2cap_chan_unlock(chan);
6362 
6363 done:
6364 	mutex_unlock(&conn->chan_lock);
6365 	return 0;
6366 }
6367 
/* Dispatch a single LE signaling command to its handler.
 *
 * A nonzero return makes the caller (l2cap_le_sig_channel) answer the
 * command with an L2CAP Command Reject (Not Understood).
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_REQ:
		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_RSP:
		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_REQ:
		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_RSP:
		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
6430 
/* Receive one LE signaling PDU.
 *
 * An LE C-frame carries exactly one command, hence the strict
 * len == skb->len check.  Handler failures are answered with a
 * Command Reject (Not Understood).  The skb is always consumed.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Ident 0 is reserved and the declared length must match the
	 * remaining payload exactly.
	 */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
6471 
/* Receive a BR/EDR signaling PDU, which may carry several commands.
 *
 * The PDU is first handed to l2cap_raw_recv().  Commands are then
 * processed in order until the buffer is exhausted or a corrupted
 * header is found; handler failures are answered with a Command
 * Reject (Not Understood).  The skb is always consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* Ident 0 is reserved; the declared length must fit in
		 * what is left of the PDU.
		 */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance to the next command in the same PDU */
		skb_pull(skb, len);
	}

drop:
	kfree_skb(skb);
}
6517 
6518 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
6519 {
6520 	u16 our_fcs, rcv_fcs;
6521 	int hdr_size;
6522 
6523 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6524 		hdr_size = L2CAP_EXT_HDR_SIZE;
6525 	else
6526 		hdr_size = L2CAP_ENH_HDR_SIZE;
6527 
6528 	if (chan->fcs == L2CAP_FCS_CRC16) {
6529 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6530 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6531 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6532 
6533 		if (our_fcs != rcv_fcs)
6534 			return -EBADMSG;
6535 	}
6536 	return 0;
6537 }
6538 
/* Answer a poll (P-bit) by sending a frame carrying the F-bit.
 *
 * When locally busy, the F-bit is carried by an RNR S-frame.  Pending
 * I-frames are then (re)sent, and if the F-bit still has not gone out
 * in any of them, a final RR S-frame delivers it.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote busy just cleared: restart retransmission for frames
	 * that are still unacknowledged.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
6572 
/* Append new_frag to skb's frag_list and update skb's accounting.
 *
 * *last_frag tracks the current tail of the fragment chain so each
 * append is O(1).
 * skb->len reflects data in skb as well as all fragments;
 * skb->data_len reflects only data in fragments.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* The first fragment starts the frag_list */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	/* Account the fragment in the head skb's totals */
	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
6591 
/* Reassemble an SDU from SAR-tagged I-frames.
 *
 * Unsegmented frames go straight to the channel's recv callback.
 * A START frame carries the total SDU length and opens reassembly in
 * chan->sdu; CONTINUE and END frames are appended via
 * append_skb_frag().  Any SAR sequencing violation (e.g. START while
 * an SDU is open, CONTINUE without one) leaves err at -EINVAL.
 *
 * On error, both the input skb and any partially assembled SDU are
 * freed and the reassembly state is reset.  On success the skb is
 * consumed (buffered for reassembly or handed to recv).
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* An open SDU means a missing END frame: error out */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* START carries the 16-bit total SDU length up front */
		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* First fragment may not already satisfy the SDU length */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by the reassembly state */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A CONTINUE frame must not complete or exceed the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* END must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the frame and any partial SDU, reset state */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
6676 
/* Re-segment queued outgoing data after a channel move changes the MTU.
 * Not implemented; unconditionally reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
6682 
6683 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6684 {
6685 	u8 event;
6686 
6687 	if (chan->mode != L2CAP_MODE_ERTM)
6688 		return;
6689 
6690 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6691 	l2cap_tx(chan, NULL, NULL, event);
6692 }
6693 
/* Drain in-sequence frames buffered in srej_q into the reassembler,
 * starting at chan->buffer_seq, stopping at the first gap (missing
 * txseq), a reassembly error, or local busy.  If the queue fully
 * drains, leave SREJ recovery (back to RECV state) and acknowledge.
 * Returns 0 or the reassembly error.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap found: the next expected frame has not arrived yet */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
6727 
/* Handle a received SREJ S-frame: selectively retransmit the single
 * frame the peer requested (control->reqseq), honoring the poll/final
 * bits and the retry limit.  An invalid reqseq or exceeded retry limit
 * disconnects the channel.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* The peer cannot reject a sequence number we have not sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means an unlimited number of retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* P=1 requires the retransmission to carry F=1 */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this F=1 SREJ matches one
			 * we already acted on (CONN_SREJ_ACT set for the
			 * same reqseq).
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6785 
/* Handle a received REJ S-frame: retransmit all unacked frames starting
 * at control->reqseq, subject to the retry limit.  An invalid reqseq or
 * exceeded retry limit disconnects the channel.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* The peer cannot reject a sequence number we have not sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means an unlimited number of retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* F=1 answering our poll: only retransmit if this REJ was
		 * not already acted upon (CONN_REJ_ACT).
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6822 
/* Classify a received I-frame's sequence number relative to the current
 * receive window and SREJ recovery state.  Returns one of the
 * L2CAP_TXSEQ_* dispositions (expected, duplicate, unexpected, or
 * invalid/ignorable) that drives the rx state machine.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		/* Head of the SREJ list is the retransmission we are
		 * waiting for next.
		 */
		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		/* Requested via SREJ but arriving out of the order we
		 * asked for it.
		 */
		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq earlier (mod seq space) than the next expected frame
	 * means it was already received.
	 */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6908 
/* ERTM rx state machine, RECV state: process I-frames and S-frames when
 * no SREJ recovery is in progress.  @skb is consumed when reassembled or
 * queued for SREJ recovery; otherwise it is freed before returning.
 * Returns 0 or a reassembly/queueing error.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;	/* set once skb ownership is taken */

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Expected frames are dropped (not SREJ'd) while
			 * locally busy; they will show up as missing when
			 * busy clears.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 *   l2cap_reassemble_sdu
			 *     chan->ops->recv == l2cap_sock_recv_cb
			 *       __sock_queue_rcv_skb
			 * Another thread calls:
			 *   bt_sock_recvmsg
			 *     skb_recv_datagram
			 *     skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only the ack info is useful */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote is no longer busy; resume retransmission
			 * timing if frames are still unacked.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
7060 
/* ERTM rx state machine, SREJ_SENT state: SREJ recovery is active, so
 * incoming I-frames are buffered in srej_q until the missing frames
 * arrive and in-sequence delivery can resume.  @skb is consumed when
 * queued; otherwise it is freed before returning.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;	/* set once skb ownership is taken */

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The retransmission we were waiting for; pop it
			 * off the SREJ list and try to deliver everything
			 * that is now in sequence.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Only retransmit if this F=1 does not answer a REJ
			 * that was already acted upon.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll with F=1 on a fresh SREJ for the
			 * tail of the missing-frame list.
			 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
7203 
7204 static int l2cap_finish_move(struct l2cap_chan *chan)
7205 {
7206 	BT_DBG("chan %p", chan);
7207 
7208 	chan->rx_state = L2CAP_RX_STATE_RECV;
7209 
7210 	if (chan->hs_hcon)
7211 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7212 	else
7213 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7214 
7215 	return l2cap_resegment(chan);
7216 }
7217 
/* Rx state WAIT_P: after a channel move we wait for the peer's P=1 poll.
 * On receiving it, rewind the tx side to the peer's acknowledged point,
 * finish the move, answer with F=1, and process the frame as an S-frame
 * event in the RECV state.  Anything other than a poll is a protocol
 * error (-EPROTO).
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	/* Restart transmission from the front of the unacked queue */
	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* Only S-frames are valid while waiting for the poll */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
7255 
/* Rx state WAIT_F: after a channel move we sent a P=1 poll and wait for
 * the peer's F=1 answer.  On receiving it, rewind the tx side to the
 * peer's acknowledged point, adopt the post-move MTU, resegment, and
 * process the frame in the RECV state.  A frame without F=1 is a
 * protocol error (-EPROTO).
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	/* Restart transmission from the front of the unacked queue */
	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Pick the MTU matching the post-move transport */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
7293 
7294 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7295 {
7296 	/* Make sure reqseq is for a packet that has been sent but not acked */
7297 	u16 unacked;
7298 
7299 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7300 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
7301 }
7302 
/* Top-level ERTM rx dispatch: validate the frame's reqseq (it must only
 * acknowledge frames actually in flight) and hand the event to the
 * handler for the current rx state.  An invalid reqseq forces a
 * disconnect.  @skb ownership passes to the state handlers.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
						       event);
			break;
		case L2CAP_RX_STATE_WAIT_P:
			err = l2cap_rx_state_wait_p(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_WAIT_F:
			err = l2cap_rx_state_wait_f(chan, control, skb, event);
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return err;
}
7339 
/* Streaming-mode receive: there are no retransmissions, so only the
 * exactly-expected txseq is reassembled; any other sequence discards the
 * partial SDU and resynchronizes to the received txseq.  Always returns
 * 0 (streaming mode tolerates loss).  Consumes @skb on all paths.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 *   l2cap_reassemble_sdu
	 *     chan->ops->recv == l2cap_sock_recv_cb
	 *       __sock_queue_rcv_skb
	 * Another thread calls:
	 *   bt_sock_recvmsg
	 *     skb_recv_datagram
	 *     skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out of sequence: drop any partial SDU and this frame,
		 * then resync below.
		 */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize the window to the frame just seen */
	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
7391 
/* Common entry point for ERTM/streaming data frames: unpack the control
 * field (stored in the skb's control block), validate FCS, payload
 * length, and F/P bit combinations, then route I-frames and S-frames
 * into the rx state machine.  Consumes @skb on all paths; always
 * returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	/* control aliases memory inside skb->cb, so it dies with the skb */
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Account for the SDU-length and FCS fields when checking the
	 * payload against the negotiated MPS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps L2CAP_SUPER_* values to rx state machine events */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
7484 
7485 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7486 {
7487 	struct l2cap_conn *conn = chan->conn;
7488 	struct l2cap_le_credits pkt;
7489 	u16 return_credits;
7490 
7491 	return_credits = (chan->imtu / chan->mps) + 1;
7492 
7493 	if (chan->rx_credits >= return_credits)
7494 		return;
7495 
7496 	return_credits -= chan->rx_credits;
7497 
7498 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7499 
7500 	chan->rx_credits += return_credits;
7501 
7502 	pkt.cid     = cpu_to_le16(chan->scid);
7503 	pkt.credits = cpu_to_le16(return_credits);
7504 
7505 	chan->ident = l2cap_get_ident(conn);
7506 
7507 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
7508 }
7509 
7510 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7511 {
7512 	int err;
7513 
7514 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7515 
7516 	/* Wait recv to confirm reception before updating the credits */
7517 	err = chan->ops->recv(chan, skb);
7518 
7519 	/* Update credits whenever an SDU is received */
7520 	l2cap_chan_le_send_credits(chan);
7521 
7522 	return err;
7523 }
7524 
/* LE / enhanced credit based flow control receive path: enforce the
 * credit count, reassemble SDUs from K-frames, and deliver complete
 * SDUs via l2cap_ecred_recv().  Consumes @skb except on the early
 * -ENOBUFS returns, where the caller frees it.  See the comment at the
 * end for why reassembly errors are not propagated.
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* The peer must not send without credits */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First K-frame of an SDU carries the 2-byte SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Single-frame SDU: deliver immediately */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;	/* consumed by the partial SDU */

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		/* kfree_skb(NULL) is a no-op, so a consumed skb is safe */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
7628 
/* Route a data packet to the channel identified by @cid, creating an
 * A2MP channel on demand for L2CAP_CID_A2MP.  The looked-up channel is
 * held and locked for the duration (released at done); @skb is freed
 * here unless the channel's mode handler consumed it.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			/* Match the hold+lock l2cap_get_chan_by_scid would
			 * have taken, so the common exit path applies.
			 */
			l2cap_chan_hold(chan);
			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* On error the skb was not consumed; free it via drop */
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
7705 
/* Deliver a connectionless-channel packet to the global channel bound to
 * @psm on an ACL link, stamping the remote address and PSM into the skb
 * control block for recvmsg's msg_name.  Frees @skb unless the channel's
 * recv callback consumed it.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	/* Connectionless data is only defined for BR/EDR (ACL) links */
	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* recv returning 0 means the callback took ownership of the skb */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
7742 
/* Demultiplex one complete L2CAP frame by CID to the signaling,
 * connectionless, or data channel handlers.  Frames arriving before the
 * HCI connection is fully established are queued and replayed later by
 * process_pending_rx().  Consumes @skb on all paths.
 *
 * NOTE(review): the L2CAP header is read before any length check —
 * assumes the caller always delivers at least a full header; confirm in
 * the ACL reassembly path.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* The header's length field must match the actual payload */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
7797 
7798 static void process_pending_rx(struct work_struct *work)
7799 {
7800 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7801 					       pending_rx_work);
7802 	struct sk_buff *skb;
7803 
7804 	BT_DBG("");
7805 
7806 	while ((skb = skb_dequeue(&conn->pending_rx)))
7807 		l2cap_recv_frame(conn, skb);
7808 }
7809 
/* Create (or return the existing) L2CAP connection for an HCI link.
 *
 * Allocates the l2cap_conn, binds it to a new HCI channel and sets up
 * the locks, lists and work items the connection uses. Returns NULL on
 * allocation failure. The returned connection holds references on both
 * the hci_conn and the hci_chan.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Connection already set up for this link */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* Pick the outgoing MTU from the link type; fall back to the ACL
	 * MTU when the controller reports no LE specific value.
	 */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		fallthrough;
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise the A2MP fixed channel only when High Speed is on */
	if (hcon->type == ACL_LINK &&
	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	/* Advertise the BR/EDR SMP fixed channel when SMP over BR/EDR is
	 * possible (secure connections, or BR/EDR SMP forced for test).
	 */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7876 
7877 static bool is_valid_psm(u16 psm, u8 dst_type)
7878 {
7879 	if (!psm)
7880 		return false;
7881 
7882 	if (bdaddr_type_is_le(dst_type))
7883 		return (psm <= 0x00ff);
7884 
7885 	/* PSM must be odd and lsb of upper byte must be 0 */
7886 	return ((psm & 0x0101) == 0x0001);
7887 }
7888 
/* Context for l2cap_chan_by_pid(): counts ECRED channels that a single
 * peer process is concurrently connecting on the same PSM.
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel initiating the connect */
	struct pid *pid;		/* owning process of @chan */
	int count;			/* matching channels found so far */
};
7894 
7895 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7896 {
7897 	struct l2cap_chan_data *d = data;
7898 	struct pid *pid;
7899 
7900 	if (chan == d->chan)
7901 		return;
7902 
7903 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7904 		return;
7905 
7906 	pid = chan->ops->get_peer_pid(chan);
7907 
7908 	/* Only count deferred channels with the same PID/PSM */
7909 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7910 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7911 		return;
7912 
7913 	d->count++;
7914 }
7915 
/* Initiate an outgoing L2CAP channel connection to @dst.
 *
 * Validates the PSM/CID/mode combination for the channel type, resolves
 * a route, creates or reuses the HCI link and attaches the channel to
 * its L2CAP connection. Returns 0 on success (also when a connection is
 * already in progress) or a negative errno. Called with the channel
 * unlocked.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Raw channels may connect without a valid PSM or CID */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need a PSM, fixed channels a CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes disabled via module parameters */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we connect directly; otherwise the
		 * connection goes through accept-list based scanning.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type,
					      chan->sec_level,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE, NULL);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level,
						   HCI_LE_CONN_TIMEOUT,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* ECRED limits how many channels one process may connect at once */
	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Requested destination CID must not already be in use */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* Link may already be up; advance the channel state right away */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
8101 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
8102 
/* Send an Enhanced Credit Based reconfigure request advertising the
 * channel's current MTU and MPS for its single SCID.
 */
static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	/* Request header followed by exactly one SCID entry */
	struct {
		struct l2cap_ecred_reconf_req req;
		__le16 scid;
	} pdu;

	pdu.req.mtu = cpu_to_le16(chan->imtu);
	pdu.req.mps = cpu_to_le16(chan->mps);
	pdu.scid    = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
		       sizeof(pdu), &pdu);
}
8120 
/* Reconfigure the incoming MTU of an ECRED channel.
 *
 * Only growing the MTU is accepted (shrinking is rejected here,
 * presumably per the ECRED reconfiguration rules - see Core Spec).
 * Returns 0 on success or -EINVAL when @mtu is below the current imtu.
 */
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
{
	if (chan->imtu > mtu)
		return -EINVAL;

	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);

	chan->imtu = mtu;

	l2cap_ecred_reconfigure(chan);

	return 0;
}
8134 
8135 /* ---- L2CAP interface with lower layer (HCI) ---- */
8136 
/* HCI callback: an incoming BR/EDR connection request from @bdaddr.
 *
 * Scans listening channels; listeners bound exactly to this adapter's
 * address take precedence over wildcard (BDADDR_ANY) listeners.
 * Returns the accumulated link mode bits (HCI_LM_ACCEPT and optionally
 * HCI_LM_MASTER), or 0 when no listener accepts.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			/* Exact match on our adapter address */
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			/* Wildcard listener */
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
8165 
8166 /* Find the next fixed channel in BT_LISTEN state, continue iteration
8167  * from an existing channel in the list or from the beginning of the
8168  * global list (by passing NULL as first parameter).
8169  */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume after the previous hit, or start from the list head */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Must be bound to this adapter's address or to any */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* Returns NULL if the refcount already dropped to zero */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
8201 
/* HCI callback: link establishment for @hcon finished with @status.
 *
 * On failure the whole L2CAP connection is torn down. On success the
 * l2cap_conn is created (if needed), listening fixed channels get a
 * chance to spawn per-connection instances, and the connection is
 * marked ready.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	/* L2CAP only runs over ACL and LE links */
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Fetch the next listener before dropping our reference */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
8262 
8263 int l2cap_disconn_ind(struct hci_conn *hcon)
8264 {
8265 	struct l2cap_conn *conn = hcon->l2cap_data;
8266 
8267 	BT_DBG("hcon %p", hcon);
8268 
8269 	if (!conn)
8270 		return HCI_ERROR_REMOTE_USER_TERM;
8271 	return conn->disc_reason;
8272 }
8273 
8274 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8275 {
8276 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8277 		return;
8278 
8279 	BT_DBG("hcon %p reason %d", hcon, reason);
8280 
8281 	l2cap_conn_del(hcon, bt_to_errno(reason));
8282 }
8283 
8284 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8285 {
8286 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8287 		return;
8288 
8289 	if (encrypt == 0x00) {
8290 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
8291 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8292 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
8293 			   chan->sec_level == BT_SECURITY_FIPS)
8294 			l2cap_chan_close(chan, ECONNREFUSED);
8295 	} else {
8296 		if (chan->sec_level == BT_SECURITY_MEDIUM)
8297 			__clear_chan_timer(chan);
8298 	}
8299 }
8300 
/* HCI callback: authentication/encryption state of the link changed.
 *
 * Walks every channel of the connection and advances its state machine:
 * resumes suspended channels, starts connections that were waiting for
 * security, or answers pending connect requests (possibly deferring to
 * userspace authorization).
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP channels are not affected by link security */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Successful encryption raises the channel security
		 * level to that of the link.
		 */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Only proceed when the encryption key is long
			 * enough; otherwise schedule a disconnect.
			 */
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Wait for userspace authorization */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Kick off configuration right after accepting */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
8392 
/* Append fragment into frame respecting the maximum len of rx_skb.
 *
 * Lazily allocates conn->rx_skb sized for @len when no reassembly is in
 * progress. Copies at most min(@len, skb->len) bytes, consumes them
 * from @skb and decrements conn->rx_len accordingly. Returns the number
 * of bytes copied, or -ENOMEM on allocation failure.
 */
static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
			   u16 len)
{
	if (!conn->rx_skb) {
		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			return -ENOMEM;
		/* Init rx_len */
		conn->rx_len = len;
	}

	/* Copy as much as the rx_skb can hold */
	len = min_t(u16, len, skb->len);
	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
	skb_pull(skb, len);
	conn->rx_len -= len;

	return len;
}
8414 
/* Complete the 2-byte L2CAP length field of a frame whose start
 * fragment did not contain it, then make sure conn->rx_skb can hold the
 * full expected frame (reallocating it when too small).
 *
 * Returns the number of bytes consumed from @skb, or a negative errno.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length; the bytes
	 * already gathered in the old skb are copied across.
	 */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
8449 
8450 static void l2cap_recv_reset(struct l2cap_conn *conn)
8451 {
8452 	kfree_skb(conn->rx_skb);
8453 	conn->rx_skb = NULL;
8454 	conn->rx_len = 0;
8455 }
8456 
/* Entry point from the HCI core for incoming ACL data.
 *
 * Reassembles L2CAP frames from ACL fragments using conn->rx_skb and
 * conn->rx_len, and hands complete frames to l2cap_recv_frame(), which
 * takes ownership. Always consumes @skb.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start frame while reassembling means the previous
		 * frame was truncated - discard the partial frame.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		/* Continuation without a start frame - protocol error */
		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
}
8563 
/* Callbacks registered with the HCI core for link-level events */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
8570 
8571 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8572 {
8573 	struct l2cap_chan *c;
8574 
8575 	read_lock(&chan_list_lock);
8576 
8577 	list_for_each_entry(c, &chan_list, global_l) {
8578 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8579 			   &c->src, c->src_type, &c->dst, c->dst_type,
8580 			   c->state, __le16_to_cpu(c->psm),
8581 			   c->scid, c->dcid, c->imtu, c->omtu,
8582 			   c->sec_level, c->mode);
8583 	}
8584 
8585 	read_unlock(&chan_list_lock);
8586 
8587 	return 0;
8588 }
8589 
8590 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
8591 
8592 static struct dentry *l2cap_debugfs;
8593 
/* Module init: register the L2CAP socket layer, the HCI callbacks and
 * the optional debugfs entry. Returns 0 or a negative errno.
 */
int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	hci_register_cb(&l2cap_cb);

	/* debugfs is optional; skip when the bluetooth root is missing */
	if (IS_ERR_OR_NULL(bt_debugfs))
		return 0;

	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
					    NULL, &l2cap_debugfs_fops);

	return 0;
}
8612 
/* Module exit: tear down in the reverse order of l2cap_init(). */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
8619 
8620 module_param(disable_ertm, bool, 0644);
8621 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
8622 
8623 module_param(enable_ecred, bool, 0644);
8624 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
8625