xref: /openbmc/linux/net/bluetooth/l2cap_core.c (revision aa16cac0)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 bool enable_ecred;
49 
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
54 
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 				       u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
58 			   void *data);
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
61 
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 		     struct sk_buff_head *skbs, u8 event);
64 static void l2cap_retrans_timeout(struct work_struct *work);
65 static void l2cap_monitor_timeout(struct work_struct *work);
66 static void l2cap_ack_timeout(struct work_struct *work);
67 
68 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
69 {
70 	if (link_type == LE_LINK) {
71 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
72 			return BDADDR_LE_PUBLIC;
73 		else
74 			return BDADDR_LE_RANDOM;
75 	}
76 
77 	return BDADDR_BREDR;
78 }
79 
80 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
81 {
82 	return bdaddr_type(hcon->type, hcon->src_type);
83 }
84 
85 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
86 {
87 	return bdaddr_type(hcon->type, hcon->dst_type);
88 }
89 
90 /* ---- L2CAP channels ---- */
91 
92 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
93 						   u16 cid)
94 {
95 	struct l2cap_chan *c;
96 
97 	list_for_each_entry(c, &conn->chan_l, list) {
98 		if (c->dcid == cid)
99 			return c;
100 	}
101 	return NULL;
102 }
103 
104 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 						   u16 cid)
106 {
107 	struct l2cap_chan *c;
108 
109 	list_for_each_entry(c, &conn->chan_l, list) {
110 		if (c->scid == cid)
111 			return c;
112 	}
113 	return NULL;
114 }
115 
116 /* Find channel with given SCID.
117  * Returns a reference locked channel.
118  */
119 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
120 						 u16 cid)
121 {
122 	struct l2cap_chan *c;
123 
124 	mutex_lock(&conn->chan_lock);
125 	c = __l2cap_get_chan_by_scid(conn, cid);
126 	if (c) {
127 		/* Only lock if chan reference is not 0 */
128 		c = l2cap_chan_hold_unless_zero(c);
129 		if (c)
130 			l2cap_chan_lock(c);
131 	}
132 	mutex_unlock(&conn->chan_lock);
133 
134 	return c;
135 }
136 
137 /* Find channel with given DCID.
138  * Returns a reference locked channel.
139  */
140 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
141 						 u16 cid)
142 {
143 	struct l2cap_chan *c;
144 
145 	mutex_lock(&conn->chan_lock);
146 	c = __l2cap_get_chan_by_dcid(conn, cid);
147 	if (c) {
148 		/* Only lock if chan reference is not 0 */
149 		c = l2cap_chan_hold_unless_zero(c);
150 		if (c)
151 			l2cap_chan_lock(c);
152 	}
153 	mutex_unlock(&conn->chan_lock);
154 
155 	return c;
156 }
157 
158 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
159 						    u8 ident)
160 {
161 	struct l2cap_chan *c;
162 
163 	list_for_each_entry(c, &conn->chan_l, list) {
164 		if (c->ident == ident)
165 			return c;
166 	}
167 	return NULL;
168 }
169 
170 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
171 						  u8 ident)
172 {
173 	struct l2cap_chan *c;
174 
175 	mutex_lock(&conn->chan_lock);
176 	c = __l2cap_get_chan_by_ident(conn, ident);
177 	if (c) {
178 		/* Only lock if chan reference is not 0 */
179 		c = l2cap_chan_hold_unless_zero(c);
180 		if (c)
181 			l2cap_chan_lock(c);
182 	}
183 	mutex_unlock(&conn->chan_lock);
184 
185 	return c;
186 }
187 
188 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
189 						      u8 src_type)
190 {
191 	struct l2cap_chan *c;
192 
193 	list_for_each_entry(c, &chan_list, global_l) {
194 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
195 			continue;
196 
197 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
198 			continue;
199 
200 		if (c->sport == psm && !bacmp(&c->src, src))
201 			return c;
202 	}
203 	return NULL;
204 }
205 
/* Bind @chan to PSM @psm on source address @src or, when @psm is 0,
 * allocate the next free dynamic PSM for the channel's address type.
 *
 * Returns 0 on success, -EADDRINUSE if the requested PSM is already
 * bound on @src, or -EINVAL if no free dynamic PSM could be found.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			/* BR/EDR dynamic PSMs step by 2 (valid PSMs are odd) */
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		/* Stays -EINVAL if every candidate PSM is taken */
		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
250 
251 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
252 {
253 	write_lock(&chan_list_lock);
254 
255 	/* Override the defaults (which are for conn-oriented) */
256 	chan->omtu = L2CAP_DEFAULT_MTU;
257 	chan->chan_type = L2CAP_CHAN_FIXED;
258 
259 	chan->scid = scid;
260 
261 	write_unlock(&chan_list_lock);
262 
263 	return 0;
264 }
265 
266 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
267 {
268 	u16 cid, dyn_end;
269 
270 	if (conn->hcon->type == LE_LINK)
271 		dyn_end = L2CAP_CID_LE_DYN_END;
272 	else
273 		dyn_end = L2CAP_CID_DYN_END;
274 
275 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
276 		if (!__l2cap_get_chan_by_scid(conn, cid))
277 			return cid;
278 	}
279 
280 	return 0;
281 }
282 
/* Move @chan to @state and notify the channel's owner through the
 * state_change callback with no error.
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
291 
292 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
293 						int state, int err)
294 {
295 	chan->state = state;
296 	chan->ops->state_change(chan, chan->state, err);
297 }
298 
/* Report @err to the channel's owner without changing the state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
303 
304 static void __set_retrans_timer(struct l2cap_chan *chan)
305 {
306 	if (!delayed_work_pending(&chan->monitor_timer) &&
307 	    chan->retrans_timeout) {
308 		l2cap_set_timer(chan, &chan->retrans_timer,
309 				msecs_to_jiffies(chan->retrans_timeout));
310 	}
311 }
312 
313 static void __set_monitor_timer(struct l2cap_chan *chan)
314 {
315 	__clear_retrans_timer(chan);
316 	if (chan->monitor_timeout) {
317 		l2cap_set_timer(chan, &chan->monitor_timer,
318 				msecs_to_jiffies(chan->monitor_timeout));
319 	}
320 }
321 
322 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
323 					       u16 seq)
324 {
325 	struct sk_buff *skb;
326 
327 	skb_queue_walk(head, skb) {
328 		if (bt_cb(skb)->l2cap.txseq == seq)
329 			return skb;
330 	}
331 
332 	return NULL;
333 }
334 
335 /* ---- L2CAP sequence number lists ---- */
336 
337 /* For ERTM, ordered lists of sequence numbers must be tracked for
338  * SREJ requests that are received and for frames that are to be
339  * retransmitted. These seq_list functions implement a singly-linked
340  * list in an array, where membership in the list can also be checked
341  * in constant time. Items can also be added to the tail of the list
342  * and removed from the head in constant time, without further memory
343  * allocs or frees.
344  */
345 
346 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
347 {
348 	size_t alloc_size, i;
349 
350 	/* Allocated size is a power of 2 to map sequence numbers
351 	 * (which may be up to 14 bits) in to a smaller array that is
352 	 * sized for the negotiated ERTM transmit windows.
353 	 */
354 	alloc_size = roundup_pow_of_two(size);
355 
356 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
357 	if (!seq_list->list)
358 		return -ENOMEM;
359 
360 	seq_list->mask = alloc_size - 1;
361 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
363 	for (i = 0; i < alloc_size; i++)
364 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
365 
366 	return 0;
367 }
368 
/* Release the backing array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
373 
/* Test whether @seq is currently a member of @seq_list.  A slot holds
 * L2CAP_SEQ_LIST_CLEAR only while its sequence number is not queued.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
380 
/* Remove and return the sequence number at the head of @seq_list.
 * Caller must ensure the list is non-empty.  Each occupied slot links
 * to the next member; the tail slot holds L2CAP_SEQ_LIST_TAIL.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next member and clear the popped slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		/* Popped the last element; mark the list empty */
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
396 
397 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
398 {
399 	u16 i;
400 
401 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
402 		return;
403 
404 	for (i = 0; i <= seq_list->mask; i++)
405 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
406 
407 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
408 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
409 }
410 
/* Append @seq to the tail of @seq_list in constant time.  Duplicate
 * appends are ignored, preserving set semantics.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: nothing to do */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		/* List was empty: the new element becomes the head too */
		seq_list->head = seq;
	else
		/* Link the previous tail slot to the new element */
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
428 
/* Delayed-work handler for the channel timer: the channel spent too
 * long connecting, configuring or disconnecting.  Closes the channel
 * with an error derived from the state it was stuck in.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	/* Drop the reference taken when this work was scheduled */
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}
461 
/* Allocate and initialise a new channel object with a single kref
 * reference and add it to the global channel list.  Returns NULL on
 * allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	/* NOTE(review): GFP_ATOMIC suggests non-sleeping callers exist —
	 * confirm against call sites before relaxing to GFP_KERNEL.
	 */
	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
498 
/* kref release callback: runs when the last reference to a channel is
 * dropped.  Unlinks the channel from the global list and frees it.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
511 
/* Take an additional reference on @c.  The caller must already own a
 * reference; otherwise use l2cap_chan_hold_unless_zero().
 */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
518 
519 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
520 {
521 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
522 
523 	if (!kref_get_unless_zero(&c->kref))
524 		return NULL;
525 
526 	return c;
527 }
528 
/* Drop a reference on @c; frees the channel via l2cap_chan_destroy()
 * when this was the last one.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
536 
537 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
538 {
539 	chan->fcs  = L2CAP_FCS_CRC16;
540 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
541 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
542 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
543 	chan->remote_max_tx = chan->max_tx;
544 	chan->remote_tx_win = chan->tx_win;
545 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
546 	chan->sec_level = BT_SECURITY_LOW;
547 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
548 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
549 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
550 
551 	chan->conf_state = 0;
552 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
553 
554 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
555 }
556 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
557 
/* Reset LE credit-based flow-control state on @chan and compute MPS
 * and initial RX credits from the channel MTU and the link MTU.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	/* Give enough credits for a full packet */
	chan->rx_credits = (chan->imtu / chan->mps) + 1;

	skb_queue_head_init(&chan->tx_q);
}
571 
572 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
573 {
574 	l2cap_le_flowctl_init(chan, tx_credits);
575 
576 	/* L2CAP implementations shall support a minimum MPS of 64 octets */
577 	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
578 		chan->mps = L2CAP_ECRED_MIN_MPS;
579 		chan->rx_credits = (chan->imtu / chan->mps) + 1;
580 	}
581 }
582 
/* Attach @chan to @conn: assign/allocate CIDs according to the channel
 * type, apply flow-spec defaults, and link the channel into the
 * connection's channel list.  Caller must hold conn->chan_lock.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default (best-effort) extended flow spec */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference owned by conn->chan_l; dropped in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
634 
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
641 
/* Detach @chan from its connection and tear down per-mode transmit
 * state.  @err is reported to the channel's owner through the teardown
 * callback.  Caller must hold the channel lock (and conn->chan_lock
 * when a connection is attached).
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	/* No per-mode state to free if configuration never completed */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
710 
711 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
712 			      void *data)
713 {
714 	struct l2cap_chan *chan;
715 
716 	list_for_each_entry(chan, &conn->chan_l, list) {
717 		func(chan, data);
718 	}
719 }
720 
721 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
722 		     void *data)
723 {
724 	if (!conn)
725 		return;
726 
727 	mutex_lock(&conn->chan_lock);
728 	__l2cap_chan_list(conn, func, data);
729 	mutex_unlock(&conn->chan_lock);
730 }
731 
732 EXPORT_SYMBOL_GPL(l2cap_chan_list);
733 
734 static void l2cap_conn_update_id_addr(struct work_struct *work)
735 {
736 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
737 					       id_addr_update_work);
738 	struct hci_conn *hcon = conn->hcon;
739 	struct l2cap_chan *chan;
740 
741 	mutex_lock(&conn->chan_lock);
742 
743 	list_for_each_entry(chan, &conn->chan_l, list) {
744 		l2cap_chan_lock(chan);
745 		bacpy(&chan->dst, &hcon->dst);
746 		chan->dst_type = bdaddr_dst_type(hcon);
747 		l2cap_chan_unlock(chan);
748 	}
749 
750 	mutex_unlock(&conn->chan_lock);
751 }
752 
753 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
754 {
755 	struct l2cap_conn *conn = chan->conn;
756 	struct l2cap_le_conn_rsp rsp;
757 	u16 result;
758 
759 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
760 		result = L2CAP_CR_LE_AUTHORIZATION;
761 	else
762 		result = L2CAP_CR_LE_BAD_PSM;
763 
764 	l2cap_state_change(chan, BT_DISCONN);
765 
766 	rsp.dcid    = cpu_to_le16(chan->scid);
767 	rsp.mtu     = cpu_to_le16(chan->imtu);
768 	rsp.mps     = cpu_to_le16(chan->mps);
769 	rsp.credits = cpu_to_le16(chan->rx_credits);
770 	rsp.result  = cpu_to_le16(result);
771 
772 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
773 		       &rsp);
774 }
775 
776 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
777 {
778 	struct l2cap_conn *conn = chan->conn;
779 	struct l2cap_ecred_conn_rsp rsp;
780 	u16 result;
781 
782 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
783 		result = L2CAP_CR_LE_AUTHORIZATION;
784 	else
785 		result = L2CAP_CR_LE_BAD_PSM;
786 
787 	l2cap_state_change(chan, BT_DISCONN);
788 
789 	memset(&rsp, 0, sizeof(rsp));
790 
791 	rsp.result  = cpu_to_le16(result);
792 
793 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
794 		       &rsp);
795 }
796 
797 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
798 {
799 	struct l2cap_conn *conn = chan->conn;
800 	struct l2cap_conn_rsp rsp;
801 	u16 result;
802 
803 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
804 		result = L2CAP_CR_SEC_BLOCK;
805 	else
806 		result = L2CAP_CR_BAD_PSM;
807 
808 	l2cap_state_change(chan, BT_DISCONN);
809 
810 	rsp.scid   = cpu_to_le16(chan->dcid);
811 	rsp.dcid   = cpu_to_le16(chan->scid);
812 	rsp.result = cpu_to_le16(result);
813 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
814 
815 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
816 }
817 
/* Close @chan according to its current state: reject not-yet-accepted
 * incoming connections, request disconnection of established channels,
 * or simply tear the channel down.  @reason is reported to the owner.
 * Caller must hold the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Give the peer time to answer the disconnect
			 * request before the channel timer fires.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Incoming request not yet accepted: send the
			 * transport-appropriate reject response.
			 */
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					break;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
868 
/* Map the channel type, PSM and requested security level to the HCI
 * authentication requirement used when securing the link.  Note: may
 * raise chan->sec_level from LOW to SDP for the SDP and 3DSP PSMs.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* Non-SDP connection-oriented channels fall through to
		 * the general bonding requirements below.
		 */
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
920 
921 /* Service level security */
922 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
923 {
924 	struct l2cap_conn *conn = chan->conn;
925 	__u8 auth_type;
926 
927 	if (conn->hcon->type == LE_LINK)
928 		return smp_conn_security(conn->hcon, chan->sec_level);
929 
930 	auth_type = l2cap_get_auth_type(chan);
931 
932 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
933 				 initiator);
934 }
935 
936 static u8 l2cap_get_ident(struct l2cap_conn *conn)
937 {
938 	u8 id;
939 
940 	/* Get next available identificator.
941 	 *    1 - 128 are used by kernel.
942 	 *  129 - 199 are reserved.
943 	 *  200 - 254 are used by utilities like l2ping, etc.
944 	 */
945 
946 	mutex_lock(&conn->ident_lock);
947 
948 	if (++conn->tx_ident > 128)
949 		conn->tx_ident = 1;
950 
951 	id = conn->tx_ident;
952 
953 	mutex_unlock(&conn->ident_lock);
954 
955 	return id;
956 }
957 
958 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
959 			   void *data)
960 {
961 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
962 	u8 flags;
963 
964 	BT_DBG("code 0x%2.2x", code);
965 
966 	if (!skb)
967 		return;
968 
969 	/* Use NO_FLUSH if supported or we have an LE link (which does
970 	 * not support auto-flushing packets) */
971 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
972 	    conn->hcon->type == LE_LINK)
973 		flags = ACL_START_NO_FLUSH;
974 	else
975 		flags = ACL_START;
976 
977 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
978 	skb->priority = HCI_PRIO_MAX;
979 
980 	hci_send_acl(conn->hchan, skb, flags);
981 }
982 
983 static bool __chan_is_moving(struct l2cap_chan *chan)
984 {
985 	return chan->move_state != L2CAP_MOVE_STABLE &&
986 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
987 }
988 
/* Transmit @skb on @chan's link.  Uses the AMP high-speed channel when
 * one is attached and no channel move is in flight; otherwise sends on
 * the BR/EDR or LE ACL link with flushability chosen from the link's
 * capabilities and the channel flags.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			/* AMP logical link not usable: drop the frame */
			kfree_skb(skb);

		return;
	}

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
1020 
1021 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
1022 {
1023 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
1024 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
1025 
1026 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
1027 		/* S-Frame */
1028 		control->sframe = 1;
1029 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1030 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1031 
1032 		control->sar = 0;
1033 		control->txseq = 0;
1034 	} else {
1035 		/* I-Frame */
1036 		control->sframe = 0;
1037 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1038 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1039 
1040 		control->poll = 0;
1041 		control->super = 0;
1042 	}
1043 }
1044 
1045 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1046 {
1047 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1048 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1049 
1050 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1051 		/* S-Frame */
1052 		control->sframe = 1;
1053 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1054 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1055 
1056 		control->sar = 0;
1057 		control->txseq = 0;
1058 	} else {
1059 		/* I-Frame */
1060 		control->sframe = 0;
1061 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1062 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1063 
1064 		control->poll = 0;
1065 		control->super = 0;
1066 	}
1067 }
1068 
1069 static inline void __unpack_control(struct l2cap_chan *chan,
1070 				    struct sk_buff *skb)
1071 {
1072 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1073 		__unpack_extended_control(get_unaligned_le32(skb->data),
1074 					  &bt_cb(skb)->l2cap);
1075 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1076 	} else {
1077 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1078 					  &bt_cb(skb)->l2cap);
1079 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1080 	}
1081 }
1082 
1083 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1084 {
1085 	u32 packed;
1086 
1087 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1088 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1089 
1090 	if (control->sframe) {
1091 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1092 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1093 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1094 	} else {
1095 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1096 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1097 	}
1098 
1099 	return packed;
1100 }
1101 
1102 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1103 {
1104 	u16 packed;
1105 
1106 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1107 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1108 
1109 	if (control->sframe) {
1110 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1111 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1112 		packed |= L2CAP_CTRL_FRAME_TYPE;
1113 	} else {
1114 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1115 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1116 	}
1117 
1118 	return packed;
1119 }
1120 
1121 static inline void __pack_control(struct l2cap_chan *chan,
1122 				  struct l2cap_ctrl *control,
1123 				  struct sk_buff *skb)
1124 {
1125 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1126 		put_unaligned_le32(__pack_extended_control(control),
1127 				   skb->data + L2CAP_HDR_SIZE);
1128 	} else {
1129 		put_unaligned_le16(__pack_enhanced_control(control),
1130 				   skb->data + L2CAP_HDR_SIZE);
1131 	}
1132 }
1133 
1134 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1135 {
1136 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1137 		return L2CAP_EXT_HDR_SIZE;
1138 	else
1139 		return L2CAP_ENH_HDR_SIZE;
1140 }
1141 
/* Allocate and build a complete S-frame PDU carrying the given packed
 * control field: basic header, control field (16 or 32 bit depending on
 * FLAG_EXT_CTRL), and an FCS trailer when CRC16 is in use.
 * Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic L2CAP header: payload length excludes the header itself */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers everything built so far (header + control field) */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	/* S-frames are control traffic; send at highest HCI priority */
	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1174 
/* Build and transmit an ERTM S-frame described by @control, updating the
 * channel's acknowledgment bookkeeping (F-bit, RNR-sent flag, last acked
 * sequence and ack timer) as a side effect. Silently does nothing for
 * non-S-frames or while the channel is being moved to another controller.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is carried on the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether our last status frame advertised local busy */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* RR/RNR/REJ acknowledge up to reqseq, so the pending ack timer
	 * can be cancelled; SREJ does not acknowledge anything.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	/* Allocation failure is silently dropped; ERTM timers will recover */
	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1215 
1216 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1217 {
1218 	struct l2cap_ctrl control;
1219 
1220 	BT_DBG("chan %p, poll %d", chan, poll);
1221 
1222 	memset(&control, 0, sizeof(control));
1223 	control.sframe = 1;
1224 	control.poll = poll;
1225 
1226 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1227 		control.super = L2CAP_SUPER_RNR;
1228 	else
1229 		control.super = L2CAP_SUPER_RR;
1230 
1231 	control.reqseq = chan->buffer_seq;
1232 	l2cap_send_sframe(chan, &control);
1233 }
1234 
1235 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1236 {
1237 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1238 		return true;
1239 
1240 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1241 }
1242 
/* Decide whether this channel may be created on an AMP controller:
 * both sides must advertise the A2MP fixed channel, at least one non-BR/EDR
 * (i.e. AMP) controller must be up locally, and the channel policy must
 * prefer AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return false;

	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
		return false;

	/* Scan the device list under the read lock for any powered-up
	 * AMP controller.
	 */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1270 
/* Validate the channel's Extended Flow Specification parameters.
 * Currently a stub that accepts any parameters unconditionally.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1276 
/* Send an L2CAP Connection Request for @chan on its BR/EDR link,
 * allocating a fresh signalling identifier and marking the channel as
 * having a connect outstanding (CONF_CONNECT_PEND).
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	/* Remember the ident so the response can be matched back to us */
	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1291 
/* Send a Create Channel Request asking the peer to create this channel
 * on the AMP controller identified by @amp_id.
 */
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;
	req.amp_id = amp_id;

	/* Remember the ident so the response can be matched back to us */
	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}
1304 
/* Prepare an ERTM channel for a move to another controller: stop all
 * timers, reset retransmission bookkeeping and park the TX/RX state
 * machines until the move completes. No-op for non-ERTM channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	/* Reset the per-frame retry counters for frames already sent once;
	 * stop at the first never-sent frame (retries == 0).
	 */
	chan->retry_count = 0;
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->l2cap.retries)
			bt_cb(skb)->l2cap.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmissions until the move has finished */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1339 
/* Finish a channel move: return move state to stable and, for ERTM
 * channels, resynchronize with the peer — the initiator polls (P=1) and
 * waits for the F-bit, the responder waits for the peer's poll.
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1361 
/* Transition a channel to BT_CONNECTED, clearing configuration state and
 * the channel timer, and notify the owner via ops->ready(). Credit-based
 * channels with no TX credits are suspended until credits arrive.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* No credits yet: block sends until the peer grants some */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1387 
/* Send an LE Credit Based Connection Request for @chan. Idempotent:
 * FLAG_LE_CONN_REQ_SENT guards against sending the request twice.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Default the incoming MTU to the link MTU if the user set none */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1412 
/* Scratch state used while building an Enhanced Credit Based Connection
 * Request that can carry up to 5 source CIDs in a single PDU.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req req;
		__le16 scid[5];	/* wire format allows up to 5 channels */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel initiating the request */
	struct pid *pid;		/* peer PID; only same-PID chans batch */
	int count;			/* number of scid[] slots filled */
};
1422 
/* __l2cap_chan_list() callback: fold a deferred channel into the pending
 * ECRED connection request if it matches the initiating channel's
 * PID/PSM, is in EXT_FLOWCTL mode and is still waiting to connect.
 */
static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_conn_data *conn = data;
	struct pid *pid;

	/* Skip the initiating channel itself; it is already in scid[0] */
	if (chan == conn->chan)
		return;

	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only add deferred channels with the same PID/PSM */
	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	/* Set the same ident so we can match on the rsp */
	chan->ident = conn->chan->ident;

	/* Include all channels deferred */
	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);

	conn->count++;
}
1454 
1455 static void l2cap_ecred_connect(struct l2cap_chan *chan)
1456 {
1457 	struct l2cap_conn *conn = chan->conn;
1458 	struct l2cap_ecred_conn_data data;
1459 
1460 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
1461 		return;
1462 
1463 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1464 		return;
1465 
1466 	l2cap_ecred_init(chan, 0);
1467 
1468 	memset(&data, 0, sizeof(data));
1469 	data.pdu.req.psm     = chan->psm;
1470 	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
1471 	data.pdu.req.mps     = cpu_to_le16(chan->mps);
1472 	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
1473 	data.pdu.scid[0]     = cpu_to_le16(chan->scid);
1474 
1475 	chan->ident = l2cap_get_ident(conn);
1476 	data.pid = chan->ops->get_peer_pid(chan);
1477 
1478 	data.count = 1;
1479 	data.chan = chan;
1480 	data.pid = chan->ops->get_peer_pid(chan);
1481 
1482 	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
1483 
1484 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
1485 		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
1486 		       &data.pdu);
1487 }
1488 
1489 static void l2cap_le_start(struct l2cap_chan *chan)
1490 {
1491 	struct l2cap_conn *conn = chan->conn;
1492 
1493 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1494 		return;
1495 
1496 	if (!chan->psm) {
1497 		l2cap_chan_ready(chan);
1498 		return;
1499 	}
1500 
1501 	if (chan->state == BT_CONNECT) {
1502 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1503 			l2cap_ecred_connect(chan);
1504 		else
1505 			l2cap_le_connect(chan);
1506 	}
1507 }
1508 
1509 static void l2cap_start_connection(struct l2cap_chan *chan)
1510 {
1511 	if (__amp_capable(chan)) {
1512 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1513 		a2mp_discover_amp(chan);
1514 	} else if (chan->conn->hcon->type == LE_LINK) {
1515 		l2cap_le_start(chan);
1516 	} else {
1517 		l2cap_send_conn_req(chan);
1518 	}
1519 }
1520 
/* Send an Information Request for the peer's feature mask, once per
 * connection, and arm the info timer to bound the wait for the response.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	/* If the peer never answers, l2cap_info_timeout() unblocks us */
	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1538 
1539 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1540 {
1541 	/* The minimum encryption key size needs to be enforced by the
1542 	 * host stack before establishing any L2CAP connections. The
1543 	 * specification in theory allows a minimum of 1, but to align
1544 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1545 	 *
1546 	 * This check might also be called for unencrypted connections
1547 	 * that have no key size requirements. Ensure that the link is
1548 	 * actually encrypted before enforcing a key size.
1549 	 */
1550 	int min_key_size = hcon->hdev->min_enc_key_size;
1551 
1552 	/* On FIPS security level, key size must be 16 bytes */
1553 	if (hcon->sec_level == BT_SECURITY_FIPS)
1554 		min_key_size = 16;
1555 
1556 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1557 		hcon->enc_key_size >= min_key_size);
1558 }
1559 
/* Drive a channel toward connection: LE links go straight to the LE
 * path; on BR/EDR we first make sure the peer's feature mask has been
 * requested and received, then check security and encryption key size
 * before actually starting the connection.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* Feature mask not yet requested: request it, and retry from
	 * l2cap_conn_start() once the info response (or timeout) arrives.
	 */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	/* Refuse to connect over weakly-encrypted links; arm the channel
	 * timer so the channel is torn down instead.
	 */
	if (l2cap_check_enc_key_size(conn->hcon))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1586 
1587 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1588 {
1589 	u32 local_feat_mask = l2cap_feat_mask;
1590 	if (!disable_ertm)
1591 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1592 
1593 	switch (mode) {
1594 	case L2CAP_MODE_ERTM:
1595 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1596 	case L2CAP_MODE_STREAMING:
1597 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1598 	default:
1599 		return 0x00;
1600 	}
1601 }
1602 
/* Send a Disconnection Request for @chan and move it to BT_DISCONN with
 * @err recorded. A2MP channels have no peer CID pair and only change
 * state. ERTM timers are stopped first so they cannot fire mid-teardown.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1629 
1630 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and push it forward: connectionless
 * channels become ready, BT_CONNECT channels are (re)started once
 * security and mode checks pass, and BT_CONNECT2 channels get their
 * pending Connection Response (and first Configure Request) sent.
 * Called once the peer's feature mask is known (info rsp or timeout).
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Peer can't do the requested mode and we may not
			 * fall back: close rather than connect degraded.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect was held pending info/security;
			 * answer it now.
			 */
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only a successful, not-yet-configured channel
			 * proceeds to configuration.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1710 
/* LE link came up: kick off pairing for outgoing connections and, as
 * peripheral, request a connection parameter update if the current
 * interval is outside the configured min/max range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1743 
/* Underlying link is up: start the info request on ACL links, then walk
 * all channels and advance each according to link type and state.
 * Finally schedule processing of any RX frames that arrived early.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels are managed by the AMP manager, not here */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless channels only need the feature
			 * mask exchange to have completed.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Drain frames queued before the connection was ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1784 
1785 /* Notify sockets that we cannot guaranty reliability anymore */
1786 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1787 {
1788 	struct l2cap_chan *chan;
1789 
1790 	BT_DBG("conn %p", conn);
1791 
1792 	mutex_lock(&conn->chan_lock);
1793 
1794 	list_for_each_entry(chan, &conn->chan_l, list) {
1795 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1796 			l2cap_chan_set_err(chan, err);
1797 	}
1798 
1799 	mutex_unlock(&conn->chan_lock);
1800 }
1801 
/* Info request timer expired without a response from the peer: mark the
 * feature-mask exchange as done (with no remote features learned) and
 * restart channel establishment.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1812 
1813 /*
1814  * l2cap_user
1815  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1816  * callback is called during registration. The ->remove callback is called
1817  * during unregistration.
1818  * An l2cap_user object can either be explicitly unregistered or when the
1819  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1820  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1821  * External modules must own a reference to the l2cap_conn object if they intend
1822  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1823  * any time if they don't.
1824  */
1825 
/* Register an external l2cap_user on @conn. Calls user->probe() under the
 * hci_dev lock and links the user into conn->users on success.
 * Returns 0, -EINVAL if already registered, -ENODEV if the connection is
 * already torn down, or the error from user->probe().
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not,
	 * we must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead,
	 * it relies on the parent hci_conn object to be locked. This itself
	 * relies on the hci_dev object to be locked. So we must lock the hci
	 * device here, too. */

	hci_dev_lock(hdev);

	/* A non-empty list node means this user is already registered */
	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
1862 EXPORT_SYMBOL(l2cap_register_user);
1863 
1864 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1865 {
1866 	struct hci_dev *hdev = conn->hcon->hdev;
1867 
1868 	hci_dev_lock(hdev);
1869 
1870 	if (list_empty(&user->list))
1871 		goto out_unlock;
1872 
1873 	list_del_init(&user->list);
1874 	user->remove(conn, user);
1875 
1876 out_unlock:
1877 	hci_dev_unlock(hdev);
1878 }
1879 EXPORT_SYMBOL(l2cap_unregister_user);
1880 
1881 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1882 {
1883 	struct l2cap_user *user;
1884 
1885 	while (!list_empty(&conn->users)) {
1886 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1887 		list_del_init(&user->list);
1888 		user->remove(conn, user);
1889 	}
1890 }
1891 
/* Tear down the L2CAP connection attached to @hcon: cancel deferred
 * work, unregister users, close every channel with @err, detach from
 * the hci_conn and drop the connection reference.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so ops->close() can't free it while we
		 * still hold its lock.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* Break the hcon <-> conn link; conn->hchan == NULL marks the
	 * connection as unregistered (see l2cap_register_user()).
	 */
	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1947 
/* kref release callback: drop the hci_conn reference and free the
 * connection object. Invoked when the last l2cap_conn_put() runs.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1955 
/* Take an additional reference on @conn; returns @conn for chaining. */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
1961 EXPORT_SYMBOL(l2cap_conn_get);
1962 
/* Drop a reference on @conn; frees it via l2cap_conn_free() at zero. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
1967 EXPORT_SYMBOL(l2cap_conn_put);
1968 
1969 /* ---- Socket interface ---- */
1970 
1971 /* Find socket with psm and source / destination bdaddr.
1972  * Returns closest match.
1973  */
/* Search the global channel list for a channel matching @psm on the
 * given link type, preferring an exact src/dst address pair and falling
 * back to the closest wildcard (BDADDR_ANY) match. The returned channel
 * has a reference taken; may return NULL.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Channel may be concurrently dying; only
				 * return it if the refcount is still live.
				 */
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	/* Take the reference on the fallback while still under the lock */
	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
2024 
/* ERTM monitor timer expired: feed the MONITOR_TO event into the TX
 * state machine. Drops the reference the timer held on the channel;
 * bails out early if the channel was already detached from its conn.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
2045 
/* ERTM retransmission timer expired: feed the RETRANS_TO event into the
 * TX state machine. Drops the reference the timer held on the channel;
 * bails out early if the channel was already detached from its conn.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
2065 
/* Transmit all queued frames in streaming mode: stamp each I-frame with
 * the next TX sequence number, append the FCS if enabled, and send it.
 * Streaming mode has no acknowledgments, so frames are sent and
 * forgotten. Skipped entirely while the channel is moving controllers.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming mode never acknowledges; reqseq is always 0 */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
2104 
/* Transmit as many pending ERTM I-frames as the remote TX window allows,
 * starting at tx_send_head. Each frame is stamped with sequence numbers,
 * optionally FCS-protected, then a clone is sent while the original is
 * kept on tx_q for possible retransmission.
 * Returns the number of frames sent, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggyback a pending F-bit on this I-frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Each I-frame also acknowledges everything received */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; NULL once the queue is drained */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2174 
/* Retransmit every sequence number queued on retrans_list: look up the
 * original frame on tx_q, enforce the max_tx retry limit, refresh the
 * control field (reqseq/F-bit) and FCS in a private copy, and resend.
 * Aborts (and disconnects) if the retry limit is exceeded.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		/* Too many retries: give up and tear the channel down */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the acknowledgment and F-bit for this resend */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2255 
/* Retransmit the single I-frame identified by control->reqseq (used
 * when the peer sends an SREJ for one missing frame).
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2264 
/* Retransmit every unacked I-frame starting at control->reqseq (REJ
 * recovery).  If the peer polled us, remember to set the F-bit on the
 * next frame that goes out.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Rebuild the retransmit list from scratch */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Locate the first frame to retransmit: the one with
		 * txseq == reqseq, or the current send head if reqseq is
		 * not found before it.
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything up to (not including) the send head */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2298 
/* Acknowledge received I-frames.  Sends an RNR when locally busy,
 * otherwise tries to piggyback the ack on pending I-frames and only
 * emits an explicit RR once the ack window is about 3/4 full.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Local busy: tell the peer to stop sending (RNR) */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Below threshold: defer the ack to the ack timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2348 
/* Copy up to @len bytes of user data from @msg into @skb: the first
 * @count bytes go into the skb head, the remainder into continuation
 * fragments of at most conn->mtu bytes each.
 *
 * Returns the number of bytes copied, or a negative errno.  On failure
 * the caller still owns @skb (and any fragments already linked).
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
				   &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Account fragment bytes in the parent skb totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2392 
/* Build a connectionless (G-frame) PDU: L2CAP header + PSM + payload
 * copied from @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* First fragment is bounded by the HCI MTU */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2424 
/* Build a basic-mode (B-frame) PDU: L2CAP header + payload copied
 * from @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	/* First fragment is bounded by the HCI MTU */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2454 
/* Build an ERTM/streaming I-frame PDU.  Reserves room for the control
 * field (and the SDU length for SAR start frames); the control field
 * itself is filled in at transmit time.  @sdulen is non-zero only for
 * the first fragment of a segmented SDU.  Returns the skb or an
 * ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2508 
/* Split an outgoing SDU into I-frame PDUs queued on @seg_queue,
 * tagging each with the appropriate SAR value (unsegmented, start,
 * continue, end).  Returns 0 on success or a negative errno; the
 * queue is purged on failure.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the first PDU carries the total SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2575 
/* Build an LE flow control (K-frame) PDU.  @sdulen is non-zero only
 * for the first fragment of a segmented SDU, in which case the SDU
 * length field is prepended to the payload.  Returns the skb or an
 * ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2618 
/* Split an outgoing SDU into LE flow control PDUs queued on
 * @seg_queue.  The first PDU carries the total SDU length; later PDUs
 * can carry L2CAP_SDULEN_SIZE more payload since they omit that
 * field.  Returns 0 on success or a negative errno.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		/* After the first PDU the SDU length field is dropped,
		 * freeing that much extra payload per PDU.
		 */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2654 
2655 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2656 {
2657 	int sent = 0;
2658 
2659 	BT_DBG("chan %p", chan);
2660 
2661 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2662 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2663 		chan->tx_credits--;
2664 		sent++;
2665 	}
2666 
2667 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2668 	       skb_queue_len(&chan->tx_q));
2669 }
2670 
/* Send user data on @chan according to its mode.  Returns the number
 * of bytes accepted or a negative errno.  The channel lock may be
 * dropped and reacquired inside the alloc_skb callbacks, so the
 * channel state is rechecked after every allocation.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have closed while segmenting (the lock can
		 * be dropped during skb allocation).
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: ask the upper layer to stop sending */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2798 
/* Send an SREJ S-frame for every missing frame between
 * expected_tx_seq and @txseq that is not already buffered in srej_q,
 * recording each requested sequence number on srej_list.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2821 
/* Re-send an SREJ for the most recently requested missing frame, if
 * any is outstanding on the SREJ list.
 */
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	/* Nothing outstanding */
	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
		return;

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
}
2837 
/* Re-send SREJ frames for every sequence number still on srej_list,
 * stopping at @txseq.  Each entry is popped, re-requested and appended
 * again, so the list has been rotated exactly once when this returns.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2863 
/* Process an incoming acknowledgement (ReqSeq): free every newly
 * acknowledged frame from the transmit queue and stop the
 * retransmission timer once nothing is outstanding.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or duplicate ack */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2895 
/* Abandon SREJ-based recovery: drop any out-of-order frames already
 * buffered and fall back to the plain receive state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2905 
/* ERTM transmit state machine: XMIT state event handler.  In this
 * state new data is transmitted immediately; explicit-poll and
 * retransmission-timeout events move the channel to WAIT_F.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Announce the busy condition to the peer (RNR) */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* We told the peer RNR earlier: poll it (RR with
			 * P-bit set) and wait for the F-bit answer.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer fired: poll the peer for status */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2977 
2978 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2979 				  struct l2cap_ctrl *control,
2980 				  struct sk_buff_head *skbs, u8 event)
2981 {
2982 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2983 	       event);
2984 
2985 	switch (event) {
2986 	case L2CAP_EV_DATA_REQUEST:
2987 		if (chan->tx_send_head == NULL)
2988 			chan->tx_send_head = skb_peek(skbs);
2989 		/* Queue data, but don't send. */
2990 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2991 		break;
2992 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2993 		BT_DBG("Enter LOCAL_BUSY");
2994 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2995 
2996 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2997 			/* The SREJ_SENT state must be aborted if we are to
2998 			 * enter the LOCAL_BUSY state.
2999 			 */
3000 			l2cap_abort_rx_srej_sent(chan);
3001 		}
3002 
3003 		l2cap_send_ack(chan);
3004 
3005 		break;
3006 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
3007 		BT_DBG("Exit LOCAL_BUSY");
3008 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3009 
3010 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
3011 			struct l2cap_ctrl local_control;
3012 			memset(&local_control, 0, sizeof(local_control));
3013 			local_control.sframe = 1;
3014 			local_control.super = L2CAP_SUPER_RR;
3015 			local_control.poll = 1;
3016 			local_control.reqseq = chan->buffer_seq;
3017 			l2cap_send_sframe(chan, &local_control);
3018 
3019 			chan->retry_count = 1;
3020 			__set_monitor_timer(chan);
3021 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
3022 		}
3023 		break;
3024 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
3025 		l2cap_process_reqseq(chan, control->reqseq);
3026 		fallthrough;
3027 
3028 	case L2CAP_EV_RECV_FBIT:
3029 		if (control && control->final) {
3030 			__clear_monitor_timer(chan);
3031 			if (chan->unacked_frames > 0)
3032 				__set_retrans_timer(chan);
3033 			chan->retry_count = 0;
3034 			chan->tx_state = L2CAP_TX_STATE_XMIT;
3035 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
3036 		}
3037 		break;
3038 	case L2CAP_EV_EXPLICIT_POLL:
3039 		/* Ignore */
3040 		break;
3041 	case L2CAP_EV_MONITOR_TO:
3042 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3043 			l2cap_send_rr_or_rnr(chan, 1);
3044 			__set_monitor_timer(chan);
3045 			chan->retry_count++;
3046 		} else {
3047 			l2cap_send_disconn_req(chan, ECONNABORTED);
3048 		}
3049 		break;
3050 	default:
3051 		break;
3052 	}
3053 }
3054 
3055 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3056 		     struct sk_buff_head *skbs, u8 event)
3057 {
3058 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3059 	       chan, control, skbs, event, chan->tx_state);
3060 
3061 	switch (chan->tx_state) {
3062 	case L2CAP_TX_STATE_XMIT:
3063 		l2cap_tx_state_xmit(chan, control, skbs, event);
3064 		break;
3065 	case L2CAP_TX_STATE_WAIT_F:
3066 		l2cap_tx_state_wait_f(chan, control, skbs, event);
3067 		break;
3068 	default:
3069 		/* Ignore event */
3070 		break;
3071 	}
3072 }
3073 
/* Feed the ReqSeq and F-bit of a received frame into the TX state
 * machine.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
3080 
/* Feed only the F-bit of a received frame into the TX state machine
 * (no ReqSeq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
3087 
3088 /* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->l2cap.chan == chan)
			continue;

		/* Best effort: a failed clone just skips this channel */
		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		/* recv() takes ownership on success; free on refusal */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
3115 
3116 /* ---- L2CAP signalling commands ---- */
/* Allocate and build a signalling PDU: L2CAP header + command header +
 * @dlen bytes of @data.  Payload that does not fit in the first skb is
 * carried in continuation fragments of at most conn->mtu bytes each.
 * Returns NULL on allocation failure or if the MTU cannot even hold
 * the headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links carry signalling on a different fixed channel */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the remainder of the first skb with data */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain, including fragments already attached */
	kfree_skb(skb);
	return NULL;
}
3182 
/* Parse one configuration option at *ptr and advance *ptr past it.
 * Returns the total number of bytes consumed.  1/2/4-byte values are
 * returned by value in *val; any other length is returned as a
 * pointer to the option data.
 *
 * NOTE(review): no bounds checking here — callers appear responsible
 * for verifying the remaining buffer can hold opt->len; confirm at
 * the call sites.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3216 
/* Append a configuration option to the buffer at *ptr and advance
 * *ptr past it.  The option is silently dropped if it does not fit in
 * the remaining @size bytes.  For lengths other than 1/2/4, @val is
 * treated as a pointer to the option data.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Not enough room left for this option */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3249 
/* Append an Extended Flow Specification option describing this
 * channel's QoS parameters.  Only ERTM and streaming modes carry an
 * EFS; for any other mode nothing is added.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3280 
/* Deferred-ack timer: if frames received from the peer are still
 * unacknowledged when the timer fires, send an explicit RR/RNR.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* Presumably drops the reference taken when the ack timer was
	 * scheduled — confirm against the __set_ack_timer path.
	 */
	l2cap_chan_put(chan);
}
3300 
/* Reset per-channel sequence and queue state for a newly configured
 * channel.  For ERTM mode this also allocates the SREJ and retransmit
 * sequence lists.  Returns 0 on success or a negative errno.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Non-ERTM modes need no further setup */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Unwind the first allocation on failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3341 
3342 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3343 {
3344 	switch (mode) {
3345 	case L2CAP_MODE_STREAMING:
3346 	case L2CAP_MODE_ERTM:
3347 		if (l2cap_mode_supported(mode, remote_feat_mask))
3348 			return mode;
3349 		fallthrough;
3350 	default:
3351 		return L2CAP_MODE_BASIC;
3352 	}
3353 }
3354 
3355 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3356 {
3357 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3358 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3359 }
3360 
3361 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3362 {
3363 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3364 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3365 }
3366 
3367 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3368 				      struct l2cap_conf_rfc *rfc)
3369 {
3370 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3371 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3372 
3373 		/* Class 1 devices have must have ERTM timeouts
3374 		 * exceeding the Link Supervision Timeout.  The
3375 		 * default Link Supervision Timeout for AMP
3376 		 * controllers is 10 seconds.
3377 		 *
3378 		 * Class 1 devices use 0xffffffff for their
3379 		 * best-effort flush timeout, so the clamping logic
3380 		 * will result in a timeout that meets the above
3381 		 * requirement.  ERTM timeouts are 16-bit values, so
3382 		 * the maximum timeout is 65.535 seconds.
3383 		 */
3384 
3385 		/* Convert timeout to milliseconds and round */
3386 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3387 
3388 		/* This is the recommended formula for class 2 devices
3389 		 * that start ERTM timers when packets are sent to the
3390 		 * controller.
3391 		 */
3392 		ertm_to = 3 * ertm_to + 500;
3393 
3394 		if (ertm_to > 0xffff)
3395 			ertm_to = 0xffff;
3396 
3397 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3398 		rfc->monitor_timeout = rfc->retrans_timeout;
3399 	} else {
3400 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3401 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3402 	}
3403 }
3404 
3405 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3406 {
3407 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3408 	    __l2cap_ews_supported(chan->conn)) {
3409 		/* use extended control field */
3410 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3411 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3412 	} else {
3413 		chan->tx_win = min_t(u16, chan->tx_win,
3414 				     L2CAP_DEFAULT_TX_WINDOW);
3415 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3416 	}
3417 	chan->ack_win = chan->tx_win;
3418 }
3419 
3420 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3421 {
3422 	struct hci_conn *conn = chan->conn->hcon;
3423 
3424 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3425 
3426 	/* The 2-DH1 packet has between 2 and 56 information bytes
3427 	 * (including the 2-byte payload header)
3428 	 */
3429 	if (!(conn->pkt_type & HCI_2DH1))
3430 		chan->imtu = 54;
3431 
3432 	/* The 3-DH1 packet has between 2 and 85 information bytes
3433 	 * (including the 2-byte payload header)
3434 	 */
3435 	if (!(conn->pkt_type & HCI_3DH1))
3436 		chan->imtu = 83;
3437 
3438 	/* The 2-DH3 packet has between 2 and 369 information bytes
3439 	 * (including the 2-byte payload header)
3440 	 */
3441 	if (!(conn->pkt_type & HCI_2DH3))
3442 		chan->imtu = 367;
3443 
3444 	/* The 3-DH3 packet has between 2 and 554 information bytes
3445 	 * (including the 2-byte payload header)
3446 	 */
3447 	if (!(conn->pkt_type & HCI_3DH3))
3448 		chan->imtu = 552;
3449 
3450 	/* The 2-DH5 packet has between 2 and 681 information bytes
3451 	 * (including the 2-byte payload header)
3452 	 */
3453 	if (!(conn->pkt_type & HCI_2DH5))
3454 		chan->imtu = 679;
3455 
3456 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3457 	 * (including the 2-byte payload header)
3458 	 */
3459 	if (!(conn->pkt_type & HCI_3DH5))
3460 		chan->imtu = 1021;
3461 }
3462 
/* Build a Configure Request for @chan into @data (@data_size bytes).
 *
 * On the first exchange the channel mode is (re)negotiated against
 * the remote feature mask; later requests keep the mode already
 * chosen.  Depending on the mode the request carries MTU, RFC and
 * optionally EFS, EWS and FCS options.
 *
 * Returns the number of bytes written into @data.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens before any config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* "State 2" devices insist on their configured mode */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		/* Fall back to a mode the remote supports */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		/* imtu == 0 means "derive from supported packet types" */
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only state basic mode explicitly when the remote
		 * could have negotiated something else.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Largest PDU that still fits in the ACL MTU together
		 * with the extended header, SDU length and FCS fields.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* With extended control the full window goes in EWS */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3588 
3589 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3590 {
3591 	struct l2cap_conf_rsp *rsp = data;
3592 	void *ptr = rsp->data;
3593 	void *endptr = data + data_size;
3594 	void *req = chan->conf_req;
3595 	int len = chan->conf_len;
3596 	int type, hint, olen;
3597 	unsigned long val;
3598 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3599 	struct l2cap_conf_efs efs;
3600 	u8 remote_efs = 0;
3601 	u16 mtu = L2CAP_DEFAULT_MTU;
3602 	u16 result = L2CAP_CONF_SUCCESS;
3603 	u16 size;
3604 
3605 	BT_DBG("chan %p", chan);
3606 
3607 	while (len >= L2CAP_CONF_OPT_SIZE) {
3608 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3609 		if (len < 0)
3610 			break;
3611 
3612 		hint  = type & L2CAP_CONF_HINT;
3613 		type &= L2CAP_CONF_MASK;
3614 
3615 		switch (type) {
3616 		case L2CAP_CONF_MTU:
3617 			if (olen != 2)
3618 				break;
3619 			mtu = val;
3620 			break;
3621 
3622 		case L2CAP_CONF_FLUSH_TO:
3623 			if (olen != 2)
3624 				break;
3625 			chan->flush_to = val;
3626 			break;
3627 
3628 		case L2CAP_CONF_QOS:
3629 			break;
3630 
3631 		case L2CAP_CONF_RFC:
3632 			if (olen != sizeof(rfc))
3633 				break;
3634 			memcpy(&rfc, (void *) val, olen);
3635 			break;
3636 
3637 		case L2CAP_CONF_FCS:
3638 			if (olen != 1)
3639 				break;
3640 			if (val == L2CAP_FCS_NONE)
3641 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3642 			break;
3643 
3644 		case L2CAP_CONF_EFS:
3645 			if (olen != sizeof(efs))
3646 				break;
3647 			remote_efs = 1;
3648 			memcpy(&efs, (void *) val, olen);
3649 			break;
3650 
3651 		case L2CAP_CONF_EWS:
3652 			if (olen != 2)
3653 				break;
3654 			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3655 				return -ECONNREFUSED;
3656 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3657 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3658 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3659 			chan->remote_tx_win = val;
3660 			break;
3661 
3662 		default:
3663 			if (hint)
3664 				break;
3665 			result = L2CAP_CONF_UNKNOWN;
3666 			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
3667 			break;
3668 		}
3669 	}
3670 
3671 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3672 		goto done;
3673 
3674 	switch (chan->mode) {
3675 	case L2CAP_MODE_STREAMING:
3676 	case L2CAP_MODE_ERTM:
3677 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3678 			chan->mode = l2cap_select_mode(rfc.mode,
3679 						       chan->conn->feat_mask);
3680 			break;
3681 		}
3682 
3683 		if (remote_efs) {
3684 			if (__l2cap_efs_supported(chan->conn))
3685 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3686 			else
3687 				return -ECONNREFUSED;
3688 		}
3689 
3690 		if (chan->mode != rfc.mode)
3691 			return -ECONNREFUSED;
3692 
3693 		break;
3694 	}
3695 
3696 done:
3697 	if (chan->mode != rfc.mode) {
3698 		result = L2CAP_CONF_UNACCEPT;
3699 		rfc.mode = chan->mode;
3700 
3701 		if (chan->num_conf_rsp == 1)
3702 			return -ECONNREFUSED;
3703 
3704 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3705 				   (unsigned long) &rfc, endptr - ptr);
3706 	}
3707 
3708 	if (result == L2CAP_CONF_SUCCESS) {
3709 		/* Configure output options and let the other side know
3710 		 * which ones we don't like. */
3711 
3712 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3713 			result = L2CAP_CONF_UNACCEPT;
3714 		else {
3715 			chan->omtu = mtu;
3716 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3717 		}
3718 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3719 
3720 		if (remote_efs) {
3721 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3722 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3723 			    efs.stype != chan->local_stype) {
3724 
3725 				result = L2CAP_CONF_UNACCEPT;
3726 
3727 				if (chan->num_conf_req >= 1)
3728 					return -ECONNREFUSED;
3729 
3730 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3731 						   sizeof(efs),
3732 						   (unsigned long) &efs, endptr - ptr);
3733 			} else {
3734 				/* Send PENDING Conf Rsp */
3735 				result = L2CAP_CONF_PENDING;
3736 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3737 			}
3738 		}
3739 
3740 		switch (rfc.mode) {
3741 		case L2CAP_MODE_BASIC:
3742 			chan->fcs = L2CAP_FCS_NONE;
3743 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3744 			break;
3745 
3746 		case L2CAP_MODE_ERTM:
3747 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3748 				chan->remote_tx_win = rfc.txwin_size;
3749 			else
3750 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3751 
3752 			chan->remote_max_tx = rfc.max_transmit;
3753 
3754 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3755 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3756 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3757 			rfc.max_pdu_size = cpu_to_le16(size);
3758 			chan->remote_mps = size;
3759 
3760 			__l2cap_set_ertm_timeouts(chan, &rfc);
3761 
3762 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3763 
3764 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3765 					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3766 
3767 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3768 				chan->remote_id = efs.id;
3769 				chan->remote_stype = efs.stype;
3770 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3771 				chan->remote_flush_to =
3772 					le32_to_cpu(efs.flush_to);
3773 				chan->remote_acc_lat =
3774 					le32_to_cpu(efs.acc_lat);
3775 				chan->remote_sdu_itime =
3776 					le32_to_cpu(efs.sdu_itime);
3777 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3778 						   sizeof(efs),
3779 						   (unsigned long) &efs, endptr - ptr);
3780 			}
3781 			break;
3782 
3783 		case L2CAP_MODE_STREAMING:
3784 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3785 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3786 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3787 			rfc.max_pdu_size = cpu_to_le16(size);
3788 			chan->remote_mps = size;
3789 
3790 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3791 
3792 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3793 					   (unsigned long) &rfc, endptr - ptr);
3794 
3795 			break;
3796 
3797 		default:
3798 			result = L2CAP_CONF_UNACCEPT;
3799 
3800 			memset(&rfc, 0, sizeof(rfc));
3801 			rfc.mode = chan->mode;
3802 		}
3803 
3804 		if (result == L2CAP_CONF_SUCCESS)
3805 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3806 	}
3807 	rsp->scid   = cpu_to_le16(chan->dcid);
3808 	rsp->result = cpu_to_le16(result);
3809 	rsp->flags  = cpu_to_le16(0);
3810 
3811 	return ptr - data;
3812 }
3813 
/* Parse a Configure Response in @rsp (@len bytes) and build the
 * follow-up Configure Request into @data (@size bytes), adjusting
 * channel parameters to the values the peer proposed where
 * acceptable.  @result may be downgraded to L2CAP_CONF_UNACCEPT.
 *
 * Returns the number of request bytes written, or -ECONNREFUSED when
 * the response forces a mode we cannot accept.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	/* Walk the response options; badly sized ones are ignored */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* "State 2" devices never change their mode */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* The ack window may only shrink */
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Basic mode cannot be upgraded by the responder */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			/* NOTE(review): if the response carried no EFS
			 * option, 'efs' is read uninitialized below —
			 * verify FLAG_EFS_ENABLE implies an EFS option
			 * was present.
			 */
			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3931 
3932 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3933 				u16 result, u16 flags)
3934 {
3935 	struct l2cap_conf_rsp *rsp = data;
3936 	void *ptr = rsp->data;
3937 
3938 	BT_DBG("chan %p", chan);
3939 
3940 	rsp->scid   = cpu_to_le16(chan->dcid);
3941 	rsp->result = cpu_to_le16(result);
3942 	rsp->flags  = cpu_to_le16(flags);
3943 
3944 	return ptr - data;
3945 }
3946 
3947 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3948 {
3949 	struct l2cap_le_conn_rsp rsp;
3950 	struct l2cap_conn *conn = chan->conn;
3951 
3952 	BT_DBG("chan %p", chan);
3953 
3954 	rsp.dcid    = cpu_to_le16(chan->scid);
3955 	rsp.mtu     = cpu_to_le16(chan->imtu);
3956 	rsp.mps     = cpu_to_le16(chan->mps);
3957 	rsp.credits = cpu_to_le16(chan->rx_credits);
3958 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3959 
3960 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3961 		       &rsp);
3962 }
3963 
3964 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3965 {
3966 	struct {
3967 		struct l2cap_ecred_conn_rsp rsp;
3968 		__le16 dcid[5];
3969 	} __packed pdu;
3970 	struct l2cap_conn *conn = chan->conn;
3971 	u16 ident = chan->ident;
3972 	int i = 0;
3973 
3974 	if (!ident)
3975 		return;
3976 
3977 	BT_DBG("chan %p ident %d", chan, ident);
3978 
3979 	pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
3980 	pdu.rsp.mps     = cpu_to_le16(chan->mps);
3981 	pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
3982 	pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3983 
3984 	mutex_lock(&conn->chan_lock);
3985 
3986 	list_for_each_entry(chan, &conn->chan_l, list) {
3987 		if (chan->ident != ident)
3988 			continue;
3989 
3990 		/* Reset ident so only one response is sent */
3991 		chan->ident = 0;
3992 
3993 		/* Include all channels pending with the same ident */
3994 		pdu.dcid[i++] = cpu_to_le16(chan->scid);
3995 	}
3996 
3997 	mutex_unlock(&conn->chan_lock);
3998 
3999 	l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
4000 			sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
4001 }
4002 
4003 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
4004 {
4005 	struct l2cap_conn_rsp rsp;
4006 	struct l2cap_conn *conn = chan->conn;
4007 	u8 buf[128];
4008 	u8 rsp_code;
4009 
4010 	rsp.scid   = cpu_to_le16(chan->dcid);
4011 	rsp.dcid   = cpu_to_le16(chan->scid);
4012 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4013 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4014 
4015 	if (chan->hs_hcon)
4016 		rsp_code = L2CAP_CREATE_CHAN_RSP;
4017 	else
4018 		rsp_code = L2CAP_CONN_RSP;
4019 
4020 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
4021 
4022 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
4023 
4024 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4025 		return;
4026 
4027 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4028 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4029 	chan->num_conf_req++;
4030 }
4031 
/* Extract the final ERTM/streaming parameters from a Configure
 * Response in @rsp (@len bytes) and commit them to @chan.  Only
 * meaningful for ERTM and streaming channels; other modes return
 * immediately.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Pull the RFC and EWS options, if present */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* The ack window may only shrink from its current value */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
4087 
4088 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4089 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4090 				    u8 *data)
4091 {
4092 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
4093 
4094 	if (cmd_len < sizeof(*rej))
4095 		return -EPROTO;
4096 
4097 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4098 		return 0;
4099 
4100 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4101 	    cmd->ident == conn->info_ident) {
4102 		cancel_delayed_work(&conn->info_timer);
4103 
4104 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4105 		conn->info_ident = 0;
4106 
4107 		l2cap_conn_start(conn);
4108 	}
4109 
4110 	return 0;
4111 }
4112 
/* Handle an incoming BR/EDR Connection Request (or AMP Create Channel
 * Request, depending on @rsp_code).
 *
 * Looks up a listening channel for the requested PSM, validates link
 * security and the remote's source CID, creates a new channel and
 * replies with @rsp_code.  Depending on security and feature
 * discovery state the reported result is success, pending or an
 * error.
 *
 * Returns the newly created channel, or NULL if none was created.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			/* Userspace may want to authorize before setup
			 * proceeds.
			 */
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Wait for authentication to complete */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not yet known: answer pending and ask */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Kick off feature discovery if this was the first trigger */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	return chan;
}
4255 
4256 static int l2cap_connect_req(struct l2cap_conn *conn,
4257 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4258 {
4259 	struct hci_dev *hdev = conn->hcon->hdev;
4260 	struct hci_conn *hcon = conn->hcon;
4261 
4262 	if (cmd_len < sizeof(struct l2cap_conn_req))
4263 		return -EPROTO;
4264 
4265 	hci_dev_lock(hdev);
4266 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4267 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4268 		mgmt_device_connected(hdev, hcon, NULL, 0);
4269 	hci_dev_unlock(hdev);
4270 
4271 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4272 	return 0;
4273 }
4274 
/* Handle a Connection Response (or AMP Create Channel Response).
 *
 * Looks up the channel either by our source CID (if the peer echoed
 * one) or by the command ident, then advances the channel state:
 * success starts configuration, pending just marks the channel, and
 * any error tears it down.
 *
 * Returns 0 on success or -EBADSLT if no matching channel exists.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* Prefer lookup by source CID; fall back to the command ident */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	/* Take a reference so the channel survives until we are done */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the first Configure Request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result refuses the connection */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4354 
4355 static inline void set_default_fcs(struct l2cap_chan *chan)
4356 {
4357 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4358 	 * sides request it.
4359 	 */
4360 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4361 		chan->fcs = L2CAP_FCS_NONE;
4362 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4363 		chan->fcs = L2CAP_FCS_CRC16;
4364 }
4365 
4366 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4367 				    u8 ident, u16 flags)
4368 {
4369 	struct l2cap_conn *conn = chan->conn;
4370 
4371 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4372 	       flags);
4373 
4374 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4375 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4376 
4377 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4378 		       l2cap_build_conf_rsp(chan, data,
4379 					    L2CAP_CONF_SUCCESS, flags), data);
4380 }
4381 
4382 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4383 				   u16 scid, u16 dcid)
4384 {
4385 	struct l2cap_cmd_rej_cid rej;
4386 
4387 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4388 	rej.scid = __cpu_to_le16(scid);
4389 	rej.dcid = __cpu_to_le16(dcid);
4390 
4391 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4392 }
4393 
/* Handle an incoming L2CAP Configure Request for a BR/EDR channel.
 *
 * Config options may be split over several requests (continuation flag);
 * fragments are accumulated in chan->conf_req and only parsed once the
 * final fragment arrives.  Returns 0, or -EPROTO for a truncated command.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	/* Command must at least contain the fixed dcid/flags header. */
	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* On success the channel is returned locked with a reference held;
	 * both are released at the unlock label below.
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configure is only acceptable while the channel is being set up,
	 * or on an already connected channel (reconfiguration).
	 */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		/* Options were unparsable/unacceptable - tear down. */
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	/* Count responses sent; l2cap_config_rsp() bounds the retries. */
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions are configured - finish channel setup. */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* If we have not sent our own Configure Request yet, do so now. */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4504 
/* Handle an incoming L2CAP Configure Response.  Depending on the result
 * code the exchange completes, stays pending, is retried with adjusted
 * options, or the channel is torn down.  Returns 0, or -EPROTO for a
 * truncated command.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	/* May be negative here; only used after the length check below
	 * guarantees cmd_len >= sizeof(*rsp).
	 */
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns the channel locked and referenced; released at done. */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* BR/EDR channels answer now; AMP channels defer
			 * until the logical link is set up.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		/* Retry with adjusted options, but only a bounded number
		 * of times to avoid an endless negotiation loop.
		 */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			/* Ensure the rejected options fit in the request
			 * buffer before re-parsing them.
			 */
			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		/* Unrecoverable result (or retries exhausted): fail the
		 * channel and start disconnect.
		 */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More response fragments still expected. */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4619 
/* Handle an incoming L2CAP Disconnection Request: acknowledge it with a
 * Disconnection Response and tear the channel down.  Returns 0, or
 * -EPROTO for a malformed command.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* Lock order: conn->chan_lock first, then the channel lock. */
	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid. */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	/* Keep the channel alive across del/close below. */
	l2cap_chan_hold(chan);
	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4666 
/* Handle an incoming L2CAP Disconnection Response: finish tearing down a
 * channel we previously asked to disconnect.  Returns 0, or -EPROTO for
 * a malformed command.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Lock order: conn->chan_lock first, then the channel lock. */
	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	/* Keep the channel alive across del/close below. */
	l2cap_chan_hold(chan);
	l2cap_chan_lock(chan);

	/* Ignore stray responses for channels that are not waiting to
	 * disconnect.
	 */
	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_del(chan, 0);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4712 
/* Handle an incoming L2CAP Information Request.  Answers feature-mask
 * and fixed-channel queries; anything else gets a "not supported"
 * response.  Returns 0, or -EPROTO for a malformed command.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4-byte rsp header + 4-byte little-endian feature mask */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/streaming/FCS only when ERTM is enabled. */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 4-byte rsp header + 8-byte fixed-channel bitmap (only the
		 * first octet carries data; the rest is zeroed).
		 */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4763 
/* Handle an incoming L2CAP Information Response.  Drives the two-step
 * discovery sequence: feature mask first, then (if supported) the fixed
 * channel bitmap; once done, pending channels are started.  Returns 0,
 * or -EPROTO for a truncated command.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	/* Peer refused/failed: treat discovery as done and start any
	 * channels that were waiting on it.
	 */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* Peer supports fixed channels - query the bitmap next. */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4826 
/* Handle an incoming L2CAP Create Channel Request (AMP).  For controller
 * id 0 this degenerates to a normal BR/EDR connect; otherwise the AMP
 * controller id is validated and the new channel is bound to the high
 * speed link.  Returns 0, -EPROTO for a malformed command, or -EINVAL
 * when A2MP is not enabled locally.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	/* Controller must be an AMP device and powered up. */
	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		/* Bind the new channel to the high speed link; FCS is not
		 * used on AMP channels.
		 */
		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	/* Unknown/unusable AMP controller: reject the request. */
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4903 
4904 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4905 {
4906 	struct l2cap_move_chan_req req;
4907 	u8 ident;
4908 
4909 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4910 
4911 	ident = l2cap_get_ident(chan->conn);
4912 	chan->ident = ident;
4913 
4914 	req.icid = cpu_to_le16(chan->scid);
4915 	req.dest_amp_id = dest_amp_id;
4916 
4917 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4918 		       &req);
4919 
4920 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4921 }
4922 
4923 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4924 {
4925 	struct l2cap_move_chan_rsp rsp;
4926 
4927 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4928 
4929 	rsp.icid = cpu_to_le16(chan->dcid);
4930 	rsp.result = cpu_to_le16(result);
4931 
4932 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4933 		       sizeof(rsp), &rsp);
4934 }
4935 
4936 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4937 {
4938 	struct l2cap_move_chan_cfm cfm;
4939 
4940 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4941 
4942 	chan->ident = l2cap_get_ident(chan->conn);
4943 
4944 	cfm.icid = cpu_to_le16(chan->scid);
4945 	cfm.result = cpu_to_le16(result);
4946 
4947 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4948 		       sizeof(cfm), &cfm);
4949 
4950 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4951 }
4952 
4953 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4954 {
4955 	struct l2cap_move_chan_cfm cfm;
4956 
4957 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4958 
4959 	cfm.icid = cpu_to_le16(icid);
4960 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4961 
4962 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4963 		       sizeof(cfm), &cfm);
4964 }
4965 
4966 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4967 					 u16 icid)
4968 {
4969 	struct l2cap_move_chan_cfm_rsp rsp;
4970 
4971 	BT_DBG("icid 0x%4.4x", icid);
4972 
4973 	rsp.icid = cpu_to_le16(icid);
4974 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4975 }
4976 
4977 static void __release_logical_link(struct l2cap_chan *chan)
4978 {
4979 	chan->hs_hchan = NULL;
4980 	chan->hs_hcon = NULL;
4981 
4982 	/* Placeholder - release the logical link */
4983 }
4984 
/* React to a failed logical link setup: abort channel creation for a
 * channel still being set up, or roll back an in-progress channel move
 * for an established channel.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
5015 
/* Complete creation of a channel on an AMP controller once its logical
 * link is up: attach the logical link, send the deferred EFS Configure
 * Response, and finish setup if the peer's config is already done.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	/* Scratch buffer for building the response. */
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* chan->ident was saved when the config response was deferred. */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
5038 
/* Advance the channel-move state machine once the logical link for a
 * move has come up, sending the confirm/response appropriate to this
 * side's move role.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		/* Defer while the local receiver is busy. */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
5072 
/* Call with chan locked */
/* Logical link confirmation callback: dispatch success to the create or
 * move completion path (depending on channel state), or unwind on
 * failure.  @status is non-zero on failure.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		/* Established channel: this logical link is for a move. */
		l2cap_logical_finish_move(chan, hchan);
	}
}
5093 
/* Initiate a channel move.  From BR/EDR the physical AMP link must be
 * prepared first (only if policy prefers AMP); from an AMP controller
 * the move back to BR/EDR (id 0) can be requested immediately.
 */
void l2cap_move_start(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	if (chan->local_amp_id == AMP_ID_BREDR) {
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
			return;
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
	} else {
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		/* move_id 0 = move back to BR/EDR. */
		chan->move_id = 0;
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
	}
}
5112 
/* Continue channel creation on an AMP controller after the physical
 * link attempt finished.  Outgoing channels send a Create Channel
 * Request (or fall back to plain BR/EDR connect on failure); incoming
 * channels answer the peer's pending Create Channel Request.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* FCS is not used on AMP channels. */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		/* On success, move straight into configuration and send
		 * our Configure Request.
		 */
		if (result == L2CAP_CR_SUCCESS) {
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}
	}
}
5164 
/* As move initiator: prepare the channel for moving, record the target
 * controller, and send the Move Channel Request to the peer.
 */
static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
				   u8 remote_amp_id)
{
	l2cap_move_setup(chan);
	chan->move_id = local_amp_id;
	chan->move_state = L2CAP_MOVE_WAIT_RSP;

	l2cap_send_move_chan_req(chan, remote_amp_id);
}
5174 
/* As move responder: answer the peer's move request depending on
 * whether the logical link is already connected, still coming up, or
 * unavailable.  The hci_chan lookup is still a placeholder, so today
 * this always takes the "not allowed" branch.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
5199 
5200 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5201 {
5202 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5203 		u8 rsp_result;
5204 		if (result == -EINVAL)
5205 			rsp_result = L2CAP_MR_BAD_ID;
5206 		else
5207 			rsp_result = L2CAP_MR_NOT_ALLOWED;
5208 
5209 		l2cap_send_move_chan_rsp(chan, rsp_result);
5210 	}
5211 
5212 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
5213 	chan->move_state = L2CAP_MOVE_STABLE;
5214 
5215 	/* Restart data transmission */
5216 	l2cap_ertm_send(chan);
5217 }
5218 
/* Invoke with locked chan */
/* Physical link confirmation callback: route the result to channel
 * creation or to the move path that matches this side's move role.
 * Ignored for channels already closing.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* Channel is going away - nothing to continue. */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
		return;

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
5250 
/* Handle an incoming Move Channel Request: validate the channel and the
 * destination controller, detect move collisions, and either accept the
 * move (possibly pending) or reject it with a suitable result code.
 * Returns 0, -EPROTO for a malformed command, or -EINVAL when A2MP is
 * not enabled locally.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	/* Returns the channel locked and referenced (released below). */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Fixed channels, BR/EDR-only channels and basic-mode channels
	 * may not be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* Destination AMP controller must exist, be an AMP device and
	 * be powered up.
	 */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision.  Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5348 
/* Continue an in-progress move after a success/pending Move Channel
 * Response, advancing the initiator's move state machine according to
 * the current wait state and the (placeholder) logical link status.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	/* Returns the channel locked and referenced (released below). */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* Re-arm the (extended) timeout while the move is pending. */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
5439 
/* Handle a failed Move Channel Response: on a collision the initiator
 * switches to the responder role, otherwise the move is cancelled; in
 * all cases an unconfirmed Move Channel Confirmation is sent.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	/* Look up by signalling ident; returns the channel locked and
	 * referenced (released below).
	 */
	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
5469 
5470 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5471 				  struct l2cap_cmd_hdr *cmd,
5472 				  u16 cmd_len, void *data)
5473 {
5474 	struct l2cap_move_chan_rsp *rsp = data;
5475 	u16 icid, result;
5476 
5477 	if (cmd_len != sizeof(*rsp))
5478 		return -EPROTO;
5479 
5480 	icid = le16_to_cpu(rsp->icid);
5481 	result = le16_to_cpu(rsp->result);
5482 
5483 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5484 
5485 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5486 		l2cap_move_continue(conn, icid, result);
5487 	else
5488 		l2cap_move_fail(conn, cmd->ident, icid, result);
5489 
5490 	return 0;
5491 }
5492 
/* Handle an incoming Move Channel Confirm.  Finishes the move on this
 * side and always answers with a Confirm Response, even when the icid
 * does not match any channel (mandated by the spec).
 *
 * Returns 0, or -EPROTO for a wrong-length command.
 *
 * l2cap_get_chan_by_dcid() returns the channel locked with a reference
 * held; both are dropped before returning.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Move confirmed: commit to the new controller */
			chan->local_amp_id = chan->move_id;
			/* Back on BR/EDR the AMP logical link is unused */
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Unconfirmed: stay on the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5535 
/* Handle a Move Channel Confirm Response, the final PDU of a channel
 * move.  Commits the move on this side if we were waiting for it.
 *
 * Returns 0, or -EPROTO for a wrong-length command.
 *
 * l2cap_get_chan_by_scid() returns the channel locked with a reference
 * held; both are dropped before returning.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		/* Moved back to BR/EDR: drop the now-unused AMP link */
		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5571 
5572 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5573 					      struct l2cap_cmd_hdr *cmd,
5574 					      u16 cmd_len, u8 *data)
5575 {
5576 	struct hci_conn *hcon = conn->hcon;
5577 	struct l2cap_conn_param_update_req *req;
5578 	struct l2cap_conn_param_update_rsp rsp;
5579 	u16 min, max, latency, to_multiplier;
5580 	int err;
5581 
5582 	if (hcon->role != HCI_ROLE_MASTER)
5583 		return -EINVAL;
5584 
5585 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5586 		return -EPROTO;
5587 
5588 	req = (struct l2cap_conn_param_update_req *) data;
5589 	min		= __le16_to_cpu(req->min);
5590 	max		= __le16_to_cpu(req->max);
5591 	latency		= __le16_to_cpu(req->latency);
5592 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5593 
5594 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5595 	       min, max, latency, to_multiplier);
5596 
5597 	memset(&rsp, 0, sizeof(rsp));
5598 
5599 	err = hci_check_conn_params(min, max, latency, to_multiplier);
5600 	if (err)
5601 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5602 	else
5603 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5604 
5605 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5606 		       sizeof(rsp), &rsp);
5607 
5608 	if (!err) {
5609 		u8 store_hint;
5610 
5611 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5612 						to_multiplier);
5613 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5614 				    store_hint, min, max, latency,
5615 				    to_multiplier);
5616 
5617 	}
5618 
5619 	return 0;
5620 }
5621 
/* Handle an LE Credit Based Connection Response for a connect request
 * we sent earlier (matched by cmd->ident).
 *
 * On success the channel adopts the remote's dcid/mtu/mps/credits and
 * becomes ready.  On an authentication/encryption error we raise our
 * required security level and retry via SMP, unless we already have
 * MITM protection.  Any other result tears the channel down.
 *
 * Returns 0 on success, -EPROTO for a short command, -EBADSLT when no
 * channel matches the ident or the dcid is already in use.
 *
 * Locking: takes conn->chan_lock for the ident lookup, then the
 * channel lock for the state change.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* Spec minimums (23) for MTU/MPS, and the dcid must fall in the
	 * LE dynamic CID range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Reject a dcid that duplicates an existing channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Escalate the required security level and retry */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5708 
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Echo requests are answered inline by mirroring the payload; unknown
 * opcodes return -EINVAL, which makes the caller send a Command Reject.
 * Handlers whose return value is ignored report protocol errors on
 * their own.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5788 
/* Handle an incoming LE Credit Based Connection Request.
 *
 * Looks up a listening channel for the requested PSM, validates
 * security and the proposed parameters, creates the new channel and
 * answers with an LE Connection Response.  With FLAG_DEFER_SETUP the
 * response is postponed until userspace accepts; L2CAP_CR_PEND is used
 * internally to mark that case (it is not a defined LE CoC result).
 *
 * Returns 0 after sending (or deferring) a response, -EPROTO for a
 * malformed command or out-of-spec MTU/MPS.
 *
 * Locking: conn->chan_lock then the parent channel lock; both released
 * via the goto cleanup path before the response is sent.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	/* Defaults used when the request is rejected */
	dcid = 0;
	credits = 0;

	/* Spec minimum for both MTU and MPS is 23 */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	/* Our side of the response: local scid and initial credits */
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	/* Deferred setup: the response is sent later on accept */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5914 
/* Handle an LE Flow Control Credit packet: add the peer's credits to
 * the channel and resume transmission if we were blocked.
 *
 * A credit count that would push tx_credits past
 * LE_FLOWCTL_MAX_CREDITS is a protocol violation; the channel is
 * disconnected but 0 is still returned so no Command Reject is sent.
 *
 * Returns 0, -EPROTO for a wrong-length command, or -EBADSLT when the
 * cid matches no channel.
 *
 * l2cap_get_chan_by_dcid() returns the channel locked with a reference
 * held; both are dropped before returning.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid	= __le16_to_cpu(pkt->cid);
	credits	= __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		goto unlock;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5961 
/* Handle an Enhanced Credit Based Connection Request, which may open
 * up to L2CAP_ECRED_MAX_CID channels at once.
 *
 * Per-SCID failures (bad range, in use, no memory) leave a zero dcid
 * in the response slot and record the error in `result` while the
 * remaining SCIDs are still processed.  If any channel defers setup
 * (FLAG_DEFER_SETUP) the whole response is postponed.
 *
 * Returns 0 after sending (or deferring) the response, -EINVAL when
 * ECRED support is disabled.
 *
 * Locking: conn->chan_lock then the parent channel lock, mirroring
 * l2cap_le_connect_req().
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	/* Response PDU with room for the maximum number of dcids */
	struct {
		struct l2cap_ecred_conn_rsp rsp;
		__le16 dcid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, len = 0;
	int i, num_scid;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	/* Payload must be the fixed header plus a whole number of scids */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > ARRAY_SIZE(pdu.dcid)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto response;
	}

	psm  = req->psm;

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	memset(&pdu, 0, sizeof(pdu));

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		/* Default to "refused" for this slot */
		pdu.dcid[i] = 0x0000;
		len += sizeof(*pdu.dcid);

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm  = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response: mtu/mps/credits are shared by all
		 * channels, so fill them only once.
		 */
		if (!pdu.rsp.credits) {
			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
			pdu.rsp.mps = cpu_to_le16(chan->mps);
			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
		}

		pdu.dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

response:
	pdu.rsp.result = cpu_to_le16(result);

	/* Deferred setup: userspace will trigger the response later */
	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(pdu.rsp) + len, &pdu);

	return 0;
}
6104 
/* Handle an Enhanced Credit Based Connection Response for a request we
 * sent (channels matched by cmd->ident).
 *
 * Walks every pending EXT_FLOWCTL channel on the connection and pairs
 * it with the next dcid in the response.  Channels with no matching
 * dcid, a duplicate dcid, or a refusal result are deleted; on an
 * authentication/encryption result we escalate security and retry,
 * mirroring l2cap_le_connect_rsp().
 *
 * Returns 0, or -EPROTO for a short command.
 *
 * Locking: conn->chan_lock across the walk, per-channel lock inside.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	mutex_lock(&conn->chan_lock);

	/* cmd_len now tracks the remaining dcid bytes */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		/* Only channels still pending on this request */
		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			/* Lookup cannot fail: it just succeeded above
			 * while conn->chan_lock is still held.
			 */
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return err;
}
6218 
6219 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6220 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6221 					 u8 *data)
6222 {
6223 	struct l2cap_ecred_reconf_req *req = (void *) data;
6224 	struct l2cap_ecred_reconf_rsp rsp;
6225 	u16 mtu, mps, result;
6226 	struct l2cap_chan *chan;
6227 	int i, num_scid;
6228 
6229 	if (!enable_ecred)
6230 		return -EINVAL;
6231 
6232 	if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) {
6233 		result = L2CAP_CR_LE_INVALID_PARAMS;
6234 		goto respond;
6235 	}
6236 
6237 	mtu = __le16_to_cpu(req->mtu);
6238 	mps = __le16_to_cpu(req->mps);
6239 
6240 	BT_DBG("mtu %u mps %u", mtu, mps);
6241 
6242 	if (mtu < L2CAP_ECRED_MIN_MTU) {
6243 		result = L2CAP_RECONF_INVALID_MTU;
6244 		goto respond;
6245 	}
6246 
6247 	if (mps < L2CAP_ECRED_MIN_MPS) {
6248 		result = L2CAP_RECONF_INVALID_MPS;
6249 		goto respond;
6250 	}
6251 
6252 	cmd_len -= sizeof(*req);
6253 	num_scid = cmd_len / sizeof(u16);
6254 	result = L2CAP_RECONF_SUCCESS;
6255 
6256 	for (i = 0; i < num_scid; i++) {
6257 		u16 scid;
6258 
6259 		scid = __le16_to_cpu(req->scid[i]);
6260 		if (!scid)
6261 			return -EPROTO;
6262 
6263 		chan = __l2cap_get_chan_by_dcid(conn, scid);
6264 		if (!chan)
6265 			continue;
6266 
6267 		/* If the MTU value is decreased for any of the included
6268 		 * channels, then the receiver shall disconnect all
6269 		 * included channels.
6270 		 */
6271 		if (chan->omtu > mtu) {
6272 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
6273 			       chan->omtu, mtu);
6274 			result = L2CAP_RECONF_INVALID_MTU;
6275 		}
6276 
6277 		chan->omtu = mtu;
6278 		chan->remote_mps = mps;
6279 	}
6280 
6281 respond:
6282 	rsp.result = cpu_to_le16(result);
6283 
6284 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
6285 		       &rsp);
6286 
6287 	return 0;
6288 }
6289 
6290 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6291 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6292 					 u8 *data)
6293 {
6294 	struct l2cap_chan *chan, *tmp;
6295 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6296 	u16 result;
6297 
6298 	if (cmd_len < sizeof(*rsp))
6299 		return -EPROTO;
6300 
6301 	result = __le16_to_cpu(rsp->result);
6302 
6303 	BT_DBG("result 0x%4.4x", rsp->result);
6304 
6305 	if (!result)
6306 		return 0;
6307 
6308 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6309 		if (chan->ident != cmd->ident)
6310 			continue;
6311 
6312 		l2cap_chan_del(chan, ECONNRESET);
6313 	}
6314 
6315 	return 0;
6316 }
6317 
6318 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6319 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6320 				       u8 *data)
6321 {
6322 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6323 	struct l2cap_chan *chan;
6324 
6325 	if (cmd_len < sizeof(*rej))
6326 		return -EPROTO;
6327 
6328 	mutex_lock(&conn->chan_lock);
6329 
6330 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6331 	if (!chan)
6332 		goto done;
6333 
6334 	l2cap_chan_lock(chan);
6335 	l2cap_chan_del(chan, ECONNREFUSED);
6336 	l2cap_chan_unlock(chan);
6337 
6338 done:
6339 	mutex_unlock(&conn->chan_lock);
6340 	return 0;
6341 }
6342 
/* Dispatch one LE signaling command to its handler.
 *
 * Unknown opcodes return -EINVAL so the caller sends a Command Reject;
 * parameter-update responses are accepted and ignored.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		/* Nothing to do on the response */
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_REQ:
		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_RSP:
		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_REQ:
		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_RSP:
		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
6405 
/* Process one inbound PDU on the LE signaling channel.
 *
 * Unlike BR/EDR, an LE signaling PDU carries exactly one command, so
 * the header length must match the remaining skb exactly.  Malformed
 * PDUs are silently dropped; a handler error triggers a Command
 * Reject (the "Wrong link type" text is historical — err can be any
 * handler failure).  Consumes the skb in all cases.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Exactly one command per LE signaling PDU; ident 0 is invalid */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
6446 
/* Process one inbound PDU on the BR/EDR signaling channel.
 *
 * A BR/EDR signaling PDU may pack several commands back to back, so
 * this loops over the skb, handing each command to
 * l2cap_bredr_sig_cmd() and replying with a Command Reject on handler
 * errors.  The raw PDU is also mirrored to any raw sockets first.
 * Consumes the skb in all cases.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* Truncated command or invalid ident ends the walk */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance to the next packed command */
		skb_pull(skb, len);
	}

drop:
	kfree_skb(skb);
}
6492 
/* Verify (and strip) the CRC16 FCS trailer of an ERTM/streaming frame.
 *
 * The CRC covers the L2CAP header (which the caller has already pulled,
 * hence the negative offset) plus the payload.  When the channel does
 * not use L2CAP_FCS_CRC16 the frame is accepted unchanged.
 *
 * Returns 0 on a valid (or absent) FCS, -EBADMSG on mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off first; the bytes are still in the
		 * linear buffer, so reading at the new skb->len picks
		 * up the received FCS.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
6513 
/* Answer a poll (P-bit) from the peer: send an s-frame or i-frame with
 * the F-bit set.
 *
 * If we are locally busy an RNR goes out immediately.  Otherwise
 * pending i-frames are flushed (one of which will carry the F-bit via
 * CONN_SEND_FBIT); if nothing consumed the F-bit, a plain RR is sent
 * so the poll is always answered.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Restart retransmissions if the peer just cleared its busy
	 * condition and we still have unacked frames outstanding.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
6547 
/* Append new_frag to skb's frag_list and keep the length accounting
 * consistent.
 *
 * @last_frag: in/out cursor pointing at the current tail fragment;
 *             updated to new_frag.  On the very first append the
 *             caller passes the head skb itself as *last_frag, so its
 *             ->next is set too (harmless here since the head is held
 *             privately, not on a queue).
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
6566 
/* Reassemble an ERTM SDU from segmented i-frames according to the SAR
 * bits in @control and deliver complete SDUs via chan->ops->recv().
 *
 * Ownership: on any path that keeps or consumes the frame, the local
 * skb pointer is set to NULL; on error (err != 0) both the incoming
 * frame and any partially assembled SDU are freed and the reassembly
 * state is reset.
 *
 * Returns 0 on success (including "partial SDU buffered"), -EINVAL
 * for a SAR sequence violation, -EMSGSIZE when the SDU exceeds our
 * MTU, or the error from ops->recv().
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* An unsegmented frame while reassembling is a
		 * protocol violation.
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* A new SDU may not start before the previous finished */
		if (chan->sdu)
			break;

		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		/* First segment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start segment must not already hold the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* Frame is now owned by chan->sdu */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Overrunning the announced SDU length is an error */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Final length must match exactly what was announced */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the frame (if still ours) and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
6651 
/* Placeholder for re-segmenting queued frames (e.g. after a channel
 * move changes the usable MPS) -- currently a no-op returning success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
6657 
6658 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6659 {
6660 	u8 event;
6661 
6662 	if (chan->mode != L2CAP_MODE_ERTM)
6663 		return;
6664 
6665 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6666 	l2cap_tx(chan, NULL, NULL, event);
6667 }
6668 
/* Drain the SREJ hold queue after retransmissions arrive.
 *
 * Delivers in-sequence frames from srej_q to l2cap_reassemble_sdu()
 * until the next expected txseq is missing, the local side goes busy,
 * or reassembly fails.  Once the queue is fully drained the channel
 * returns to normal receive state and acks the peer.
 *
 * Returns 0 or the error from l2cap_reassemble_sdu().
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap: the next expected frame has not arrived yet */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
6702 
/* Handle an incoming SREJ (Selective Reject) S-frame, which asks for
 * retransmission of the single I-frame identified by reqseq.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* An SREJ for the next, not-yet-sent sequence number is a
	 * protocol violation - tear the connection down.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* Give up once the frame has been retransmitted max_tx times
	 * (max_tx == 0 means no limit).
	 */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* P=1: retransmit with F=1 and remember the reqseq so a
		 * later duplicate SREJ with F=1 can be recognized via
		 * CONN_SREJ_ACT and not retransmitted twice.
		 */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* F=1: skip the retransmit only when this SREJ
			 * answers the poll that was already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6760 
/* Handle an incoming REJ (Reject) S-frame, which asks for
 * retransmission of all unacked I-frames starting at reqseq.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* Rejecting the next, not-yet-sent sequence number is a
	 * protocol violation - tear the connection down.
	 */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* Enforce the retransmission limit (max_tx == 0 means no limit) */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* F=1: retransmit only if this REJ is not the answer to
		 * a poll that was already acted on (CONN_REJ_ACT).
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6797 
/* Classify the txseq of a received I-frame relative to the receive
 * window and, during SREJ recovery, relative to the outstanding SREJ
 * requests.  The returned L2CAP_TXSEQ_* code drives how the RX state
 * machines treat the frame (deliver, queue, drop or disconnect).
 * The order of the checks below is significant.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	/* While SREJs are outstanding, check against the SREJ
	 * bookkeeping before the generic window checks below.
	 */
	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq lies before expected_tx_seq: frame was already received */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6883 
/* ERTM receive state machine handler for the default RECV state.
 * Consumes or queues the skb for I-frame events (any skb not marked
 * in-use is freed before returning); S-frame events carry no payload.
 * Returns 0 on success or a negative error from SDU reassembly.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Under local busy the frame is dropped; the peer
			 * will retransmit it once busy is cleared.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 *   l2cap_reassemble_sdu
			 *     chan->ops->recv == l2cap_sock_recv_cb
			 *       __sock_queue_rcv_skb
			 * Another thread calls:
			 *   bt_sock_recvmsg
			 *     skb_recv_datagram
			 *     skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			/* F=1 acknowledges an earlier REJ unless it was
			 * already acted on (CONN_REJ_ACT).
			 */
			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received: only the ack info is useful */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote cleared busy: restart the retransmission
			 * timer if frames are still outstanding.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Any skb not queued or consumed above is dropped here */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
7035 
/* ERTM receive state machine handler for the SREJ_SENT state, active
 * while selective-reject recovery is in progress.  In-sequence and
 * SREJ'd frames are parked on srej_q until the gaps are filled; any
 * skb not marked in-use is freed before returning.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The next outstanding SREJ arrived; queue it and
			 * try to drain everything now in sequence.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* F=1 answers an earlier REJ unless already acted
			 * on (CONN_REJ_ACT).
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Any skb not queued above is dropped here */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
7178 
7179 static int l2cap_finish_move(struct l2cap_chan *chan)
7180 {
7181 	BT_DBG("chan %p", chan);
7182 
7183 	chan->rx_state = L2CAP_RX_STATE_RECV;
7184 
7185 	if (chan->hs_hcon)
7186 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7187 	else
7188 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7189 
7190 	return l2cap_resegment(chan);
7191 }
7192 
7193 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
7194 				 struct l2cap_ctrl *control,
7195 				 struct sk_buff *skb, u8 event)
7196 {
7197 	int err;
7198 
7199 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7200 	       event);
7201 
7202 	if (!control->poll)
7203 		return -EPROTO;
7204 
7205 	l2cap_process_reqseq(chan, control->reqseq);
7206 
7207 	if (!skb_queue_empty(&chan->tx_q))
7208 		chan->tx_send_head = skb_peek(&chan->tx_q);
7209 	else
7210 		chan->tx_send_head = NULL;
7211 
7212 	/* Rewind next_tx_seq to the point expected
7213 	 * by the receiver.
7214 	 */
7215 	chan->next_tx_seq = control->reqseq;
7216 	chan->unacked_frames = 0;
7217 
7218 	err = l2cap_finish_move(chan);
7219 	if (err)
7220 		return err;
7221 
7222 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
7223 	l2cap_send_i_or_rr_or_rnr(chan);
7224 
7225 	if (event == L2CAP_EV_RECV_IFRAME)
7226 		return -EPROTO;
7227 
7228 	return l2cap_rx_state_recv(chan, control, NULL, event);
7229 }
7230 
7231 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
7232 				 struct l2cap_ctrl *control,
7233 				 struct sk_buff *skb, u8 event)
7234 {
7235 	int err;
7236 
7237 	if (!control->final)
7238 		return -EPROTO;
7239 
7240 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7241 
7242 	chan->rx_state = L2CAP_RX_STATE_RECV;
7243 	l2cap_process_reqseq(chan, control->reqseq);
7244 
7245 	if (!skb_queue_empty(&chan->tx_q))
7246 		chan->tx_send_head = skb_peek(&chan->tx_q);
7247 	else
7248 		chan->tx_send_head = NULL;
7249 
7250 	/* Rewind next_tx_seq to the point expected
7251 	 * by the receiver.
7252 	 */
7253 	chan->next_tx_seq = control->reqseq;
7254 	chan->unacked_frames = 0;
7255 
7256 	if (chan->hs_hcon)
7257 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7258 	else
7259 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7260 
7261 	err = l2cap_resegment(chan);
7262 
7263 	if (!err)
7264 		err = l2cap_rx_state_recv(chan, control, skb, event);
7265 
7266 	return err;
7267 }
7268 
7269 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7270 {
7271 	/* Make sure reqseq is for a packet that has been sent but not acked */
7272 	u16 unacked;
7273 
7274 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7275 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
7276 }
7277 
7278 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7279 		    struct sk_buff *skb, u8 event)
7280 {
7281 	int err = 0;
7282 
7283 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7284 	       control, skb, event, chan->rx_state);
7285 
7286 	if (__valid_reqseq(chan, control->reqseq)) {
7287 		switch (chan->rx_state) {
7288 		case L2CAP_RX_STATE_RECV:
7289 			err = l2cap_rx_state_recv(chan, control, skb, event);
7290 			break;
7291 		case L2CAP_RX_STATE_SREJ_SENT:
7292 			err = l2cap_rx_state_srej_sent(chan, control, skb,
7293 						       event);
7294 			break;
7295 		case L2CAP_RX_STATE_WAIT_P:
7296 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
7297 			break;
7298 		case L2CAP_RX_STATE_WAIT_F:
7299 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
7300 			break;
7301 		default:
7302 			/* shut it down */
7303 			break;
7304 		}
7305 	} else {
7306 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
7307 		       control->reqseq, chan->next_tx_seq,
7308 		       chan->expected_ack_seq);
7309 		l2cap_send_disconn_req(chan, ECONNRESET);
7310 	}
7311 
7312 	return err;
7313 }
7314 
/* Receive path for streaming mode: only the in-sequence frame is
 * delivered to reassembly; anything else discards both the partial SDU
 * and the frame.  The receive variables are always resynchronized to
 * the received txseq, so streaming never retransmits.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 *   l2cap_reassemble_sdu
	 *     chan->ops->recv == l2cap_sock_recv_cb
	 *       __sock_queue_rcv_skb
	 * Another thread calls:
	 *   bt_sock_recvmsg
	 *     skb_recv_datagram
	 *     skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out of sequence: drop any partial SDU and this frame */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resync to the received frame; use the cached txseq (see above) */
	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
7366 
/* Receive entry point for ERTM and streaming mode PDUs.  Unpacks the
 * control field, validates FCS, payload length and F/P bits, then
 * feeds the frame to the appropriate state machine.  The skb is
 * consumed on all paths; the return value is always 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Payload length excludes the SDU-length header and the FCS */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps control->super (RR/REJ/RNR/SREJ) to an RX event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
7459 
7460 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7461 {
7462 	struct l2cap_conn *conn = chan->conn;
7463 	struct l2cap_le_credits pkt;
7464 	u16 return_credits;
7465 
7466 	return_credits = (chan->imtu / chan->mps) + 1;
7467 
7468 	if (chan->rx_credits >= return_credits)
7469 		return;
7470 
7471 	return_credits -= chan->rx_credits;
7472 
7473 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7474 
7475 	chan->rx_credits += return_credits;
7476 
7477 	pkt.cid     = cpu_to_le16(chan->scid);
7478 	pkt.credits = cpu_to_le16(return_credits);
7479 
7480 	chan->ident = l2cap_get_ident(conn);
7481 
7482 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
7483 }
7484 
7485 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7486 {
7487 	int err;
7488 
7489 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7490 
7491 	/* Wait recv to confirm reception before updating the credits */
7492 	err = chan->ops->recv(chan, skb);
7493 
7494 	/* Update credits whenever an SDU is received */
7495 	l2cap_chan_le_send_credits(chan);
7496 
7497 	return err;
7498 }
7499 
/* Receive path for LE / enhanced credit based flow control channels:
 * credit accounting plus SDU reassembly.  A negative return means the
 * caller still owns (and must free) the skb; once reassembly has
 * started, the skb is consumed here and 0 is returned even on error
 * (see the comment at the end).
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* NOTE(review): assumes skb->len >= L2CAP_SDULEN_SIZE;
		 * compare the pskb_may_pull() check in
		 * l2cap_reassemble_sdu() - confirm callers guarantee this.
		 */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Whole SDU in one PDU: deliver immediately */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* append_skb_frag takes ownership of skb */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
7603 
/* Dispatch an skb received on a connection-oriented channel (looked up
 * by CID) to the handler for the channel's mode.  The channel
 * reference and lock taken here (or by l2cap_get_chan_by_scid()) are
 * released at 'done'; the skb is consumed on all paths.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		/* The A2MP fixed channel is created on first use */
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_hold(chan);
			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* recv returning 0 means it consumed the skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
7680 
/* Deliver a connectionless (G-frame) packet to the global channel
 * bound to its PSM.  Only ACL links carry connectionless data; the
 * skb is freed unless the recv callback consumes it, and the channel
 * reference from l2cap_global_chan_by_psm() is always released.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* recv returning 0 means it consumed the skb */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
7717 
/* Entry point for a complete L2CAP frame from the HCI layer: parse the
 * basic header and dispatch by CID.  Frames arriving before the HCI
 * connection reaches BT_CONNECTED are queued on pending_rx and
 * replayed later by process_pending_rx().  Consumes the skb.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload length */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
7772 
7773 static void process_pending_rx(struct work_struct *work)
7774 {
7775 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7776 					       pending_rx_work);
7777 	struct sk_buff *skb;
7778 
7779 	BT_DBG("");
7780 
7781 	while ((skb = skb_dequeue(&conn->pending_rx)))
7782 		l2cap_recv_frame(conn, skb);
7783 }
7784 
/* Return the L2CAP connection bound to @hcon, creating and initializing
 * a new one (HCI channel, MTU, fixed-channel mask, locks, lists and
 * work items) when none exists yet.  Returns NULL on allocation
 * failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up by an earlier caller */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);	/* conn holds a reference on hcon */
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* Prefer the controller's LE MTU for LE links; fall back to the
	 * ACL MTU otherwise (or when no LE MTU is reported).
	 */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		fallthrough;
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* A2MP fixed channel only on ACL links with High Speed enabled */
	if (hcon->type == ACL_LINK &&
	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	/* Advertise SMP over BR/EDR when the controller supports it (or
	 * it is forced via debugfs).
	 */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7851 
7852 static bool is_valid_psm(u16 psm, u8 dst_type)
7853 {
7854 	if (!psm)
7855 		return false;
7856 
7857 	if (bdaddr_type_is_le(dst_type))
7858 		return (psm <= 0x00ff);
7859 
7860 	/* PSM must be odd and lsb of upper byte must be 0 */
7861 	return ((psm & 0x0101) == 0x0001);
7862 }
7863 
/* Context passed to l2cap_chan_by_pid() via l2cap_chan_list() */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel the search was started for */
	struct pid *pid;		/* peer PID to match against */
	int count;			/* number of matching channels found */
};
7869 
7870 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7871 {
7872 	struct l2cap_chan_data *d = data;
7873 	struct pid *pid;
7874 
7875 	if (chan == d->chan)
7876 		return;
7877 
7878 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7879 		return;
7880 
7881 	pid = chan->ops->get_peer_pid(chan);
7882 
7883 	/* Only count deferred channels with the same PID/PSM */
7884 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7885 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7886 		return;
7887 
7888 	d->count++;
7889 }
7890 
/* Initiate an outgoing L2CAP connection on @chan towards @dst.
 *
 * Validates the PSM/CID combination and channel mode, creates (or
 * reuses) the underlying HCI connection, binds the channel to the
 * resulting L2CAP connection and kicks off the channel state machine.
 *
 * Returns 0 on success (including when a connection attempt is already
 * in progress) or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* RAW channels may connect without either a PSM or a CID */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection oriented channels require a PSM ... */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	/* ... while fixed channels require a destination CID */
	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes disabled by module parameters */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising, connect directly; otherwise go
		 * through the passive-scan based connection path.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type,
					      chan->sec_level,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE, NULL);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level,
						   HCI_LE_CONN_TIMEOUT,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;	/* include this channel itself */

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* The requested destination CID must not already be in use */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, start channel setup right away */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
8077 
/* Send an Enhanced Credit Based reconfigure request for a single
 * channel, advertising the channel's current incoming MTU and MPS.
 * The allocated command ident is stored in chan->ident so the response
 * can be matched later.
 */
static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct {
		struct l2cap_ecred_reconf_req req;
		__le16 scid;	/* single SCID appended after the header */
	} pdu;

	pdu.req.mtu = cpu_to_le16(chan->imtu);
	pdu.req.mps = cpu_to_le16(chan->mps);
	pdu.scid    = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
		       sizeof(pdu), &pdu);
}
8095 
8096 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8097 {
8098 	if (chan->imtu > mtu)
8099 		return -EINVAL;
8100 
8101 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
8102 
8103 	chan->imtu = mtu;
8104 
8105 	l2cap_ecred_reconfigure(chan);
8106 
8107 	return 0;
8108 }
8109 
8110 /* ---- L2CAP interface with lower layer (HCI) ---- */
8111 
8112 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8113 {
8114 	int exact = 0, lm1 = 0, lm2 = 0;
8115 	struct l2cap_chan *c;
8116 
8117 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8118 
8119 	/* Find listening sockets and check their link_mode */
8120 	read_lock(&chan_list_lock);
8121 	list_for_each_entry(c, &chan_list, global_l) {
8122 		if (c->state != BT_LISTEN)
8123 			continue;
8124 
8125 		if (!bacmp(&c->src, &hdev->bdaddr)) {
8126 			lm1 |= HCI_LM_ACCEPT;
8127 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8128 				lm1 |= HCI_LM_MASTER;
8129 			exact++;
8130 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
8131 			lm2 |= HCI_LM_ACCEPT;
8132 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8133 				lm2 |= HCI_LM_MASTER;
8134 		}
8135 	}
8136 	read_unlock(&chan_list_lock);
8137 
8138 	return exact ? lm1 : lm2;
8139 }
8140 
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * A reference is taken on the returned channel; the caller is expected
 * to drop it with l2cap_chan_put().
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Match exact or wildcard source address ... */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		/* ... and the source address type */
		if (src_type != c->src_type)
			continue;

		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
8176 
/* HCI connect-complete callback: set up the L2CAP connection for a new
 * ACL or LE link and notify every listening fixed channel so it can
 * create its per-connection instance.  On failure status the existing
 * L2CAP state for the link is torn down instead.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance the iteration before dropping the reference
		 * taken by the previous lookup.
		 */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
8237 
8238 int l2cap_disconn_ind(struct hci_conn *hcon)
8239 {
8240 	struct l2cap_conn *conn = hcon->l2cap_data;
8241 
8242 	BT_DBG("hcon %p", hcon);
8243 
8244 	if (!conn)
8245 		return HCI_ERROR_REMOTE_USER_TERM;
8246 	return conn->disc_reason;
8247 }
8248 
8249 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8250 {
8251 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8252 		return;
8253 
8254 	BT_DBG("hcon %p reason %d", hcon, reason);
8255 
8256 	l2cap_conn_del(hcon, bt_to_errno(reason));
8257 }
8258 
8259 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8260 {
8261 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8262 		return;
8263 
8264 	if (encrypt == 0x00) {
8265 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
8266 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8267 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
8268 			   chan->sec_level == BT_SECURITY_FIPS)
8269 			l2cap_chan_close(chan, ECONNREFUSED);
8270 	} else {
8271 		if (chan->sec_level == BT_SECURITY_MEDIUM)
8272 			__clear_chan_timer(chan);
8273 	}
8274 }
8275 
/* HCI security event callback: invoked when authentication or an
 * encryption change completes on @hcon.  Walks every channel on the
 * connection and advances or tears down its state machine according to
 * @status and the new @encrypt state.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* Skip the A2MP fixed channel */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Successful encryption raises the channel's effective
		 * security level to that of the link.
		 */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Established channels just resume (possibly starting
		 * the encryption timeout / close logic).
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security complete: proceed with the pending
			 * connection, or schedule a disconnect on
			 * failure / too-short encryption key.
			 */
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Defer the final accept to the
					 * owner of the socket.
					 */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: reject and schedule
				 * disconnect.
				 */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On success, immediately follow up with our
			 * configuration request if not already sent.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
8367 
8368 /* Append fragment into frame respecting the maximum len of rx_skb */
8369 static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
8370 			   u16 len)
8371 {
8372 	if (!conn->rx_skb) {
8373 		/* Allocate skb for the complete frame (with header) */
8374 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
8375 		if (!conn->rx_skb)
8376 			return -ENOMEM;
8377 		/* Init rx_len */
8378 		conn->rx_len = len;
8379 	}
8380 
8381 	/* Copy as much as the rx_skb can hold */
8382 	len = min_t(u16, len, skb->len);
8383 	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
8384 	skb_pull(skb, len);
8385 	conn->rx_len -= len;
8386 
8387 	return len;
8388 }
8389 
/* Complete the 2-byte L2CAP length field of a frame under reassembly
 * and, once the length is known, make sure conn->rx_skb can hold the
 * whole frame, reallocating it when the initial allocation (sized from
 * conn->mtu by the caller) is too small.  Returns a negative errno on
 * allocation failure, otherwise a non-negative byte count.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
8424 
/* Discard any partially reassembled frame and reset reassembly state */
static void l2cap_recv_reset(struct l2cap_conn *conn)
{
	kfree_skb(conn->rx_skb);
	conn->rx_skb = NULL;
	conn->rx_len = 0;
}
8431 
/* Lower-layer entry point: receive one ACL data packet for @hcon.
 *
 * Reassembles L2CAP frames that span multiple ACL fragments using
 * conn->rx_skb / conn->rx_len and hands every completed frame to
 * l2cap_recv_frame().  Always consumes @skb, either by copying it into
 * the reassembly buffer, passing it on, or dropping it.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start fragment while reassembly is in progress means
		 * the previous frame was never completed.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		/* A continuation requires a start fragment first */
		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
}
8538 
/* Callbacks registered with the HCI core for connection lifetime and
 * security events.
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
8545 
/* debugfs "l2cap" file: one line per channel in the global list with
 * addresses, state, PSM, CIDs, MTUs, security level and mode.
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
			   &c->src, c->src_type, &c->dst, c->dst_type,
			   c->state, __le16_to_cpu(c->psm),
			   c->scid, c->dcid, c->imtu, c->omtu,
			   c->sec_level, c->mode);
	}

	read_unlock(&chan_list_lock);

	return 0;
}
8564 
/* Generates l2cap_debugfs_fops wrapping l2cap_debugfs_show() */
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* debugfs entry created by l2cap_init(), removed in l2cap_exit() */
static struct dentry *l2cap_debugfs;
8568 
8569 int __init l2cap_init(void)
8570 {
8571 	int err;
8572 
8573 	err = l2cap_init_sockets();
8574 	if (err < 0)
8575 		return err;
8576 
8577 	hci_register_cb(&l2cap_cb);
8578 
8579 	if (IS_ERR_OR_NULL(bt_debugfs))
8580 		return 0;
8581 
8582 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8583 					    NULL, &l2cap_debugfs_fops);
8584 
8585 	return 0;
8586 }
8587 
/* Module teardown: remove the debugfs entry, detach from the HCI core
 * and release the L2CAP socket infrastructure.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
8594 
/* Module parameters, also writable at runtime via sysfs */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
8600