xref: /openbmc/linux/net/bluetooth/l2cap_core.c (revision dff03381)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 bool enable_ecred;
49 
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
54 
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 				       u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
58 			   void *data);
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
61 
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 		     struct sk_buff_head *skbs, u8 event);
64 
65 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
66 {
67 	if (link_type == LE_LINK) {
68 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
69 			return BDADDR_LE_PUBLIC;
70 		else
71 			return BDADDR_LE_RANDOM;
72 	}
73 
74 	return BDADDR_BREDR;
75 }
76 
77 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
78 {
79 	return bdaddr_type(hcon->type, hcon->src_type);
80 }
81 
82 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
83 {
84 	return bdaddr_type(hcon->type, hcon->dst_type);
85 }
86 
87 /* ---- L2CAP channels ---- */
88 
89 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
90 						   u16 cid)
91 {
92 	struct l2cap_chan *c;
93 
94 	list_for_each_entry(c, &conn->chan_l, list) {
95 		if (c->dcid == cid)
96 			return c;
97 	}
98 	return NULL;
99 }
100 
101 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
102 						   u16 cid)
103 {
104 	struct l2cap_chan *c;
105 
106 	list_for_each_entry(c, &conn->chan_l, list) {
107 		if (c->scid == cid)
108 			return c;
109 	}
110 	return NULL;
111 }
112 
/* Find channel with given SCID.
 * Returns a reference locked channel.
 *
 * The caller is responsible for releasing both the channel lock
 * (l2cap_chan_unlock()) and the reference (l2cap_chan_put()).
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	return c;
}
133 
/* Find channel with given DCID.
 * Returns a reference locked channel.
 *
 * The caller is responsible for releasing both the channel lock
 * (l2cap_chan_unlock()) and the reference (l2cap_chan_put()).
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	return c;
}
154 
155 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
156 						    u8 ident)
157 {
158 	struct l2cap_chan *c;
159 
160 	list_for_each_entry(c, &conn->chan_l, list) {
161 		if (c->ident == ident)
162 			return c;
163 	}
164 	return NULL;
165 }
166 
/* Find the channel waiting on the signalling command with the given
 * identifier. Returns a reference locked channel; the caller must
 * l2cap_chan_unlock() and l2cap_chan_put() it when done.
 */
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	return c;
}
184 
185 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
186 						      u8 src_type)
187 {
188 	struct l2cap_chan *c;
189 
190 	list_for_each_entry(c, &chan_list, global_l) {
191 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
192 			continue;
193 
194 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
195 			continue;
196 
197 		if (c->sport == psm && !bacmp(&c->src, src))
198 			return c;
199 	}
200 	return NULL;
201 }
202 
/* Bind @chan to a PSM on source address @src.
 *
 * A non-zero @psm is used as requested and fails with -EADDRINUSE if
 * another channel already bound it on this address/transport. A zero
 * @psm triggers dynamic allocation: BR/EDR scans L2CAP_PSM_DYN_START..
 * L2CAP_PSM_AUTO_END in steps of 2 (valid BR/EDR PSMs are odd), LE
 * scans its own dynamic range in steps of 1. Returns 0 on success or
 * -EINVAL when the dynamic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	/* Reject an explicitly requested PSM that is already in use */
	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
247 
248 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
249 {
250 	write_lock(&chan_list_lock);
251 
252 	/* Override the defaults (which are for conn-oriented) */
253 	chan->omtu = L2CAP_DEFAULT_MTU;
254 	chan->chan_type = L2CAP_CHAN_FIXED;
255 
256 	chan->scid = scid;
257 
258 	write_unlock(&chan_list_lock);
259 
260 	return 0;
261 }
262 
263 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
264 {
265 	u16 cid, dyn_end;
266 
267 	if (conn->hcon->type == LE_LINK)
268 		dyn_end = L2CAP_CID_LE_DYN_END;
269 	else
270 		dyn_end = L2CAP_CID_DYN_END;
271 
272 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
273 		if (!__l2cap_get_chan_by_scid(conn, cid))
274 			return cid;
275 	}
276 
277 	return 0;
278 }
279 
280 static void l2cap_state_change(struct l2cap_chan *chan, int state)
281 {
282 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
283 	       state_to_string(state));
284 
285 	chan->state = state;
286 	chan->ops->state_change(chan, state, 0);
287 }
288 
289 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
290 						int state, int err)
291 {
292 	chan->state = state;
293 	chan->ops->state_change(chan, chan->state, err);
294 }
295 
/* Report error @err to the channel's owner without changing state */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
300 
/* Arm the ERTM retransmission timer.
 *
 * Skipped when the monitor timer is already pending (the monitor
 * timer supersedes retransmission) or when retrans_timeout is zero
 * (timer disabled).
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
309 
310 static void __set_monitor_timer(struct l2cap_chan *chan)
311 {
312 	__clear_retrans_timer(chan);
313 	if (chan->monitor_timeout) {
314 		l2cap_set_timer(chan, &chan->monitor_timer,
315 				msecs_to_jiffies(chan->monitor_timeout));
316 	}
317 }
318 
319 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
320 					       u16 seq)
321 {
322 	struct sk_buff *skb;
323 
324 	skb_queue_walk(head, skb) {
325 		if (bt_cb(skb)->l2cap.txseq == seq)
326 			return skb;
327 	}
328 
329 	return NULL;
330 }
331 
332 /* ---- L2CAP sequence number lists ---- */
333 
334 /* For ERTM, ordered lists of sequence numbers must be tracked for
335  * SREJ requests that are received and for frames that are to be
336  * retransmitted. These seq_list functions implement a singly-linked
337  * list in an array, where membership in the list can also be checked
338  * in constant time. Items can also be added to the tail of the list
339  * and removed from the head in constant time, without further memory
340  * allocs or frees.
341  */
342 
343 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
344 {
345 	size_t alloc_size, i;
346 
347 	/* Allocated size is a power of 2 to map sequence numbers
348 	 * (which may be up to 14 bits) in to a smaller array that is
349 	 * sized for the negotiated ERTM transmit windows.
350 	 */
351 	alloc_size = roundup_pow_of_two(size);
352 
353 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
354 	if (!seq_list->list)
355 		return -ENOMEM;
356 
357 	seq_list->mask = alloc_size - 1;
358 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
359 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
360 	for (i = 0; i < alloc_size; i++)
361 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
362 
363 	return 0;
364 }
365 
/* Free the array allocated by l2cap_seq_list_init() */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
370 
371 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
372 					   u16 seq)
373 {
374 	/* Constant-time check for list membership */
375 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
376 }
377 
/* Remove and return the sequence number at the head of the list.
 *
 * NOTE(review): assumes the list is non-empty; callers must check
 * head != L2CAP_SEQ_LIST_CLEAR before popping.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry and clear this slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	/* The popped entry was the last one; mark the list empty */
	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
393 
394 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
395 {
396 	u16 i;
397 
398 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
399 		return;
400 
401 	for (i = 0; i <= seq_list->mask; i++)
402 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
403 
404 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
405 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
406 }
407 
/* Append @seq to the tail of the list in constant time.
 *
 * Duplicate appends are ignored (the slot already carries a link or
 * the TAIL marker). An empty list is detected via tail ==
 * L2CAP_SEQ_LIST_CLEAR, in which case @seq also becomes the head.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
425 
/* Work item run when a channel's chan_timer expires.
 *
 * Closes the channel with an error chosen from the state it timed out
 * in, then drops the reference that __set_chan_timer() took when the
 * work was scheduled.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	/* Established/configuring channels (and pending outgoing
	 * connections past the SDP security level) report
	 * ECONNREFUSED; anything else is a plain timeout.
	 */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}
458 
/* Allocate and initialise a new L2CAP channel.
 *
 * The channel starts in BT_OPEN with a single reference held by the
 * caller and is linked onto the global channel list. Returns NULL on
 * allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	/* NOTE(review): GFP_ATOMIC — presumably some callers run in
	 * atomic context; confirm before relaxing to GFP_KERNEL.
	 */
	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
492 
493 static void l2cap_chan_destroy(struct kref *kref)
494 {
495 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
496 
497 	BT_DBG("chan %p", chan);
498 
499 	write_lock(&chan_list_lock);
500 	list_del(&chan->global_l);
501 	write_unlock(&chan_list_lock);
502 
503 	kfree(chan);
504 }
505 
/* Take an additional reference on @c. The caller must already hold a
 * valid reference (use l2cap_chan_hold_unless_zero() otherwise).
 */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
512 
513 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
514 {
515 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
516 
517 	if (!kref_get_unless_zero(&c->kref))
518 		return NULL;
519 
520 	return c;
521 }
522 
/* Drop a reference on @c; the channel is freed via
 * l2cap_chan_destroy() when the last reference goes away.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
530 
531 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
532 {
533 	chan->fcs  = L2CAP_FCS_CRC16;
534 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
535 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
536 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
537 	chan->remote_max_tx = chan->max_tx;
538 	chan->remote_tx_win = chan->tx_win;
539 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
540 	chan->sec_level = BT_SECURITY_LOW;
541 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
542 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
543 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
544 
545 	chan->conf_state = 0;
546 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
547 
548 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
549 }
550 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
551 
/* Reset the LE credit based flow control state of @chan.
 *
 * @tx_credits: initial number of credits granted by the remote side.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	/* Discard any partially reassembled SDU */
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	/* Give enough credits for a full packet */
	chan->rx_credits = (chan->imtu / chan->mps) + 1;

	skb_queue_head_init(&chan->tx_q);
}
565 
566 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
567 {
568 	l2cap_le_flowctl_init(chan, tx_credits);
569 
570 	/* L2CAP implementations shall support a minimum MPS of 64 octets */
571 	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
572 		chan->mps = L2CAP_ECRED_MIN_MPS;
573 		chan->rx_credits = (chan->imtu / chan->mps) + 1;
574 	}
575 }
576 
/* Attach @chan to @conn, assigning CIDs/MTU according to the channel
 * type, and link it onto the connection's channel list. Caller must
 * hold conn->chan_lock.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* Default disconnect reason until something more specific occurs */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default extended flow specification: best effort */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference dropped again in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
628 
/* Locked wrapper around __l2cap_chan_add(): attaches @chan to @conn
 * while holding the connection's channel-list lock.
 */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
635 
/* Detach @chan from its connection (if any) and release its
 * mode-specific queues and timers.
 *
 * Drops the reference taken by __l2cap_chan_add() and the hci_conn
 * reference held on behalf of the channel. Caller must hold the
 * channel lock (and conn->chan_lock while the channel is linked).
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drops the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	/* Mode-specific queues/lists were never set up if configuration
	 * did not complete.
	 */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
704 
705 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
706 			      void *data)
707 {
708 	struct l2cap_chan *chan;
709 
710 	list_for_each_entry(chan, &conn->chan_l, list) {
711 		func(chan, data);
712 	}
713 }
714 
715 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
716 		     void *data)
717 {
718 	if (!conn)
719 		return;
720 
721 	mutex_lock(&conn->chan_lock);
722 	__l2cap_chan_list(conn, func, data);
723 	mutex_unlock(&conn->chan_lock);
724 }
725 
726 EXPORT_SYMBOL_GPL(l2cap_chan_list);
727 
/* Worker that re-syncs every channel's destination address/type with
 * the current hci_conn address.
 *
 * NOTE(review): presumably scheduled when the remote LE peer's random
 * address is resolved to its identity address — confirm against the
 * site that queues id_addr_update_work.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_update_work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
746 
747 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
748 {
749 	struct l2cap_conn *conn = chan->conn;
750 	struct l2cap_le_conn_rsp rsp;
751 	u16 result;
752 
753 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
754 		result = L2CAP_CR_LE_AUTHORIZATION;
755 	else
756 		result = L2CAP_CR_LE_BAD_PSM;
757 
758 	l2cap_state_change(chan, BT_DISCONN);
759 
760 	rsp.dcid    = cpu_to_le16(chan->scid);
761 	rsp.mtu     = cpu_to_le16(chan->imtu);
762 	rsp.mps     = cpu_to_le16(chan->mps);
763 	rsp.credits = cpu_to_le16(chan->rx_credits);
764 	rsp.result  = cpu_to_le16(result);
765 
766 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
767 		       &rsp);
768 }
769 
770 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
771 {
772 	struct l2cap_conn *conn = chan->conn;
773 	struct l2cap_ecred_conn_rsp rsp;
774 	u16 result;
775 
776 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
777 		result = L2CAP_CR_LE_AUTHORIZATION;
778 	else
779 		result = L2CAP_CR_LE_BAD_PSM;
780 
781 	l2cap_state_change(chan, BT_DISCONN);
782 
783 	memset(&rsp, 0, sizeof(rsp));
784 
785 	rsp.result  = cpu_to_le16(result);
786 
787 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
788 		       &rsp);
789 }
790 
791 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
792 {
793 	struct l2cap_conn *conn = chan->conn;
794 	struct l2cap_conn_rsp rsp;
795 	u16 result;
796 
797 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
798 		result = L2CAP_CR_SEC_BLOCK;
799 	else
800 		result = L2CAP_CR_BAD_PSM;
801 
802 	l2cap_state_change(chan, BT_DISCONN);
803 
804 	rsp.scid   = cpu_to_le16(chan->dcid);
805 	rsp.dcid   = cpu_to_le16(chan->scid);
806 	rsp.result = cpu_to_le16(result);
807 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
808 
809 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
810 }
811 
/* Close @chan for @reason using a state-appropriate method: a
 * disconnect request for established connection-oriented channels, a
 * connect-reject for pending incoming connections (BT_CONNECT2), or a
 * plain teardown otherwise. Caller must hold the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Wait (bounded by the send timeout) for the
			 * disconnect exchange to finish.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Reject with the PDU matching the transport and
			 * channel mode the request came in on.
			 */
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					break;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
862 
/* Translate the channel type and security level into the HCI
 * authentication requirement for this connection.
 *
 * Side effect: connectionless 3DSP channels and connection-oriented
 * SDP channels at BT_SECURITY_LOW are bumped to BT_SECURITY_SDP.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* Non-SDP connection-oriented channels use the general
		 * bonding rules below.
		 */
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
914 
915 /* Service level security */
916 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
917 {
918 	struct l2cap_conn *conn = chan->conn;
919 	__u8 auth_type;
920 
921 	if (conn->hcon->type == LE_LINK)
922 		return smp_conn_security(conn->hcon, chan->sec_level);
923 
924 	auth_type = l2cap_get_auth_type(chan);
925 
926 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
927 				 initiator);
928 }
929 
930 static u8 l2cap_get_ident(struct l2cap_conn *conn)
931 {
932 	u8 id;
933 
934 	/* Get next available identificator.
935 	 *    1 - 128 are used by kernel.
936 	 *  129 - 199 are reserved.
937 	 *  200 - 254 are used by utilities like l2ping, etc.
938 	 */
939 
940 	mutex_lock(&conn->ident_lock);
941 
942 	if (++conn->tx_ident > 128)
943 		conn->tx_ident = 1;
944 
945 	id = conn->tx_ident;
946 
947 	mutex_unlock(&conn->ident_lock);
948 
949 	return id;
950 }
951 
/* Build and transmit a signalling command on @conn's signalling
 * channel. The command is silently dropped if the skb cannot be
 * allocated.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Signalling traffic exits sniff mode and gets top priority */
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
976 
977 static bool __chan_is_moving(struct l2cap_chan *chan)
978 {
979 	return chan->move_state != L2CAP_MOVE_STABLE &&
980 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
981 }
982 
/* Hand a fully built PDU for @chan to the HCI layer.
 *
 * If the channel has an AMP high-speed link and is not mid-move, the
 * skb goes out on the high-speed hci_chan (or is dropped when none
 * exists yet). Otherwise it is sent on the BR/EDR/LE link with flush
 * semantics chosen from the link capabilities and channel flags.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
1014 
1015 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
1016 {
1017 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
1018 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
1019 
1020 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
1021 		/* S-Frame */
1022 		control->sframe = 1;
1023 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1024 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1025 
1026 		control->sar = 0;
1027 		control->txseq = 0;
1028 	} else {
1029 		/* I-Frame */
1030 		control->sframe = 0;
1031 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1032 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1033 
1034 		control->poll = 0;
1035 		control->super = 0;
1036 	}
1037 }
1038 
1039 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1040 {
1041 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1042 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1043 
1044 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1045 		/* S-Frame */
1046 		control->sframe = 1;
1047 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1048 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1049 
1050 		control->sar = 0;
1051 		control->txseq = 0;
1052 	} else {
1053 		/* I-Frame */
1054 		control->sframe = 0;
1055 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1056 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1057 
1058 		control->poll = 0;
1059 		control->super = 0;
1060 	}
1061 }
1062 
1063 static inline void __unpack_control(struct l2cap_chan *chan,
1064 				    struct sk_buff *skb)
1065 {
1066 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1067 		__unpack_extended_control(get_unaligned_le32(skb->data),
1068 					  &bt_cb(skb)->l2cap);
1069 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1070 	} else {
1071 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1072 					  &bt_cb(skb)->l2cap);
1073 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1074 	}
1075 }
1076 
1077 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1078 {
1079 	u32 packed;
1080 
1081 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1082 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1083 
1084 	if (control->sframe) {
1085 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1086 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1087 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1088 	} else {
1089 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1090 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1091 	}
1092 
1093 	return packed;
1094 }
1095 
1096 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1097 {
1098 	u16 packed;
1099 
1100 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1101 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1102 
1103 	if (control->sframe) {
1104 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1105 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1106 		packed |= L2CAP_CTRL_FRAME_TYPE;
1107 	} else {
1108 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1109 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1110 	}
1111 
1112 	return packed;
1113 }
1114 
1115 static inline void __pack_control(struct l2cap_chan *chan,
1116 				  struct l2cap_ctrl *control,
1117 				  struct sk_buff *skb)
1118 {
1119 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1120 		put_unaligned_le32(__pack_extended_control(control),
1121 				   skb->data + L2CAP_HDR_SIZE);
1122 	} else {
1123 		put_unaligned_le16(__pack_enhanced_control(control),
1124 				   skb->data + L2CAP_HDR_SIZE);
1125 	}
1126 }
1127 
1128 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1129 {
1130 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1131 		return L2CAP_EXT_HDR_SIZE;
1132 	else
1133 		return L2CAP_ENH_HDR_SIZE;
1134 }
1135 
1136 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1137 					       u32 control)
1138 {
1139 	struct sk_buff *skb;
1140 	struct l2cap_hdr *lh;
1141 	int hlen = __ertm_hdr_size(chan);
1142 
1143 	if (chan->fcs == L2CAP_FCS_CRC16)
1144 		hlen += L2CAP_FCS_SIZE;
1145 
1146 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
1147 
1148 	if (!skb)
1149 		return ERR_PTR(-ENOMEM);
1150 
1151 	lh = skb_put(skb, L2CAP_HDR_SIZE);
1152 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1153 	lh->cid = cpu_to_le16(chan->dcid);
1154 
1155 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1156 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1157 	else
1158 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1159 
1160 	if (chan->fcs == L2CAP_FCS_CRC16) {
1161 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1162 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1163 	}
1164 
1165 	skb->priority = HCI_PRIO_MAX;
1166 	return skb;
1167 }
1168 
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	/* Build and transmit an ERTM supervisory (S) frame described by
	 * @control.  Silently ignores non-S-frame requests and frames
	 * issued while the channel is moving between controllers.
	 */
	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is piggybacked on any non-poll S-frame. */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether our last advertised receiver state was "busy". */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* All supervisory frames except SREJ acknowledge reqseq, so the
	 * pending-ack timer can be stopped.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1209 
1210 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1211 {
1212 	struct l2cap_ctrl control;
1213 
1214 	BT_DBG("chan %p, poll %d", chan, poll);
1215 
1216 	memset(&control, 0, sizeof(control));
1217 	control.sframe = 1;
1218 	control.poll = poll;
1219 
1220 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1221 		control.super = L2CAP_SUPER_RNR;
1222 	else
1223 		control.super = L2CAP_SUPER_RR;
1224 
1225 	control.reqseq = chan->buffer_seq;
1226 	l2cap_send_sframe(chan, &control);
1227 }
1228 
1229 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1230 {
1231 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1232 		return true;
1233 
1234 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1235 }
1236 
1237 static bool __amp_capable(struct l2cap_chan *chan)
1238 {
1239 	struct l2cap_conn *conn = chan->conn;
1240 	struct hci_dev *hdev;
1241 	bool amp_available = false;
1242 
1243 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1244 		return false;
1245 
1246 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1247 		return false;
1248 
1249 	read_lock(&hci_dev_list_lock);
1250 	list_for_each_entry(hdev, &hci_dev_list, list) {
1251 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1252 		    test_bit(HCI_UP, &hdev->flags)) {
1253 			amp_available = true;
1254 			break;
1255 		}
1256 	}
1257 	read_unlock(&hci_dev_list_lock);
1258 
1259 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1260 		return amp_available;
1261 
1262 	return false;
1263 }
1264 
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	/* NOTE(review): placeholder — no Extended Flow Specification
	 * validation is implemented; every EFS is accepted as-is.
	 */
	return true;
}
1270 
1271 void l2cap_send_conn_req(struct l2cap_chan *chan)
1272 {
1273 	struct l2cap_conn *conn = chan->conn;
1274 	struct l2cap_conn_req req;
1275 
1276 	req.scid = cpu_to_le16(chan->scid);
1277 	req.psm  = chan->psm;
1278 
1279 	chan->ident = l2cap_get_ident(conn);
1280 
1281 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1282 
1283 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1284 }
1285 
1286 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1287 {
1288 	struct l2cap_create_chan_req req;
1289 	req.scid = cpu_to_le16(chan->scid);
1290 	req.psm  = chan->psm;
1291 	req.amp_id = amp_id;
1292 
1293 	chan->ident = l2cap_get_ident(chan->conn);
1294 
1295 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1296 		       sizeof(req), &req);
1297 }
1298 
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	/* Prepare an ERTM channel for a move to another controller:
	 * stop all timers, reset retransmission bookkeeping and park
	 * both state machines until the move completes.
	 */
	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Reset the retry count of every frame already sent once; the
	 * first frame with retries == 0 marks the start of the unsent
	 * tail, so the walk can stop there.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->l2cap.retries)
			bt_cb(skb)->l2cap.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmissions until the move is done. */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1333 
1334 static void l2cap_move_done(struct l2cap_chan *chan)
1335 {
1336 	u8 move_role = chan->move_role;
1337 	BT_DBG("chan %p", chan);
1338 
1339 	chan->move_state = L2CAP_MOVE_STABLE;
1340 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1341 
1342 	if (chan->mode != L2CAP_MODE_ERTM)
1343 		return;
1344 
1345 	switch (move_role) {
1346 	case L2CAP_MOVE_ROLE_INITIATOR:
1347 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1348 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1349 		break;
1350 	case L2CAP_MOVE_ROLE_RESPONDER:
1351 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1352 		break;
1353 	}
1354 }
1355 
1356 static void l2cap_chan_ready(struct l2cap_chan *chan)
1357 {
1358 	/* The channel may have already been flagged as connected in
1359 	 * case of receiving data before the L2CAP info req/rsp
1360 	 * procedure is complete.
1361 	 */
1362 	if (chan->state == BT_CONNECTED)
1363 		return;
1364 
1365 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1366 	chan->conf_state = 0;
1367 	__clear_chan_timer(chan);
1368 
1369 	switch (chan->mode) {
1370 	case L2CAP_MODE_LE_FLOWCTL:
1371 	case L2CAP_MODE_EXT_FLOWCTL:
1372 		if (!chan->tx_credits)
1373 			chan->ops->suspend(chan);
1374 		break;
1375 	}
1376 
1377 	chan->state = BT_CONNECTED;
1378 
1379 	chan->ops->ready(chan);
1380 }
1381 
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	/* Send an LE credit-based connection request, at most once per
	 * channel (guarded by FLAG_LE_CONN_REQ_SENT).
	 */
	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Fall back to the link MTU if the user did not configure one. */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	/* Initializes mps and rx_credits, read below. */
	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1407 
/* Scratch state used to build one Enhanced Credit Based connection
 * request that batches the triggering channel together with any
 * deferred channels sharing the same PSM and peer PID.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req req;
		__le16 scid[5];	/* up to 5 SCIDs per request — presumably the
				 * per-PDU maximum; confirm against the ECRED
				 * spec limit
				 */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel that initiated the request */
	struct pid *pid;		/* peer PID used to match deferred chans */
	int count;			/* number of SCIDs filled in pdu.scid */
};
1417 
/* Channel-list callback: fold a deferred channel into the pending ECRED
 * connection request being built in @data, if it matches the initiating
 * channel's PSM and peer PID.
 */
static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_conn_data *conn = data;
	struct pid *pid;

	/* The initiating channel is already in the PDU. */
	if (chan == conn->chan)
		return;

	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only add deferred channels with the same PID/PSM */
	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	/* Set the same ident so we can match on the rsp */
	chan->ident = conn->chan->ident;

	/* Include all channels deferred */
	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);

	conn->count++;
}
1449 
/* Send an Enhanced Credit Based connection request for @chan, batching
 * in any compatible deferred channels on the same connection.
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	/* Deferred channels are picked up later by an initiating channel
	 * via l2cap_ecred_defer_connect().
	 */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	/* Send the request at most once per channel. */
	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm     = chan->psm;
	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.req.mps     = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0]     = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	/* Append matching deferred channels to the same PDU. */
	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1482 
1483 static void l2cap_le_start(struct l2cap_chan *chan)
1484 {
1485 	struct l2cap_conn *conn = chan->conn;
1486 
1487 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1488 		return;
1489 
1490 	if (!chan->psm) {
1491 		l2cap_chan_ready(chan);
1492 		return;
1493 	}
1494 
1495 	if (chan->state == BT_CONNECT) {
1496 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1497 			l2cap_ecred_connect(chan);
1498 		else
1499 			l2cap_le_connect(chan);
1500 	}
1501 }
1502 
1503 static void l2cap_start_connection(struct l2cap_chan *chan)
1504 {
1505 	if (__amp_capable(chan)) {
1506 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1507 		a2mp_discover_amp(chan);
1508 	} else if (chan->conn->hcon->type == LE_LINK) {
1509 		l2cap_le_start(chan);
1510 	} else {
1511 		l2cap_send_conn_req(chan);
1512 	}
1513 }
1514 
/* Kick off the L2CAP information request procedure (feature mask query)
 * for @conn, at most once per connection, with a response timeout.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	/* Give up and proceed without feature info if no reply arrives. */
	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1532 
1533 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1534 {
1535 	/* The minimum encryption key size needs to be enforced by the
1536 	 * host stack before establishing any L2CAP connections. The
1537 	 * specification in theory allows a minimum of 1, but to align
1538 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1539 	 *
1540 	 * This check might also be called for unencrypted connections
1541 	 * that have no key size requirements. Ensure that the link is
1542 	 * actually encrypted before enforcing a key size.
1543 	 */
1544 	int min_key_size = hcon->hdev->min_enc_key_size;
1545 
1546 	/* On FIPS security level, key size must be 16 bytes */
1547 	if (hcon->sec_level == BT_SECURITY_FIPS)
1548 		min_key_size = 16;
1549 
1550 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1551 		hcon->enc_key_size >= min_key_size);
1552 }
1553 
/* Drive an outgoing channel towards connection: on LE links delegate to
 * the LE path, on BR/EDR first complete the info request procedure and
 * security checks, then start the connection.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* The feature mask must be known before connecting; the info
	 * response (or its timeout) re-enters this path via
	 * l2cap_conn_start().
	 */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	/* Refuse links encrypted with too-short keys; let the channel
	 * timer tear the channel down.
	 */
	if (l2cap_check_enc_key_size(conn->hcon))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1580 
1581 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1582 {
1583 	u32 local_feat_mask = l2cap_feat_mask;
1584 	if (!disable_ertm)
1585 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1586 
1587 	switch (mode) {
1588 	case L2CAP_MODE_ERTM:
1589 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1590 	case L2CAP_MODE_STREAMING:
1591 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1592 	default:
1593 		return 0x00;
1594 	}
1595 }
1596 
/* Send a Disconnection Request for @chan and move it to BT_DISCONN,
 * recording @err as the channel error. A2MP channels have no signaling
 * disconnect and only change state.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* Stop all ERTM timers before tearing the channel down. */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1623 
1624 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its connection state machine:
 * called once the info request procedure completes (or times out) and
 * whenever security status changes.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		/* Connectionless channels become ready immediately. */
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connect: security must be in place and
			 * no connect may already be pending.
			 */
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close channels that insist on a mode the peer
			 * does not support and cannot fall back.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connect waiting on security or user
			 * authorization: answer the pending Connection
			 * Request now.
			 */
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Send the first configure request only once and
			 * only after a successful connection response.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1704 
/* Post-connection setup specific to LE links: trigger pending pairing
 * and, as peripheral, request a connection parameter update if needed.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1737 
/* Called when the underlying link is up: start the info procedure on
 * BR/EDR, advance every attached channel, run LE-specific setup and
 * release any RX frames that were queued while the link came up.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels have their own setup path. */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed/connectionless channels only need the
			 * feature mask to be known.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process frames that arrived before the conn was ready. */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1778 
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* Only channels that demanded reliable delivery are failed. */
	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1795 
/* Info request timed out: mark the feature exchange as done (with no
 * remote features learned) and let channel setup proceed anyway.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1806 
1807 /*
1808  * l2cap_user
1809  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1810  * callback is called during registration. The ->remove callback is called
1811  * during unregistration.
1812  * An l2cap_user object can either be explicitly unregistered or when the
1813  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1814  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1815  * External modules must own a reference to the l2cap_conn object if they intend
1816  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1817  * any time if they don't.
1818  */
1819 
/* Register an external user on @conn. Returns 0 on success, -EINVAL if
 * the user is already registered, -ENODEV if the connection is already
 * being torn down, or the error from the user's ->probe() callback.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* A non-empty list node means the user is already registered. */
	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1857 
1858 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1859 {
1860 	struct hci_dev *hdev = conn->hcon->hdev;
1861 
1862 	hci_dev_lock(hdev);
1863 
1864 	if (list_empty(&user->list))
1865 		goto out_unlock;
1866 
1867 	list_del_init(&user->list);
1868 	user->remove(conn, user);
1869 
1870 out_unlock:
1871 	hci_dev_unlock(hdev);
1872 }
1873 EXPORT_SYMBOL(l2cap_unregister_user);
1874 
/* Detach and notify every registered user; ->remove() may modify the
 * list, so always re-fetch the first entry.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del_init(&user->list);
		user->remove(conn, user);
	}
}
1885 
/* Tear down the L2CAP state attached to @hcon: cancel pending work,
 * notify users, close every channel with @err and drop the connection
 * reference. Safe to call when no l2cap_data is attached.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame. */
	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so it survives l2cap_chan_del(). */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1941 
/* kref release callback: drop the hci_conn reference and free @conn. */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1949 
/* Take a reference on @conn; returned pointer equals the argument. */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1956 
/* Drop a reference on @conn; frees it via l2cap_conn_free() at zero. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1962 
1963 /* ---- Socket interface ---- */
1964 
1965 /* Find socket with psm and source / destination bdaddr.
1966  * Returns closest match.
1967  */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* The channel's source address type must match the link. */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Skip channels already being freed. */
				c = l2cap_chan_hold_unless_zero(c);
				if (!c)
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	/* The closest match is only returned if still referenced. */
	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
2019 
2020 static void l2cap_monitor_timeout(struct work_struct *work)
2021 {
2022 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2023 					       monitor_timer.work);
2024 
2025 	BT_DBG("chan %p", chan);
2026 
2027 	l2cap_chan_lock(chan);
2028 
2029 	if (!chan->conn) {
2030 		l2cap_chan_unlock(chan);
2031 		l2cap_chan_put(chan);
2032 		return;
2033 	}
2034 
2035 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2036 
2037 	l2cap_chan_unlock(chan);
2038 	l2cap_chan_put(chan);
2039 }
2040 
2041 static void l2cap_retrans_timeout(struct work_struct *work)
2042 {
2043 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2044 					       retrans_timer.work);
2045 
2046 	BT_DBG("chan %p", chan);
2047 
2048 	l2cap_chan_lock(chan);
2049 
2050 	if (!chan->conn) {
2051 		l2cap_chan_unlock(chan);
2052 		l2cap_chan_put(chan);
2053 		return;
2054 	}
2055 
2056 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2057 	l2cap_chan_unlock(chan);
2058 	l2cap_chan_put(chan);
2059 }
2060 
2061 static void l2cap_streaming_send(struct l2cap_chan *chan,
2062 				 struct sk_buff_head *skbs)
2063 {
2064 	struct sk_buff *skb;
2065 	struct l2cap_ctrl *control;
2066 
2067 	BT_DBG("chan %p, skbs %p", chan, skbs);
2068 
2069 	if (__chan_is_moving(chan))
2070 		return;
2071 
2072 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
2073 
2074 	while (!skb_queue_empty(&chan->tx_q)) {
2075 
2076 		skb = skb_dequeue(&chan->tx_q);
2077 
2078 		bt_cb(skb)->l2cap.retries = 1;
2079 		control = &bt_cb(skb)->l2cap;
2080 
2081 		control->reqseq = 0;
2082 		control->txseq = chan->next_tx_seq;
2083 
2084 		__pack_control(chan, control, skb);
2085 
2086 		if (chan->fcs == L2CAP_FCS_CRC16) {
2087 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2088 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2089 		}
2090 
2091 		l2cap_do_send(chan, skb);
2092 
2093 		BT_DBG("Sent txseq %u", control->txseq);
2094 
2095 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2096 		chan->frames_sent++;
2097 	}
2098 }
2099 
/* Transmit as many pending I-frames as the remote TX window allows.
 * Returns the number of frames sent, 0 when nothing could be sent, or
 * -ENOTCONN if the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggyback a pending F-bit on this I-frame. */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Each I-frame also acknowledges everything received. */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head to the next unsent frame; the
		 * original skb stays in tx_q for retransmission.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2169 
/* Retransmit every sequence number queued on chan->retrans_list,
 * re-encoding each frame's control field (and FCS) with the current
 * acknowledgment state. Disconnects the channel if a frame exceeds
 * the retry limit.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		/* Work on a copy of the stored control info so the
		 * updated reqseq/final bits reflect the present state.
		 */
		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2250 
/* Queue a single sequence number (the reqseq of a received SREJ) for
 * retransmission and kick the resend machinery.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2259 
/* Rebuild the retransmission list with every already-sent but unacked
 * frame starting at control->reqseq (from a received REJ), then resend
 * them all.  A set poll bit obliges us to answer with the F-bit set.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Start from a clean list; the contents are re-derived below. */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip forward to the first frame being rejected. */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue every frame from there up to (excluding) the first
		 * not-yet-sent frame.
		 */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2293 
/* Acknowledge received I-frames.  When locally busy, answer with RNR.
 * Otherwise try to piggyback the ack on pending I-frames; if none went
 * out, send an explicit RR once roughly 3/4 of the ack window is
 * outstanding, else (re)arm the ack timer to batch further acks.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Tell the peer to stop sending until the local busy
		 * condition clears.
		 */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Below the threshold: defer the ack so it can be batched. */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2343 
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into @skb itself, the remainder is split into continuation
 * fragments (at most conn->mtu bytes each) chained on skb's frag_list.
 *
 * Returns the number of bytes copied or a negative errno.  On error the
 * caller frees @skb, which also releases any fragments already linked.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link before copying so a failed copy is still freed
		 * together with the parent skb.
		 */
		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
				   &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Keep the parent skb's accounting in sync with the chain. */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2387 
/* Build a connectionless (G-frame) PDU: L2CAP header plus the 2-byte
 * PSM, followed by user data pulled from @msg.  Returns the skb or an
 * ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* First skb carries header + as much payload as fits the MTU;
	 * the rest continues in frag_list fragments.
	 */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2419 
/* Build a basic-mode B-frame: L2CAP header followed by the whole SDU
 * from @msg.  Returns the skb or an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	/* Payload beyond the MTU continues in frag_list fragments. */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2449 
/* Build one ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field (filled in just before transmission), an optional SDU length
 * field (first fragment of a segmented SDU only) and @len bytes of user
 * data.  Room for the FCS is accounted for in the header length but the
 * FCS itself is appended at send time.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Per-skb control-block state used by the ERTM machinery. */
	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2503 
/* Segment an outgoing SDU into I-frame PDUs queued on @seg_queue.
 * PDU size is bounded by the HCI MTU (so each PDU fits in one HCI
 * fragment), the remote MPS and the ERTM header/FCS overhead.  The
 * first PDU of a multi-PDU SDU carries the total SDU length with SAR
 * "start"; later PDUs are "continue" and the last one "end".
 * Returns 0 or a negative errno (@seg_queue is purged on failure).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the first PDU carries the SDU length. */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2570 
/* Build one LE credit-based flow-control PDU: L2CAP header, an optional
 * 2-byte SDU length (first PDU of an SDU only) and @len bytes of user
 * data from @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2613 
/* Segment an outgoing SDU into LE flow-control PDUs on @seg_queue.
 * Each PDU is limited to the remote MPS; only the first carries the
 * 2-byte SDU length, so subsequent PDUs get those bytes back for
 * payload.  Returns 0 or a negative errno (@seg_queue is purged on
 * failure).
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		/* After the first PDU, reclaim the SDU-length bytes. */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2649 
2650 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2651 {
2652 	int sent = 0;
2653 
2654 	BT_DBG("chan %p", chan);
2655 
2656 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2657 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2658 		chan->tx_credits--;
2659 		sent++;
2660 	}
2661 
2662 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2663 	       skb_queue_len(&chan->tx_q));
2664 }
2665 
/* Transmit an SDU from @msg on @chan according to the channel mode.
 *
 * Connectionless channels send a single G-frame.  LE/extended flow
 * control modes segment into credit-based PDUs and send as credits
 * allow.  Basic mode sends one unsegmented B-frame.  ERTM/streaming
 * modes segment into I-frames and hand them to the TX state machine.
 *
 * Returns the number of bytes accepted or a negative errno.  Called
 * with the channel locked; chan->ops->alloc_skb() may drop and
 * re-acquire the lock, hence the BT_CONNECTED rechecks below.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have closed while segmenting (lock dropped
		 * during allocation); -ENOTCONN takes precedence.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: ask the socket layer to stop the writer. */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2793 
/* Send an SREJ for every frame between expected_tx_seq and @txseq (the
 * out-of-sequence frame just received) that has not already been
 * buffered, recording each requested sequence number on srej_list.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Skip frames that already arrived out of order. */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	/* Resume expecting the frame after the one that triggered this. */
	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2816 
2817 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2818 {
2819 	struct l2cap_ctrl control;
2820 
2821 	BT_DBG("chan %p", chan);
2822 
2823 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2824 		return;
2825 
2826 	memset(&control, 0, sizeof(control));
2827 	control.sframe = 1;
2828 	control.super = L2CAP_SUPER_SREJ;
2829 	control.reqseq = chan->srej_list.tail;
2830 	l2cap_send_sframe(chan, &control);
2831 }
2832 
/* Re-send SREJ frames for every sequence number still pending on
 * srej_list, stopping early if @txseq is found (that frame has just
 * arrived).  Each popped entry other than @txseq is re-appended, so the
 * list is rotated exactly once.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2858 
/* Process a received acknowledgment: free every frame up to (but not
 * including) @reqseq from the tx queue, advance expected_ack_seq and
 * stop the retransmission timer once nothing remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing new acknowledged: either no frames are outstanding or
	 * this ack repeats the previous one.
	 */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	/* Everything acked: no retransmission needed until new frames go out. */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2890 
/* Abandon SREJ-based recovery: forget the requested frames, drop
 * anything buffered out of order and fall back to the plain RECV state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2900 
/* ERTM TX state machine, XMIT state: transmission is permitted, so
 * data requests and poll events are acted on immediately.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		/* Append the new frames and transmit what the window allows. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* l2cap_send_ack() sends RNR while CONN_LOCAL_BUSY is set. */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* An RNR went out while busy: poll the peer with
			 * RR(P=1) and wait for the F-bit response.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Poll the peer and pause transmission until F-bit arrives. */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer expired: poll instead of resending. */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2972 
2973 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2974 				  struct l2cap_ctrl *control,
2975 				  struct sk_buff_head *skbs, u8 event)
2976 {
2977 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2978 	       event);
2979 
2980 	switch (event) {
2981 	case L2CAP_EV_DATA_REQUEST:
2982 		if (chan->tx_send_head == NULL)
2983 			chan->tx_send_head = skb_peek(skbs);
2984 		/* Queue data, but don't send. */
2985 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2986 		break;
2987 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2988 		BT_DBG("Enter LOCAL_BUSY");
2989 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2990 
2991 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2992 			/* The SREJ_SENT state must be aborted if we are to
2993 			 * enter the LOCAL_BUSY state.
2994 			 */
2995 			l2cap_abort_rx_srej_sent(chan);
2996 		}
2997 
2998 		l2cap_send_ack(chan);
2999 
3000 		break;
3001 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
3002 		BT_DBG("Exit LOCAL_BUSY");
3003 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3004 
3005 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
3006 			struct l2cap_ctrl local_control;
3007 			memset(&local_control, 0, sizeof(local_control));
3008 			local_control.sframe = 1;
3009 			local_control.super = L2CAP_SUPER_RR;
3010 			local_control.poll = 1;
3011 			local_control.reqseq = chan->buffer_seq;
3012 			l2cap_send_sframe(chan, &local_control);
3013 
3014 			chan->retry_count = 1;
3015 			__set_monitor_timer(chan);
3016 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
3017 		}
3018 		break;
3019 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
3020 		l2cap_process_reqseq(chan, control->reqseq);
3021 		fallthrough;
3022 
3023 	case L2CAP_EV_RECV_FBIT:
3024 		if (control && control->final) {
3025 			__clear_monitor_timer(chan);
3026 			if (chan->unacked_frames > 0)
3027 				__set_retrans_timer(chan);
3028 			chan->retry_count = 0;
3029 			chan->tx_state = L2CAP_TX_STATE_XMIT;
3030 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
3031 		}
3032 		break;
3033 	case L2CAP_EV_EXPLICIT_POLL:
3034 		/* Ignore */
3035 		break;
3036 	case L2CAP_EV_MONITOR_TO:
3037 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3038 			l2cap_send_rr_or_rnr(chan, 1);
3039 			__set_monitor_timer(chan);
3040 			chan->retry_count++;
3041 		} else {
3042 			l2cap_send_disconn_req(chan, ECONNABORTED);
3043 		}
3044 		break;
3045 	default:
3046 		break;
3047 	}
3048 }
3049 
3050 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3051 		     struct sk_buff_head *skbs, u8 event)
3052 {
3053 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3054 	       chan, control, skbs, event, chan->tx_state);
3055 
3056 	switch (chan->tx_state) {
3057 	case L2CAP_TX_STATE_XMIT:
3058 		l2cap_tx_state_xmit(chan, control, skbs, event);
3059 		break;
3060 	case L2CAP_TX_STATE_WAIT_F:
3061 		l2cap_tx_state_wait_f(chan, control, skbs, event);
3062 		break;
3063 	default:
3064 		/* Ignore event */
3065 		break;
3066 	}
3067 }
3068 
/* Feed a received frame's reqseq and F-bit into the TX state machine. */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
3075 
/* Feed only a received F-bit (no reqseq) into the TX state machine. */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
3082 
3083 /* Copy frame to all raw sockets on that connection */
3084 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3085 {
3086 	struct sk_buff *nskb;
3087 	struct l2cap_chan *chan;
3088 
3089 	BT_DBG("conn %p", conn);
3090 
3091 	mutex_lock(&conn->chan_lock);
3092 
3093 	list_for_each_entry(chan, &conn->chan_l, list) {
3094 		if (chan->chan_type != L2CAP_CHAN_RAW)
3095 			continue;
3096 
3097 		/* Don't send frame to the channel it came from */
3098 		if (bt_cb(skb)->l2cap.chan == chan)
3099 			continue;
3100 
3101 		nskb = skb_clone(skb, GFP_KERNEL);
3102 		if (!nskb)
3103 			continue;
3104 		if (chan->ops->recv(chan, nskb))
3105 			kfree_skb(nskb);
3106 	}
3107 
3108 	mutex_unlock(&conn->chan_lock);
3109 }
3110 
3111 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling PDU: L2CAP header (signalling CID
 * chosen by link type), command header and @dlen bytes of @data.
 * Payload beyond the connection MTU continues in frag_list skbs.
 * Returns NULL if even the headers exceed the MTU or on allocation
 * failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the remainder of the first skb with payload. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees any fragments already linked onto skb as well. */
	kfree_skb(skb);
	return NULL;
}
3177 
/* Decode one configuration option at *ptr: report its type, length and
 * value (1/2/4-byte options by value, anything else as a pointer into
 * the buffer) and advance *ptr past it.  Returns the encoded size.
 *
 * NOTE(review): no bounds checking is done here — callers must ensure
 * the option header and opt->len bytes lie within the received buffer.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer to the data. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3211 
/* Append one configuration option at *ptr: 1/2/4-byte values are
 * encoded inline, larger values are copied from the pointer passed in
 * @val.  Advances *ptr past the option; silently drops the option when
 * fewer than @size bytes remain in the output buffer.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: @val carries a pointer to the data. */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3244 
/* Append an Extended Flow Specification option: ERTM uses the channel's
 * local service parameters, streaming mode advertises best-effort with
 * zeroed latency/flush values.  Other modes add nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3275 
/* Delayed-work handler for the ack timer: if any received frames remain
 * unacknowledged, send an explicit RR/RNR ack now.  The final
 * l2cap_chan_put() presumably balances a hold taken when the timer was
 * armed — verify against __set_ack_timer().
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3295 
/* Reset sequence numbers, SDU reassembly state, queues and AMP move
 * state for a freshly configured channel.  For ERTM mode additionally
 * initialise the RX/TX state machines, the timers and the SREJ and
 * retransmission sequence lists.  Returns 0 or a negative errno from
 * sequence-list allocation.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming mode needs none of the ERTM machinery below. */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	/* On failure, release the first list so nothing leaks. */
	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3340 
3341 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3342 {
3343 	switch (mode) {
3344 	case L2CAP_MODE_STREAMING:
3345 	case L2CAP_MODE_ERTM:
3346 		if (l2cap_mode_supported(mode, remote_feat_mask))
3347 			return mode;
3348 		fallthrough;
3349 	default:
3350 		return L2CAP_MODE_BASIC;
3351 	}
3352 }
3353 
/* Extended window size is usable only when A2MP is enabled locally and
 * the remote advertised the extended-window feature.
 */
static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
{
	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
}
3359 
/* Extended flow specification is usable only when A2MP is enabled
 * locally and the remote advertised the extended-flow feature.
 */
static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
{
	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
}
3365 
3366 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3367 				      struct l2cap_conf_rfc *rfc)
3368 {
3369 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3370 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3371 
3372 		/* Class 1 devices have must have ERTM timeouts
3373 		 * exceeding the Link Supervision Timeout.  The
3374 		 * default Link Supervision Timeout for AMP
3375 		 * controllers is 10 seconds.
3376 		 *
3377 		 * Class 1 devices use 0xffffffff for their
3378 		 * best-effort flush timeout, so the clamping logic
3379 		 * will result in a timeout that meets the above
3380 		 * requirement.  ERTM timeouts are 16-bit values, so
3381 		 * the maximum timeout is 65.535 seconds.
3382 		 */
3383 
3384 		/* Convert timeout to milliseconds and round */
3385 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3386 
3387 		/* This is the recommended formula for class 2 devices
3388 		 * that start ERTM timers when packets are sent to the
3389 		 * controller.
3390 		 */
3391 		ertm_to = 3 * ertm_to + 500;
3392 
3393 		if (ertm_to > 0xffff)
3394 			ertm_to = 0xffff;
3395 
3396 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3397 		rfc->monitor_timeout = rfc->retrans_timeout;
3398 	} else {
3399 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3400 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3401 	}
3402 }
3403 
3404 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3405 {
3406 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3407 	    __l2cap_ews_supported(chan->conn)) {
3408 		/* use extended control field */
3409 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3410 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3411 	} else {
3412 		chan->tx_win = min_t(u16, chan->tx_win,
3413 				     L2CAP_DEFAULT_TX_WINDOW);
3414 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3415 	}
3416 	chan->ack_win = chan->tx_win;
3417 }
3418 
3419 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3420 {
3421 	struct hci_conn *conn = chan->conn->hcon;
3422 
3423 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3424 
3425 	/* The 2-DH1 packet has between 2 and 56 information bytes
3426 	 * (including the 2-byte payload header)
3427 	 */
3428 	if (!(conn->pkt_type & HCI_2DH1))
3429 		chan->imtu = 54;
3430 
3431 	/* The 3-DH1 packet has between 2 and 85 information bytes
3432 	 * (including the 2-byte payload header)
3433 	 */
3434 	if (!(conn->pkt_type & HCI_3DH1))
3435 		chan->imtu = 83;
3436 
3437 	/* The 2-DH3 packet has between 2 and 369 information bytes
3438 	 * (including the 2-byte payload header)
3439 	 */
3440 	if (!(conn->pkt_type & HCI_2DH3))
3441 		chan->imtu = 367;
3442 
3443 	/* The 3-DH3 packet has between 2 and 554 information bytes
3444 	 * (including the 2-byte payload header)
3445 	 */
3446 	if (!(conn->pkt_type & HCI_3DH3))
3447 		chan->imtu = 552;
3448 
3449 	/* The 2-DH5 packet has between 2 and 681 information bytes
3450 	 * (including the 2-byte payload header)
3451 	 */
3452 	if (!(conn->pkt_type & HCI_2DH5))
3453 		chan->imtu = 679;
3454 
3455 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3456 	 * (including the 2-byte payload header)
3457 	 */
3458 	if (!(conn->pkt_type & HCI_3DH5))
3459 		chan->imtu = 1021;
3460 }
3461 
/* Build an outgoing L2CAP Configure Request PDU.
 *
 * @chan:      channel being configured
 * @data:      output buffer for the l2cap_conf_req PDU
 * @data_size: size of @data; l2cap_add_conf_opt() drops options that
 *             would not fit
 *
 * On the first request/response exchange this also selects the channel
 * mode, falling back to whatever the remote's feature mask supports.
 * Returns the number of bytes written to @data.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode is only (re)negotiated before any config traffic happened */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A "state 2" device insists on its configured mode and
		 * skips the fallback selection below.
		 */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		/* Degrade to a mode the remote advertises support for */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		/* imtu == 0 means "derive from the link's packet types" */
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only emit an explicit basic-mode RFC option if the peer
		 * understands the RFC option at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Cap the PDU size so a maximal frame (extended header +
		 * SDU length + FCS) still fits the link MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* Extended windows travel in a separate EWS option */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		/* Offer to drop the FCS when we don't want it and the
		 * remote understands the FCS option.
		 */
		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3587 
/* Parse the peer's accumulated Configure Request and build our response.
 *
 * @chan:      channel being configured; chan->conf_req/conf_len hold the
 *             concatenated request options
 * @data:      output buffer for the l2cap_conf_rsp PDU
 * @data_size: size of @data
 *
 * Walks every option, records the peer's parameters, and emits a
 * response whose result is SUCCESS, UNACCEPT, UNKNOWN or PENDING.
 * Returns the response length, or -ECONNREFUSED when negotiation
 * cannot converge (caller disconnects the channel).
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: decode every option the peer sent.  Options with a
	 * wrong length are silently skipped (break with no action).
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS option is accepted but ignored */
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended windows require A2MP support locally */
			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
				return -ECONNREFUSED;
			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			/* Hint options may be ignored; others must be
			 * echoed back with result "unknown option".
			 */
			if (hint)
				break;
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	/* Mode can only change early in the negotiation */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* A state-2 device refuses any mode other than its own */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		/* Propose our mode back; give up after one failed round */
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			/* Service types must be compatible unless one side
			 * carries no traffic.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* With EWS, the window came from the EWS option and
			 * the RFC field carries the classic default.
			 */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's PDU size to what fits our MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3812 
/* Parse a Configure Response from the peer and build the follow-up
 * Configure Request.
 *
 * @chan:   channel being configured
 * @rsp:    option payload of the received response
 * @len:    length of @rsp
 * @data:   output buffer for the new l2cap_conf_req PDU
 * @size:   size of @data
 * @result: in/out response result; may be downgraded to UNACCEPT here
 *
 * Returns the length of the request written to @data, or -ECONNREFUSED
 * when the peer's counter-proposal cannot be accepted.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	/* Echo back (possibly adjusted) options the peer responded with;
	 * wrongly-sized options are skipped.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			/* Refuse an MTU below the spec minimum */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* State-2 devices never accept a different mode */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Peer may shrink (never grow) our ack window */
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Incompatible service type: give up */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* We only accept a mode change away from basic mode */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			/* Without extended control, the RFC window bounds
			 * our ack window.
			 */
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3930 
3931 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3932 				u16 result, u16 flags)
3933 {
3934 	struct l2cap_conf_rsp *rsp = data;
3935 	void *ptr = rsp->data;
3936 
3937 	BT_DBG("chan %p", chan);
3938 
3939 	rsp->scid   = cpu_to_le16(chan->dcid);
3940 	rsp->result = cpu_to_le16(result);
3941 	rsp->flags  = cpu_to_le16(flags);
3942 
3943 	return ptr - data;
3944 }
3945 
3946 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3947 {
3948 	struct l2cap_le_conn_rsp rsp;
3949 	struct l2cap_conn *conn = chan->conn;
3950 
3951 	BT_DBG("chan %p", chan);
3952 
3953 	rsp.dcid    = cpu_to_le16(chan->scid);
3954 	rsp.mtu     = cpu_to_le16(chan->imtu);
3955 	rsp.mps     = cpu_to_le16(chan->mps);
3956 	rsp.credits = cpu_to_le16(chan->rx_credits);
3957 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3958 
3959 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3960 		       &rsp);
3961 }
3962 
3963 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3964 {
3965 	struct {
3966 		struct l2cap_ecred_conn_rsp rsp;
3967 		__le16 dcid[5];
3968 	} __packed pdu;
3969 	struct l2cap_conn *conn = chan->conn;
3970 	u16 ident = chan->ident;
3971 	int i = 0;
3972 
3973 	if (!ident)
3974 		return;
3975 
3976 	BT_DBG("chan %p ident %d", chan, ident);
3977 
3978 	pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
3979 	pdu.rsp.mps     = cpu_to_le16(chan->mps);
3980 	pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
3981 	pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3982 
3983 	mutex_lock(&conn->chan_lock);
3984 
3985 	list_for_each_entry(chan, &conn->chan_l, list) {
3986 		if (chan->ident != ident)
3987 			continue;
3988 
3989 		/* Reset ident so only one response is sent */
3990 		chan->ident = 0;
3991 
3992 		/* Include all channels pending with the same ident */
3993 		pdu.dcid[i++] = cpu_to_le16(chan->scid);
3994 	}
3995 
3996 	mutex_unlock(&conn->chan_lock);
3997 
3998 	l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
3999 			sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
4000 }
4001 
4002 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
4003 {
4004 	struct l2cap_conn_rsp rsp;
4005 	struct l2cap_conn *conn = chan->conn;
4006 	u8 buf[128];
4007 	u8 rsp_code;
4008 
4009 	rsp.scid   = cpu_to_le16(chan->dcid);
4010 	rsp.dcid   = cpu_to_le16(chan->scid);
4011 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4012 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4013 
4014 	if (chan->hs_hcon)
4015 		rsp_code = L2CAP_CREATE_CHAN_RSP;
4016 	else
4017 		rsp_code = L2CAP_CONN_RSP;
4018 
4019 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
4020 
4021 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
4022 
4023 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4024 		return;
4025 
4026 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4027 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4028 	chan->num_conf_req++;
4029 }
4030 
/* Extract final RFC/EWS parameters from a successful Configure Response
 * and apply them to the channel.
 *
 * @chan: channel whose ERTM/streaming parameters are updated
 * @rsp:  option payload of the received response
 * @len:  length of @rsp
 *
 * Only meaningful for ERTM and streaming channels; basic-mode channels
 * return immediately.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Pull the RFC and EWS options out of the response, skipping any
	 * with a wrong length.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* The ack window comes from EWS when extended control is
		 * in use, otherwise from the RFC txwin field.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
4086 
4087 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4088 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4089 				    u8 *data)
4090 {
4091 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
4092 
4093 	if (cmd_len < sizeof(*rej))
4094 		return -EPROTO;
4095 
4096 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4097 		return 0;
4098 
4099 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4100 	    cmd->ident == conn->info_ident) {
4101 		cancel_delayed_work(&conn->info_timer);
4102 
4103 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4104 		conn->info_ident = 0;
4105 
4106 		l2cap_conn_start(conn);
4107 	}
4108 
4109 	return 0;
4110 }
4111 
/* Handle an incoming Connection Request (BR/EDR or AMP Create Channel).
 *
 * @conn:     signalling connection the request arrived on
 * @cmd:      signalling command header (ident is echoed in the response)
 * @data:     the l2cap_conn_req payload (PSM + source CID)
 * @rsp_code: response opcode to use (CONN_RSP or CREATE_CHAN_RSP)
 * @amp_id:   destination controller; AMP_ID_BREDR for the BR/EDR path
 *
 * Looks up a listening channel for the PSM, validates security and the
 * requested CID, creates the new channel, and always sends a response
 * with the computed result/status.  May additionally trigger the
 * information request exchange and/or the first Configure Request.
 * Returns the new channel, or NULL when the request was refused.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Lock order: conn->chan_lock before the channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			/* Defer to userspace when the socket asked for it */
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still outstanding */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature exchange not finished yet: answer "pending" */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Kick off the feature-mask exchange if this is the first time */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* Immediately start configuration on a successful connect */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	return chan;
}
4254 
4255 static int l2cap_connect_req(struct l2cap_conn *conn,
4256 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4257 {
4258 	struct hci_dev *hdev = conn->hcon->hdev;
4259 	struct hci_conn *hcon = conn->hcon;
4260 
4261 	if (cmd_len < sizeof(struct l2cap_conn_req))
4262 		return -EPROTO;
4263 
4264 	hci_dev_lock(hdev);
4265 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4266 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4267 		mgmt_device_connected(hdev, hcon, NULL, 0);
4268 	hci_dev_unlock(hdev);
4269 
4270 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4271 	return 0;
4272 }
4273 
/* Handle a Connection Response (or AMP Create Channel Response).
 *
 * @conn:    signalling connection the response arrived on
 * @cmd:     signalling command header (ident matches our request)
 * @data:    the l2cap_conn_rsp payload
 *
 * Locates the local channel either by source CID or, when the peer
 * sent scid == 0, by the request ident.  Success moves the channel to
 * BT_CONFIG and sends the first Configure Request; "pending" just
 * marks the channel; any other result tears the channel down.
 * Returns 0 on success, -EPROTO for a short PDU, -EBADSLT when no
 * matching channel exists.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* scid == 0 can happen on rejects; fall back to ident lookup */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the initial Configure Request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a refusal: tear the channel down */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4346 
4347 static inline void set_default_fcs(struct l2cap_chan *chan)
4348 {
4349 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4350 	 * sides request it.
4351 	 */
4352 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4353 		chan->fcs = L2CAP_FCS_NONE;
4354 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4355 		chan->fcs = L2CAP_FCS_CRC16;
4356 }
4357 
4358 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4359 				    u8 ident, u16 flags)
4360 {
4361 	struct l2cap_conn *conn = chan->conn;
4362 
4363 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4364 	       flags);
4365 
4366 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4367 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4368 
4369 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4370 		       l2cap_build_conf_rsp(chan, data,
4371 					    L2CAP_CONF_SUCCESS, flags), data);
4372 }
4373 
4374 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4375 				   u16 scid, u16 dcid)
4376 {
4377 	struct l2cap_cmd_rej_cid rej;
4378 
4379 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4380 	rej.scid = __cpu_to_le16(scid);
4381 	rej.dcid = __cpu_to_le16(dcid);
4382 
4383 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4384 }
4385 
/* Handle an incoming L2CAP Configure Request for a BR/EDR channel.
 *
 * Option data may be fragmented across several requests (continuation
 * flag); fragments are accumulated in chan->conf_req and only parsed
 * once the final fragment arrives.
 *
 * Returns 0 (protocol-level errors are reported to the peer, not the
 * caller) or -EPROTO for a malformed command.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* On success the channel is returned locked with a reference
	 * held; both are dropped at the unlock label below.
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only valid while the channel is being set up */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions are configured: finish channel setup */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own Configure Request has not gone out yet: send it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4496 
/* Handle an incoming L2CAP Configure Response.
 *
 * SUCCESS stores the negotiated RFC option; PENDING keeps negotiation
 * open; UNKNOWN/UNACCEPT triggers a re-negotiation with adjusted
 * options (bounded by L2CAP_CONF_MAX_CONF_RSP attempts); any other
 * result aborts the channel.
 *
 * Returns 0, or -EPROTO for a malformed command.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* On success the channel is returned locked with a reference
	 * held; both are dropped at the done label below.
	 */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* BR/EDR channels respond immediately; high-speed
			 * channels defer until the logical link exists.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			/* Bail out if the returned options cannot fit in
			 * the local request buffer.
			 */
			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More option fragments follow; wait for the final response */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions are configured: finish channel setup */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4611 
/* Handle an incoming L2CAP Disconnection Request: acknowledge with a
 * Disconnection Response and tear the channel down.
 *
 * Returns 0, or -EPROTO if the command length is wrong.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* chan_lock protects the lookup and the l2cap_chan_del() below */
	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	/* Take a reference so the channel survives l2cap_chan_del() */
	l2cap_chan_hold(chan);
	l2cap_chan_lock(chan);

	/* Echo the CID pair from the channel's point of view */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4658 
/* Handle an incoming L2CAP Disconnection Response: finish tearing down
 * a channel we previously asked to disconnect.
 *
 * Returns 0, or -EPROTO if the command length is wrong.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* chan_lock protects the lookup and the l2cap_chan_del() below */
	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	/* Take a reference so the channel survives l2cap_chan_del() */
	l2cap_chan_hold(chan);
	l2cap_chan_lock(chan);

	/* Only act on a response we are actually waiting for */
	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_del(chan, 0);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4704 
4705 static inline int l2cap_information_req(struct l2cap_conn *conn,
4706 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4707 					u8 *data)
4708 {
4709 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4710 	u16 type;
4711 
4712 	if (cmd_len != sizeof(*req))
4713 		return -EPROTO;
4714 
4715 	type = __le16_to_cpu(req->type);
4716 
4717 	BT_DBG("type 0x%4.4x", type);
4718 
4719 	if (type == L2CAP_IT_FEAT_MASK) {
4720 		u8 buf[8];
4721 		u32 feat_mask = l2cap_feat_mask;
4722 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4723 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4724 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4725 		if (!disable_ertm)
4726 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4727 				| L2CAP_FEAT_FCS;
4728 		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4729 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4730 				| L2CAP_FEAT_EXT_WINDOW;
4731 
4732 		put_unaligned_le32(feat_mask, rsp->data);
4733 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4734 			       buf);
4735 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4736 		u8 buf[12];
4737 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4738 
4739 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4740 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4741 		rsp->data[0] = conn->local_fixed_chan;
4742 		memset(rsp->data + 1, 0, 7);
4743 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4744 			       buf);
4745 	} else {
4746 		struct l2cap_info_rsp rsp;
4747 		rsp.type   = cpu_to_le16(type);
4748 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4749 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4750 			       &rsp);
4751 	}
4752 
4753 	return 0;
4754 }
4755 
/* Handle an incoming Information Response during connection setup.
 *
 * After the feature mask arrives, a fixed-channel query may follow;
 * once all information is gathered (or the request failed) the
 * connection-level channel setup is kicked via l2cap_conn_start().
 *
 * Returns 0, or -EPROTO for a malformed command.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	/* Peer could not answer: mark the exchange done and carry on */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* Ask for the fixed channel map next if supported */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4818 
/* Handle an incoming Create Channel Request (AMP channel creation).
 *
 * amp_id 0 (AMP_ID_BREDR) falls back to a plain BR/EDR connect; any
 * other id must name a powered-up AMP controller, otherwise a
 * Create Channel Response with L2CAP_CR_BAD_AMP is sent.
 *
 * Returns 0, -EPROTO on bad length, or -EINVAL if A2MP is not enabled
 * locally.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The high-speed link to the same peer must already exist */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4895 
4896 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4897 {
4898 	struct l2cap_move_chan_req req;
4899 	u8 ident;
4900 
4901 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4902 
4903 	ident = l2cap_get_ident(chan->conn);
4904 	chan->ident = ident;
4905 
4906 	req.icid = cpu_to_le16(chan->scid);
4907 	req.dest_amp_id = dest_amp_id;
4908 
4909 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4910 		       &req);
4911 
4912 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4913 }
4914 
4915 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4916 {
4917 	struct l2cap_move_chan_rsp rsp;
4918 
4919 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4920 
4921 	rsp.icid = cpu_to_le16(chan->dcid);
4922 	rsp.result = cpu_to_le16(result);
4923 
4924 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4925 		       sizeof(rsp), &rsp);
4926 }
4927 
4928 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4929 {
4930 	struct l2cap_move_chan_cfm cfm;
4931 
4932 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4933 
4934 	chan->ident = l2cap_get_ident(chan->conn);
4935 
4936 	cfm.icid = cpu_to_le16(chan->scid);
4937 	cfm.result = cpu_to_le16(result);
4938 
4939 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4940 		       sizeof(cfm), &cfm);
4941 
4942 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4943 }
4944 
4945 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4946 {
4947 	struct l2cap_move_chan_cfm cfm;
4948 
4949 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4950 
4951 	cfm.icid = cpu_to_le16(icid);
4952 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4953 
4954 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4955 		       sizeof(cfm), &cfm);
4956 }
4957 
4958 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4959 					 u16 icid)
4960 {
4961 	struct l2cap_move_chan_cfm_rsp rsp;
4962 
4963 	BT_DBG("icid 0x%4.4x", icid);
4964 
4965 	rsp.icid = cpu_to_le16(icid);
4966 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4967 }
4968 
4969 static void __release_logical_link(struct l2cap_chan *chan)
4970 {
4971 	chan->hs_hchan = NULL;
4972 	chan->hs_hcon = NULL;
4973 
4974 	/* Placeholder - release the logical link */
4975 }
4976 
/* Handle failed logical link setup on an AMP controller.
 *
 * For a channel that is not yet connected this aborts channel
 * creation; for a connected channel it unwinds whatever stage of a
 * channel move was in progress, based on the local move role.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
5007 
/* Complete channel creation once the AMP logical link is up: attach
 * the hci_chan, send the deferred EFS config response (using the ident
 * stashed in chan->ident) and, if configuration has fully completed in
 * both directions, finish ERTM setup and mark the channel ready.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
5030 
/* Continue a channel move after the AMP logical link comes up,
 * advancing chan->move_state and sending the confirmation or response
 * appropriate to the local move role.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		/* Defer if the channel is locally busy, otherwise the
		 * initiator confirms and the responder answers success.
		 */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
5064 
5065 /* Call with chan locked */
5066 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
5067 		       u8 status)
5068 {
5069 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
5070 
5071 	if (status) {
5072 		l2cap_logical_fail(chan);
5073 		__release_logical_link(chan);
5074 		return;
5075 	}
5076 
5077 	if (chan->state != BT_CONNECTED) {
5078 		/* Ignore logical link if channel is on BR/EDR */
5079 		if (chan->local_amp_id != AMP_ID_BREDR)
5080 			l2cap_logical_finish_create(chan, hchan);
5081 	} else {
5082 		l2cap_logical_finish_move(chan, hchan);
5083 	}
5084 }
5085 
5086 void l2cap_move_start(struct l2cap_chan *chan)
5087 {
5088 	BT_DBG("chan %p", chan);
5089 
5090 	if (chan->local_amp_id == AMP_ID_BREDR) {
5091 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
5092 			return;
5093 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5094 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5095 		/* Placeholder - start physical link setup */
5096 	} else {
5097 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5098 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5099 		chan->move_id = 0;
5100 		l2cap_move_setup(chan);
5101 		l2cap_send_move_chan_req(chan, 0);
5102 	}
5103 }
5104 
/* Continue channel creation after physical link setup, for both the
 * outgoing (BT_CONNECT) and incoming directions.  On success the
 * outgoing side sends a Create Channel Request on the AMP controller;
 * on failure it falls back to a plain BR/EDR Connect Request.  The
 * incoming side answers with a Create Channel Response and, if
 * successful, starts configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		/* chan->ident was set when the request was received */
		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Channel accepted: move to config and send our
			 * own Configure Request.
			 */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}
	}
}
5156 
5157 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5158 				   u8 remote_amp_id)
5159 {
5160 	l2cap_move_setup(chan);
5161 	chan->move_id = local_amp_id;
5162 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
5163 
5164 	l2cap_send_move_chan_req(chan, remote_amp_id);
5165 }
5166 
/* Responder side of a move after physical link setup.  If an hci_chan
 * for the logical link can be obtained (currently a placeholder, so
 * hchan stays NULL) and it is connected, the move is answered with
 * success immediately; otherwise the channel waits for the logical
 * link, or rejects the move when no link is available.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
5191 
5192 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5193 {
5194 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5195 		u8 rsp_result;
5196 		if (result == -EINVAL)
5197 			rsp_result = L2CAP_MR_BAD_ID;
5198 		else
5199 			rsp_result = L2CAP_MR_NOT_ALLOWED;
5200 
5201 		l2cap_send_move_chan_rsp(chan, rsp_result);
5202 	}
5203 
5204 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
5205 	chan->move_state = L2CAP_MOVE_STABLE;
5206 
5207 	/* Restart data transmission */
5208 	l2cap_ertm_send(chan);
5209 }
5210 
/* Physical link setup confirmation from the AMP layer.  Dispatches to
 * channel creation, move-cancel, or the role-specific move path
 * depending on channel state, result, and local move role.
 *
 * Invoke with locked chan
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* Channel already going away: nothing to do */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
		return;

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
5242 
/* Handle an incoming Move Channel Request.
 *
 * Validates the channel (dynamic CID, ERTM/streaming mode, policy),
 * the destination controller, and move-collision rules, then either
 * rejects the move or becomes the move responder.  Every path answers
 * with a Move Channel Response.
 *
 * Returns 0, -EPROTO on bad length, or -EINVAL if A2MP is not enabled
 * locally.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	/* On success the channel is returned locked with a reference
	 * held; both are dropped after send_move_response below.
	 */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	/* Save the ident so l2cap_send_move_chan_rsp() answers the
	 * correct request.
	 */
	chan->ident = cmd->ident;

	/* Only dynamically allocated ERTM/streaming channels without a
	 * BR/EDR-only policy may be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* Destination must be a powered-up AMP controller */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision.  Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5340 
/* Advance the initiator's move state machine after a success or
 * pending Move Channel Response arrives for the given ICID.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	/* On success the channel is returned locked with a reference
	 * held; both are dropped at the end of this function.
	 */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		/* Channel is gone: confirm as unconfirmed by ICID only */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending result re-arms the (extended) move timer */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
5431 
/* Handle a failed Move Channel Response: unwind the move (or switch
 * roles on a collision) and send an unconfirmed Move Confirmation.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	/* On success the channel is returned locked with a reference
	 * held; both are dropped at the end of this function.
	 */
	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			/* Peer won the collision: become the responder */
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
5461 
5462 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5463 				  struct l2cap_cmd_hdr *cmd,
5464 				  u16 cmd_len, void *data)
5465 {
5466 	struct l2cap_move_chan_rsp *rsp = data;
5467 	u16 icid, result;
5468 
5469 	if (cmd_len != sizeof(*rsp))
5470 		return -EPROTO;
5471 
5472 	icid = le16_to_cpu(rsp->icid);
5473 	result = le16_to_cpu(rsp->result);
5474 
5475 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5476 
5477 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5478 		l2cap_move_continue(conn, icid, result);
5479 	else
5480 		l2cap_move_fail(conn, cmd->ident, icid, result);
5481 
5482 	return 0;
5483 }
5484 
/* Handle an incoming Move Channel Confirmation.  Finalizes the move on
 * the responder side (switching local_amp_id on a confirmed result)
 * and always acknowledges with a Confirmation Response, even when the
 * ICID is unknown.
 *
 * Returns 0, or -EPROTO for a malformed command.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	/* On success the channel is returned locked with a reference
	 * held; both are dropped at the end of this function.
	 */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			chan->local_amp_id = chan->move_id;
			/* A move back to BR/EDR drops the logical link */
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5527 
/* Handle a Move Channel Confirmation Response: the final PDU of the
 * move sequence, after which the move is complete on this side.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	/* Returns the channel locked and with a reference held; a missing
	 * channel is silently ignored (no response is defined for this PDU).
	 */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		/* Back on BR/EDR: the AMP logical link can be released */
		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5563 
5564 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5565 					      struct l2cap_cmd_hdr *cmd,
5566 					      u16 cmd_len, u8 *data)
5567 {
5568 	struct hci_conn *hcon = conn->hcon;
5569 	struct l2cap_conn_param_update_req *req;
5570 	struct l2cap_conn_param_update_rsp rsp;
5571 	u16 min, max, latency, to_multiplier;
5572 	int err;
5573 
5574 	if (hcon->role != HCI_ROLE_MASTER)
5575 		return -EINVAL;
5576 
5577 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5578 		return -EPROTO;
5579 
5580 	req = (struct l2cap_conn_param_update_req *) data;
5581 	min		= __le16_to_cpu(req->min);
5582 	max		= __le16_to_cpu(req->max);
5583 	latency		= __le16_to_cpu(req->latency);
5584 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5585 
5586 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5587 	       min, max, latency, to_multiplier);
5588 
5589 	memset(&rsp, 0, sizeof(rsp));
5590 
5591 	err = hci_check_conn_params(min, max, latency, to_multiplier);
5592 	if (err)
5593 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5594 	else
5595 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5596 
5597 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5598 		       sizeof(rsp), &rsp);
5599 
5600 	if (!err) {
5601 		u8 store_hint;
5602 
5603 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5604 						to_multiplier);
5605 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5606 				    store_hint, min, max, latency,
5607 				    to_multiplier);
5608 
5609 	}
5610 
5611 	return 0;
5612 }
5613 
/* Handle the peer's response to our LE Credit Based Connection Request.
 *
 * The matching pending channel is looked up by the command ident that was
 * stored when the request was sent.  Locking order: conn->chan_lock, then
 * the channel lock.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* On success the peer must supply an LE dynamic CID and MTU/MPS of
	 * at least 23, the LE minimum.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Refuse a dcid that is already in use by another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Raise the required security one step and retry once
		 * SMP has upgraded the link.
		 */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5700 
5701 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5702 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5703 				      u8 *data)
5704 {
5705 	int err = 0;
5706 
5707 	switch (cmd->code) {
5708 	case L2CAP_COMMAND_REJ:
5709 		l2cap_command_rej(conn, cmd, cmd_len, data);
5710 		break;
5711 
5712 	case L2CAP_CONN_REQ:
5713 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5714 		break;
5715 
5716 	case L2CAP_CONN_RSP:
5717 	case L2CAP_CREATE_CHAN_RSP:
5718 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5719 		break;
5720 
5721 	case L2CAP_CONF_REQ:
5722 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5723 		break;
5724 
5725 	case L2CAP_CONF_RSP:
5726 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5727 		break;
5728 
5729 	case L2CAP_DISCONN_REQ:
5730 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5731 		break;
5732 
5733 	case L2CAP_DISCONN_RSP:
5734 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5735 		break;
5736 
5737 	case L2CAP_ECHO_REQ:
5738 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5739 		break;
5740 
5741 	case L2CAP_ECHO_RSP:
5742 		break;
5743 
5744 	case L2CAP_INFO_REQ:
5745 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5746 		break;
5747 
5748 	case L2CAP_INFO_RSP:
5749 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5750 		break;
5751 
5752 	case L2CAP_CREATE_CHAN_REQ:
5753 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5754 		break;
5755 
5756 	case L2CAP_MOVE_CHAN_REQ:
5757 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5758 		break;
5759 
5760 	case L2CAP_MOVE_CHAN_RSP:
5761 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5762 		break;
5763 
5764 	case L2CAP_MOVE_CHAN_CFM:
5765 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5766 		break;
5767 
5768 	case L2CAP_MOVE_CHAN_CFM_RSP:
5769 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5770 		break;
5771 
5772 	default:
5773 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5774 		err = -EINVAL;
5775 		break;
5776 	}
5777 
5778 	return err;
5779 }
5780 
/* Handle an incoming LE Credit Based Connection Request.
 *
 * Looks up a listening channel for the PSM, validates security and the
 * requested source CID, and either creates a new channel (possibly
 * deferring the response to userspace) or replies with an error result.
 * Locking order: conn->chan_lock, then the parent channel lock.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 is the minimum LE MTU/MPS */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	/* Populate the new channel from the request and the link */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	/* Our local CID and initial credit grant go into the response */
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	/* Deferred to userspace: the response is sent later */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5906 
/* Handle an LE Flow Control Credit packet: add the peer-granted credits
 * to the channel's transmit budget and resume sending.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid	= __le16_to_cpu(pkt->cid);
	credits	= __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	/* Returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* The running credit total must never exceed
	 * LE_FLOWCTL_MAX_CREDITS; a peer pushing past that is
	 * misbehaving and gets disconnected.
	 */
	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		goto unlock;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5953 
/* Handle an L2CAP_CREDIT_BASED_CONNECTION_REQ, which may ask for up to
 * L2CAP_ECRED_MAX_CID channels at once.
 *
 * A per-SCID result is not possible in the response PDU, so a single
 * result code is reported while each dcid slot individually signals
 * success (non-zero CID) or failure (zero).
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	struct {
		struct l2cap_ecred_conn_rsp rsp;
		__le16 dcid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, len = 0;
	int i, num_scid;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	/* The fixed header must be present and the remainder must be a
	 * whole number of 16-bit source CIDs.
	 */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > ARRAY_SIZE(pdu.dcid)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto response;
	}

	psm  = req->psm;

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	memset(&pdu, 0, sizeof(pdu));

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		/* A zero dcid in the response marks this entry as failed;
		 * it is overwritten below on success.
		 */
		pdu.dcid[i] = 0x0000;
		len += sizeof(*pdu.dcid);

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm  = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response */
		if (!pdu.rsp.credits) {
			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
			pdu.rsp.mps = cpu_to_le16(chan->mps);
			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
		}

		pdu.dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

response:
	pdu.rsp.result = cpu_to_le16(result);

	/* Deferred to userspace: the response is sent later */
	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(pdu.rsp) + len, &pdu);

	return 0;
}
6096 
/* Handle an L2CAP_CREDIT_BASED_CONNECTION_RSP for a request we sent.
 *
 * Every pending (non-connected) ext-flowctl channel created under the
 * same command ident consumes one dcid slot from the response, in order.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	mutex_lock(&conn->chan_lock);

	/* cmd_len now tracks the remaining dcid bytes in the response */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Raise the required security one step and retry
			 * once SMP has upgraded the link.
			 */
			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return err;
}
6210 
6211 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6212 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6213 					 u8 *data)
6214 {
6215 	struct l2cap_ecred_reconf_req *req = (void *) data;
6216 	struct l2cap_ecred_reconf_rsp rsp;
6217 	u16 mtu, mps, result;
6218 	struct l2cap_chan *chan;
6219 	int i, num_scid;
6220 
6221 	if (!enable_ecred)
6222 		return -EINVAL;
6223 
6224 	if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) {
6225 		result = L2CAP_CR_LE_INVALID_PARAMS;
6226 		goto respond;
6227 	}
6228 
6229 	mtu = __le16_to_cpu(req->mtu);
6230 	mps = __le16_to_cpu(req->mps);
6231 
6232 	BT_DBG("mtu %u mps %u", mtu, mps);
6233 
6234 	if (mtu < L2CAP_ECRED_MIN_MTU) {
6235 		result = L2CAP_RECONF_INVALID_MTU;
6236 		goto respond;
6237 	}
6238 
6239 	if (mps < L2CAP_ECRED_MIN_MPS) {
6240 		result = L2CAP_RECONF_INVALID_MPS;
6241 		goto respond;
6242 	}
6243 
6244 	cmd_len -= sizeof(*req);
6245 	num_scid = cmd_len / sizeof(u16);
6246 	result = L2CAP_RECONF_SUCCESS;
6247 
6248 	for (i = 0; i < num_scid; i++) {
6249 		u16 scid;
6250 
6251 		scid = __le16_to_cpu(req->scid[i]);
6252 		if (!scid)
6253 			return -EPROTO;
6254 
6255 		chan = __l2cap_get_chan_by_dcid(conn, scid);
6256 		if (!chan)
6257 			continue;
6258 
6259 		/* If the MTU value is decreased for any of the included
6260 		 * channels, then the receiver shall disconnect all
6261 		 * included channels.
6262 		 */
6263 		if (chan->omtu > mtu) {
6264 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
6265 			       chan->omtu, mtu);
6266 			result = L2CAP_RECONF_INVALID_MTU;
6267 		}
6268 
6269 		chan->omtu = mtu;
6270 		chan->remote_mps = mps;
6271 	}
6272 
6273 respond:
6274 	rsp.result = cpu_to_le16(result);
6275 
6276 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
6277 		       &rsp);
6278 
6279 	return 0;
6280 }
6281 
6282 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6283 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6284 					 u8 *data)
6285 {
6286 	struct l2cap_chan *chan, *tmp;
6287 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6288 	u16 result;
6289 
6290 	if (cmd_len < sizeof(*rsp))
6291 		return -EPROTO;
6292 
6293 	result = __le16_to_cpu(rsp->result);
6294 
6295 	BT_DBG("result 0x%4.4x", rsp->result);
6296 
6297 	if (!result)
6298 		return 0;
6299 
6300 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6301 		if (chan->ident != cmd->ident)
6302 			continue;
6303 
6304 		l2cap_chan_del(chan, ECONNRESET);
6305 	}
6306 
6307 	return 0;
6308 }
6309 
6310 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6311 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6312 				       u8 *data)
6313 {
6314 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6315 	struct l2cap_chan *chan;
6316 
6317 	if (cmd_len < sizeof(*rej))
6318 		return -EPROTO;
6319 
6320 	mutex_lock(&conn->chan_lock);
6321 
6322 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6323 	if (!chan)
6324 		goto done;
6325 
6326 	l2cap_chan_lock(chan);
6327 	l2cap_chan_del(chan, ECONNREFUSED);
6328 	l2cap_chan_unlock(chan);
6329 
6330 done:
6331 	mutex_unlock(&conn->chan_lock);
6332 	return 0;
6333 }
6334 
6335 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
6336 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6337 				   u8 *data)
6338 {
6339 	int err = 0;
6340 
6341 	switch (cmd->code) {
6342 	case L2CAP_COMMAND_REJ:
6343 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
6344 		break;
6345 
6346 	case L2CAP_CONN_PARAM_UPDATE_REQ:
6347 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
6348 		break;
6349 
6350 	case L2CAP_CONN_PARAM_UPDATE_RSP:
6351 		break;
6352 
6353 	case L2CAP_LE_CONN_RSP:
6354 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
6355 		break;
6356 
6357 	case L2CAP_LE_CONN_REQ:
6358 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
6359 		break;
6360 
6361 	case L2CAP_LE_CREDITS:
6362 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
6363 		break;
6364 
6365 	case L2CAP_ECRED_CONN_REQ:
6366 		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
6367 		break;
6368 
6369 	case L2CAP_ECRED_CONN_RSP:
6370 		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
6371 		break;
6372 
6373 	case L2CAP_ECRED_RECONF_REQ:
6374 		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
6375 		break;
6376 
6377 	case L2CAP_ECRED_RECONF_RSP:
6378 		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
6379 		break;
6380 
6381 	case L2CAP_DISCONN_REQ:
6382 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
6383 		break;
6384 
6385 	case L2CAP_DISCONN_RSP:
6386 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
6387 		break;
6388 
6389 	default:
6390 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
6391 		err = -EINVAL;
6392 		break;
6393 	}
6394 
6395 	return err;
6396 }
6397 
6398 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
6399 					struct sk_buff *skb)
6400 {
6401 	struct hci_conn *hcon = conn->hcon;
6402 	struct l2cap_cmd_hdr *cmd;
6403 	u16 len;
6404 	int err;
6405 
6406 	if (hcon->type != LE_LINK)
6407 		goto drop;
6408 
6409 	if (skb->len < L2CAP_CMD_HDR_SIZE)
6410 		goto drop;
6411 
6412 	cmd = (void *) skb->data;
6413 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6414 
6415 	len = le16_to_cpu(cmd->len);
6416 
6417 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
6418 
6419 	if (len != skb->len || !cmd->ident) {
6420 		BT_DBG("corrupted command");
6421 		goto drop;
6422 	}
6423 
6424 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
6425 	if (err) {
6426 		struct l2cap_cmd_rej_unk rej;
6427 
6428 		BT_ERR("Wrong link type (%d)", err);
6429 
6430 		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6431 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
6432 			       sizeof(rej), &rej);
6433 	}
6434 
6435 drop:
6436 	kfree_skb(skb);
6437 }
6438 
/* Demultiplex a BR/EDR signaling channel PDU, which may carry several
 * commands back to back in a single skb.  The skb is always consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	/* Hand the raw signaling PDU to l2cap_raw_recv() before parsing */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	/* Iterate over every complete command header in the skb; each
	 * pass consumes the header plus the command payload.
	 */
	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* A claimed length beyond the data, or a zero ident,
		 * means the stream is corrupt: stop parsing entirely.
		 */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance past this command's payload */
		skb_pull(skb, len);
	}

drop:
	kfree_skb(skb);
}
6484 
6485 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
6486 {
6487 	u16 our_fcs, rcv_fcs;
6488 	int hdr_size;
6489 
6490 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6491 		hdr_size = L2CAP_EXT_HDR_SIZE;
6492 	else
6493 		hdr_size = L2CAP_ENH_HDR_SIZE;
6494 
6495 	if (chan->fcs == L2CAP_FCS_CRC16) {
6496 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6497 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6498 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6499 
6500 		if (our_fcs != rcv_fcs)
6501 			return -EBADMSG;
6502 	}
6503 	return 0;
6504 }
6505 
/* Answer a peer poll (P-bit) with a frame carrying the F-bit: an RNR if
 * we are locally busy, otherwise pending I-frames, and failing that a
 * plain RR so the F-bit is definitely sent.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* The peer just left its busy state: restart the retransmission
	 * timer if frames are still awaiting acknowledgement.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
6539 
6540 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
6541 			    struct sk_buff **last_frag)
6542 {
6543 	/* skb->len reflects data in skb as well as all fragments
6544 	 * skb->data_len reflects only data in fragments
6545 	 */
6546 	if (!skb_has_frag_list(skb))
6547 		skb_shinfo(skb)->frag_list = new_frag;
6548 
6549 	new_frag->next = NULL;
6550 
6551 	(*last_frag)->next = new_frag;
6552 	*last_frag = new_frag;
6553 
6554 	skb->len += new_frag->len;
6555 	skb->data_len += new_frag->len;
6556 	skb->truesize += new_frag->truesize;
6557 }
6558 
/* Reassemble a segmented SDU according to the frame's SAR field and
 * deliver complete SDUs via chan->ops->recv().
 *
 * On the paths that set skb = NULL the skb's ownership has moved into
 * chan->sdu; the error path at the bottom then frees only what this
 * function still owns.  Returns 0 on success or a negative errno, in
 * which case any partial SDU state is discarded.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* An unsegmented frame while reassembly is in progress
		 * is a protocol violation (falls through to err path).
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* A new SDU may not start while one is being reassembled */
		if (chan->sdu)
			break;

		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		/* The first segment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start segment must not already contain the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* Ownership moved to chan->sdu */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must not complete (or overrun) the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end segment must make the SDU exactly complete */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Discard the offending frame and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
6643 
/* Re-segment queued frames after an MTU change.  Not implemented yet;
 * always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
6649 
6650 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6651 {
6652 	u8 event;
6653 
6654 	if (chan->mode != L2CAP_MODE_ERTM)
6655 		return;
6656 
6657 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6658 	l2cap_tx(chan, NULL, NULL, event);
6659 }
6660 
/* Drain the SREJ queue: deliver consecutively-sequenced buffered frames
 * to the reassembly layer, starting at buffer_seq, until a gap (or a
 * local-busy condition, or an error) stops progress.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap in the sequence: wait for more retransmissions */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	/* All outstanding frames recovered: leave the SREJ recovery
	 * state and acknowledge what we received.
	 */
	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
6694 
/* Handle a received SREJ (Selective Reject) S-frame.
 *
 * The peer requests retransmission of the single I-frame with
 * sequence number control->reqseq.  Validate the request, enforce
 * the max_tx retry limit, then retransmit.  CONN_SREJ_ACT together
 * with srej_save_reqseq guards against retransmitting the same
 * frame twice when an SREJ and a poll/final exchange cross.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would select a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* Poll bit set: answer with the F-bit and retransmit */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this SREJ was already
			 * acted on while waiting for the F-bit.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6752 
/* Handle a received REJ (Reject) S-frame.
 *
 * The peer requests retransmission of all unacknowledged I-frames
 * starting at control->reqseq.  CONN_REJ_ACT prevents retransmitting
 * twice when a REJ and an F-bit response cross on the air.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would reject a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Retransmit only if not already done for this REJ */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6789 
/* Classify the txseq of a received I-frame relative to the current
 * ERTM receive state.
 *
 * Returns one of the L2CAP_TXSEQ_* values that drive the receive
 * state machines: EXPECTED, DUPLICATE, UNEXPECTED (a sequence gap,
 * which triggers SREJ recovery), the *_SREJ variants used while in
 * the SREJ_SENT state, or INVALID/INVALID_IGNORE for frames outside
 * the tx window (see the "double poll" discussion below for why
 * some invalid frames can be safely ignored).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6875 
/* ERTM receive state machine handler for the default RECV state.
 *
 * Processes one receive event: an I-frame, or an RR/REJ/RNR/SREJ
 * S-frame event.  An in-sequence I-frame is reassembled and
 * delivered; a sequence gap queues the frame on srej_q, sends an
 * SREJ and moves the channel to the SREJ_SENT state.
 *
 * Owns skb: it is consumed by reassembly or queueing (tracked via
 * skb_in_use), otherwise freed before returning.  Returns 0 or a
 * negative error, which causes the caller to disconnect.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* skb ownership passes to reassembly here */
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received: only the ack info is useful */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
7009 
/* ERTM receive state machine handler for the SREJ_SENT state.
 *
 * Entered after a sequence gap was detected and SREJ frames were
 * sent.  Received I-frames are buffered on srej_q until the missing
 * frames arrive; l2cap_rx_queued_iframes() then delivers the
 * in-order run and may return the channel to the RECV state.
 *
 * Owns skb: queued frames are tracked via skb_in_use, anything else
 * is freed before returning.  Returns 0 or a negative error.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* Head of the SREJ list arrived: queue it and
			 * try to deliver the now-sequential run.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
7152 
7153 static int l2cap_finish_move(struct l2cap_chan *chan)
7154 {
7155 	BT_DBG("chan %p", chan);
7156 
7157 	chan->rx_state = L2CAP_RX_STATE_RECV;
7158 
7159 	if (chan->hs_hcon)
7160 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7161 	else
7162 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7163 
7164 	return l2cap_resegment(chan);
7165 }
7166 
/* ERTM receive handler for the WAIT_P state (channel move in
 * progress, waiting for the peer's poll).
 *
 * On receiving the P-bit: rewind the transmit window to the peer's
 * reqseq, finish the move (state/MTU update and resegmentation),
 * answer with an F-bit, then re-dispatch any S-frame event to the
 * RECV handler.  I-frames and frames without the P-bit are protocol
 * errors here (-EPROTO).
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
7204 
/* ERTM receive handler for the WAIT_F state (channel move in
 * progress, waiting for the peer's final bit).
 *
 * On receiving the F-bit: return to the RECV state, rewind the
 * transmit window to the peer's reqseq, adopt the MTU of the link
 * now carrying the channel, resegment, and re-dispatch the event
 * (including its skb) to the RECV handler.  Frames without the
 * F-bit are a protocol error (-EPROTO).
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
7242 
7243 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7244 {
7245 	/* Make sure reqseq is for a packet that has been sent but not acked */
7246 	u16 unacked;
7247 
7248 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7249 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
7250 }
7251 
7252 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7253 		    struct sk_buff *skb, u8 event)
7254 {
7255 	int err = 0;
7256 
7257 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7258 	       control, skb, event, chan->rx_state);
7259 
7260 	if (__valid_reqseq(chan, control->reqseq)) {
7261 		switch (chan->rx_state) {
7262 		case L2CAP_RX_STATE_RECV:
7263 			err = l2cap_rx_state_recv(chan, control, skb, event);
7264 			break;
7265 		case L2CAP_RX_STATE_SREJ_SENT:
7266 			err = l2cap_rx_state_srej_sent(chan, control, skb,
7267 						       event);
7268 			break;
7269 		case L2CAP_RX_STATE_WAIT_P:
7270 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
7271 			break;
7272 		case L2CAP_RX_STATE_WAIT_F:
7273 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
7274 			break;
7275 		default:
7276 			/* shut it down */
7277 			break;
7278 		}
7279 	} else {
7280 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
7281 		       control->reqseq, chan->next_tx_seq,
7282 		       chan->expected_ack_seq);
7283 		l2cap_send_disconn_req(chan, ECONNRESET);
7284 	}
7285 
7286 	return err;
7287 }
7288 
/* Receive one I-frame in Streaming mode.
 *
 * Streaming mode has no retransmission: a frame with the expected
 * txseq is reassembled/delivered; anything else discards both the
 * frame and any partially reassembled SDU.  Reception then
 * resynchronizes on the received txseq.  Consumes skb; always
 * returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Gap or invalid frame: drop any partial SDU */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize on this frame's sequence number */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return 0;
}
7324 
/* Entry point for ERTM and Streaming mode data frames.
 *
 * Unpacks the control field, then validates FCS, payload length
 * against MPS, and the F/P bit combinations before dispatching:
 * I-frames go to the rx state machine (or the streaming receiver),
 * S-frames are mapped to the matching RR/REJ/RNR/SREJ event.
 * Invalid frames are dropped; protocol violations disconnect the
 * channel.  Consumes skb; always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Exclude SDU length and FCS fields from the payload length */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps L2CAP_SUPER_* values to receive events */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
7417 
7418 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7419 {
7420 	struct l2cap_conn *conn = chan->conn;
7421 	struct l2cap_le_credits pkt;
7422 	u16 return_credits;
7423 
7424 	return_credits = (chan->imtu / chan->mps) + 1;
7425 
7426 	if (chan->rx_credits >= return_credits)
7427 		return;
7428 
7429 	return_credits -= chan->rx_credits;
7430 
7431 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7432 
7433 	chan->rx_credits += return_credits;
7434 
7435 	pkt.cid     = cpu_to_le16(chan->scid);
7436 	pkt.credits = cpu_to_le16(return_credits);
7437 
7438 	chan->ident = l2cap_get_ident(conn);
7439 
7440 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
7441 }
7442 
7443 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7444 {
7445 	int err;
7446 
7447 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7448 
7449 	/* Wait recv to confirm reception before updating the credits */
7450 	err = chan->ops->recv(chan, skb);
7451 
7452 	/* Update credits whenever an SDU is received */
7453 	l2cap_chan_le_send_credits(chan);
7454 
7455 	return err;
7456 }
7457 
/* Receive one PDU on an LE / enhanced credit based flow control
 * channel.
 *
 * Debits one credit per PDU.  When no SDU is pending, the PDU
 * starts a new SDU and carries the SDU length in its first two
 * bytes; continuation PDUs are appended until the SDU is complete
 * and can be delivered via l2cap_ecred_recv().  Oversized SDUs or
 * PDUs abort the reassembly; a PDU arriving with no credits left
 * disconnects the channel.
 *
 * The early -ENOBUFS returns leave skb freeing to the caller; every
 * later path consumes or frees skb internally and returns 0 (see
 * the comment before the final return).
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU: starts with the SDU length field */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Whole SDU in a single PDU: deliver immediately */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* append_skb_frag takes ownership of skb */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
7561 
/* Route an incoming data packet to the connection-oriented channel
 * identified by its CID.
 *
 * Looks the channel up by source CID (creating an A2MP channel on
 * demand for L2CAP_CID_A2MP) and feeds the packet to the handler
 * for the channel's mode.  The channel returned locked/referenced
 * by l2cap_get_chan_by_scid() is unlocked and released on exit.
 * Consumes skb on all paths except where ops->recv() accepted it.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* Non-zero return means skb was not accepted: drop it */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
7637 
/* Deliver a connectionless (G-frame) packet to the channel bound to
 * its PSM.
 *
 * Only valid on ACL links.  The remote address and PSM are stashed
 * in the skb control block so recvmsg() can report the sender.  The
 * packet is dropped if no bound/connected channel exists or it
 * exceeds the channel MTU.  Consumes skb on all paths.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* Zero return means ops->recv() took ownership of skb */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
7674 
/* Main L2CAP receive path for one complete L2CAP frame.
 *
 * Queues the frame for later if the link is still coming up,
 * validates the basic-header length, filters data from rejected LE
 * peers, then demultiplexes on CID: signaling, connectionless and
 * LE signaling channels are handled by dedicated handlers, all
 * other CIDs go to l2cap_data_channel().  Consumes skb.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the remaining payload exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
7729 
7730 static void process_pending_rx(struct work_struct *work)
7731 {
7732 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7733 					       pending_rx_work);
7734 	struct sk_buff *skb;
7735 
7736 	BT_DBG("");
7737 
7738 	while ((skb = skb_dequeue(&conn->pending_rx)))
7739 		l2cap_recv_frame(conn, skb);
7740 }
7741 
/* Create (or return the existing) L2CAP connection object for an
 * HCI connection.
 *
 * Allocates the struct l2cap_conn, binds it to a freshly created
 * hci_chan, and initializes MTU from the controller (LE MTU for LE
 * links when set, ACL MTU otherwise), the fixed-channel mask,
 * locks, channel lists and work items.  Returns NULL on allocation
 * failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up for this hcon */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		fallthrough;
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	if (hcon->type == ACL_LINK &&
	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7808 
7809 static bool is_valid_psm(u16 psm, u8 dst_type)
7810 {
7811 	if (!psm)
7812 		return false;
7813 
7814 	if (bdaddr_type_is_le(dst_type))
7815 		return (psm <= 0x00ff);
7816 
7817 	/* PSM must be odd and lsb of upper byte must be 0 */
7818 	return ((psm & 0x0101) == 0x0001);
7819 }
7820 
/* Context passed to l2cap_chan_list() to count deferred channels that
 * share the same owner PID and PSM as @chan (see l2cap_chan_by_pid()).
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel initiating the connect */
	struct pid *pid;		/* peer PID to match against */
	int count;			/* number of matching channels */
};
7826 
7827 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7828 {
7829 	struct l2cap_chan_data *d = data;
7830 	struct pid *pid;
7831 
7832 	if (chan == d->chan)
7833 		return;
7834 
7835 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7836 		return;
7837 
7838 	pid = chan->ops->get_peer_pid(chan);
7839 
7840 	/* Only count deferred channels with the same PID/PSM */
7841 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7842 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7843 		return;
7844 
7845 	d->count++;
7846 }
7847 
/* Initiate an outgoing L2CAP connection on @chan towards @dst.
 *
 * Resolves the route, validates PSM/CID/mode for the channel type,
 * creates (or reuses) the underlying HCI connection and attaches the
 * channel to the resulting l2cap_conn.  Returns 0 on success (or when a
 * connection attempt is already in progress) and a negative errno
 * otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Only RAW channels may connect with neither a valid PSM nor a CID */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection oriented channels need a PSM ... */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	/* ... while fixed channels need a CID */
	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes that are disabled via module parameter */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising, connect directly; otherwise go through
		 * the connect-on-scan path.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level,
						   HCI_LE_CONN_TIMEOUT,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Bound the number of simultaneously pending ECRED connects that
	 * the same PID may have towards the same PSM.
	 */
	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* The requested destination CID must not already be in use */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, start channel setup right away */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
8034 
8035 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
8036 {
8037 	struct l2cap_conn *conn = chan->conn;
8038 	struct {
8039 		struct l2cap_ecred_reconf_req req;
8040 		__le16 scid;
8041 	} pdu;
8042 
8043 	pdu.req.mtu = cpu_to_le16(chan->imtu);
8044 	pdu.req.mps = cpu_to_le16(chan->mps);
8045 	pdu.scid    = cpu_to_le16(chan->scid);
8046 
8047 	chan->ident = l2cap_get_ident(conn);
8048 
8049 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
8050 		       sizeof(pdu), &pdu);
8051 }
8052 
8053 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8054 {
8055 	if (chan->imtu > mtu)
8056 		return -EINVAL;
8057 
8058 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
8059 
8060 	chan->imtu = mtu;
8061 
8062 	l2cap_ecred_reconfigure(chan);
8063 
8064 	return 0;
8065 }
8066 
8067 /* ---- L2CAP interface with lower layer (HCI) ---- */
8068 
8069 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8070 {
8071 	int exact = 0, lm1 = 0, lm2 = 0;
8072 	struct l2cap_chan *c;
8073 
8074 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8075 
8076 	/* Find listening sockets and check their link_mode */
8077 	read_lock(&chan_list_lock);
8078 	list_for_each_entry(c, &chan_list, global_l) {
8079 		if (c->state != BT_LISTEN)
8080 			continue;
8081 
8082 		if (!bacmp(&c->src, &hdev->bdaddr)) {
8083 			lm1 |= HCI_LM_ACCEPT;
8084 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8085 				lm1 |= HCI_LM_MASTER;
8086 			exact++;
8087 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
8088 			lm2 |= HCI_LM_ACCEPT;
8089 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8090 				lm2 |= HCI_LM_MASTER;
8091 		}
8092 	}
8093 	read_unlock(&chan_list_lock);
8094 
8095 	return exact ? lm1 : lm2;
8096 }
8097 
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * A reference is taken on the returned channel via
 * l2cap_chan_hold_unless_zero(); the caller is expected to drop it with
 * l2cap_chan_put().  Returns NULL when no further match exists (callers
 * must also tolerate NULL from a failed hold on a dying channel).
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume right after the previous hit, or start from the head */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Listener must be bound to our source address or wildcard */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
8133 
/* HCI connect-complete callback: set up L2CAP state for the new link and
 * offer the connection to every listening fixed channel.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	/* Only ACL and LE links carry L2CAP */
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	/* Connection attempt failed: tear down any L2CAP state */
	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping our reference on pchan */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
8194 
8195 int l2cap_disconn_ind(struct hci_conn *hcon)
8196 {
8197 	struct l2cap_conn *conn = hcon->l2cap_data;
8198 
8199 	BT_DBG("hcon %p", hcon);
8200 
8201 	if (!conn)
8202 		return HCI_ERROR_REMOTE_USER_TERM;
8203 	return conn->disc_reason;
8204 }
8205 
8206 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8207 {
8208 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8209 		return;
8210 
8211 	BT_DBG("hcon %p reason %d", hcon, reason);
8212 
8213 	l2cap_conn_del(hcon, bt_to_errno(reason));
8214 }
8215 
8216 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8217 {
8218 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8219 		return;
8220 
8221 	if (encrypt == 0x00) {
8222 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
8223 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8224 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
8225 			   chan->sec_level == BT_SECURITY_FIPS)
8226 			l2cap_chan_close(chan, ECONNREFUSED);
8227 	} else {
8228 		if (chan->sec_level == BT_SECURITY_MEDIUM)
8229 			__clear_chan_timer(chan);
8230 	}
8231 }
8232 
/* HCI security/encryption-change callback: walk every channel on the
 * connection and advance its state machine according to the new
 * security status of the link.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP channels are not affected by link security */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Link is now encrypted: adopt its security level */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security succeeded: continue the connect; also
			 * require a sufficiently long encryption key.
			 */
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Defer the decision to user space */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: reject and disconnect */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On success, follow up with our configure request */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
8324 
/* Append fragment into frame respecting the maximum len of rx_skb.
 *
 * On the first fragment an rx_skb of @len bytes is allocated and
 * conn->rx_len is set to the number of bytes still expected.  The
 * copied bytes are consumed from @skb and subtracted from rx_len.
 * Returns the number of bytes copied, or -ENOMEM on allocation failure.
 */
static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
			   u16 len)
{
	if (!conn->rx_skb) {
		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			return -ENOMEM;
		/* Init rx_len */
		conn->rx_len = len;
	}

	/* Copy as much as the rx_skb can hold */
	len = min_t(u16, len, skb->len);
	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
	skb_pull(skb, len);
	conn->rx_len -= len;

	return len;
}
8346 
/* Complete reading the L2CAP length field of a fragmented frame.
 *
 * Appends just enough bytes from @skb to finish the 2-byte length
 * field, then either fixes up conn->rx_len (when rx_skb already has
 * room for the whole frame) or reallocates rx_skb with the exact
 * expected size.  Returns a negative errno on allocation failure.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length; the bytes
	 * gathered so far (held in the old rx_skb) are copied across.
	 */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
8381 
8382 static void l2cap_recv_reset(struct l2cap_conn *conn)
8383 {
8384 	kfree_skb(conn->rx_skb);
8385 	conn->rx_skb = NULL;
8386 	conn->rx_len = 0;
8387 }
8388 
/* Entry point for ACL data from HCI: reassemble ACL fragments into
 * complete L2CAP frames and dispatch them to l2cap_recv_frame().
 * Consumes @skb on every path.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while reassembly is in progress means
		 * the previous frame was truncated - discard it.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_frag(conn, skb, conn->mtu) < 0)
				goto drop;
			return;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		/* Continuation without a pending start frame */
		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				return;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	/* The fragment's payload (if any) was copied; free the carrier skb */
	kfree_skb(skb);
}
8496 
/* HCI callbacks that drive the L2CAP layer from HCI connection events */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
8503 
8504 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8505 {
8506 	struct l2cap_chan *c;
8507 
8508 	read_lock(&chan_list_lock);
8509 
8510 	list_for_each_entry(c, &chan_list, global_l) {
8511 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8512 			   &c->src, c->src_type, &c->dst, c->dst_type,
8513 			   c->state, __le16_to_cpu(c->psm),
8514 			   c->scid, c->dcid, c->imtu, c->omtu,
8515 			   c->sec_level, c->mode);
8516 	}
8517 
8518 	read_unlock(&chan_list_lock);
8519 
8520 	return 0;
8521 }
8522 
/* Generates l2cap_debugfs_fops wired to l2cap_debugfs_show() */
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* Handle of the "l2cap" debugfs file, removed again in l2cap_exit() */
static struct dentry *l2cap_debugfs;
8526 
8527 int __init l2cap_init(void)
8528 {
8529 	int err;
8530 
8531 	err = l2cap_init_sockets();
8532 	if (err < 0)
8533 		return err;
8534 
8535 	hci_register_cb(&l2cap_cb);
8536 
8537 	if (IS_ERR_OR_NULL(bt_debugfs))
8538 		return 0;
8539 
8540 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8541 					    NULL, &l2cap_debugfs_fops);
8542 
8543 	return 0;
8544 }
8545 
/* Tear down the L2CAP layer in reverse order of l2cap_init() */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
8552 
/* Runtime-tunable module parameters (declared near the top of the file) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
8558