xref: /openbmc/linux/net/bluetooth/l2cap_core.c (revision d9f6e12f)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 bool enable_ecred;
49 
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
54 
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 				       u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
58 			   void *data);
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
61 
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 		     struct sk_buff_head *skbs, u8 event);
64 
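/* Map an HCI link type and address type to the corresponding
 * BDADDR_* type (BR/EDR, LE public or LE random).
 */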
65 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
66 {
67 	if (link_type == LE_LINK) {
68 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
69 			return BDADDR_LE_PUBLIC;
70 		else
71 			return BDADDR_LE_RANDOM;
72 	}
73 
74 	return BDADDR_BREDR;
75 }
76 
77 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
78 {
79 	return bdaddr_type(hcon->type, hcon->src_type);
80 }
81 
82 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
83 {
84 	return bdaddr_type(hcon->type, hcon->dst_type);
85 }
86 
87 /* ---- L2CAP channels ---- */
88 
89 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
90 						   u16 cid)
91 {
92 	struct l2cap_chan *c;
93 
94 	list_for_each_entry(c, &conn->chan_l, list) {
95 		if (c->dcid == cid)
96 			return c;
97 	}
98 	return NULL;
99 }
100 
101 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
102 						   u16 cid)
103 {
104 	struct l2cap_chan *c;
105 
106 	list_for_each_entry(c, &conn->chan_l, list) {
107 		if (c->scid == cid)
108 			return c;
109 	}
110 	return NULL;
111 }
112 
113 /* Find channel with given SCID.
114  * Returns locked channel. */
115 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
116 						 u16 cid)
117 {
118 	struct l2cap_chan *c;
119 
120 	mutex_lock(&conn->chan_lock);
121 	c = __l2cap_get_chan_by_scid(conn, cid);
122 	if (c)
123 		l2cap_chan_lock(c);
124 	mutex_unlock(&conn->chan_lock);
125 
126 	return c;
127 }
128 
129 /* Find channel with given DCID.
130  * Returns locked channel.
131  */
132 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
133 						 u16 cid)
134 {
135 	struct l2cap_chan *c;
136 
137 	mutex_lock(&conn->chan_lock);
138 	c = __l2cap_get_chan_by_dcid(conn, cid);
139 	if (c)
140 		l2cap_chan_lock(c);
141 	mutex_unlock(&conn->chan_lock);
142 
143 	return c;
144 }
145 
146 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
147 						    u8 ident)
148 {
149 	struct l2cap_chan *c;
150 
151 	list_for_each_entry(c, &conn->chan_l, list) {
152 		if (c->ident == ident)
153 			return c;
154 	}
155 	return NULL;
156 }
157 
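/* Find channel with given ident.
 * Returns locked channel.
 */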
158 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
159 						  u8 ident)
160 {
161 	struct l2cap_chan *c;
162 
163 	mutex_lock(&conn->chan_lock);
164 	c = __l2cap_get_chan_by_ident(conn, ident);
165 	if (c)
166 		l2cap_chan_lock(c);
167 	mutex_unlock(&conn->chan_lock);
168 
169 	return c;
170 }
171 
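/* Search the global channel list for a channel bound to the given
 * source PSM and source address on the same transport (BR/EDR vs. LE).
 */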
172 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
173 						      u8 src_type)
174 {
175 	struct l2cap_chan *c;
176 
177 	list_for_each_entry(c, &chan_list, global_l) {
178 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
179 			continue;
180 
181 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
182 			continue;
183 
184 		if (c->sport == psm && !bacmp(&c->src, src))
185 			return c;
186 	}
187 	return NULL;
188 }
189 
190 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
191 {
192 	int err;
193 
194 	write_lock(&chan_list_lock);
195 
196 	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
197 		err = -EADDRINUSE;
198 		goto done;
199 	}
200 
201 	if (psm) {
202 		chan->psm = psm;
203 		chan->sport = psm;
204 		err = 0;
205 	} else {
206 		u16 p, start, end, incr;
207 
208 		if (chan->src_type == BDADDR_BREDR) {
209 			start = L2CAP_PSM_DYN_START;
210 			end = L2CAP_PSM_AUTO_END;
211 			incr = 2;
212 		} else {
213 			start = L2CAP_PSM_LE_DYN_START;
214 			end = L2CAP_PSM_LE_DYN_END;
215 			incr = 1;
216 		}
217 
218 		err = -EINVAL;
219 		for (p = start; p <= end; p += incr)
220 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
221 							 chan->src_type)) {
222 				chan->psm   = cpu_to_le16(p);
223 				chan->sport = cpu_to_le16(p);
224 				err = 0;
225 				break;
226 			}
227 	}
228 
229 done:
230 	write_unlock(&chan_list_lock);
231 	return err;
232 }
233 EXPORT_SYMBOL_GPL(l2cap_add_psm);
234 
235 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
236 {
237 	write_lock(&chan_list_lock);
238 
239 	/* Override the defaults (which are for conn-oriented) */
240 	chan->omtu = L2CAP_DEFAULT_MTU;
241 	chan->chan_type = L2CAP_CHAN_FIXED;
242 
243 	chan->scid = scid;
244 
245 	write_unlock(&chan_list_lock);
246 
247 	return 0;
248 }
249 
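/* Allocate the first unused dynamic CID on this connection,
 * or 0 if the dynamic range is exhausted.
 */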
250 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
251 {
252 	u16 cid, dyn_end;
253 
254 	if (conn->hcon->type == LE_LINK)
255 		dyn_end = L2CAP_CID_LE_DYN_END;
256 	else
257 		dyn_end = L2CAP_CID_DYN_END;
258 
259 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
260 		if (!__l2cap_get_chan_by_scid(conn, cid))
261 			return cid;
262 	}
263 
264 	return 0;
265 }
266 
267 static void l2cap_state_change(struct l2cap_chan *chan, int state)
268 {
269 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
270 	       state_to_string(state));
271 
272 	chan->state = state;
273 	chan->ops->state_change(chan, state, 0);
274 }
275 
276 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
277 						int state, int err)
278 {
279 	chan->state = state;
280 	chan->ops->state_change(chan, chan->state, err);
281 }
282 
283 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
284 {
285 	chan->ops->state_change(chan, chan->state, err);
286 }
287 
288 static void __set_retrans_timer(struct l2cap_chan *chan)
289 {
290 	if (!delayed_work_pending(&chan->monitor_timer) &&
291 	    chan->retrans_timeout) {
292 		l2cap_set_timer(chan, &chan->retrans_timer,
293 				msecs_to_jiffies(chan->retrans_timeout));
294 	}
295 }
296 
297 static void __set_monitor_timer(struct l2cap_chan *chan)
298 {
299 	__clear_retrans_timer(chan);
300 	if (chan->monitor_timeout) {
301 		l2cap_set_timer(chan, &chan->monitor_timer,
302 				msecs_to_jiffies(chan->monitor_timeout));
303 	}
304 }
305 
306 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
307 					       u16 seq)
308 {
309 	struct sk_buff *skb;
310 
311 	skb_queue_walk(head, skb) {
312 		if (bt_cb(skb)->l2cap.txseq == seq)
313 			return skb;
314 	}
315 
316 	return NULL;
317 }
318 
319 /* ---- L2CAP sequence number lists ---- */
320 
321 /* For ERTM, ordered lists of sequence numbers must be tracked for
322  * SREJ requests that are received and for frames that are to be
323  * retransmitted. These seq_list functions implement a singly-linked
324  * list in an array, where membership in the list can also be checked
325  * in constant time. Items can also be added to the tail of the list
326  * and removed from the head in constant time, without further memory
327  * allocs or frees.
328  */
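/* For example, with a mask of 0x0f: appending seq 5 and then seq 9
 * stores list[5] = 9 and list[9] = L2CAP_SEQ_LIST_TAIL with head = 5
 * and tail = 9; a subsequent pop returns 5 and advances head to 9.
 */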
329 
330 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
331 {
332 	size_t alloc_size, i;
333 
334 	/* Allocated size is a power of 2 to map sequence numbers
335 	 * (which may be up to 14 bits) into a smaller array that is
336 	 * sized for the negotiated ERTM transmit windows.
337 	 */
338 	alloc_size = roundup_pow_of_two(size);
339 
340 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
341 	if (!seq_list->list)
342 		return -ENOMEM;
343 
344 	seq_list->mask = alloc_size - 1;
345 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
346 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
347 	for (i = 0; i < alloc_size; i++)
348 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
349 
350 	return 0;
351 }
352 
353 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
354 {
355 	kfree(seq_list->list);
356 }
357 
358 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
359 					   u16 seq)
360 {
361 	/* Constant-time check for list membership */
362 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
363 }
364 
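/* Remove and return the sequence number at the head of the list in
 * constant time; the list is reset to empty once the tail is popped.
 */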
365 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
366 {
367 	u16 seq = seq_list->head;
368 	u16 mask = seq_list->mask;
369 
370 	seq_list->head = seq_list->list[seq & mask];
371 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
372 
373 	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
374 		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
376 	}
377 
378 	return seq;
379 }
380 
381 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
382 {
383 	u16 i;
384 
385 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
386 		return;
387 
388 	for (i = 0; i <= seq_list->mask; i++)
389 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
390 
391 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
392 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
393 }
394 
395 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
396 {
397 	u16 mask = seq_list->mask;
398 
399 	/* All appends happen in constant time */
400 
401 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
402 		return;
403 
404 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
405 		seq_list->head = seq;
406 	else
407 		seq_list->list[seq_list->tail & mask] = seq;
408 
409 	seq_list->tail = seq;
410 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
411 }
412 
413 static void l2cap_chan_timeout(struct work_struct *work)
414 {
415 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
416 					       chan_timer.work);
417 	struct l2cap_conn *conn = chan->conn;
418 	int reason;
419 
420 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
421 
422 	mutex_lock(&conn->chan_lock);
423 	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
424 	 * this work. No need to call l2cap_chan_hold(chan) here again.
425 	 */
426 	l2cap_chan_lock(chan);
427 
428 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
429 		reason = ECONNREFUSED;
430 	else if (chan->state == BT_CONNECT &&
431 		 chan->sec_level != BT_SECURITY_SDP)
432 		reason = ECONNREFUSED;
433 	else
434 		reason = ETIMEDOUT;
435 
436 	l2cap_chan_close(chan, reason);
437 
438 	chan->ops->close(chan);
439 
440 	l2cap_chan_unlock(chan);
441 	l2cap_chan_put(chan);
442 
443 	mutex_unlock(&conn->chan_lock);
444 }
445 
446 struct l2cap_chan *l2cap_chan_create(void)
447 {
448 	struct l2cap_chan *chan;
449 
450 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
451 	if (!chan)
452 		return NULL;
453 
454 	mutex_init(&chan->lock);
455 
456 	/* Set default lock nesting level */
457 	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
458 
459 	write_lock(&chan_list_lock);
460 	list_add(&chan->global_l, &chan_list);
461 	write_unlock(&chan_list_lock);
462 
463 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
464 
465 	chan->state = BT_OPEN;
466 
467 	kref_init(&chan->kref);
468 
469 	/* This flag is cleared in l2cap_chan_ready() */
470 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
471 
472 	BT_DBG("chan %p", chan);
473 
474 	return chan;
475 }
476 EXPORT_SYMBOL_GPL(l2cap_chan_create);
477 
478 static void l2cap_chan_destroy(struct kref *kref)
479 {
480 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
481 
482 	BT_DBG("chan %p", chan);
483 
484 	write_lock(&chan_list_lock);
485 	list_del(&chan->global_l);
486 	write_unlock(&chan_list_lock);
487 
488 	kfree(chan);
489 }
490 
491 void l2cap_chan_hold(struct l2cap_chan *c)
492 {
493 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
494 
495 	kref_get(&c->kref);
496 }
497 
498 void l2cap_chan_put(struct l2cap_chan *c)
499 {
500 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
501 
502 	kref_put(&c->kref, l2cap_chan_destroy);
503 }
504 EXPORT_SYMBOL_GPL(l2cap_chan_put);
505 
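/* Reset a channel to the default ERTM/streaming parameters, security
 * level and flush timeout.
 */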
506 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
507 {
508 	chan->fcs  = L2CAP_FCS_CRC16;
509 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
510 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
511 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
512 	chan->remote_max_tx = chan->max_tx;
513 	chan->remote_tx_win = chan->tx_win;
514 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
515 	chan->sec_level = BT_SECURITY_LOW;
516 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
517 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
518 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
519 	chan->conf_state = 0;
520 
521 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
522 }
523 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
524 
525 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
526 {
527 	chan->sdu = NULL;
528 	chan->sdu_last_frag = NULL;
529 	chan->sdu_len = 0;
530 	chan->tx_credits = tx_credits;
531 	/* Derive MPS from connection MTU to stop HCI fragmentation */
532 	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
533 	/* Give enough credits for a full packet */
534 	chan->rx_credits = (chan->imtu / chan->mps) + 1;
535 
536 	skb_queue_head_init(&chan->tx_q);
537 }
538 
539 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
540 {
541 	l2cap_le_flowctl_init(chan, tx_credits);
542 
543 	/* L2CAP implementations shall support a minimum MPS of 64 octets */
544 	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
545 		chan->mps = L2CAP_ECRED_MIN_MPS;
546 		chan->rx_credits = (chan->imtu / chan->mps) + 1;
547 	}
548 }
549 
550 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
551 {
552 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
553 	       __le16_to_cpu(chan->psm), chan->dcid);
554 
555 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
556 
557 	chan->conn = conn;
558 
559 	switch (chan->chan_type) {
560 	case L2CAP_CHAN_CONN_ORIENTED:
561 		/* Alloc CID for connection-oriented socket */
562 		chan->scid = l2cap_alloc_cid(conn);
563 		if (conn->hcon->type == ACL_LINK)
564 			chan->omtu = L2CAP_DEFAULT_MTU;
565 		break;
566 
567 	case L2CAP_CHAN_CONN_LESS:
568 		/* Connectionless socket */
569 		chan->scid = L2CAP_CID_CONN_LESS;
570 		chan->dcid = L2CAP_CID_CONN_LESS;
571 		chan->omtu = L2CAP_DEFAULT_MTU;
572 		break;
573 
574 	case L2CAP_CHAN_FIXED:
575 		/* Caller will set CID and CID specific MTU values */
576 		break;
577 
578 	default:
579 		/* Raw socket can send/recv signalling messages only */
580 		chan->scid = L2CAP_CID_SIGNALING;
581 		chan->dcid = L2CAP_CID_SIGNALING;
582 		chan->omtu = L2CAP_DEFAULT_MTU;
583 	}
584 
585 	chan->local_id		= L2CAP_BESTEFFORT_ID;
586 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
587 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
588 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
589 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
590 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
591 
592 	l2cap_chan_hold(chan);
593 
594 	/* Only keep a reference for fixed channels if they requested it */
595 	if (chan->chan_type != L2CAP_CHAN_FIXED ||
596 	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
597 		hci_conn_hold(conn->hcon);
598 
599 	list_add(&chan->list, &conn->chan_l);
600 }
601 
602 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
603 {
604 	mutex_lock(&conn->chan_lock);
605 	__l2cap_chan_add(conn, chan);
606 	mutex_unlock(&conn->chan_lock);
607 }
608 
609 void l2cap_chan_del(struct l2cap_chan *chan, int err)
610 {
611 	struct l2cap_conn *conn = chan->conn;
612 
613 	__clear_chan_timer(chan);
614 
615 	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
616 	       state_to_string(chan->state));
617 
618 	chan->ops->teardown(chan, err);
619 
620 	if (conn) {
621 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
622 		/* Delete from channel list */
623 		list_del(&chan->list);
624 
625 		l2cap_chan_put(chan);
626 
627 		chan->conn = NULL;
628 
629 		/* Reference was only held for non-fixed channels or
630 		 * fixed channels that explicitly requested it using the
631 		 * FLAG_HOLD_HCI_CONN flag.
632 		 */
633 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
634 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
635 			hci_conn_drop(conn->hcon);
636 
637 		if (mgr && mgr->bredr_chan == chan)
638 			mgr->bredr_chan = NULL;
639 	}
640 
641 	if (chan->hs_hchan) {
642 		struct hci_chan *hs_hchan = chan->hs_hchan;
643 
644 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
645 		amp_disconnect_logical_link(hs_hchan);
646 	}
647 
648 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
649 		return;
650 
651 	switch (chan->mode) {
652 	case L2CAP_MODE_BASIC:
653 		break;
654 
655 	case L2CAP_MODE_LE_FLOWCTL:
656 	case L2CAP_MODE_EXT_FLOWCTL:
657 		skb_queue_purge(&chan->tx_q);
658 		break;
659 
660 	case L2CAP_MODE_ERTM:
661 		__clear_retrans_timer(chan);
662 		__clear_monitor_timer(chan);
663 		__clear_ack_timer(chan);
664 
665 		skb_queue_purge(&chan->srej_q);
666 
667 		l2cap_seq_list_free(&chan->srej_list);
668 		l2cap_seq_list_free(&chan->retrans_list);
669 		fallthrough;
670 
671 	case L2CAP_MODE_STREAMING:
672 		skb_queue_purge(&chan->tx_q);
673 		break;
674 	}
677 }
678 EXPORT_SYMBOL_GPL(l2cap_chan_del);
679 
680 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
681 			      void *data)
682 {
683 	struct l2cap_chan *chan;
684 
685 	list_for_each_entry(chan, &conn->chan_l, list) {
686 		func(chan, data);
687 	}
688 }
689 
690 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
691 		     void *data)
692 {
693 	if (!conn)
694 		return;
695 
696 	mutex_lock(&conn->chan_lock);
697 	__l2cap_chan_list(conn, func, data);
698 	mutex_unlock(&conn->chan_lock);
699 }
701 EXPORT_SYMBOL_GPL(l2cap_chan_list);
702 
703 static void l2cap_conn_update_id_addr(struct work_struct *work)
704 {
705 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
706 					       id_addr_update_work);
707 	struct hci_conn *hcon = conn->hcon;
708 	struct l2cap_chan *chan;
709 
710 	mutex_lock(&conn->chan_lock);
711 
712 	list_for_each_entry(chan, &conn->chan_l, list) {
713 		l2cap_chan_lock(chan);
714 		bacpy(&chan->dst, &hcon->dst);
715 		chan->dst_type = bdaddr_dst_type(hcon);
716 		l2cap_chan_unlock(chan);
717 	}
718 
719 	mutex_unlock(&conn->chan_lock);
720 }
721 
722 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
723 {
724 	struct l2cap_conn *conn = chan->conn;
725 	struct l2cap_le_conn_rsp rsp;
726 	u16 result;
727 
728 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
729 		result = L2CAP_CR_LE_AUTHORIZATION;
730 	else
731 		result = L2CAP_CR_LE_BAD_PSM;
732 
733 	l2cap_state_change(chan, BT_DISCONN);
734 
735 	rsp.dcid    = cpu_to_le16(chan->scid);
736 	rsp.mtu     = cpu_to_le16(chan->imtu);
737 	rsp.mps     = cpu_to_le16(chan->mps);
738 	rsp.credits = cpu_to_le16(chan->rx_credits);
739 	rsp.result  = cpu_to_le16(result);
740 
741 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
742 		       &rsp);
743 }
744 
745 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
746 {
747 	struct l2cap_conn *conn = chan->conn;
748 	struct l2cap_ecred_conn_rsp rsp;
749 	u16 result;
750 
751 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
752 		result = L2CAP_CR_LE_AUTHORIZATION;
753 	else
754 		result = L2CAP_CR_LE_BAD_PSM;
755 
756 	l2cap_state_change(chan, BT_DISCONN);
757 
758 	memset(&rsp, 0, sizeof(rsp));
759 
760 	rsp.result  = cpu_to_le16(result);
761 
762 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
763 		       &rsp);
764 }
765 
766 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
767 {
768 	struct l2cap_conn *conn = chan->conn;
769 	struct l2cap_conn_rsp rsp;
770 	u16 result;
771 
772 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
773 		result = L2CAP_CR_SEC_BLOCK;
774 	else
775 		result = L2CAP_CR_BAD_PSM;
776 
777 	l2cap_state_change(chan, BT_DISCONN);
778 
779 	rsp.scid   = cpu_to_le16(chan->dcid);
780 	rsp.dcid   = cpu_to_le16(chan->scid);
781 	rsp.result = cpu_to_le16(result);
782 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
783 
784 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
785 }
786 
787 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
788 {
789 	struct l2cap_conn *conn = chan->conn;
790 
791 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
792 
793 	switch (chan->state) {
794 	case BT_LISTEN:
795 		chan->ops->teardown(chan, 0);
796 		break;
797 
798 	case BT_CONNECTED:
799 	case BT_CONFIG:
800 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
801 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
802 			l2cap_send_disconn_req(chan, reason);
803 		} else
804 			l2cap_chan_del(chan, reason);
805 		break;
806 
807 	case BT_CONNECT2:
808 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
809 			if (conn->hcon->type == ACL_LINK)
810 				l2cap_chan_connect_reject(chan);
811 			else if (conn->hcon->type == LE_LINK) {
812 				switch (chan->mode) {
813 				case L2CAP_MODE_LE_FLOWCTL:
814 					l2cap_chan_le_connect_reject(chan);
815 					break;
816 				case L2CAP_MODE_EXT_FLOWCTL:
817 					l2cap_chan_ecred_connect_reject(chan);
818 					break;
819 				}
820 			}
821 		}
822 
823 		l2cap_chan_del(chan, reason);
824 		break;
825 
826 	case BT_CONNECT:
827 	case BT_DISCONN:
828 		l2cap_chan_del(chan, reason);
829 		break;
830 
831 	default:
832 		chan->ops->teardown(chan, 0);
833 		break;
834 	}
835 }
836 EXPORT_SYMBOL(l2cap_chan_close);
837 
838 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
839 {
840 	switch (chan->chan_type) {
841 	case L2CAP_CHAN_RAW:
842 		switch (chan->sec_level) {
843 		case BT_SECURITY_HIGH:
844 		case BT_SECURITY_FIPS:
845 			return HCI_AT_DEDICATED_BONDING_MITM;
846 		case BT_SECURITY_MEDIUM:
847 			return HCI_AT_DEDICATED_BONDING;
848 		default:
849 			return HCI_AT_NO_BONDING;
850 		}
851 		break;
852 	case L2CAP_CHAN_CONN_LESS:
853 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
854 			if (chan->sec_level == BT_SECURITY_LOW)
855 				chan->sec_level = BT_SECURITY_SDP;
856 		}
857 		if (chan->sec_level == BT_SECURITY_HIGH ||
858 		    chan->sec_level == BT_SECURITY_FIPS)
859 			return HCI_AT_NO_BONDING_MITM;
860 		else
861 			return HCI_AT_NO_BONDING;
862 		break;
863 	case L2CAP_CHAN_CONN_ORIENTED:
864 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
865 			if (chan->sec_level == BT_SECURITY_LOW)
866 				chan->sec_level = BT_SECURITY_SDP;
867 
868 			if (chan->sec_level == BT_SECURITY_HIGH ||
869 			    chan->sec_level == BT_SECURITY_FIPS)
870 				return HCI_AT_NO_BONDING_MITM;
871 			else
872 				return HCI_AT_NO_BONDING;
873 		}
874 		fallthrough;
875 
876 	default:
877 		switch (chan->sec_level) {
878 		case BT_SECURITY_HIGH:
879 		case BT_SECURITY_FIPS:
880 			return HCI_AT_GENERAL_BONDING_MITM;
881 		case BT_SECURITY_MEDIUM:
882 			return HCI_AT_GENERAL_BONDING;
883 		default:
884 			return HCI_AT_NO_BONDING;
885 		}
886 		break;
887 	}
888 }
889 
890 /* Service level security */
891 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
892 {
893 	struct l2cap_conn *conn = chan->conn;
894 	__u8 auth_type;
895 
896 	if (conn->hcon->type == LE_LINK)
897 		return smp_conn_security(conn->hcon, chan->sec_level);
898 
899 	auth_type = l2cap_get_auth_type(chan);
900 
901 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
902 				 initiator);
903 }
904 
905 static u8 l2cap_get_ident(struct l2cap_conn *conn)
906 {
907 	u8 id;
908 
909 	/* Get next available identifier.
910 	 *    1 - 128 are used by kernel.
911 	 *  129 - 199 are reserved.
912 	 *  200 - 254 are used by utilities like l2ping, etc.
913 	 */
914 
915 	mutex_lock(&conn->ident_lock);
916 
917 	if (++conn->tx_ident > 128)
918 		conn->tx_ident = 1;
919 
920 	id = conn->tx_ident;
921 
922 	mutex_unlock(&conn->ident_lock);
923 
924 	return id;
925 }
926 
927 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
928 			   void *data)
929 {
930 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
931 	u8 flags;
932 
933 	BT_DBG("code 0x%2.2x", code);
934 
935 	if (!skb)
936 		return;
937 
938 	/* Use NO_FLUSH if supported or if this is an LE link (which does
939 	 * not support auto-flushing packets) */
940 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
941 	    conn->hcon->type == LE_LINK)
942 		flags = ACL_START_NO_FLUSH;
943 	else
944 		flags = ACL_START;
945 
946 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
947 	skb->priority = HCI_PRIO_MAX;
948 
949 	hci_send_acl(conn->hchan, skb, flags);
950 }
951 
952 static bool __chan_is_moving(struct l2cap_chan *chan)
953 {
954 	return chan->move_state != L2CAP_MOVE_STABLE &&
955 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
956 }
957 
958 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
959 {
960 	struct hci_conn *hcon = chan->conn->hcon;
961 	u16 flags;
962 
963 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
964 	       skb->priority);
965 
966 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
967 		if (chan->hs_hchan)
968 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
969 		else
970 			kfree_skb(skb);
971 
972 		return;
973 	}
974 
975 	/* Use NO_FLUSH for LE links (where this is the only option) or
976 	 * if the BR/EDR link supports it and flushing has not been
977 	 * explicitly requested (through FLAG_FLUSHABLE).
978 	 */
979 	if (hcon->type == LE_LINK ||
980 	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
981 	     lmp_no_flush_capable(hcon->hdev)))
982 		flags = ACL_START_NO_FLUSH;
983 	else
984 		flags = ACL_START;
985 
986 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
987 	hci_send_acl(chan->conn->hchan, skb, flags);
988 }
989 
990 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
991 {
992 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
993 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
994 
995 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
996 		/* S-Frame */
997 		control->sframe = 1;
998 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
999 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1000 
1001 		control->sar = 0;
1002 		control->txseq = 0;
1003 	} else {
1004 		/* I-Frame */
1005 		control->sframe = 0;
1006 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1007 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1008 
1009 		control->poll = 0;
1010 		control->super = 0;
1011 	}
1012 }
1013 
1014 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1015 {
1016 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1017 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1018 
1019 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1020 		/* S-Frame */
1021 		control->sframe = 1;
1022 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1023 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1024 
1025 		control->sar = 0;
1026 		control->txseq = 0;
1027 	} else {
1028 		/* I-Frame */
1029 		control->sframe = 0;
1030 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1031 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1032 
1033 		control->poll = 0;
1034 		control->super = 0;
1035 	}
1036 }
1037 
1038 static inline void __unpack_control(struct l2cap_chan *chan,
1039 				    struct sk_buff *skb)
1040 {
1041 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1042 		__unpack_extended_control(get_unaligned_le32(skb->data),
1043 					  &bt_cb(skb)->l2cap);
1044 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1045 	} else {
1046 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1047 					  &bt_cb(skb)->l2cap);
1048 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1049 	}
1050 }
1051 
1052 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1053 {
1054 	u32 packed;
1055 
1056 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1057 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1058 
1059 	if (control->sframe) {
1060 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1061 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1062 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1063 	} else {
1064 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1065 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1066 	}
1067 
1068 	return packed;
1069 }
1070 
1071 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1072 {
1073 	u16 packed;
1074 
1075 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1076 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1077 
1078 	if (control->sframe) {
1079 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1080 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1081 		packed |= L2CAP_CTRL_FRAME_TYPE;
1082 	} else {
1083 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1084 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1085 	}
1086 
1087 	return packed;
1088 }
1089 
1090 static inline void __pack_control(struct l2cap_chan *chan,
1091 				  struct l2cap_ctrl *control,
1092 				  struct sk_buff *skb)
1093 {
1094 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1095 		put_unaligned_le32(__pack_extended_control(control),
1096 				   skb->data + L2CAP_HDR_SIZE);
1097 	} else {
1098 		put_unaligned_le16(__pack_enhanced_control(control),
1099 				   skb->data + L2CAP_HDR_SIZE);
1100 	}
1101 }
1102 
1103 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1104 {
1105 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1106 		return L2CAP_EXT_HDR_SIZE;
1107 	else
1108 		return L2CAP_ENH_HDR_SIZE;
1109 }
1110 
1111 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1112 					       u32 control)
1113 {
1114 	struct sk_buff *skb;
1115 	struct l2cap_hdr *lh;
1116 	int hlen = __ertm_hdr_size(chan);
1117 
1118 	if (chan->fcs == L2CAP_FCS_CRC16)
1119 		hlen += L2CAP_FCS_SIZE;
1120 
1121 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
1122 
1123 	if (!skb)
1124 		return ERR_PTR(-ENOMEM);
1125 
1126 	lh = skb_put(skb, L2CAP_HDR_SIZE);
1127 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1128 	lh->cid = cpu_to_le16(chan->dcid);
1129 
1130 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1131 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1132 	else
1133 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1134 
1135 	if (chan->fcs == L2CAP_FCS_CRC16) {
1136 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1137 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1138 	}
1139 
1140 	skb->priority = HCI_PRIO_MAX;
1141 	return skb;
1142 }
1143 
1144 static void l2cap_send_sframe(struct l2cap_chan *chan,
1145 			      struct l2cap_ctrl *control)
1146 {
1147 	struct sk_buff *skb;
1148 	u32 control_field;
1149 
1150 	BT_DBG("chan %p, control %p", chan, control);
1151 
1152 	if (!control->sframe)
1153 		return;
1154 
1155 	if (__chan_is_moving(chan))
1156 		return;
1157 
1158 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1159 	    !control->poll)
1160 		control->final = 1;
1161 
1162 	if (control->super == L2CAP_SUPER_RR)
1163 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
1164 	else if (control->super == L2CAP_SUPER_RNR)
1165 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1166 
1167 	if (control->super != L2CAP_SUPER_SREJ) {
1168 		chan->last_acked_seq = control->reqseq;
1169 		__clear_ack_timer(chan);
1170 	}
1171 
1172 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1173 	       control->final, control->poll, control->super);
1174 
1175 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1176 		control_field = __pack_extended_control(control);
1177 	else
1178 		control_field = __pack_enhanced_control(control);
1179 
1180 	skb = l2cap_create_sframe_pdu(chan, control_field);
1181 	if (!IS_ERR(skb))
1182 		l2cap_do_send(chan, skb);
1183 }
1184 
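/* Send an RR S-frame, or RNR when the local receiver is busy,
 * acknowledging everything up to buffer_seq; poll asks the peer to
 * respond with the final bit set.
 */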
1185 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1186 {
1187 	struct l2cap_ctrl control;
1188 
1189 	BT_DBG("chan %p, poll %d", chan, poll);
1190 
1191 	memset(&control, 0, sizeof(control));
1192 	control.sframe = 1;
1193 	control.poll = poll;
1194 
1195 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1196 		control.super = L2CAP_SUPER_RNR;
1197 	else
1198 		control.super = L2CAP_SUPER_RR;
1199 
1200 	control.reqseq = chan->buffer_seq;
1201 	l2cap_send_sframe(chan, &control);
1202 }
1203 
1204 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1205 {
1206 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1207 		return true;
1208 
1209 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1210 }
1211 
1212 static bool __amp_capable(struct l2cap_chan *chan)
1213 {
1214 	struct l2cap_conn *conn = chan->conn;
1215 	struct hci_dev *hdev;
1216 	bool amp_available = false;
1217 
1218 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1219 		return false;
1220 
1221 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1222 		return false;
1223 
1224 	read_lock(&hci_dev_list_lock);
1225 	list_for_each_entry(hdev, &hci_dev_list, list) {
1226 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1227 		    test_bit(HCI_UP, &hdev->flags)) {
1228 			amp_available = true;
1229 			break;
1230 		}
1231 	}
1232 	read_unlock(&hci_dev_list_lock);
1233 
1234 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1235 		return amp_available;
1236 
1237 	return false;
1238 }
1239 
1240 static bool l2cap_check_efs(struct l2cap_chan *chan)
1241 {
1242 	/* Check EFS parameters */
1243 	return true;
1244 }
1245 
1246 void l2cap_send_conn_req(struct l2cap_chan *chan)
1247 {
1248 	struct l2cap_conn *conn = chan->conn;
1249 	struct l2cap_conn_req req;
1250 
1251 	req.scid = cpu_to_le16(chan->scid);
1252 	req.psm  = chan->psm;
1253 
1254 	chan->ident = l2cap_get_ident(conn);
1255 
1256 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1257 
1258 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1259 }
1260 
1261 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1262 {
1263 	struct l2cap_create_chan_req req;
1264 	req.scid = cpu_to_le16(chan->scid);
1265 	req.psm  = chan->psm;
1266 	req.amp_id = amp_id;
1267 
1268 	chan->ident = l2cap_get_ident(chan->conn);
1269 
1270 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1271 		       sizeof(req), &req);
1272 }
1273 
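/* Prepare an ERTM channel for an AMP channel move: stop all timers,
 * reset the retransmission state and pause transmission until the
 * move completes.
 */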
1274 static void l2cap_move_setup(struct l2cap_chan *chan)
1275 {
1276 	struct sk_buff *skb;
1277 
1278 	BT_DBG("chan %p", chan);
1279 
1280 	if (chan->mode != L2CAP_MODE_ERTM)
1281 		return;
1282 
1283 	__clear_retrans_timer(chan);
1284 	__clear_monitor_timer(chan);
1285 	__clear_ack_timer(chan);
1286 
1287 	chan->retry_count = 0;
1288 	skb_queue_walk(&chan->tx_q, skb) {
1289 		if (bt_cb(skb)->l2cap.retries)
1290 			bt_cb(skb)->l2cap.retries = 1;
1291 		else
1292 			break;
1293 	}
1294 
1295 	chan->expected_tx_seq = chan->buffer_seq;
1296 
1297 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1298 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1299 	l2cap_seq_list_clear(&chan->retrans_list);
1300 	l2cap_seq_list_clear(&chan->srej_list);
1301 	skb_queue_purge(&chan->srej_q);
1302 
1303 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1304 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1305 
1306 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1307 }
1308 
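/* The channel move has finished: return to the stable state and, for
 * ERTM channels, resume the receive state machine according to the
 * local move role.
 */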
1309 static void l2cap_move_done(struct l2cap_chan *chan)
1310 {
1311 	u8 move_role = chan->move_role;
1312 	BT_DBG("chan %p", chan);
1313 
1314 	chan->move_state = L2CAP_MOVE_STABLE;
1315 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1316 
1317 	if (chan->mode != L2CAP_MODE_ERTM)
1318 		return;
1319 
1320 	switch (move_role) {
1321 	case L2CAP_MOVE_ROLE_INITIATOR:
1322 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1323 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1324 		break;
1325 	case L2CAP_MOVE_ROLE_RESPONDER:
1326 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1327 		break;
1328 	}
1329 }
1330 
1331 static void l2cap_chan_ready(struct l2cap_chan *chan)
1332 {
1333 	/* The channel may have already been flagged as connected in
1334 	 * case of receiving data before the L2CAP info req/rsp
1335 	 * procedure is complete.
1336 	 */
1337 	if (chan->state == BT_CONNECTED)
1338 		return;
1339 
1340 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1341 	chan->conf_state = 0;
1342 	__clear_chan_timer(chan);
1343 
1344 	switch (chan->mode) {
1345 	case L2CAP_MODE_LE_FLOWCTL:
1346 	case L2CAP_MODE_EXT_FLOWCTL:
1347 		if (!chan->tx_credits)
1348 			chan->ops->suspend(chan);
1349 		break;
1350 	}
1351 
1352 	chan->state = BT_CONNECTED;
1353 
1354 	chan->ops->ready(chan);
1355 }
1356 
1357 static void l2cap_le_connect(struct l2cap_chan *chan)
1358 {
1359 	struct l2cap_conn *conn = chan->conn;
1360 	struct l2cap_le_conn_req req;
1361 
1362 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1363 		return;
1364 
1365 	if (!chan->imtu)
1366 		chan->imtu = chan->conn->mtu;
1367 
1368 	l2cap_le_flowctl_init(chan, 0);
1369 
1370 	req.psm     = chan->psm;
1371 	req.scid    = cpu_to_le16(chan->scid);
1372 	req.mtu     = cpu_to_le16(chan->imtu);
1373 	req.mps     = cpu_to_le16(chan->mps);
1374 	req.credits = cpu_to_le16(chan->rx_credits);
1375 
1376 	chan->ident = l2cap_get_ident(conn);
1377 
1378 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1379 		       sizeof(req), &req);
1380 }
1381 
1382 struct l2cap_ecred_conn_data {
1383 	struct {
1384 		struct l2cap_ecred_conn_req req;
1385 		__le16 scid[5];
1386 	} __packed pdu;
1387 	struct l2cap_chan *chan;
1388 	struct pid *pid;
1389 	int count;
1390 };
1391 
1392 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1393 {
1394 	struct l2cap_ecred_conn_data *conn = data;
1395 	struct pid *pid;
1396 
1397 	if (chan == conn->chan)
1398 		return;
1399 
1400 	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1401 		return;
1402 
1403 	pid = chan->ops->get_peer_pid(chan);
1404 
1405 	/* Only add deferred channels with the same PID/PSM */
1406 	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1407 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1408 		return;
1409 
1410 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1411 		return;
1412 
1413 	l2cap_ecred_init(chan, 0);
1414 
1415 	/* Set the same ident so we can match on the rsp */
1416 	chan->ident = conn->chan->ident;
1417 
1418 	/* Include all channels deferred */
1419 	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1420 
1421 	conn->count++;
1422 }
1423 
1424 static void l2cap_ecred_connect(struct l2cap_chan *chan)
1425 {
1426 	struct l2cap_conn *conn = chan->conn;
1427 	struct l2cap_ecred_conn_data data;
1428 
1429 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
1430 		return;
1431 
1432 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1433 		return;
1434 
1435 	l2cap_ecred_init(chan, 0);
1436 
1437 	data.pdu.req.psm     = chan->psm;
1438 	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
1439 	data.pdu.req.mps     = cpu_to_le16(chan->mps);
1440 	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
1441 	data.pdu.scid[0]     = cpu_to_le16(chan->scid);
1442 
1443 	chan->ident = l2cap_get_ident(conn);
1445 
1446 	data.count = 1;
1447 	data.chan = chan;
1448 	data.pid = chan->ops->get_peer_pid(chan);
1449 
1450 	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
1451 
1452 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
1453 		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
1454 		       &data.pdu);
1455 }
1456 
1457 static void l2cap_le_start(struct l2cap_chan *chan)
1458 {
1459 	struct l2cap_conn *conn = chan->conn;
1460 
1461 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1462 		return;
1463 
1464 	if (!chan->psm) {
1465 		l2cap_chan_ready(chan);
1466 		return;
1467 	}
1468 
1469 	if (chan->state == BT_CONNECT) {
1470 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1471 			l2cap_ecred_connect(chan);
1472 		else
1473 			l2cap_le_connect(chan);
1474 	}
1475 }
1476 
1477 static void l2cap_start_connection(struct l2cap_chan *chan)
1478 {
1479 	if (__amp_capable(chan)) {
1480 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1481 		a2mp_discover_amp(chan);
1482 	} else if (chan->conn->hcon->type == LE_LINK) {
1483 		l2cap_le_start(chan);
1484 	} else {
1485 		l2cap_send_conn_req(chan);
1486 	}
1487 }
1488 
1489 static void l2cap_request_info(struct l2cap_conn *conn)
1490 {
1491 	struct l2cap_info_req req;
1492 
1493 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1494 		return;
1495 
1496 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1497 
1498 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1499 	conn->info_ident = l2cap_get_ident(conn);
1500 
1501 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1502 
1503 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1504 		       sizeof(req), &req);
1505 }
1506 
1507 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1508 {
1509 	/* The minimum encryption key size needs to be enforced by the
1510 	 * host stack before establishing any L2CAP connections. The
1511 	 * specification in theory allows a minimum of 1, but to align
1512 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1513 	 *
1514 	 * This check might also be called for unencrypted connections
1515 	 * that have no key size requirements. Ensure that the link is
1516 	 * actually encrypted before enforcing a key size.
1517 	 */
1518 	int min_key_size = hcon->hdev->min_enc_key_size;
1519 
1520 	/* On FIPS security level, key size must be 16 bytes */
1521 	if (hcon->sec_level == BT_SECURITY_FIPS)
1522 		min_key_size = 16;
1523 
1524 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1525 		hcon->enc_key_size >= min_key_size);
1526 }
1527 
1528 static void l2cap_do_start(struct l2cap_chan *chan)
1529 {
1530 	struct l2cap_conn *conn = chan->conn;
1531 
1532 	if (conn->hcon->type == LE_LINK) {
1533 		l2cap_le_start(chan);
1534 		return;
1535 	}
1536 
1537 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1538 		l2cap_request_info(conn);
1539 		return;
1540 	}
1541 
1542 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1543 		return;
1544 
1545 	if (!l2cap_chan_check_security(chan, true) ||
1546 	    !__l2cap_no_conn_pending(chan))
1547 		return;
1548 
1549 	if (l2cap_check_enc_key_size(conn->hcon))
1550 		l2cap_start_connection(chan);
1551 	else
1552 		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
1553 }
1554 
1555 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1556 {
1557 	u32 local_feat_mask = l2cap_feat_mask;
1558 	if (!disable_ertm)
1559 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1560 
1561 	switch (mode) {
1562 	case L2CAP_MODE_ERTM:
1563 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1564 	case L2CAP_MODE_STREAMING:
1565 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1566 	default:
1567 		return 0x00;
1568 	}
1569 }
1570 
1571 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1572 {
1573 	struct l2cap_conn *conn = chan->conn;
1574 	struct l2cap_disconn_req req;
1575 
1576 	if (!conn)
1577 		return;
1578 
1579 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1580 		__clear_retrans_timer(chan);
1581 		__clear_monitor_timer(chan);
1582 		__clear_ack_timer(chan);
1583 	}
1584 
1585 	if (chan->scid == L2CAP_CID_A2MP) {
1586 		l2cap_state_change(chan, BT_DISCONN);
1587 		return;
1588 	}
1589 
1590 	req.dcid = cpu_to_le16(chan->dcid);
1591 	req.scid = cpu_to_le16(chan->scid);
1592 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1593 		       sizeof(req), &req);
1594 
1595 	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1596 }
1597 
1598 /* ---- L2CAP connections ---- */
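/* Walk every channel on the connection: mark channels that are not
 * connection-oriented as ready, (re)start outgoing connection requests
 * for channels in BT_CONNECT and answer deferred incoming requests for
 * channels in BT_CONNECT2.
 */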
1599 static void l2cap_conn_start(struct l2cap_conn *conn)
1600 {
1601 	struct l2cap_chan *chan, *tmp;
1602 
1603 	BT_DBG("conn %p", conn);
1604 
1605 	mutex_lock(&conn->chan_lock);
1606 
1607 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1608 		l2cap_chan_lock(chan);
1609 
1610 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1611 			l2cap_chan_ready(chan);
1612 			l2cap_chan_unlock(chan);
1613 			continue;
1614 		}
1615 
1616 		if (chan->state == BT_CONNECT) {
1617 			if (!l2cap_chan_check_security(chan, true) ||
1618 			    !__l2cap_no_conn_pending(chan)) {
1619 				l2cap_chan_unlock(chan);
1620 				continue;
1621 			}
1622 
1623 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1624 			    && test_bit(CONF_STATE2_DEVICE,
1625 					&chan->conf_state)) {
1626 				l2cap_chan_close(chan, ECONNRESET);
1627 				l2cap_chan_unlock(chan);
1628 				continue;
1629 			}
1630 
1631 			if (l2cap_check_enc_key_size(conn->hcon))
1632 				l2cap_start_connection(chan);
1633 			else
1634 				l2cap_chan_close(chan, ECONNREFUSED);
1635 
1636 		} else if (chan->state == BT_CONNECT2) {
1637 			struct l2cap_conn_rsp rsp;
1638 			char buf[128];
1639 			rsp.scid = cpu_to_le16(chan->dcid);
1640 			rsp.dcid = cpu_to_le16(chan->scid);
1641 
1642 			if (l2cap_chan_check_security(chan, false)) {
1643 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1644 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1645 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1646 					chan->ops->defer(chan);
1647 
1648 				} else {
1649 					l2cap_state_change(chan, BT_CONFIG);
1650 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1651 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1652 				}
1653 			} else {
1654 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1655 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1656 			}
1657 
1658 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1659 				       sizeof(rsp), &rsp);
1660 
1661 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1662 			    rsp.result != L2CAP_CR_SUCCESS) {
1663 				l2cap_chan_unlock(chan);
1664 				continue;
1665 			}
1666 
1667 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1668 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1669 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1670 			chan->num_conf_req++;
1671 		}
1672 
1673 		l2cap_chan_unlock(chan);
1674 	}
1675 
1676 	mutex_unlock(&conn->chan_lock);
1677 }
1678 
1679 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1680 {
1681 	struct hci_conn *hcon = conn->hcon;
1682 	struct hci_dev *hdev = hcon->hdev;
1683 
1684 	BT_DBG("%s conn %p", hdev->name, conn);
1685 
1686 	/* For outgoing pairing which doesn't necessarily have an
1687 	 * associated socket (e.g. mgmt_pair_device).
1688 	 */
1689 	if (hcon->out)
1690 		smp_conn_security(hcon, hcon->pending_sec_level);
1691 
1692 	/* For LE slave connections, make sure the connection interval
1693 	 * is in the range of the minium and maximum interval that has
1694 	 * been configured for this connection. If not, then trigger
1695 	 * the connection update procedure.
1696 	 */
1697 	if (hcon->role == HCI_ROLE_SLAVE &&
1698 	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1699 	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1700 		struct l2cap_conn_param_update_req req;
1701 
1702 		req.min = cpu_to_le16(hcon->le_conn_min_interval);
1703 		req.max = cpu_to_le16(hcon->le_conn_max_interval);
1704 		req.latency = cpu_to_le16(hcon->le_conn_latency);
1705 		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1706 
1707 		l2cap_send_cmd(conn, l2cap_get_ident(conn),
1708 			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1709 	}
1710 }
1711 
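/* The underlying link is ready: request the remote feature mask on
 * ACL links, then start or mark ready every channel already attached
 * to the connection.
 */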
1712 static void l2cap_conn_ready(struct l2cap_conn *conn)
1713 {
1714 	struct l2cap_chan *chan;
1715 	struct hci_conn *hcon = conn->hcon;
1716 
1717 	BT_DBG("conn %p", conn);
1718 
1719 	if (hcon->type == ACL_LINK)
1720 		l2cap_request_info(conn);
1721 
1722 	mutex_lock(&conn->chan_lock);
1723 
1724 	list_for_each_entry(chan, &conn->chan_l, list) {
1725 
1726 		l2cap_chan_lock(chan);
1727 
1728 		if (chan->scid == L2CAP_CID_A2MP) {
1729 			l2cap_chan_unlock(chan);
1730 			continue;
1731 		}
1732 
1733 		if (hcon->type == LE_LINK) {
1734 			l2cap_le_start(chan);
1735 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1736 			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1737 				l2cap_chan_ready(chan);
1738 		} else if (chan->state == BT_CONNECT) {
1739 			l2cap_do_start(chan);
1740 		}
1741 
1742 		l2cap_chan_unlock(chan);
1743 	}
1744 
1745 	mutex_unlock(&conn->chan_lock);
1746 
1747 	if (hcon->type == LE_LINK)
1748 		l2cap_le_conn_ready(conn);
1749 
1750 	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1751 }
1752 
1753 /* Notify sockets that we cannot guarantee reliability anymore */
1754 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1755 {
1756 	struct l2cap_chan *chan;
1757 
1758 	BT_DBG("conn %p", conn);
1759 
1760 	mutex_lock(&conn->chan_lock);
1761 
1762 	list_for_each_entry(chan, &conn->chan_l, list) {
1763 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1764 			l2cap_chan_set_err(chan, err);
1765 	}
1766 
1767 	mutex_unlock(&conn->chan_lock);
1768 }
1769 
1770 static void l2cap_info_timeout(struct work_struct *work)
1771 {
1772 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1773 					       info_timer.work);
1774 
1775 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1776 	conn->info_ident = 0;
1777 
1778 	l2cap_conn_start(conn);
1779 }
1780 
1781 /*
1782  * l2cap_user
1783  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1784  * callback is called during registration. The ->remove callback is called
1785  * during unregistration.
1786  * An l2cap_user object can either be explicitly unregistered or when the
1787  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1788  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1789  * External modules must own a reference to the l2cap_conn object if they intend
1790  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1791  * any time if they don't.
1792  */
1793 
1794 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1795 {
1796 	struct hci_dev *hdev = conn->hcon->hdev;
1797 	int ret;
1798 
1799 	/* We need to check whether l2cap_conn is registered. If it is not, we
1800 	 * must not register the l2cap_user. l2cap_conn_del() unregisters
1801 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1802 	 * relies on the parent hci_conn object to be locked. This itself relies
1803 	 * on the hci_dev object to be locked. So we must lock the hci device
1804 	 * here, too. */
1805 
1806 	hci_dev_lock(hdev);
1807 
1808 	if (!list_empty(&user->list)) {
1809 		ret = -EINVAL;
1810 		goto out_unlock;
1811 	}
1812 
1813 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1814 	if (!conn->hchan) {
1815 		ret = -ENODEV;
1816 		goto out_unlock;
1817 	}
1818 
1819 	ret = user->probe(conn, user);
1820 	if (ret)
1821 		goto out_unlock;
1822 
1823 	list_add(&user->list, &conn->users);
1824 	ret = 0;
1825 
1826 out_unlock:
1827 	hci_dev_unlock(hdev);
1828 	return ret;
1829 }
1830 EXPORT_SYMBOL(l2cap_register_user);
1831 
1832 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1833 {
1834 	struct hci_dev *hdev = conn->hcon->hdev;
1835 
1836 	hci_dev_lock(hdev);
1837 
1838 	if (list_empty(&user->list))
1839 		goto out_unlock;
1840 
1841 	list_del_init(&user->list);
1842 	user->remove(conn, user);
1843 
1844 out_unlock:
1845 	hci_dev_unlock(hdev);
1846 }
1847 EXPORT_SYMBOL(l2cap_unregister_user);
1848 
1849 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1850 {
1851 	struct l2cap_user *user;
1852 
1853 	while (!list_empty(&conn->users)) {
1854 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1855 		list_del_init(&user->list);
1856 		user->remove(conn, user);
1857 	}
1858 }
1859 
1860 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1861 {
1862 	struct l2cap_conn *conn = hcon->l2cap_data;
1863 	struct l2cap_chan *chan, *l;
1864 
1865 	if (!conn)
1866 		return;
1867 
1868 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1869 
1870 	kfree_skb(conn->rx_skb);
1871 
1872 	skb_queue_purge(&conn->pending_rx);
1873 
1874 	/* We cannot call flush_work(&conn->pending_rx_work) here since we
1875 	 * might block if we are running on a worker from the same workqueue
1876 	 * pending_rx_work is waiting on.
1877 	 */
1878 	if (work_pending(&conn->pending_rx_work))
1879 		cancel_work_sync(&conn->pending_rx_work);
1880 
1881 	if (work_pending(&conn->id_addr_update_work))
1882 		cancel_work_sync(&conn->id_addr_update_work);
1883 
1884 	l2cap_unregister_all_users(conn);
1885 
1886 	/* Force the connection to be immediately dropped */
1887 	hcon->disc_timeout = 0;
1888 
1889 	mutex_lock(&conn->chan_lock);
1890 
1891 	/* Kill channels */
1892 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1893 		l2cap_chan_hold(chan);
1894 		l2cap_chan_lock(chan);
1895 
1896 		l2cap_chan_del(chan, err);
1897 
1898 		chan->ops->close(chan);
1899 
1900 		l2cap_chan_unlock(chan);
1901 		l2cap_chan_put(chan);
1902 	}
1903 
1904 	mutex_unlock(&conn->chan_lock);
1905 
1906 	hci_chan_del(conn->hchan);
1907 
1908 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1909 		cancel_delayed_work_sync(&conn->info_timer);
1910 
1911 	hcon->l2cap_data = NULL;
1912 	conn->hchan = NULL;
1913 	l2cap_conn_put(conn);
1914 }
1915 
1916 static void l2cap_conn_free(struct kref *ref)
1917 {
1918 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1919 
1920 	hci_conn_put(conn->hcon);
1921 	kfree(conn);
1922 }
1923 
1924 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1925 {
1926 	kref_get(&conn->ref);
1927 	return conn;
1928 }
1929 EXPORT_SYMBOL(l2cap_conn_get);
1930 
1931 void l2cap_conn_put(struct l2cap_conn *conn)
1932 {
1933 	kref_put(&conn->ref, l2cap_conn_free);
1934 }
1935 EXPORT_SYMBOL(l2cap_conn_put);
1936 
1937 /* ---- Socket interface ---- */
1938 
1939 /* Find channel with psm and source / destination bdaddr.
1940  * Returns closest match.
1941  */
1942 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1943 						   bdaddr_t *src,
1944 						   bdaddr_t *dst,
1945 						   u8 link_type)
1946 {
1947 	struct l2cap_chan *c, *c1 = NULL;
1948 
1949 	read_lock(&chan_list_lock);
1950 
1951 	list_for_each_entry(c, &chan_list, global_l) {
1952 		if (state && c->state != state)
1953 			continue;
1954 
1955 		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1956 			continue;
1957 
1958 		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1959 			continue;
1960 
1961 		if (c->psm == psm) {
1962 			int src_match, dst_match;
1963 			int src_any, dst_any;
1964 
1965 			/* Exact match. */
1966 			src_match = !bacmp(&c->src, src);
1967 			dst_match = !bacmp(&c->dst, dst);
1968 			if (src_match && dst_match) {
1969 				l2cap_chan_hold(c);
1970 				read_unlock(&chan_list_lock);
1971 				return c;
1972 			}
1973 
1974 			/* Closest match */
1975 			src_any = !bacmp(&c->src, BDADDR_ANY);
1976 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
1977 			if ((src_match && dst_any) || (src_any && dst_match) ||
1978 			    (src_any && dst_any))
1979 				c1 = c;
1980 		}
1981 	}
1982 
1983 	if (c1)
1984 		l2cap_chan_hold(c1);
1985 
1986 	read_unlock(&chan_list_lock);
1987 
1988 	return c1;
1989 }
1990 
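/* ERTM monitor timer: while waiting for an F-bit (WAIT_F state) this work
 * periodically feeds a MONITOR_TO event into the TX state machine, which
 * re-polls the peer or disconnects once the retry limit is reached.
 */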
1991 static void l2cap_monitor_timeout(struct work_struct *work)
1992 {
1993 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1994 					       monitor_timer.work);
1995 
1996 	BT_DBG("chan %p", chan);
1997 
1998 	l2cap_chan_lock(chan);
1999 
2000 	if (!chan->conn) {
2001 		l2cap_chan_unlock(chan);
2002 		l2cap_chan_put(chan);
2003 		return;
2004 	}
2005 
2006 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2007 
2008 	l2cap_chan_unlock(chan);
2009 	l2cap_chan_put(chan);
2010 }
2011 
2012 static void l2cap_retrans_timeout(struct work_struct *work)
2013 {
2014 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2015 					       retrans_timer.work);
2016 
2017 	BT_DBG("chan %p", chan);
2018 
2019 	l2cap_chan_lock(chan);
2020 
2021 	if (!chan->conn) {
2022 		l2cap_chan_unlock(chan);
2023 		l2cap_chan_put(chan);
2024 		return;
2025 	}
2026 
2027 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2028 	l2cap_chan_unlock(chan);
2029 	l2cap_chan_put(chan);
2030 }
2031 
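/* Streaming mode transmit: number each queued I-frame, append the CRC16
 * FCS when enabled and hand it straight to the controller. There is no
 * acknowledgement or retransmission in this mode.
 */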
2032 static void l2cap_streaming_send(struct l2cap_chan *chan,
2033 				 struct sk_buff_head *skbs)
2034 {
2035 	struct sk_buff *skb;
2036 	struct l2cap_ctrl *control;
2037 
2038 	BT_DBG("chan %p, skbs %p", chan, skbs);
2039 
2040 	if (__chan_is_moving(chan))
2041 		return;
2042 
2043 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
2044 
2045 	while (!skb_queue_empty(&chan->tx_q)) {
2046 
2047 		skb = skb_dequeue(&chan->tx_q);
2048 
2049 		bt_cb(skb)->l2cap.retries = 1;
2050 		control = &bt_cb(skb)->l2cap;
2051 
2052 		control->reqseq = 0;
2053 		control->txseq = chan->next_tx_seq;
2054 
2055 		__pack_control(chan, control, skb);
2056 
2057 		if (chan->fcs == L2CAP_FCS_CRC16) {
2058 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2059 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2060 		}
2061 
2062 		l2cap_do_send(chan, skb);
2063 
2064 		BT_DBG("Sent txseq %u", control->txseq);
2065 
2066 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2067 		chan->frames_sent++;
2068 	}
2069 }
2070 
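/* ERTM transmit: send new I-frames while the remote TX window has room and
 * the channel is in the XMIT state. Each frame is cloned before sending so
 * the original stays on tx_q for possible retransmission, and the
 * retransmission timer is (re)armed for every frame put on the air.
 */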
2071 static int l2cap_ertm_send(struct l2cap_chan *chan)
2072 {
2073 	struct sk_buff *skb, *tx_skb;
2074 	struct l2cap_ctrl *control;
2075 	int sent = 0;
2076 
2077 	BT_DBG("chan %p", chan);
2078 
2079 	if (chan->state != BT_CONNECTED)
2080 		return -ENOTCONN;
2081 
2082 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2083 		return 0;
2084 
2085 	if (__chan_is_moving(chan))
2086 		return 0;
2087 
2088 	while (chan->tx_send_head &&
2089 	       chan->unacked_frames < chan->remote_tx_win &&
2090 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
2091 
2092 		skb = chan->tx_send_head;
2093 
2094 		bt_cb(skb)->l2cap.retries = 1;
2095 		control = &bt_cb(skb)->l2cap;
2096 
2097 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2098 			control->final = 1;
2099 
2100 		control->reqseq = chan->buffer_seq;
2101 		chan->last_acked_seq = chan->buffer_seq;
2102 		control->txseq = chan->next_tx_seq;
2103 
2104 		__pack_control(chan, control, skb);
2105 
2106 		if (chan->fcs == L2CAP_FCS_CRC16) {
2107 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2108 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2109 		}
2110 
2111 		/* Clone after data has been modified. Data is assumed to be
2112 		 * read-only (for locking purposes) on cloned sk_buffs.
2113 		 */
2114 		tx_skb = skb_clone(skb, GFP_KERNEL);
2115 
2116 		if (!tx_skb)
2117 			break;
2118 
2119 		__set_retrans_timer(chan);
2120 
2121 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2122 		chan->unacked_frames++;
2123 		chan->frames_sent++;
2124 		sent++;
2125 
2126 		if (skb_queue_is_last(&chan->tx_q, skb))
2127 			chan->tx_send_head = NULL;
2128 		else
2129 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2130 
2131 		l2cap_do_send(chan, tx_skb);
2132 		BT_DBG("Sent txseq %u", control->txseq);
2133 	}
2134 
2135 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2136 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
2137 
2138 	return sent;
2139 }
2140 
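/* Retransmit every sequence number currently on retrans_list. A writable
 * copy is used when the queued skb is cloned, the control field and FCS
 * are refreshed, and the channel is disconnected if a frame exceeds the
 * max_tx retry limit.
 */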
2141 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2142 {
2143 	struct l2cap_ctrl control;
2144 	struct sk_buff *skb;
2145 	struct sk_buff *tx_skb;
2146 	u16 seq;
2147 
2148 	BT_DBG("chan %p", chan);
2149 
2150 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2151 		return;
2152 
2153 	if (__chan_is_moving(chan))
2154 		return;
2155 
2156 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2157 		seq = l2cap_seq_list_pop(&chan->retrans_list);
2158 
2159 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2160 		if (!skb) {
2161 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
2162 			       seq);
2163 			continue;
2164 		}
2165 
2166 		bt_cb(skb)->l2cap.retries++;
2167 		control = bt_cb(skb)->l2cap;
2168 
2169 		if (chan->max_tx != 0 &&
2170 		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
2171 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2172 			l2cap_send_disconn_req(chan, ECONNRESET);
2173 			l2cap_seq_list_clear(&chan->retrans_list);
2174 			break;
2175 		}
2176 
2177 		control.reqseq = chan->buffer_seq;
2178 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2179 			control.final = 1;
2180 		else
2181 			control.final = 0;
2182 
2183 		if (skb_cloned(skb)) {
2184 			/* Cloned sk_buffs are read-only, so we need a
2185 			/* Cloned sk_buffs are read-only, so we need a
2186 			 * writable copy
2187 			 */
2188 		} else {
2189 			tx_skb = skb_clone(skb, GFP_KERNEL);
2190 		}
2191 
2192 		if (!tx_skb) {
2193 			l2cap_seq_list_clear(&chan->retrans_list);
2194 			break;
2195 		}
2196 
2197 		/* Update skb contents */
2198 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2199 			put_unaligned_le32(__pack_extended_control(&control),
2200 					   tx_skb->data + L2CAP_HDR_SIZE);
2201 		} else {
2202 			put_unaligned_le16(__pack_enhanced_control(&control),
2203 					   tx_skb->data + L2CAP_HDR_SIZE);
2204 		}
2205 
2206 		/* Update FCS */
2207 		if (chan->fcs == L2CAP_FCS_CRC16) {
2208 			u16 fcs = crc16(0, (u8 *) tx_skb->data,
2209 					tx_skb->len - L2CAP_FCS_SIZE);
2210 			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2211 						L2CAP_FCS_SIZE);
2212 		}
2213 
2214 		l2cap_do_send(chan, tx_skb);
2215 
2216 		BT_DBG("Resent txseq %u", control.txseq);
2217 
2218 		chan->last_acked_seq = chan->buffer_seq;
2219 	}
2220 }
2221 
2222 static void l2cap_retransmit(struct l2cap_chan *chan,
2223 			     struct l2cap_ctrl *control)
2224 {
2225 	BT_DBG("chan %p, control %p", chan, control);
2226 
2227 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2228 	l2cap_ertm_resend(chan);
2229 }
2230 
2231 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2232 				 struct l2cap_ctrl *control)
2233 {
2234 	struct sk_buff *skb;
2235 
2236 	BT_DBG("chan %p, control %p", chan, control);
2237 
2238 	if (control->poll)
2239 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2240 
2241 	l2cap_seq_list_clear(&chan->retrans_list);
2242 
2243 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2244 		return;
2245 
2246 	if (chan->unacked_frames) {
2247 		skb_queue_walk(&chan->tx_q, skb) {
2248 			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2249 			    skb == chan->tx_send_head)
2250 				break;
2251 		}
2252 
2253 		skb_queue_walk_from(&chan->tx_q, skb) {
2254 			if (skb == chan->tx_send_head)
2255 				break;
2256 
2257 			l2cap_seq_list_append(&chan->retrans_list,
2258 					      bt_cb(skb)->l2cap.txseq);
2259 		}
2260 
2261 		l2cap_ertm_resend(chan);
2262 	}
2263 }
2264 
2265 static void l2cap_send_ack(struct l2cap_chan *chan)
2266 {
2267 	struct l2cap_ctrl control;
2268 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2269 					 chan->last_acked_seq);
2270 	int threshold;
2271 
2272 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2273 	       chan, chan->last_acked_seq, chan->buffer_seq);
2274 
2275 	memset(&control, 0, sizeof(control));
2276 	control.sframe = 1;
2277 
2278 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2279 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2280 		__clear_ack_timer(chan);
2281 		control.super = L2CAP_SUPER_RNR;
2282 		control.reqseq = chan->buffer_seq;
2283 		l2cap_send_sframe(chan, &control);
2284 	} else {
2285 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2286 			l2cap_ertm_send(chan);
2287 			/* If any i-frames were sent, they included an ack */
2288 			if (chan->buffer_seq == chan->last_acked_seq)
2289 				frames_to_ack = 0;
2290 		}
2291 
2292 		/* Ack now if the window is 3/4 full. Compute the threshold without
2293 		 * multiply or divide: 3/4 * ack_win == (ack_win + (ack_win << 1)) >> 2
2294 		 */
2295 		threshold = chan->ack_win;
2296 		threshold += threshold << 1;
2297 		threshold >>= 2;
2298 
2299 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2300 		       threshold);
2301 
2302 		if (frames_to_ack >= threshold) {
2303 			__clear_ack_timer(chan);
2304 			control.super = L2CAP_SUPER_RR;
2305 			control.reqseq = chan->buffer_seq;
2306 			l2cap_send_sframe(chan, &control);
2307 			frames_to_ack = 0;
2308 		}
2309 
2310 		if (frames_to_ack)
2311 			__set_ack_timer(chan);
2312 	}
2313 }
2314 
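/* Copy user data from the msghdr iterator into an skb: the first 'count'
 * bytes land in the linear area, the rest is chained as MTU-sized
 * continuation fragments on frag_list. Returns the number of bytes copied
 * or a negative error.
 */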
2315 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2316 					 struct msghdr *msg, int len,
2317 					 int count, struct sk_buff *skb)
2318 {
2319 	struct l2cap_conn *conn = chan->conn;
2320 	struct sk_buff **frag;
2321 	int sent = 0;
2322 
2323 	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2324 		return -EFAULT;
2325 
2326 	sent += count;
2327 	len  -= count;
2328 
2329 	/* Continuation fragments (no L2CAP header) */
2330 	frag = &skb_shinfo(skb)->frag_list;
2331 	while (len) {
2332 		struct sk_buff *tmp;
2333 
2334 		count = min_t(unsigned int, conn->mtu, len);
2335 
2336 		tmp = chan->ops->alloc_skb(chan, 0, count,
2337 					   msg->msg_flags & MSG_DONTWAIT);
2338 		if (IS_ERR(tmp))
2339 			return PTR_ERR(tmp);
2340 
2341 		*frag = tmp;
2342 
2343 		if (!copy_from_iter_full(skb_put(*frag, count), count,
2344 				   &msg->msg_iter))
2345 			return -EFAULT;
2346 
2347 		sent += count;
2348 		len  -= count;
2349 
2350 		skb->len += (*frag)->len;
2351 		skb->data_len += (*frag)->len;
2352 
2353 		frag = &(*frag)->next;
2354 	}
2355 
2356 	return sent;
2357 }
2358 
2359 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2360 						 struct msghdr *msg, size_t len)
2361 {
2362 	struct l2cap_conn *conn = chan->conn;
2363 	struct sk_buff *skb;
2364 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2365 	struct l2cap_hdr *lh;
2366 
2367 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2368 	       __le16_to_cpu(chan->psm), len);
2369 
2370 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2371 
2372 	skb = chan->ops->alloc_skb(chan, hlen, count,
2373 				   msg->msg_flags & MSG_DONTWAIT);
2374 	if (IS_ERR(skb))
2375 		return skb;
2376 
2377 	/* Create L2CAP header */
2378 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2379 	lh->cid = cpu_to_le16(chan->dcid);
2380 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2381 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2382 
2383 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2384 	if (unlikely(err < 0)) {
2385 		kfree_skb(skb);
2386 		return ERR_PTR(err);
2387 	}
2388 	return skb;
2389 }
2390 
2391 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2392 					      struct msghdr *msg, size_t len)
2393 {
2394 	struct l2cap_conn *conn = chan->conn;
2395 	struct sk_buff *skb;
2396 	int err, count;
2397 	struct l2cap_hdr *lh;
2398 
2399 	BT_DBG("chan %p len %zu", chan, len);
2400 
2401 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2402 
2403 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2404 				   msg->msg_flags & MSG_DONTWAIT);
2405 	if (IS_ERR(skb))
2406 		return skb;
2407 
2408 	/* Create L2CAP header */
2409 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2410 	lh->cid = cpu_to_le16(chan->dcid);
2411 	lh->len = cpu_to_le16(len);
2412 
2413 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2414 	if (unlikely(err < 0)) {
2415 		kfree_skb(skb);
2416 		return ERR_PTR(err);
2417 	}
2418 	return skb;
2419 }
2420 
2421 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2422 					       struct msghdr *msg, size_t len,
2423 					       u16 sdulen)
2424 {
2425 	struct l2cap_conn *conn = chan->conn;
2426 	struct sk_buff *skb;
2427 	int err, count, hlen;
2428 	struct l2cap_hdr *lh;
2429 
2430 	BT_DBG("chan %p len %zu", chan, len);
2431 
2432 	if (!conn)
2433 		return ERR_PTR(-ENOTCONN);
2434 
2435 	hlen = __ertm_hdr_size(chan);
2436 
2437 	if (sdulen)
2438 		hlen += L2CAP_SDULEN_SIZE;
2439 
2440 	if (chan->fcs == L2CAP_FCS_CRC16)
2441 		hlen += L2CAP_FCS_SIZE;
2442 
2443 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2444 
2445 	skb = chan->ops->alloc_skb(chan, hlen, count,
2446 				   msg->msg_flags & MSG_DONTWAIT);
2447 	if (IS_ERR(skb))
2448 		return skb;
2449 
2450 	/* Create L2CAP header */
2451 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2452 	lh->cid = cpu_to_le16(chan->dcid);
2453 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2454 
2455 	/* Control header is populated later */
2456 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2457 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2458 	else
2459 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2460 
2461 	if (sdulen)
2462 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2463 
2464 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2465 	if (unlikely(err < 0)) {
2466 		kfree_skb(skb);
2467 		return ERR_PTR(err);
2468 	}
2469 
2470 	bt_cb(skb)->l2cap.fcs = chan->fcs;
2471 	bt_cb(skb)->l2cap.retries = 0;
2472 	return skb;
2473 }
2474 
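/* Segment an outgoing SDU into ERTM/streaming I-frame PDUs. Only the first
 * PDU of a segmented SDU carries SAR_START plus the 2-byte SDU length; the
 * middle PDUs use SAR_CONTINUE and the last one SAR_END. For instance, with
 * a 672-byte PDU payload limit a 1000-byte SDU becomes a 672-byte START PDU
 * followed by a 328-byte END PDU.
 */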
2475 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2476 			     struct sk_buff_head *seg_queue,
2477 			     struct msghdr *msg, size_t len)
2478 {
2479 	struct sk_buff *skb;
2480 	u16 sdu_len;
2481 	size_t pdu_len;
2482 	u8 sar;
2483 
2484 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2485 
2486 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2487 	 * so fragmented skbs are not used.  The HCI layer's handling
2488 	 * of fragmented skbs is not compatible with ERTM's queueing.
2489 	 */
2490 
2491 	/* PDU size is derived from the HCI MTU */
2492 	pdu_len = chan->conn->mtu;
2493 
2494 	/* Constrain PDU size for BR/EDR connections */
2495 	if (!chan->hs_hcon)
2496 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2497 
2498 	/* Adjust for largest possible L2CAP overhead. */
2499 	if (chan->fcs)
2500 		pdu_len -= L2CAP_FCS_SIZE;
2501 
2502 	pdu_len -= __ertm_hdr_size(chan);
2503 
2504 	/* Remote device may have requested smaller PDUs */
2505 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2506 
2507 	if (len <= pdu_len) {
2508 		sar = L2CAP_SAR_UNSEGMENTED;
2509 		sdu_len = 0;
2510 		pdu_len = len;
2511 	} else {
2512 		sar = L2CAP_SAR_START;
2513 		sdu_len = len;
2514 	}
2515 
2516 	while (len > 0) {
2517 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2518 
2519 		if (IS_ERR(skb)) {
2520 			__skb_queue_purge(seg_queue);
2521 			return PTR_ERR(skb);
2522 		}
2523 
2524 		bt_cb(skb)->l2cap.sar = sar;
2525 		__skb_queue_tail(seg_queue, skb);
2526 
2527 		len -= pdu_len;
2528 		if (sdu_len)
2529 			sdu_len = 0;
2530 
2531 		if (len <= pdu_len) {
2532 			sar = L2CAP_SAR_END;
2533 			pdu_len = len;
2534 		} else {
2535 			sar = L2CAP_SAR_CONTINUE;
2536 		}
2537 	}
2538 
2539 	return 0;
2540 }
2541 
2542 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2543 						   struct msghdr *msg,
2544 						   size_t len, u16 sdulen)
2545 {
2546 	struct l2cap_conn *conn = chan->conn;
2547 	struct sk_buff *skb;
2548 	int err, count, hlen;
2549 	struct l2cap_hdr *lh;
2550 
2551 	BT_DBG("chan %p len %zu", chan, len);
2552 
2553 	if (!conn)
2554 		return ERR_PTR(-ENOTCONN);
2555 
2556 	hlen = L2CAP_HDR_SIZE;
2557 
2558 	if (sdulen)
2559 		hlen += L2CAP_SDULEN_SIZE;
2560 
2561 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2562 
2563 	skb = chan->ops->alloc_skb(chan, hlen, count,
2564 				   msg->msg_flags & MSG_DONTWAIT);
2565 	if (IS_ERR(skb))
2566 		return skb;
2567 
2568 	/* Create L2CAP header */
2569 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2570 	lh->cid = cpu_to_le16(chan->dcid);
2571 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2572 
2573 	if (sdulen)
2574 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2575 
2576 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2577 	if (unlikely(err < 0)) {
2578 		kfree_skb(skb);
2579 		return ERR_PTR(err);
2580 	}
2581 
2582 	return skb;
2583 }
2584 
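/* Segment an SDU for LE/enhanced credit based flow control. Only the first
 * PDU carries the 2-byte SDU length, so every later PDU can carry
 * L2CAP_SDULEN_SIZE more payload bytes than the first one.
 */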
2585 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2586 				struct sk_buff_head *seg_queue,
2587 				struct msghdr *msg, size_t len)
2588 {
2589 	struct sk_buff *skb;
2590 	size_t pdu_len;
2591 	u16 sdu_len;
2592 
2593 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2594 
2595 	sdu_len = len;
2596 	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2597 
2598 	while (len > 0) {
2599 		if (len <= pdu_len)
2600 			pdu_len = len;
2601 
2602 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2603 		if (IS_ERR(skb)) {
2604 			__skb_queue_purge(seg_queue);
2605 			return PTR_ERR(skb);
2606 		}
2607 
2608 		__skb_queue_tail(seg_queue, skb);
2609 
2610 		len -= pdu_len;
2611 
2612 		if (sdu_len) {
2613 			sdu_len = 0;
2614 			pdu_len += L2CAP_SDULEN_SIZE;
2615 		}
2616 	}
2617 
2618 	return 0;
2619 }
2620 
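/* Transmit queued LE flow control PDUs, consuming one credit per PDU, until
 * either the TX queue or the credit pool runs out. The caller suspends the
 * channel when no credits remain.
 */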
2621 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2622 {
2623 	int sent = 0;
2624 
2625 	BT_DBG("chan %p", chan);
2626 
2627 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2628 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2629 		chan->tx_credits--;
2630 		sent++;
2631 	}
2632 
2633 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2634 	       skb_queue_len(&chan->tx_q));
2635 }
2636 
2637 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2638 {
2639 	struct sk_buff *skb;
2640 	int err;
2641 	struct sk_buff_head seg_queue;
2642 
2643 	if (!chan->conn)
2644 		return -ENOTCONN;
2645 
2646 	/* Connectionless channel */
2647 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2648 		skb = l2cap_create_connless_pdu(chan, msg, len);
2649 		if (IS_ERR(skb))
2650 			return PTR_ERR(skb);
2651 
2652 		/* The channel lock is released before requesting a new skb and
2653 		 * then reacquired, so we need to recheck the channel state.
2654 		 */
2655 		if (chan->state != BT_CONNECTED) {
2656 			kfree_skb(skb);
2657 			return -ENOTCONN;
2658 		}
2659 
2660 		l2cap_do_send(chan, skb);
2661 		return len;
2662 	}
2663 
2664 	switch (chan->mode) {
2665 	case L2CAP_MODE_LE_FLOWCTL:
2666 	case L2CAP_MODE_EXT_FLOWCTL:
2667 		/* Check outgoing MTU */
2668 		if (len > chan->omtu)
2669 			return -EMSGSIZE;
2670 
2671 		__skb_queue_head_init(&seg_queue);
2672 
2673 		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2674 
2675 		if (chan->state != BT_CONNECTED) {
2676 			__skb_queue_purge(&seg_queue);
2677 			err = -ENOTCONN;
2678 		}
2679 
2680 		if (err)
2681 			return err;
2682 
2683 		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2684 
2685 		l2cap_le_flowctl_send(chan);
2686 
2687 		if (!chan->tx_credits)
2688 			chan->ops->suspend(chan);
2689 
2690 		err = len;
2691 
2692 		break;
2693 
2694 	case L2CAP_MODE_BASIC:
2695 		/* Check outgoing MTU */
2696 		if (len > chan->omtu)
2697 			return -EMSGSIZE;
2698 
2699 		/* Create a basic PDU */
2700 		skb = l2cap_create_basic_pdu(chan, msg, len);
2701 		if (IS_ERR(skb))
2702 			return PTR_ERR(skb);
2703 
2704 		/* The channel lock is released before requesting a new skb and
2705 		 * then reacquired, so we need to recheck the channel state.
2706 		 */
2707 		if (chan->state != BT_CONNECTED) {
2708 			kfree_skb(skb);
2709 			return -ENOTCONN;
2710 		}
2711 
2712 		l2cap_do_send(chan, skb);
2713 		err = len;
2714 		break;
2715 
2716 	case L2CAP_MODE_ERTM:
2717 	case L2CAP_MODE_STREAMING:
2718 		/* Check outgoing MTU */
2719 		if (len > chan->omtu) {
2720 			err = -EMSGSIZE;
2721 			break;
2722 		}
2723 
2724 		__skb_queue_head_init(&seg_queue);
2725 
2726 		/* Do segmentation before calling in to the state machine,
2727 		 * since it's possible to block while waiting for memory
2728 		 * allocation.
2729 		 */
2730 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2731 
2732 		/* The channel could have been closed while segmenting,
2733 		 * check that it is still connected.
2734 		 */
2735 		if (chan->state != BT_CONNECTED) {
2736 			__skb_queue_purge(&seg_queue);
2737 			err = -ENOTCONN;
2738 		}
2739 
2740 		if (err)
2741 			break;
2742 
2743 		if (chan->mode == L2CAP_MODE_ERTM)
2744 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2745 		else
2746 			l2cap_streaming_send(chan, &seg_queue);
2747 
2748 		err = len;
2749 
2750 		/* If the skbs were not queued for sending, they'll still be in
2751 		 * seg_queue and need to be purged.
2752 		 */
2753 		__skb_queue_purge(&seg_queue);
2754 		break;
2755 
2756 	default:
2757 		BT_DBG("bad mode 0x%1.1x", chan->mode);
2758 		err = -EBADFD;
2759 	}
2760 
2761 	return err;
2762 }
2763 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2764 
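/* Ask the peer to selectively retransmit every missing frame between
 * expected_tx_seq and txseq (those not already buffered in srej_q),
 * remembering each request on srej_list.
 */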
2765 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2766 {
2767 	struct l2cap_ctrl control;
2768 	u16 seq;
2769 
2770 	BT_DBG("chan %p, txseq %u", chan, txseq);
2771 
2772 	memset(&control, 0, sizeof(control));
2773 	control.sframe = 1;
2774 	control.super = L2CAP_SUPER_SREJ;
2775 
2776 	for (seq = chan->expected_tx_seq; seq != txseq;
2777 	     seq = __next_seq(chan, seq)) {
2778 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2779 			control.reqseq = seq;
2780 			l2cap_send_sframe(chan, &control);
2781 			l2cap_seq_list_append(&chan->srej_list, seq);
2782 		}
2783 	}
2784 
2785 	chan->expected_tx_seq = __next_seq(chan, txseq);
2786 }
2787 
2788 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2789 {
2790 	struct l2cap_ctrl control;
2791 
2792 	BT_DBG("chan %p", chan);
2793 
2794 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2795 		return;
2796 
2797 	memset(&control, 0, sizeof(control));
2798 	control.sframe = 1;
2799 	control.super = L2CAP_SUPER_SREJ;
2800 	control.reqseq = chan->srej_list.tail;
2801 	l2cap_send_sframe(chan, &control);
2802 }
2803 
2804 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2805 {
2806 	struct l2cap_ctrl control;
2807 	u16 initial_head;
2808 	u16 seq;
2809 
2810 	BT_DBG("chan %p, txseq %u", chan, txseq);
2811 
2812 	memset(&control, 0, sizeof(control));
2813 	control.sframe = 1;
2814 	control.super = L2CAP_SUPER_SREJ;
2815 
2816 	/* Capture initial list head to allow only one pass through the list. */
2817 	initial_head = chan->srej_list.head;
2818 
2819 	do {
2820 		seq = l2cap_seq_list_pop(&chan->srej_list);
2821 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2822 			break;
2823 
2824 		control.reqseq = seq;
2825 		l2cap_send_sframe(chan, &control);
2826 		l2cap_seq_list_append(&chan->srej_list, seq);
2827 	} while (chan->srej_list.head != initial_head);
2828 }
2829 
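/* Process an incoming ReqSeq acknowledgement: free every acked frame from
 * the TX queue up to (but not including) reqseq and stop the retransmission
 * timer once no unacked frames remain.
 */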
2830 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2831 {
2832 	struct sk_buff *acked_skb;
2833 	u16 ackseq;
2834 
2835 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2836 
2837 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2838 		return;
2839 
2840 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2841 	       chan->expected_ack_seq, chan->unacked_frames);
2842 
2843 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2844 	     ackseq = __next_seq(chan, ackseq)) {
2845 
2846 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2847 		if (acked_skb) {
2848 			skb_unlink(acked_skb, &chan->tx_q);
2849 			kfree_skb(acked_skb);
2850 			chan->unacked_frames--;
2851 		}
2852 	}
2853 
2854 	chan->expected_ack_seq = reqseq;
2855 
2856 	if (chan->unacked_frames == 0)
2857 		__clear_retrans_timer(chan);
2858 
2859 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2860 }
2861 
2862 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2863 {
2864 	BT_DBG("chan %p", chan);
2865 
2866 	chan->expected_tx_seq = chan->buffer_seq;
2867 	l2cap_seq_list_clear(&chan->srej_list);
2868 	skb_queue_purge(&chan->srej_q);
2869 	chan->rx_state = L2CAP_RX_STATE_RECV;
2870 }
2871 
2872 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2873 				struct l2cap_ctrl *control,
2874 				struct sk_buff_head *skbs, u8 event)
2875 {
2876 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2877 	       event);
2878 
2879 	switch (event) {
2880 	case L2CAP_EV_DATA_REQUEST:
2881 		if (chan->tx_send_head == NULL)
2882 			chan->tx_send_head = skb_peek(skbs);
2883 
2884 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2885 		l2cap_ertm_send(chan);
2886 		break;
2887 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2888 		BT_DBG("Enter LOCAL_BUSY");
2889 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2890 
2891 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2892 			/* The SREJ_SENT state must be aborted if we are to
2893 			 * enter the LOCAL_BUSY state.
2894 			 */
2895 			l2cap_abort_rx_srej_sent(chan);
2896 		}
2897 
2898 		l2cap_send_ack(chan);
2899 
2900 		break;
2901 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2902 		BT_DBG("Exit LOCAL_BUSY");
2903 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2904 
2905 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2906 			struct l2cap_ctrl local_control;
2907 
2908 			memset(&local_control, 0, sizeof(local_control));
2909 			local_control.sframe = 1;
2910 			local_control.super = L2CAP_SUPER_RR;
2911 			local_control.poll = 1;
2912 			local_control.reqseq = chan->buffer_seq;
2913 			l2cap_send_sframe(chan, &local_control);
2914 
2915 			chan->retry_count = 1;
2916 			__set_monitor_timer(chan);
2917 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2918 		}
2919 		break;
2920 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2921 		l2cap_process_reqseq(chan, control->reqseq);
2922 		break;
2923 	case L2CAP_EV_EXPLICIT_POLL:
2924 		l2cap_send_rr_or_rnr(chan, 1);
2925 		chan->retry_count = 1;
2926 		__set_monitor_timer(chan);
2927 		__clear_ack_timer(chan);
2928 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2929 		break;
2930 	case L2CAP_EV_RETRANS_TO:
2931 		l2cap_send_rr_or_rnr(chan, 1);
2932 		chan->retry_count = 1;
2933 		__set_monitor_timer(chan);
2934 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2935 		break;
2936 	case L2CAP_EV_RECV_FBIT:
2937 		/* Nothing to process */
2938 		break;
2939 	default:
2940 		break;
2941 	}
2942 }
2943 
2944 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2945 				  struct l2cap_ctrl *control,
2946 				  struct sk_buff_head *skbs, u8 event)
2947 {
2948 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2949 	       event);
2950 
2951 	switch (event) {
2952 	case L2CAP_EV_DATA_REQUEST:
2953 		if (chan->tx_send_head == NULL)
2954 			chan->tx_send_head = skb_peek(skbs);
2955 		/* Queue data, but don't send. */
2956 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2957 		break;
2958 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2959 		BT_DBG("Enter LOCAL_BUSY");
2960 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2961 
2962 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2963 			/* The SREJ_SENT state must be aborted if we are to
2964 			 * enter the LOCAL_BUSY state.
2965 			 */
2966 			l2cap_abort_rx_srej_sent(chan);
2967 		}
2968 
2969 		l2cap_send_ack(chan);
2970 
2971 		break;
2972 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2973 		BT_DBG("Exit LOCAL_BUSY");
2974 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2975 
2976 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2977 			struct l2cap_ctrl local_control;
2978 			memset(&local_control, 0, sizeof(local_control));
2979 			local_control.sframe = 1;
2980 			local_control.super = L2CAP_SUPER_RR;
2981 			local_control.poll = 1;
2982 			local_control.reqseq = chan->buffer_seq;
2983 			l2cap_send_sframe(chan, &local_control);
2984 
2985 			chan->retry_count = 1;
2986 			__set_monitor_timer(chan);
2987 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2988 		}
2989 		break;
2990 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2991 		l2cap_process_reqseq(chan, control->reqseq);
2992 		fallthrough;
2993 
2994 	case L2CAP_EV_RECV_FBIT:
2995 		if (control && control->final) {
2996 			__clear_monitor_timer(chan);
2997 			if (chan->unacked_frames > 0)
2998 				__set_retrans_timer(chan);
2999 			chan->retry_count = 0;
3000 			chan->tx_state = L2CAP_TX_STATE_XMIT;
3001 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
3002 		}
3003 		break;
3004 	case L2CAP_EV_EXPLICIT_POLL:
3005 		/* Ignore */
3006 		break;
3007 	case L2CAP_EV_MONITOR_TO:
3008 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3009 			l2cap_send_rr_or_rnr(chan, 1);
3010 			__set_monitor_timer(chan);
3011 			chan->retry_count++;
3012 		} else {
3013 			l2cap_send_disconn_req(chan, ECONNABORTED);
3014 		}
3015 		break;
3016 	default:
3017 		break;
3018 	}
3019 }
3020 
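/* ERTM transmit state machine entry point: dispatch the event to the
 * handler for the current TX state (XMIT or WAIT_F).
 */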
3021 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3022 		     struct sk_buff_head *skbs, u8 event)
3023 {
3024 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3025 	       chan, control, skbs, event, chan->tx_state);
3026 
3027 	switch (chan->tx_state) {
3028 	case L2CAP_TX_STATE_XMIT:
3029 		l2cap_tx_state_xmit(chan, control, skbs, event);
3030 		break;
3031 	case L2CAP_TX_STATE_WAIT_F:
3032 		l2cap_tx_state_wait_f(chan, control, skbs, event);
3033 		break;
3034 	default:
3035 		/* Ignore event */
3036 		break;
3037 	}
3038 }
3039 
3040 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3041 			     struct l2cap_ctrl *control)
3042 {
3043 	BT_DBG("chan %p, control %p", chan, control);
3044 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
3045 }
3046 
3047 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3048 				  struct l2cap_ctrl *control)
3049 {
3050 	BT_DBG("chan %p, control %p", chan, control);
3051 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3052 }
3053 
3054 /* Copy frame to all raw sockets on that connection */
3055 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3056 {
3057 	struct sk_buff *nskb;
3058 	struct l2cap_chan *chan;
3059 
3060 	BT_DBG("conn %p", conn);
3061 
3062 	mutex_lock(&conn->chan_lock);
3063 
3064 	list_for_each_entry(chan, &conn->chan_l, list) {
3065 		if (chan->chan_type != L2CAP_CHAN_RAW)
3066 			continue;
3067 
3068 		/* Don't send frame to the channel it came from */
3069 		if (bt_cb(skb)->l2cap.chan == chan)
3070 			continue;
3071 
3072 		nskb = skb_clone(skb, GFP_KERNEL);
3073 		if (!nskb)
3074 			continue;
3075 		if (chan->ops->recv(chan, nskb))
3076 			kfree_skb(nskb);
3077 	}
3078 
3079 	mutex_unlock(&conn->chan_lock);
3080 }
3081 
3082 /* ---- L2CAP signalling commands ---- */
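/* Build a signalling PDU: L2CAP header on the signalling CID (LE or
 * BR/EDR), command header (code/ident/length) and payload, with any payload
 * exceeding the connection MTU chained as continuation fragments.
 */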
3083 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3084 				       u8 ident, u16 dlen, void *data)
3085 {
3086 	struct sk_buff *skb, **frag;
3087 	struct l2cap_cmd_hdr *cmd;
3088 	struct l2cap_hdr *lh;
3089 	int len, count;
3090 
3091 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3092 	       conn, code, ident, dlen);
3093 
3094 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3095 		return NULL;
3096 
3097 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3098 	count = min_t(unsigned int, conn->mtu, len);
3099 
3100 	skb = bt_skb_alloc(count, GFP_KERNEL);
3101 	if (!skb)
3102 		return NULL;
3103 
3104 	lh = skb_put(skb, L2CAP_HDR_SIZE);
3105 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
3106 
3107 	if (conn->hcon->type == LE_LINK)
3108 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3109 	else
3110 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
3111 
3112 	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
3113 	cmd->code  = code;
3114 	cmd->ident = ident;
3115 	cmd->len   = cpu_to_le16(dlen);
3116 
3117 	if (dlen) {
3118 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3119 		skb_put_data(skb, data, count);
3120 		data += count;
3121 	}
3122 
3123 	len -= skb->len;
3124 
3125 	/* Continuation fragments (no L2CAP header) */
3126 	frag = &skb_shinfo(skb)->frag_list;
3127 	while (len) {
3128 		count = min_t(unsigned int, conn->mtu, len);
3129 
3130 		*frag = bt_skb_alloc(count, GFP_KERNEL);
3131 		if (!*frag)
3132 			goto fail;
3133 
3134 		skb_put_data(*frag, data, count);
3135 
3136 		len  -= count;
3137 		data += count;
3138 
3139 		frag = &(*frag)->next;
3140 	}
3141 
3142 	return skb;
3143 
3144 fail:
3145 	kfree_skb(skb);
3146 	return NULL;
3147 }
3148 
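/* Parse one configuration option (TLV) at *ptr and advance the pointer.
 * 1-, 2- and 4-byte values are returned by value; anything larger is
 * returned as a pointer to the option payload. Returns the length consumed.
 */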
3149 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3150 				     unsigned long *val)
3151 {
3152 	struct l2cap_conf_opt *opt = *ptr;
3153 	int len;
3154 
3155 	len = L2CAP_CONF_OPT_SIZE + opt->len;
3156 	*ptr += len;
3157 
3158 	*type = opt->type;
3159 	*olen = opt->len;
3160 
3161 	switch (opt->len) {
3162 	case 1:
3163 		*val = *((u8 *) opt->val);
3164 		break;
3165 
3166 	case 2:
3167 		*val = get_unaligned_le16(opt->val);
3168 		break;
3169 
3170 	case 4:
3171 		*val = get_unaligned_le32(opt->val);
3172 		break;
3173 
3174 	default:
3175 		*val = (unsigned long) opt->val;
3176 		break;
3177 	}
3178 
3179 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3180 	return len;
3181 }
3182 
3183 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3184 {
3185 	struct l2cap_conf_opt *opt = *ptr;
3186 
3187 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3188 
3189 	if (size < L2CAP_CONF_OPT_SIZE + len)
3190 		return;
3191 
3192 	opt->type = type;
3193 	opt->len  = len;
3194 
3195 	switch (len) {
3196 	case 1:
3197 		*((u8 *) opt->val)  = val;
3198 		break;
3199 
3200 	case 2:
3201 		put_unaligned_le16(val, opt->val);
3202 		break;
3203 
3204 	case 4:
3205 		put_unaligned_le32(val, opt->val);
3206 		break;
3207 
3208 	default:
3209 		memcpy(opt->val, (void *) val, len);
3210 		break;
3211 	}
3212 
3213 	*ptr += L2CAP_CONF_OPT_SIZE + len;
3214 }
3215 
3216 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3217 {
3218 	struct l2cap_conf_efs efs;
3219 
3220 	switch (chan->mode) {
3221 	case L2CAP_MODE_ERTM:
3222 		efs.id		= chan->local_id;
3223 		efs.stype	= chan->local_stype;
3224 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3225 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3226 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3227 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3228 		break;
3229 
3230 	case L2CAP_MODE_STREAMING:
3231 		efs.id		= 1;
3232 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3233 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3234 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3235 		efs.acc_lat	= 0;
3236 		efs.flush_to	= 0;
3237 		break;
3238 
3239 	default:
3240 		return;
3241 	}
3242 
3243 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3244 			   (unsigned long) &efs, size);
3245 }
3246 
3247 static void l2cap_ack_timeout(struct work_struct *work)
3248 {
3249 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3250 					       ack_timer.work);
3251 	u16 frames_to_ack;
3252 
3253 	BT_DBG("chan %p", chan);
3254 
3255 	l2cap_chan_lock(chan);
3256 
3257 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3258 				     chan->last_acked_seq);
3259 
3260 	if (frames_to_ack)
3261 		l2cap_send_rr_or_rnr(chan, 0);
3262 
3263 	l2cap_chan_unlock(chan);
3264 	l2cap_chan_put(chan);
3265 }
3266 
3267 int l2cap_ertm_init(struct l2cap_chan *chan)
3268 {
3269 	int err;
3270 
3271 	chan->next_tx_seq = 0;
3272 	chan->expected_tx_seq = 0;
3273 	chan->expected_ack_seq = 0;
3274 	chan->unacked_frames = 0;
3275 	chan->buffer_seq = 0;
3276 	chan->frames_sent = 0;
3277 	chan->last_acked_seq = 0;
3278 	chan->sdu = NULL;
3279 	chan->sdu_last_frag = NULL;
3280 	chan->sdu_len = 0;
3281 
3282 	skb_queue_head_init(&chan->tx_q);
3283 
3284 	chan->local_amp_id = AMP_ID_BREDR;
3285 	chan->move_id = AMP_ID_BREDR;
3286 	chan->move_state = L2CAP_MOVE_STABLE;
3287 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3288 
3289 	if (chan->mode != L2CAP_MODE_ERTM)
3290 		return 0;
3291 
3292 	chan->rx_state = L2CAP_RX_STATE_RECV;
3293 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3294 
3295 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3296 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3297 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3298 
3299 	skb_queue_head_init(&chan->srej_q);
3300 
3301 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3302 	if (err < 0)
3303 		return err;
3304 
3305 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3306 	if (err < 0)
3307 		l2cap_seq_list_free(&chan->srej_list);
3308 
3309 	return err;
3310 }
3311 
3312 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3313 {
3314 	switch (mode) {
3315 	case L2CAP_MODE_STREAMING:
3316 	case L2CAP_MODE_ERTM:
3317 		if (l2cap_mode_supported(mode, remote_feat_mask))
3318 			return mode;
3319 		fallthrough;
3320 	default:
3321 		return L2CAP_MODE_BASIC;
3322 	}
3323 }
3324 
3325 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3326 {
3327 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3328 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3329 }
3330 
3331 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3332 {
3333 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3334 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3335 }
3336 
3337 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3338 				      struct l2cap_conf_rfc *rfc)
3339 {
3340 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3341 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3342 
3343 		/* Class 1 devices must have ERTM timeouts
3344 		 * exceeding the Link Supervision Timeout.  The
3345 		 * default Link Supervision Timeout for AMP
3346 		 * controllers is 10 seconds.
3347 		 *
3348 		 * Class 1 devices use 0xffffffff for their
3349 		 * best-effort flush timeout, so the clamping logic
3350 		 * will result in a timeout that meets the above
3351 		 * requirement.  ERTM timeouts are 16-bit values, so
3352 		 * the maximum timeout is 65.535 seconds.
3353 		 */
3354 
3355 		/* Convert timeout to milliseconds and round */
3356 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3357 
3358 		/* This is the recommended formula for class 2 devices
3359 		 * that start ERTM timers when packets are sent to the
3360 		 * controller.
3361 		 */
3362 		ertm_to = 3 * ertm_to + 500;
3363 
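		/* For example, a flush timeout that converts to 10 ms gives
		 * 3 * 10 + 500 = 530 ms, while the Class 1 value of
		 * 0xffffffff ends up clamped to the 16-bit maximum of
		 * 65535 ms (65.535 s) below.
		 */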
3364 		if (ertm_to > 0xffff)
3365 			ertm_to = 0xffff;
3366 
3367 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3368 		rfc->monitor_timeout = rfc->retrans_timeout;
3369 	} else {
3370 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3371 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3372 	}
3373 }
3374 
3375 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3376 {
3377 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3378 	    __l2cap_ews_supported(chan->conn)) {
3379 		/* use extended control field */
3380 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3381 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3382 	} else {
3383 		chan->tx_win = min_t(u16, chan->tx_win,
3384 				     L2CAP_DEFAULT_TX_WINDOW);
3385 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3386 	}
3387 	chan->ack_win = chan->tx_win;
3388 }
3389 
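/* Pick a default incoming MTU from the largest EDR ACL packet the link
 * allows. The HCI_2DHx/HCI_3DHx packet-type bits are exclusion flags
 * ("shall not be used"), so a clear bit means that packet, and hence its
 * payload size minus the 2-byte payload header, is available.
 */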
3390 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3391 {
3392 	struct hci_conn *conn = chan->conn->hcon;
3393 
3394 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3395 
3396 	/* The 2-DH1 packet has between 2 and 56 information bytes
3397 	 * (including the 2-byte payload header)
3398 	 */
3399 	if (!(conn->pkt_type & HCI_2DH1))
3400 		chan->imtu = 54;
3401 
3402 	/* The 3-DH1 packet has between 2 and 85 information bytes
3403 	 * (including the 2-byte payload header)
3404 	 */
3405 	if (!(conn->pkt_type & HCI_3DH1))
3406 		chan->imtu = 83;
3407 
3408 	/* The 2-DH3 packet has between 2 and 369 information bytes
3409 	 * (including the 2-byte payload header)
3410 	 */
3411 	if (!(conn->pkt_type & HCI_2DH3))
3412 		chan->imtu = 367;
3413 
3414 	/* The 3-DH3 packet has between 2 and 554 information bytes
3415 	 * (including the 2-byte payload header)
3416 	 */
3417 	if (!(conn->pkt_type & HCI_3DH3))
3418 		chan->imtu = 552;
3419 
3420 	/* The 2-DH5 packet has between 2 and 681 information bytes
3421 	 * (including the 2-byte payload header)
3422 	 */
3423 	if (!(conn->pkt_type & HCI_2DH5))
3424 		chan->imtu = 679;
3425 
3426 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3427 	 * (including the 2-byte payload header)
3428 	 */
3429 	if (!(conn->pkt_type & HCI_3DH5))
3430 		chan->imtu = 1021;
3431 }
3432 
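/* Build our outgoing Configuration Request: an MTU option when the MTU is
 * not the default, then mode-specific options (RFC, plus EFS/EWS/FCS where
 * supported) for ERTM or streaming mode.
 */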
3433 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3434 {
3435 	struct l2cap_conf_req *req = data;
3436 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3437 	void *ptr = req->data;
3438 	void *endptr = data + data_size;
3439 	u16 size;
3440 
3441 	BT_DBG("chan %p", chan);
3442 
3443 	if (chan->num_conf_req || chan->num_conf_rsp)
3444 		goto done;
3445 
3446 	switch (chan->mode) {
3447 	case L2CAP_MODE_STREAMING:
3448 	case L2CAP_MODE_ERTM:
3449 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3450 			break;
3451 
3452 		if (__l2cap_efs_supported(chan->conn))
3453 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3454 
3455 		fallthrough;
3456 	default:
3457 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3458 		break;
3459 	}
3460 
3461 done:
3462 	if (chan->imtu != L2CAP_DEFAULT_MTU) {
3463 		if (!chan->imtu)
3464 			l2cap_mtu_auto(chan);
3465 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3466 				   endptr - ptr);
3467 	}
3468 
3469 	switch (chan->mode) {
3470 	case L2CAP_MODE_BASIC:
3471 		if (disable_ertm)
3472 			break;
3473 
3474 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3475 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3476 			break;
3477 
3478 		rfc.mode            = L2CAP_MODE_BASIC;
3479 		rfc.txwin_size      = 0;
3480 		rfc.max_transmit    = 0;
3481 		rfc.retrans_timeout = 0;
3482 		rfc.monitor_timeout = 0;
3483 		rfc.max_pdu_size    = 0;
3484 
3485 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3486 				   (unsigned long) &rfc, endptr - ptr);
3487 		break;
3488 
3489 	case L2CAP_MODE_ERTM:
3490 		rfc.mode            = L2CAP_MODE_ERTM;
3491 		rfc.max_transmit    = chan->max_tx;
3492 
3493 		__l2cap_set_ertm_timeouts(chan, &rfc);
3494 
3495 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3496 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3497 			     L2CAP_FCS_SIZE);
3498 		rfc.max_pdu_size = cpu_to_le16(size);
3499 
3500 		l2cap_txwin_setup(chan);
3501 
3502 		rfc.txwin_size = min_t(u16, chan->tx_win,
3503 				       L2CAP_DEFAULT_TX_WINDOW);
3504 
3505 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3506 				   (unsigned long) &rfc, endptr - ptr);
3507 
3508 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3509 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3510 
3511 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3512 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3513 					   chan->tx_win, endptr - ptr);
3514 
3515 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3516 			if (chan->fcs == L2CAP_FCS_NONE ||
3517 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3518 				chan->fcs = L2CAP_FCS_NONE;
3519 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3520 						   chan->fcs, endptr - ptr);
3521 			}
3522 		break;
3523 
3524 	case L2CAP_MODE_STREAMING:
3525 		l2cap_txwin_setup(chan);
3526 		rfc.mode            = L2CAP_MODE_STREAMING;
3527 		rfc.txwin_size      = 0;
3528 		rfc.max_transmit    = 0;
3529 		rfc.retrans_timeout = 0;
3530 		rfc.monitor_timeout = 0;
3531 
3532 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3533 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3534 			     L2CAP_FCS_SIZE);
3535 		rfc.max_pdu_size = cpu_to_le16(size);
3536 
3537 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3538 				   (unsigned long) &rfc, endptr - ptr);
3539 
3540 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3541 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3542 
3543 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3544 			if (chan->fcs == L2CAP_FCS_NONE ||
3545 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3546 				chan->fcs = L2CAP_FCS_NONE;
3547 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3548 						   chan->fcs, endptr - ptr);
3549 			}
3550 		break;
3551 	}
3552 
3553 	req->dcid  = cpu_to_le16(chan->dcid);
3554 	req->flags = cpu_to_le16(0);
3555 
3556 	return ptr - data;
3557 }
3558 
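/* Parse the peer's Configuration Request (stored in chan->conf_req) and
 * build our Configuration Response, recording the remote MTU, RFC, EFS and
 * EWS parameters and refusing combinations we cannot support.
 */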
3559 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3560 {
3561 	struct l2cap_conf_rsp *rsp = data;
3562 	void *ptr = rsp->data;
3563 	void *endptr = data + data_size;
3564 	void *req = chan->conf_req;
3565 	int len = chan->conf_len;
3566 	int type, hint, olen;
3567 	unsigned long val;
3568 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3569 	struct l2cap_conf_efs efs;
3570 	u8 remote_efs = 0;
3571 	u16 mtu = L2CAP_DEFAULT_MTU;
3572 	u16 result = L2CAP_CONF_SUCCESS;
3573 	u16 size;
3574 
3575 	BT_DBG("chan %p", chan);
3576 
3577 	while (len >= L2CAP_CONF_OPT_SIZE) {
3578 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3579 		if (len < 0)
3580 			break;
3581 
3582 		hint  = type & L2CAP_CONF_HINT;
3583 		type &= L2CAP_CONF_MASK;
3584 
3585 		switch (type) {
3586 		case L2CAP_CONF_MTU:
3587 			if (olen != 2)
3588 				break;
3589 			mtu = val;
3590 			break;
3591 
3592 		case L2CAP_CONF_FLUSH_TO:
3593 			if (olen != 2)
3594 				break;
3595 			chan->flush_to = val;
3596 			break;
3597 
3598 		case L2CAP_CONF_QOS:
3599 			break;
3600 
3601 		case L2CAP_CONF_RFC:
3602 			if (olen != sizeof(rfc))
3603 				break;
3604 			memcpy(&rfc, (void *) val, olen);
3605 			break;
3606 
3607 		case L2CAP_CONF_FCS:
3608 			if (olen != 1)
3609 				break;
3610 			if (val == L2CAP_FCS_NONE)
3611 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3612 			break;
3613 
3614 		case L2CAP_CONF_EFS:
3615 			if (olen != sizeof(efs))
3616 				break;
3617 			remote_efs = 1;
3618 			memcpy(&efs, (void *) val, olen);
3619 			break;
3620 
3621 		case L2CAP_CONF_EWS:
3622 			if (olen != 2)
3623 				break;
3624 			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3625 				return -ECONNREFUSED;
3626 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3627 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3628 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3629 			chan->remote_tx_win = val;
3630 			break;
3631 
3632 		default:
3633 			if (hint)
3634 				break;
3635 			result = L2CAP_CONF_UNKNOWN;
3636 			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
3637 			break;
3638 		}
3639 	}
3640 
3641 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3642 		goto done;
3643 
3644 	switch (chan->mode) {
3645 	case L2CAP_MODE_STREAMING:
3646 	case L2CAP_MODE_ERTM:
3647 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3648 			chan->mode = l2cap_select_mode(rfc.mode,
3649 						       chan->conn->feat_mask);
3650 			break;
3651 		}
3652 
3653 		if (remote_efs) {
3654 			if (__l2cap_efs_supported(chan->conn))
3655 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3656 			else
3657 				return -ECONNREFUSED;
3658 		}
3659 
3660 		if (chan->mode != rfc.mode)
3661 			return -ECONNREFUSED;
3662 
3663 		break;
3664 	}
3665 
3666 done:
3667 	if (chan->mode != rfc.mode) {
3668 		result = L2CAP_CONF_UNACCEPT;
3669 		rfc.mode = chan->mode;
3670 
3671 		if (chan->num_conf_rsp == 1)
3672 			return -ECONNREFUSED;
3673 
3674 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3675 				   (unsigned long) &rfc, endptr - ptr);
3676 	}
3677 
3678 	if (result == L2CAP_CONF_SUCCESS) {
3679 		/* Configure output options and let the other side know
3680 		 * which ones we don't like. */
3681 
3682 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3683 			result = L2CAP_CONF_UNACCEPT;
3684 		else {
3685 			chan->omtu = mtu;
3686 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3687 		}
3688 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3689 
3690 		if (remote_efs) {
3691 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3692 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3693 			    efs.stype != chan->local_stype) {
3694 
3695 				result = L2CAP_CONF_UNACCEPT;
3696 
3697 				if (chan->num_conf_req >= 1)
3698 					return -ECONNREFUSED;
3699 
3700 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3701 						   sizeof(efs),
3702 						   (unsigned long) &efs, endptr - ptr);
3703 			} else {
3704 				/* Send PENDING Conf Rsp */
3705 				result = L2CAP_CONF_PENDING;
3706 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3707 			}
3708 		}
3709 
3710 		switch (rfc.mode) {
3711 		case L2CAP_MODE_BASIC:
3712 			chan->fcs = L2CAP_FCS_NONE;
3713 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3714 			break;
3715 
3716 		case L2CAP_MODE_ERTM:
3717 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3718 				chan->remote_tx_win = rfc.txwin_size;
3719 			else
3720 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3721 
3722 			chan->remote_max_tx = rfc.max_transmit;
3723 
3724 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3725 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3726 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3727 			rfc.max_pdu_size = cpu_to_le16(size);
3728 			chan->remote_mps = size;
3729 
3730 			__l2cap_set_ertm_timeouts(chan, &rfc);
3731 
3732 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3733 
3734 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3735 					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3736 
3737 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3738 				chan->remote_id = efs.id;
3739 				chan->remote_stype = efs.stype;
3740 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3741 				chan->remote_flush_to =
3742 					le32_to_cpu(efs.flush_to);
3743 				chan->remote_acc_lat =
3744 					le32_to_cpu(efs.acc_lat);
3745 				chan->remote_sdu_itime =
3746 					le32_to_cpu(efs.sdu_itime);
3747 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3748 						   sizeof(efs),
3749 						   (unsigned long) &efs, endptr - ptr);
3750 			}
3751 			break;
3752 
3753 		case L2CAP_MODE_STREAMING:
3754 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3755 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3756 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3757 			rfc.max_pdu_size = cpu_to_le16(size);
3758 			chan->remote_mps = size;
3759 
3760 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3761 
3762 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3763 					   (unsigned long) &rfc, endptr - ptr);
3764 
3765 			break;
3766 
3767 		default:
3768 			result = L2CAP_CONF_UNACCEPT;
3769 
3770 			memset(&rfc, 0, sizeof(rfc));
3771 			rfc.mode = chan->mode;
3772 		}
3773 
3774 		if (result == L2CAP_CONF_SUCCESS)
3775 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3776 	}
3777 	rsp->scid   = cpu_to_le16(chan->dcid);
3778 	rsp->result = cpu_to_le16(result);
3779 	rsp->flags  = cpu_to_le16(0);
3780 
3781 	return ptr - data;
3782 }
3783 
3784 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3785 				void *data, size_t size, u16 *result)
3786 {
3787 	struct l2cap_conf_req *req = data;
3788 	void *ptr = req->data;
3789 	void *endptr = data + size;
3790 	int type, olen;
3791 	unsigned long val;
3792 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3793 	struct l2cap_conf_efs efs;
3794 
3795 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3796 
3797 	while (len >= L2CAP_CONF_OPT_SIZE) {
3798 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3799 		if (len < 0)
3800 			break;
3801 
3802 		switch (type) {
3803 		case L2CAP_CONF_MTU:
3804 			if (olen != 2)
3805 				break;
3806 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3807 				*result = L2CAP_CONF_UNACCEPT;
3808 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3809 			} else
3810 				chan->imtu = val;
3811 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3812 					   endptr - ptr);
3813 			break;
3814 
3815 		case L2CAP_CONF_FLUSH_TO:
3816 			if (olen != 2)
3817 				break;
3818 			chan->flush_to = val;
3819 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3820 					   chan->flush_to, endptr - ptr);
3821 			break;
3822 
3823 		case L2CAP_CONF_RFC:
3824 			if (olen != sizeof(rfc))
3825 				break;
3826 			memcpy(&rfc, (void *)val, olen);
3827 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3828 			    rfc.mode != chan->mode)
3829 				return -ECONNREFUSED;
3830 			chan->fcs = 0;
3831 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3832 					   (unsigned long) &rfc, endptr - ptr);
3833 			break;
3834 
3835 		case L2CAP_CONF_EWS:
3836 			if (olen != 2)
3837 				break;
3838 			chan->ack_win = min_t(u16, val, chan->ack_win);
3839 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3840 					   chan->tx_win, endptr - ptr);
3841 			break;
3842 
3843 		case L2CAP_CONF_EFS:
3844 			if (olen != sizeof(efs))
3845 				break;
3846 			memcpy(&efs, (void *)val, olen);
3847 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3848 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3849 			    efs.stype != chan->local_stype)
3850 				return -ECONNREFUSED;
3851 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3852 					   (unsigned long) &efs, endptr - ptr);
3853 			break;
3854 
3855 		case L2CAP_CONF_FCS:
3856 			if (olen != 1)
3857 				break;
3858 			if (*result == L2CAP_CONF_PENDING)
3859 				if (val == L2CAP_FCS_NONE)
3860 					set_bit(CONF_RECV_NO_FCS,
3861 						&chan->conf_state);
3862 			break;
3863 		}
3864 	}
3865 
3866 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3867 		return -ECONNREFUSED;
3868 
3869 	chan->mode = rfc.mode;
3870 
3871 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3872 		switch (rfc.mode) {
3873 		case L2CAP_MODE_ERTM:
3874 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3875 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3876 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3877 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3878 				chan->ack_win = min_t(u16, chan->ack_win,
3879 						      rfc.txwin_size);
3880 
3881 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3882 				chan->local_msdu = le16_to_cpu(efs.msdu);
3883 				chan->local_sdu_itime =
3884 					le32_to_cpu(efs.sdu_itime);
3885 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3886 				chan->local_flush_to =
3887 					le32_to_cpu(efs.flush_to);
3888 			}
3889 			break;
3890 
3891 		case L2CAP_MODE_STREAMING:
3892 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3893 		}
3894 	}
3895 
3896 	req->dcid   = cpu_to_le16(chan->dcid);
3897 	req->flags  = cpu_to_le16(0);
3898 
3899 	return ptr - data;
3900 }
3901 
3902 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3903 				u16 result, u16 flags)
3904 {
3905 	struct l2cap_conf_rsp *rsp = data;
3906 	void *ptr = rsp->data;
3907 
3908 	BT_DBG("chan %p", chan);
3909 
3910 	rsp->scid   = cpu_to_le16(chan->dcid);
3911 	rsp->result = cpu_to_le16(result);
3912 	rsp->flags  = cpu_to_le16(flags);
3913 
3914 	return ptr - data;
3915 }
3916 
3917 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3918 {
3919 	struct l2cap_le_conn_rsp rsp;
3920 	struct l2cap_conn *conn = chan->conn;
3921 
3922 	BT_DBG("chan %p", chan);
3923 
3924 	rsp.dcid    = cpu_to_le16(chan->scid);
3925 	rsp.mtu     = cpu_to_le16(chan->imtu);
3926 	rsp.mps     = cpu_to_le16(chan->mps);
3927 	rsp.credits = cpu_to_le16(chan->rx_credits);
3928 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3929 
3930 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3931 		       &rsp);
3932 }
3933 
3934 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3935 {
3936 	struct {
3937 		struct l2cap_ecred_conn_rsp rsp;
3938 		__le16 dcid[5];
3939 	} __packed pdu;
3940 	struct l2cap_conn *conn = chan->conn;
3941 	u16 ident = chan->ident;
3942 	int i = 0;
3943 
3944 	if (!ident)
3945 		return;
3946 
3947 	BT_DBG("chan %p ident %d", chan, ident);
3948 
3949 	pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
3950 	pdu.rsp.mps     = cpu_to_le16(chan->mps);
3951 	pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
3952 	pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3953 
3954 	mutex_lock(&conn->chan_lock);
3955 
3956 	list_for_each_entry(chan, &conn->chan_l, list) {
3957 		if (chan->ident != ident)
3958 			continue;
3959 
3960 		/* Reset ident so only one response is sent */
3961 		chan->ident = 0;
3962 
3963 		/* Include all channels pending with the same ident */
3964 		pdu.dcid[i++] = cpu_to_le16(chan->scid);
3965 	}
3966 
3967 	mutex_unlock(&conn->chan_lock);
3968 
3969 	l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
3970 			sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
3971 }
3972 
3973 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3974 {
3975 	struct l2cap_conn_rsp rsp;
3976 	struct l2cap_conn *conn = chan->conn;
3977 	u8 buf[128];
3978 	u8 rsp_code;
3979 
3980 	rsp.scid   = cpu_to_le16(chan->dcid);
3981 	rsp.dcid   = cpu_to_le16(chan->scid);
3982 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3983 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3984 
3985 	if (chan->hs_hcon)
3986 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3987 	else
3988 		rsp_code = L2CAP_CONN_RSP;
3989 
3990 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3991 
3992 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3993 
3994 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3995 		return;
3996 
3997 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3998 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3999 	chan->num_conf_req++;
4000 }
4001 
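/* Extract the finally agreed ERTM/streaming parameters from a successful
 * Configuration Response.  Only the RFC and extended window size (EWS)
 * options matter here; anything the remote left out falls back to the
 * defaults set up in 'rfc' and 'txwin_ext' below.
 */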
4002 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
4003 {
4004 	int type, olen;
4005 	unsigned long val;
4006 	/* Use sane default values in case a misbehaving remote device
4007 	 * did not send an RFC or extended window size option.
4008 	 */
4009 	u16 txwin_ext = chan->ack_win;
4010 	struct l2cap_conf_rfc rfc = {
4011 		.mode = chan->mode,
4012 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
4013 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
4014 		.max_pdu_size = cpu_to_le16(chan->imtu),
4015 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
4016 	};
4017 
4018 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
4019 
4020 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
4021 		return;
4022 
4023 	while (len >= L2CAP_CONF_OPT_SIZE) {
4024 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
4025 		if (len < 0)
4026 			break;
4027 
4028 		switch (type) {
4029 		case L2CAP_CONF_RFC:
4030 			if (olen != sizeof(rfc))
4031 				break;
4032 			memcpy(&rfc, (void *)val, olen);
4033 			break;
4034 		case L2CAP_CONF_EWS:
4035 			if (olen != 2)
4036 				break;
4037 			txwin_ext = val;
4038 			break;
4039 		}
4040 	}
4041 
4042 	switch (rfc.mode) {
4043 	case L2CAP_MODE_ERTM:
4044 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
4045 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
4046 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
4047 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4048 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
4049 		else
4050 			chan->ack_win = min_t(u16, chan->ack_win,
4051 					      rfc.txwin_size);
4052 		break;
4053 	case L2CAP_MODE_STREAMING:
4054 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
4055 	}
4056 }
4057 
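/* Handle an incoming Command Reject.  If the peer did not understand our
 * pending Information Request (the ident matches conn->info_ident), give
 * up on the feature mask exchange and start the channels that were
 * waiting for it.
 */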
4058 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4059 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4060 				    u8 *data)
4061 {
4062 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
4063 
4064 	if (cmd_len < sizeof(*rej))
4065 		return -EPROTO;
4066 
4067 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4068 		return 0;
4069 
4070 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4071 	    cmd->ident == conn->info_ident) {
4072 		cancel_delayed_work(&conn->info_timer);
4073 
4074 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4075 		conn->info_ident = 0;
4076 
4077 		l2cap_conn_start(conn);
4078 	}
4079 
4080 	return 0;
4081 }
4082 
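/* Handle an incoming Connection Request (or, via rsp_code/amp_id, an AMP
 * Create Channel Request).  The PSM must have a listening channel, the
 * link must be secure enough unless the PSM is SDP, and the remote SCID
 * must be in the dynamic range and not already in use.  The response
 * result is one of:
 *
 *   L2CAP_CR_SUCCESS - channel accepted
 *   L2CAP_CR_PEND    - authentication or authorization still pending
 *   L2CAP_CR_BAD_PSM / L2CAP_CR_SEC_BLOCK / L2CAP_CR_NO_MEM /
 *   L2CAP_CR_INVALID_SCID / L2CAP_CR_SCID_IN_USE - request refused
 */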
4083 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
4084 					struct l2cap_cmd_hdr *cmd,
4085 					u8 *data, u8 rsp_code, u8 amp_id)
4086 {
4087 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
4088 	struct l2cap_conn_rsp rsp;
4089 	struct l2cap_chan *chan = NULL, *pchan;
4090 	int result, status = L2CAP_CS_NO_INFO;
4091 
4092 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
4093 	__le16 psm = req->psm;
4094 
4095 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
4096 
4097 	/* Check if we have socket listening on psm */
4098 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
4099 					 &conn->hcon->dst, ACL_LINK);
4100 	if (!pchan) {
4101 		result = L2CAP_CR_BAD_PSM;
4102 		goto sendresp;
4103 	}
4104 
4105 	mutex_lock(&conn->chan_lock);
4106 	l2cap_chan_lock(pchan);
4107 
4108 	/* Check if the ACL is secure enough (if not SDP) */
4109 	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
4110 	    !hci_conn_check_link_mode(conn->hcon)) {
4111 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
4112 		result = L2CAP_CR_SEC_BLOCK;
4113 		goto response;
4114 	}
4115 
4116 	result = L2CAP_CR_NO_MEM;
4117 
4118 	/* Check for valid dynamic CID range (as per Erratum 3253) */
4119 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
4120 		result = L2CAP_CR_INVALID_SCID;
4121 		goto response;
4122 	}
4123 
4124 	/* Check if we already have channel with that dcid */
4125 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
4126 		result = L2CAP_CR_SCID_IN_USE;
4127 		goto response;
4128 	}
4129 
4130 	chan = pchan->ops->new_connection(pchan);
4131 	if (!chan)
4132 		goto response;
4133 
4134 	/* For certain devices (e.g. a HID mouse), support for authentication,
4135 	 * pairing and bonding is optional. For such devices, in order to avoid
4136 	 * keeping the ACL alive for too long after L2CAP disconnection, reset
4137 	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4138 	 */
4139 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4140 
4141 	bacpy(&chan->src, &conn->hcon->src);
4142 	bacpy(&chan->dst, &conn->hcon->dst);
4143 	chan->src_type = bdaddr_src_type(conn->hcon);
4144 	chan->dst_type = bdaddr_dst_type(conn->hcon);
4145 	chan->psm  = psm;
4146 	chan->dcid = scid;
4147 	chan->local_amp_id = amp_id;
4148 
4149 	__l2cap_chan_add(conn, chan);
4150 
4151 	dcid = chan->scid;
4152 
4153 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4154 
4155 	chan->ident = cmd->ident;
4156 
4157 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4158 		if (l2cap_chan_check_security(chan, false)) {
4159 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4160 				l2cap_state_change(chan, BT_CONNECT2);
4161 				result = L2CAP_CR_PEND;
4162 				status = L2CAP_CS_AUTHOR_PEND;
4163 				chan->ops->defer(chan);
4164 			} else {
4165 				/* Force pending result for AMP controllers.
4166 				 * The connection will succeed after the
4167 				 * physical link is up.
4168 				 */
4169 				if (amp_id == AMP_ID_BREDR) {
4170 					l2cap_state_change(chan, BT_CONFIG);
4171 					result = L2CAP_CR_SUCCESS;
4172 				} else {
4173 					l2cap_state_change(chan, BT_CONNECT2);
4174 					result = L2CAP_CR_PEND;
4175 				}
4176 				status = L2CAP_CS_NO_INFO;
4177 			}
4178 		} else {
4179 			l2cap_state_change(chan, BT_CONNECT2);
4180 			result = L2CAP_CR_PEND;
4181 			status = L2CAP_CS_AUTHEN_PEND;
4182 		}
4183 	} else {
4184 		l2cap_state_change(chan, BT_CONNECT2);
4185 		result = L2CAP_CR_PEND;
4186 		status = L2CAP_CS_NO_INFO;
4187 	}
4188 
4189 response:
4190 	l2cap_chan_unlock(pchan);
4191 	mutex_unlock(&conn->chan_lock);
4192 	l2cap_chan_put(pchan);
4193 
4194 sendresp:
4195 	rsp.scid   = cpu_to_le16(scid);
4196 	rsp.dcid   = cpu_to_le16(dcid);
4197 	rsp.result = cpu_to_le16(result);
4198 	rsp.status = cpu_to_le16(status);
4199 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
4200 
4201 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4202 		struct l2cap_info_req info;
4203 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4204 
4205 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4206 		conn->info_ident = l2cap_get_ident(conn);
4207 
4208 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4209 
4210 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4211 			       sizeof(info), &info);
4212 	}
4213 
4214 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4215 	    result == L2CAP_CR_SUCCESS) {
4216 		u8 buf[128];
4217 		set_bit(CONF_REQ_SENT, &chan->conf_state);
4218 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4219 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4220 		chan->num_conf_req++;
4221 	}
4222 
4223 	return chan;
4224 }
4225 
4226 static int l2cap_connect_req(struct l2cap_conn *conn,
4227 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4228 {
4229 	struct hci_dev *hdev = conn->hcon->hdev;
4230 	struct hci_conn *hcon = conn->hcon;
4231 
4232 	if (cmd_len < sizeof(struct l2cap_conn_req))
4233 		return -EPROTO;
4234 
4235 	hci_dev_lock(hdev);
4236 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4237 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4238 		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
4239 	hci_dev_unlock(hdev);
4240 
4241 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4242 	return 0;
4243 }
4244 
4245 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4246 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4247 				    u8 *data)
4248 {
4249 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4250 	u16 scid, dcid, result, status;
4251 	struct l2cap_chan *chan;
4252 	u8 req[128];
4253 	int err;
4254 
4255 	if (cmd_len < sizeof(*rsp))
4256 		return -EPROTO;
4257 
4258 	scid   = __le16_to_cpu(rsp->scid);
4259 	dcid   = __le16_to_cpu(rsp->dcid);
4260 	result = __le16_to_cpu(rsp->result);
4261 	status = __le16_to_cpu(rsp->status);
4262 
4263 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4264 	       dcid, scid, result, status);
4265 
4266 	mutex_lock(&conn->chan_lock);
4267 
4268 	if (scid) {
4269 		chan = __l2cap_get_chan_by_scid(conn, scid);
4270 		if (!chan) {
4271 			err = -EBADSLT;
4272 			goto unlock;
4273 		}
4274 	} else {
4275 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4276 		if (!chan) {
4277 			err = -EBADSLT;
4278 			goto unlock;
4279 		}
4280 	}
4281 
4282 	err = 0;
4283 
4284 	l2cap_chan_lock(chan);
4285 
4286 	switch (result) {
4287 	case L2CAP_CR_SUCCESS:
4288 		l2cap_state_change(chan, BT_CONFIG);
4289 		chan->ident = 0;
4290 		chan->dcid = dcid;
4291 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4292 
4293 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4294 			break;
4295 
4296 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4297 			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
4298 		chan->num_conf_req++;
4299 		break;
4300 
4301 	case L2CAP_CR_PEND:
4302 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4303 		break;
4304 
4305 	default:
4306 		l2cap_chan_del(chan, ECONNREFUSED);
4307 		break;
4308 	}
4309 
4310 	l2cap_chan_unlock(chan);
4311 
4312 unlock:
4313 	mutex_unlock(&conn->chan_lock);
4314 
4315 	return err;
4316 }
4317 
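/* Pick the FCS setting once configuration is complete: anything other
 * than ERTM/streaming never carries an FCS, otherwise CRC16 is used
 * unless the remote explicitly asked for FCS=none (CONF_RECV_NO_FCS), in
 * which case chan->fcs is left untouched here.
 */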
4318 static inline void set_default_fcs(struct l2cap_chan *chan)
4319 {
4320 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4321 	 * sides request it.
4322 	 */
4323 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4324 		chan->fcs = L2CAP_FCS_NONE;
4325 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4326 		chan->fcs = L2CAP_FCS_CRC16;
4327 }
4328 
4329 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4330 				    u8 ident, u16 flags)
4331 {
4332 	struct l2cap_conn *conn = chan->conn;
4333 
4334 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4335 	       flags);
4336 
4337 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4338 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4339 
4340 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4341 		       l2cap_build_conf_rsp(chan, data,
4342 					    L2CAP_CONF_SUCCESS, flags), data);
4343 }
4344 
4345 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4346 				   u16 scid, u16 dcid)
4347 {
4348 	struct l2cap_cmd_rej_cid rej;
4349 
4350 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4351 	rej.scid = __cpu_to_le16(scid);
4352 	rej.dcid = __cpu_to_le16(dcid);
4353 
4354 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4355 }
4356 
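/* Handle an incoming Configuration Request.  Option data may be split
 * across several requests using the continuation flag, so fragments are
 * accumulated in chan->conf_req (and rejected if they would overflow it)
 * and only parsed once the final fragment arrives.  The channel becomes
 * ready once both CONF_OUTPUT_DONE and CONF_INPUT_DONE are set.
 */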
4357 static inline int l2cap_config_req(struct l2cap_conn *conn,
4358 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4359 				   u8 *data)
4360 {
4361 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4362 	u16 dcid, flags;
4363 	u8 rsp[64];
4364 	struct l2cap_chan *chan;
4365 	int len, err = 0;
4366 
4367 	if (cmd_len < sizeof(*req))
4368 		return -EPROTO;
4369 
4370 	dcid  = __le16_to_cpu(req->dcid);
4371 	flags = __le16_to_cpu(req->flags);
4372 
4373 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4374 
4375 	chan = l2cap_get_chan_by_scid(conn, dcid);
4376 	if (!chan) {
4377 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4378 		return 0;
4379 	}
4380 
4381 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4382 	    chan->state != BT_CONNECTED) {
4383 		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4384 				       chan->dcid);
4385 		goto unlock;
4386 	}
4387 
4388 	/* Reject if config buffer is too small. */
4389 	len = cmd_len - sizeof(*req);
4390 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
4391 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4392 			       l2cap_build_conf_rsp(chan, rsp,
4393 			       L2CAP_CONF_REJECT, flags), rsp);
4394 		goto unlock;
4395 	}
4396 
4397 	/* Store config. */
4398 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
4399 	chan->conf_len += len;
4400 
4401 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4402 		/* Incomplete config. Send empty response. */
4403 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4404 			       l2cap_build_conf_rsp(chan, rsp,
4405 			       L2CAP_CONF_SUCCESS, flags), rsp);
4406 		goto unlock;
4407 	}
4408 
4409 	/* Complete config. */
4410 	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4411 	if (len < 0) {
4412 		l2cap_send_disconn_req(chan, ECONNRESET);
4413 		goto unlock;
4414 	}
4415 
4416 	chan->ident = cmd->ident;
4417 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4418 	chan->num_conf_rsp++;
4419 
4420 	/* Reset config buffer. */
4421 	chan->conf_len = 0;
4422 
4423 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4424 		goto unlock;
4425 
4426 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4427 		set_default_fcs(chan);
4428 
4429 		if (chan->mode == L2CAP_MODE_ERTM ||
4430 		    chan->mode == L2CAP_MODE_STREAMING)
4431 			err = l2cap_ertm_init(chan);
4432 
4433 		if (err < 0)
4434 			l2cap_send_disconn_req(chan, -err);
4435 		else
4436 			l2cap_chan_ready(chan);
4437 
4438 		goto unlock;
4439 	}
4440 
4441 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4442 		u8 buf[64];
4443 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4444 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4445 		chan->num_conf_req++;
4446 	}
4447 
4448 	/* We got Conf Rsp PENDING from the remote side and assume we also
4449 	 * sent Conf Rsp PENDING in the code above */
4450 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4451 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4452 
4453 		/* check compatibility */
4454 
4455 		/* Send rsp for BR/EDR channel */
4456 		if (!chan->hs_hcon)
4457 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4458 		else
4459 			chan->ident = cmd->ident;
4460 	}
4461 
4462 unlock:
4463 	l2cap_chan_unlock(chan);
4464 	return err;
4465 }
4466 
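/* Handle an incoming Configuration Response.  A PENDING result is
 * remembered in CONF_REM_CONF_PEND, UNKNOWN/UNACCEPT results trigger a
 * renegotiated Configuration Request for at most L2CAP_CONF_MAX_CONF_RSP
 * rounds, and any other failure tears the channel down.
 */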
4467 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4468 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4469 				   u8 *data)
4470 {
4471 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4472 	u16 scid, flags, result;
4473 	struct l2cap_chan *chan;
4474 	int len = cmd_len - sizeof(*rsp);
4475 	int err = 0;
4476 
4477 	if (cmd_len < sizeof(*rsp))
4478 		return -EPROTO;
4479 
4480 	scid   = __le16_to_cpu(rsp->scid);
4481 	flags  = __le16_to_cpu(rsp->flags);
4482 	result = __le16_to_cpu(rsp->result);
4483 
4484 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4485 	       result, len);
4486 
4487 	chan = l2cap_get_chan_by_scid(conn, scid);
4488 	if (!chan)
4489 		return 0;
4490 
4491 	switch (result) {
4492 	case L2CAP_CONF_SUCCESS:
4493 		l2cap_conf_rfc_get(chan, rsp->data, len);
4494 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4495 		break;
4496 
4497 	case L2CAP_CONF_PENDING:
4498 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4499 
4500 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4501 			char buf[64];
4502 
4503 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4504 						   buf, sizeof(buf), &result);
4505 			if (len < 0) {
4506 				l2cap_send_disconn_req(chan, ECONNRESET);
4507 				goto done;
4508 			}
4509 
4510 			if (!chan->hs_hcon) {
4511 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4512 							0);
4513 			} else {
4514 				if (l2cap_check_efs(chan)) {
4515 					amp_create_logical_link(chan);
4516 					chan->ident = cmd->ident;
4517 				}
4518 			}
4519 		}
4520 		goto done;
4521 
4522 	case L2CAP_CONF_UNKNOWN:
4523 	case L2CAP_CONF_UNACCEPT:
4524 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4525 			char req[64];
4526 
4527 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4528 				l2cap_send_disconn_req(chan, ECONNRESET);
4529 				goto done;
4530 			}
4531 
4532 			/* throw out any old stored conf requests */
4533 			result = L2CAP_CONF_SUCCESS;
4534 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4535 						   req, sizeof(req), &result);
4536 			if (len < 0) {
4537 				l2cap_send_disconn_req(chan, ECONNRESET);
4538 				goto done;
4539 			}
4540 
4541 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4542 				       L2CAP_CONF_REQ, len, req);
4543 			chan->num_conf_req++;
4544 			if (result != L2CAP_CONF_SUCCESS)
4545 				goto done;
4546 			break;
4547 		}
4548 		fallthrough;
4549 
4550 	default:
4551 		l2cap_chan_set_err(chan, ECONNRESET);
4552 
4553 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4554 		l2cap_send_disconn_req(chan, ECONNRESET);
4555 		goto done;
4556 	}
4557 
4558 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4559 		goto done;
4560 
4561 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4562 
4563 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4564 		set_default_fcs(chan);
4565 
4566 		if (chan->mode == L2CAP_MODE_ERTM ||
4567 		    chan->mode == L2CAP_MODE_STREAMING)
4568 			err = l2cap_ertm_init(chan);
4569 
4570 		if (err < 0)
4571 			l2cap_send_disconn_req(chan, -err);
4572 		else
4573 			l2cap_chan_ready(chan);
4574 	}
4575 
4576 done:
4577 	l2cap_chan_unlock(chan);
4578 	return err;
4579 }
4580 
4581 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4582 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4583 				       u8 *data)
4584 {
4585 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4586 	struct l2cap_disconn_rsp rsp;
4587 	u16 dcid, scid;
4588 	struct l2cap_chan *chan;
4589 
4590 	if (cmd_len != sizeof(*req))
4591 		return -EPROTO;
4592 
4593 	scid = __le16_to_cpu(req->scid);
4594 	dcid = __le16_to_cpu(req->dcid);
4595 
4596 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4597 
4598 	mutex_lock(&conn->chan_lock);
4599 
4600 	chan = __l2cap_get_chan_by_scid(conn, dcid);
4601 	if (!chan) {
4602 		mutex_unlock(&conn->chan_lock);
4603 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4604 		return 0;
4605 	}
4606 
4607 	l2cap_chan_hold(chan);
4608 	l2cap_chan_lock(chan);
4609 
4610 	rsp.dcid = cpu_to_le16(chan->scid);
4611 	rsp.scid = cpu_to_le16(chan->dcid);
4612 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4613 
4614 	chan->ops->set_shutdown(chan);
4615 
4616 	l2cap_chan_del(chan, ECONNRESET);
4617 
4618 	chan->ops->close(chan);
4619 
4620 	l2cap_chan_unlock(chan);
4621 	l2cap_chan_put(chan);
4622 
4623 	mutex_unlock(&conn->chan_lock);
4624 
4625 	return 0;
4626 }
4627 
4628 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4629 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4630 				       u8 *data)
4631 {
4632 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4633 	u16 dcid, scid;
4634 	struct l2cap_chan *chan;
4635 
4636 	if (cmd_len != sizeof(*rsp))
4637 		return -EPROTO;
4638 
4639 	scid = __le16_to_cpu(rsp->scid);
4640 	dcid = __le16_to_cpu(rsp->dcid);
4641 
4642 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4643 
4644 	mutex_lock(&conn->chan_lock);
4645 
4646 	chan = __l2cap_get_chan_by_scid(conn, scid);
4647 	if (!chan) {
4648 		mutex_unlock(&conn->chan_lock);
4649 		return 0;
4650 	}
4651 
4652 	l2cap_chan_hold(chan);
4653 	l2cap_chan_lock(chan);
4654 
4655 	if (chan->state != BT_DISCONN) {
4656 		l2cap_chan_unlock(chan);
4657 		l2cap_chan_put(chan);
4658 		mutex_unlock(&conn->chan_lock);
4659 		return 0;
4660 	}
4661 
4662 	l2cap_chan_del(chan, 0);
4663 
4664 	chan->ops->close(chan);
4665 
4666 	l2cap_chan_unlock(chan);
4667 	l2cap_chan_put(chan);
4668 
4669 	mutex_unlock(&conn->chan_lock);
4670 
4671 	return 0;
4672 }
4673 
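/* Answer an Information Request.  For the feature mask the 4-byte data
 * field holds a bitmask built roughly as below (assuming ERTM is not
 * disabled and A2MP is enabled):
 *
 *   feat_mask = l2cap_feat_mask | L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
 *               | L2CAP_FEAT_FCS | L2CAP_FEAT_EXT_FLOW
 *               | L2CAP_FEAT_EXT_WINDOW;
 *
 * For fixed channels the 8-byte data field starts with the local fixed
 * channel bitmap; any other type is answered with L2CAP_IR_NOTSUPP.
 */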
4674 static inline int l2cap_information_req(struct l2cap_conn *conn,
4675 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4676 					u8 *data)
4677 {
4678 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4679 	u16 type;
4680 
4681 	if (cmd_len != sizeof(*req))
4682 		return -EPROTO;
4683 
4684 	type = __le16_to_cpu(req->type);
4685 
4686 	BT_DBG("type 0x%4.4x", type);
4687 
4688 	if (type == L2CAP_IT_FEAT_MASK) {
4689 		u8 buf[8];
4690 		u32 feat_mask = l2cap_feat_mask;
4691 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4692 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4693 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4694 		if (!disable_ertm)
4695 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4696 				| L2CAP_FEAT_FCS;
4697 		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4698 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4699 				| L2CAP_FEAT_EXT_WINDOW;
4700 
4701 		put_unaligned_le32(feat_mask, rsp->data);
4702 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4703 			       buf);
4704 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4705 		u8 buf[12];
4706 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4707 
4708 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4709 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4710 		rsp->data[0] = conn->local_fixed_chan;
4711 		memset(rsp->data + 1, 0, 7);
4712 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4713 			       buf);
4714 	} else {
4715 		struct l2cap_info_rsp rsp;
4716 		rsp.type   = cpu_to_le16(type);
4717 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4718 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4719 			       &rsp);
4720 	}
4721 
4722 	return 0;
4723 }
4724 
4725 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4726 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4727 					u8 *data)
4728 {
4729 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4730 	u16 type, result;
4731 
4732 	if (cmd_len < sizeof(*rsp))
4733 		return -EPROTO;
4734 
4735 	type   = __le16_to_cpu(rsp->type);
4736 	result = __le16_to_cpu(rsp->result);
4737 
4738 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4739 
4740 	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
4741 	if (cmd->ident != conn->info_ident ||
4742 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4743 		return 0;
4744 
4745 	cancel_delayed_work(&conn->info_timer);
4746 
4747 	if (result != L2CAP_IR_SUCCESS) {
4748 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4749 		conn->info_ident = 0;
4750 
4751 		l2cap_conn_start(conn);
4752 
4753 		return 0;
4754 	}
4755 
4756 	switch (type) {
4757 	case L2CAP_IT_FEAT_MASK:
4758 		conn->feat_mask = get_unaligned_le32(rsp->data);
4759 
4760 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4761 			struct l2cap_info_req req;
4762 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4763 
4764 			conn->info_ident = l2cap_get_ident(conn);
4765 
4766 			l2cap_send_cmd(conn, conn->info_ident,
4767 				       L2CAP_INFO_REQ, sizeof(req), &req);
4768 		} else {
4769 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4770 			conn->info_ident = 0;
4771 
4772 			l2cap_conn_start(conn);
4773 		}
4774 		break;
4775 
4776 	case L2CAP_IT_FIXED_CHAN:
4777 		conn->remote_fixed_chan = rsp->data[0];
4778 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4779 		conn->info_ident = 0;
4780 
4781 		l2cap_conn_start(conn);
4782 		break;
4783 	}
4784 
4785 	return 0;
4786 }
4787 
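/* Handle an AMP Create Channel Request.  A destination controller id of
 * AMP_ID_BREDR falls back to the regular l2cap_connect() path; any other
 * id must refer to an AMP controller that is up, in which case the new
 * channel is tied to the high-speed link and FCS is disabled.  Invalid
 * controller ids are answered with L2CAP_CR_BAD_AMP.
 */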
4788 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4789 				    struct l2cap_cmd_hdr *cmd,
4790 				    u16 cmd_len, void *data)
4791 {
4792 	struct l2cap_create_chan_req *req = data;
4793 	struct l2cap_create_chan_rsp rsp;
4794 	struct l2cap_chan *chan;
4795 	struct hci_dev *hdev;
4796 	u16 psm, scid;
4797 
4798 	if (cmd_len != sizeof(*req))
4799 		return -EPROTO;
4800 
4801 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4802 		return -EINVAL;
4803 
4804 	psm = le16_to_cpu(req->psm);
4805 	scid = le16_to_cpu(req->scid);
4806 
4807 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4808 
4809 	/* For controller id 0 make BR/EDR connection */
4810 	if (req->amp_id == AMP_ID_BREDR) {
4811 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4812 			      req->amp_id);
4813 		return 0;
4814 	}
4815 
4816 	/* Validate AMP controller id */
4817 	hdev = hci_dev_get(req->amp_id);
4818 	if (!hdev)
4819 		goto error;
4820 
4821 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4822 		hci_dev_put(hdev);
4823 		goto error;
4824 	}
4825 
4826 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4827 			     req->amp_id);
4828 	if (chan) {
4829 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4830 		struct hci_conn *hs_hcon;
4831 
4832 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4833 						  &conn->hcon->dst);
4834 		if (!hs_hcon) {
4835 			hci_dev_put(hdev);
4836 			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4837 					       chan->dcid);
4838 			return 0;
4839 		}
4840 
4841 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4842 
4843 		mgr->bredr_chan = chan;
4844 		chan->hs_hcon = hs_hcon;
4845 		chan->fcs = L2CAP_FCS_NONE;
4846 		conn->mtu = hdev->block_mtu;
4847 	}
4848 
4849 	hci_dev_put(hdev);
4850 
4851 	return 0;
4852 
4853 error:
4854 	rsp.dcid = 0;
4855 	rsp.scid = cpu_to_le16(scid);
4856 	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4857 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4858 
4859 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4860 		       sizeof(rsp), &rsp);
4861 
4862 	return 0;
4863 }
4864 
4865 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4866 {
4867 	struct l2cap_move_chan_req req;
4868 	u8 ident;
4869 
4870 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4871 
4872 	ident = l2cap_get_ident(chan->conn);
4873 	chan->ident = ident;
4874 
4875 	req.icid = cpu_to_le16(chan->scid);
4876 	req.dest_amp_id = dest_amp_id;
4877 
4878 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4879 		       &req);
4880 
4881 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4882 }
4883 
4884 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4885 {
4886 	struct l2cap_move_chan_rsp rsp;
4887 
4888 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4889 
4890 	rsp.icid = cpu_to_le16(chan->dcid);
4891 	rsp.result = cpu_to_le16(result);
4892 
4893 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4894 		       sizeof(rsp), &rsp);
4895 }
4896 
4897 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4898 {
4899 	struct l2cap_move_chan_cfm cfm;
4900 
4901 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4902 
4903 	chan->ident = l2cap_get_ident(chan->conn);
4904 
4905 	cfm.icid = cpu_to_le16(chan->scid);
4906 	cfm.result = cpu_to_le16(result);
4907 
4908 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4909 		       sizeof(cfm), &cfm);
4910 
4911 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4912 }
4913 
4914 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4915 {
4916 	struct l2cap_move_chan_cfm cfm;
4917 
4918 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4919 
4920 	cfm.icid = cpu_to_le16(icid);
4921 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4922 
4923 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4924 		       sizeof(cfm), &cfm);
4925 }
4926 
4927 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4928 					 u16 icid)
4929 {
4930 	struct l2cap_move_chan_cfm_rsp rsp;
4931 
4932 	BT_DBG("icid 0x%4.4x", icid);
4933 
4934 	rsp.icid = cpu_to_le16(icid);
4935 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4936 }
4937 
4938 static void __release_logical_link(struct l2cap_chan *chan)
4939 {
4940 	chan->hs_hchan = NULL;
4941 	chan->hs_hcon = NULL;
4942 
4943 	/* Placeholder - release the logical link */
4944 }
4945 
4946 static void l2cap_logical_fail(struct l2cap_chan *chan)
4947 {
4948 	/* Logical link setup failed */
4949 	if (chan->state != BT_CONNECTED) {
4950 		/* Create channel failure, disconnect */
4951 		l2cap_send_disconn_req(chan, ECONNRESET);
4952 		return;
4953 	}
4954 
4955 	switch (chan->move_role) {
4956 	case L2CAP_MOVE_ROLE_RESPONDER:
4957 		l2cap_move_done(chan);
4958 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4959 		break;
4960 	case L2CAP_MOVE_ROLE_INITIATOR:
4961 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4962 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4963 			/* Remote has only sent pending or
4964 			 * success responses, clean up
4965 			 */
4966 			l2cap_move_done(chan);
4967 		}
4968 
4969 		/* Other amp move states imply that the move
4970 		 * has already aborted
4971 		 */
4972 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4973 		break;
4974 	}
4975 }
4976 
4977 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4978 					struct hci_chan *hchan)
4979 {
4980 	struct l2cap_conf_rsp rsp;
4981 
4982 	chan->hs_hchan = hchan;
4983 	chan->hs_hcon->l2cap_data = chan->conn;
4984 
4985 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4986 
4987 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4988 		int err;
4989 
4990 		set_default_fcs(chan);
4991 
4992 		err = l2cap_ertm_init(chan);
4993 		if (err < 0)
4994 			l2cap_send_disconn_req(chan, -err);
4995 		else
4996 			l2cap_chan_ready(chan);
4997 	}
4998 }
4999 
5000 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
5001 				      struct hci_chan *hchan)
5002 {
5003 	chan->hs_hcon = hchan->conn;
5004 	chan->hs_hcon->l2cap_data = chan->conn;
5005 
5006 	BT_DBG("move_state %d", chan->move_state);
5007 
5008 	switch (chan->move_state) {
5009 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5010 		/* Move confirm will be sent after a success
5011 		 * response is received
5012 		 */
5013 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5014 		break;
5015 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
5016 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5017 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5018 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5019 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5020 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5021 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5022 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5023 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5024 		}
5025 		break;
5026 	default:
5027 		/* Move was not in expected state, free the channel */
5028 		__release_logical_link(chan);
5029 
5030 		chan->move_state = L2CAP_MOVE_STABLE;
5031 	}
5032 }
5033 
5034 /* Call with chan locked */
5035 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
5036 		       u8 status)
5037 {
5038 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
5039 
5040 	if (status) {
5041 		l2cap_logical_fail(chan);
5042 		__release_logical_link(chan);
5043 		return;
5044 	}
5045 
5046 	if (chan->state != BT_CONNECTED) {
5047 		/* Ignore logical link if channel is on BR/EDR */
5048 		if (chan->local_amp_id != AMP_ID_BREDR)
5049 			l2cap_logical_finish_create(chan, hchan);
5050 	} else {
5051 		l2cap_logical_finish_move(chan, hchan);
5052 	}
5053 }
5054 
5055 void l2cap_move_start(struct l2cap_chan *chan)
5056 {
5057 	BT_DBG("chan %p", chan);
5058 
5059 	if (chan->local_amp_id == AMP_ID_BREDR) {
5060 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
5061 			return;
5062 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5063 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5064 		/* Placeholder - start physical link setup */
5065 	} else {
5066 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5067 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5068 		chan->move_id = 0;
5069 		l2cap_move_setup(chan);
5070 		l2cap_send_move_chan_req(chan, 0);
5071 	}
5072 }
5073 
5074 static void l2cap_do_create(struct l2cap_chan *chan, int result,
5075 			    u8 local_amp_id, u8 remote_amp_id)
5076 {
5077 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
5078 	       local_amp_id, remote_amp_id);
5079 
5080 	chan->fcs = L2CAP_FCS_NONE;
5081 
5082 	/* Outgoing channel on AMP */
5083 	if (chan->state == BT_CONNECT) {
5084 		if (result == L2CAP_CR_SUCCESS) {
5085 			chan->local_amp_id = local_amp_id;
5086 			l2cap_send_create_chan_req(chan, remote_amp_id);
5087 		} else {
5088 			/* Revert to BR/EDR connect */
5089 			l2cap_send_conn_req(chan);
5090 		}
5091 
5092 		return;
5093 	}
5094 
5095 	/* Incoming channel on AMP */
5096 	if (__l2cap_no_conn_pending(chan)) {
5097 		struct l2cap_conn_rsp rsp;
5098 		char buf[128];
5099 		rsp.scid = cpu_to_le16(chan->dcid);
5100 		rsp.dcid = cpu_to_le16(chan->scid);
5101 
5102 		if (result == L2CAP_CR_SUCCESS) {
5103 			/* Send successful response */
5104 			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
5105 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5106 		} else {
5107 			/* Send negative response */
5108 			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
5109 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5110 		}
5111 
5112 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
5113 			       sizeof(rsp), &rsp);
5114 
5115 		if (result == L2CAP_CR_SUCCESS) {
5116 			l2cap_state_change(chan, BT_CONFIG);
5117 			set_bit(CONF_REQ_SENT, &chan->conf_state);
5118 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
5119 				       L2CAP_CONF_REQ,
5120 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
5121 			chan->num_conf_req++;
5122 		}
5123 	}
5124 }
5125 
5126 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5127 				   u8 remote_amp_id)
5128 {
5129 	l2cap_move_setup(chan);
5130 	chan->move_id = local_amp_id;
5131 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
5132 
5133 	l2cap_send_move_chan_req(chan, remote_amp_id);
5134 }
5135 
5136 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
5137 {
5138 	struct hci_chan *hchan = NULL;
5139 
5140 	/* Placeholder - get hci_chan for logical link */
5141 
5142 	if (hchan) {
5143 		if (hchan->state == BT_CONNECTED) {
5144 			/* Logical link is ready to go */
5145 			chan->hs_hcon = hchan->conn;
5146 			chan->hs_hcon->l2cap_data = chan->conn;
5147 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5148 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5149 
5150 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5151 		} else {
5152 			/* Wait for logical link to be ready */
5153 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5154 		}
5155 	} else {
5156 		/* Logical link not available */
5157 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
5158 	}
5159 }
5160 
5161 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5162 {
5163 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5164 		u8 rsp_result;
5165 		if (result == -EINVAL)
5166 			rsp_result = L2CAP_MR_BAD_ID;
5167 		else
5168 			rsp_result = L2CAP_MR_NOT_ALLOWED;
5169 
5170 		l2cap_send_move_chan_rsp(chan, rsp_result);
5171 	}
5172 
5173 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
5174 	chan->move_state = L2CAP_MOVE_STABLE;
5175 
5176 	/* Restart data transmission */
5177 	l2cap_ertm_send(chan);
5178 }
5179 
5180 /* Invoke with locked chan */
5181 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5182 {
5183 	u8 local_amp_id = chan->local_amp_id;
5184 	u8 remote_amp_id = chan->remote_amp_id;
5185 
5186 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5187 	       chan, result, local_amp_id, remote_amp_id);
5188 
5189 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
5190 		return;
5191 
5192 	if (chan->state != BT_CONNECTED) {
5193 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5194 	} else if (result != L2CAP_MR_SUCCESS) {
5195 		l2cap_do_move_cancel(chan, result);
5196 	} else {
5197 		switch (chan->move_role) {
5198 		case L2CAP_MOVE_ROLE_INITIATOR:
5199 			l2cap_do_move_initiate(chan, local_amp_id,
5200 					       remote_amp_id);
5201 			break;
5202 		case L2CAP_MOVE_ROLE_RESPONDER:
5203 			l2cap_do_move_respond(chan, result);
5204 			break;
5205 		default:
5206 			l2cap_do_move_cancel(chan, result);
5207 			break;
5208 		}
5209 	}
5210 }
5211 
5212 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5213 					 struct l2cap_cmd_hdr *cmd,
5214 					 u16 cmd_len, void *data)
5215 {
5216 	struct l2cap_move_chan_req *req = data;
5217 	struct l2cap_move_chan_rsp rsp;
5218 	struct l2cap_chan *chan;
5219 	u16 icid = 0;
5220 	u16 result = L2CAP_MR_NOT_ALLOWED;
5221 
5222 	if (cmd_len != sizeof(*req))
5223 		return -EPROTO;
5224 
5225 	icid = le16_to_cpu(req->icid);
5226 
5227 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
5228 
5229 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
5230 		return -EINVAL;
5231 
5232 	chan = l2cap_get_chan_by_dcid(conn, icid);
5233 	if (!chan) {
5234 		rsp.icid = cpu_to_le16(icid);
5235 		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5236 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5237 			       sizeof(rsp), &rsp);
5238 		return 0;
5239 	}
5240 
5241 	chan->ident = cmd->ident;
5242 
5243 	if (chan->scid < L2CAP_CID_DYN_START ||
5244 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5245 	    (chan->mode != L2CAP_MODE_ERTM &&
5246 	     chan->mode != L2CAP_MODE_STREAMING)) {
5247 		result = L2CAP_MR_NOT_ALLOWED;
5248 		goto send_move_response;
5249 	}
5250 
5251 	if (chan->local_amp_id == req->dest_amp_id) {
5252 		result = L2CAP_MR_SAME_ID;
5253 		goto send_move_response;
5254 	}
5255 
5256 	if (req->dest_amp_id != AMP_ID_BREDR) {
5257 		struct hci_dev *hdev;
5258 		hdev = hci_dev_get(req->dest_amp_id);
5259 		if (!hdev || hdev->dev_type != HCI_AMP ||
5260 		    !test_bit(HCI_UP, &hdev->flags)) {
5261 			if (hdev)
5262 				hci_dev_put(hdev);
5263 
5264 			result = L2CAP_MR_BAD_ID;
5265 			goto send_move_response;
5266 		}
5267 		hci_dev_put(hdev);
5268 	}
5269 
5270 	/* Detect a move collision.  Only send a collision response
5271 	 * if this side has "lost", otherwise proceed with the move.
5272 	 * The winner has the larger bd_addr.
5273 	 */
5274 	if ((__chan_is_moving(chan) ||
5275 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5276 	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5277 		result = L2CAP_MR_COLLISION;
5278 		goto send_move_response;
5279 	}
5280 
5281 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5282 	l2cap_move_setup(chan);
5283 	chan->move_id = req->dest_amp_id;
5284 
5285 	if (req->dest_amp_id == AMP_ID_BREDR) {
5286 		/* Moving to BR/EDR */
5287 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5288 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5289 			result = L2CAP_MR_PEND;
5290 		} else {
5291 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5292 			result = L2CAP_MR_SUCCESS;
5293 		}
5294 	} else {
5295 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5296 		/* Placeholder - uncomment when amp functions are available */
5297 		/*amp_accept_physical(chan, req->dest_amp_id);*/
5298 		result = L2CAP_MR_PEND;
5299 	}
5300 
5301 send_move_response:
5302 	l2cap_send_move_chan_rsp(chan, result);
5303 
5304 	l2cap_chan_unlock(chan);
5305 
5306 	return 0;
5307 }
5308 
5309 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5310 {
5311 	struct l2cap_chan *chan;
5312 	struct hci_chan *hchan = NULL;
5313 
5314 	chan = l2cap_get_chan_by_scid(conn, icid);
5315 	if (!chan) {
5316 		l2cap_send_move_chan_cfm_icid(conn, icid);
5317 		return;
5318 	}
5319 
5320 	__clear_chan_timer(chan);
5321 	if (result == L2CAP_MR_PEND)
5322 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5323 
5324 	switch (chan->move_state) {
5325 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5326 		/* Move confirm will be sent when logical link
5327 		 * is complete.
5328 		 */
5329 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5330 		break;
5331 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5332 		if (result == L2CAP_MR_PEND) {
5333 			break;
5334 		} else if (test_bit(CONN_LOCAL_BUSY,
5335 				    &chan->conn_state)) {
5336 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5337 		} else {
5338 			/* Logical link is up or moving to BR/EDR,
5339 			 * proceed with move
5340 			 */
5341 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5342 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5343 		}
5344 		break;
5345 	case L2CAP_MOVE_WAIT_RSP:
5346 		/* Moving to AMP */
5347 		if (result == L2CAP_MR_SUCCESS) {
5348 			/* Remote is ready, send confirm immediately
5349 			 * after logical link is ready
5350 			 */
5351 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5352 		} else {
5353 			/* Both logical link and move success
5354 			 * are required to confirm
5355 			 */
5356 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5357 		}
5358 
5359 		/* Placeholder - get hci_chan for logical link */
5360 		if (!hchan) {
5361 			/* Logical link not available */
5362 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5363 			break;
5364 		}
5365 
5366 		/* If the logical link is not yet connected, do not
5367 		 * send confirmation.
5368 		 */
5369 		if (hchan->state != BT_CONNECTED)
5370 			break;
5371 
5372 		/* Logical link is already ready to go */
5373 
5374 		chan->hs_hcon = hchan->conn;
5375 		chan->hs_hcon->l2cap_data = chan->conn;
5376 
5377 		if (result == L2CAP_MR_SUCCESS) {
5378 			/* Can confirm now */
5379 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5380 		} else {
5381 			/* Now only need move success
5382 			 * to confirm
5383 			 */
5384 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5385 		}
5386 
5387 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5388 		break;
5389 	default:
5390 		/* Any other amp move state means the move failed. */
5391 		chan->move_id = chan->local_amp_id;
5392 		l2cap_move_done(chan);
5393 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5394 	}
5395 
5396 	l2cap_chan_unlock(chan);
5397 }
5398 
5399 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5400 			    u16 result)
5401 {
5402 	struct l2cap_chan *chan;
5403 
5404 	chan = l2cap_get_chan_by_ident(conn, ident);
5405 	if (!chan) {
5406 		/* Could not locate channel, icid is best guess */
5407 		l2cap_send_move_chan_cfm_icid(conn, icid);
5408 		return;
5409 	}
5410 
5411 	__clear_chan_timer(chan);
5412 
5413 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5414 		if (result == L2CAP_MR_COLLISION) {
5415 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5416 		} else {
5417 			/* Cleanup - cancel move */
5418 			chan->move_id = chan->local_amp_id;
5419 			l2cap_move_done(chan);
5420 		}
5421 	}
5422 
5423 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5424 
5425 	l2cap_chan_unlock(chan);
5426 }
5427 
5428 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5429 				  struct l2cap_cmd_hdr *cmd,
5430 				  u16 cmd_len, void *data)
5431 {
5432 	struct l2cap_move_chan_rsp *rsp = data;
5433 	u16 icid, result;
5434 
5435 	if (cmd_len != sizeof(*rsp))
5436 		return -EPROTO;
5437 
5438 	icid = le16_to_cpu(rsp->icid);
5439 	result = le16_to_cpu(rsp->result);
5440 
5441 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5442 
5443 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5444 		l2cap_move_continue(conn, icid, result);
5445 	else
5446 		l2cap_move_fail(conn, cmd->ident, icid, result);
5447 
5448 	return 0;
5449 }
5450 
5451 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5452 				      struct l2cap_cmd_hdr *cmd,
5453 				      u16 cmd_len, void *data)
5454 {
5455 	struct l2cap_move_chan_cfm *cfm = data;
5456 	struct l2cap_chan *chan;
5457 	u16 icid, result;
5458 
5459 	if (cmd_len != sizeof(*cfm))
5460 		return -EPROTO;
5461 
5462 	icid = le16_to_cpu(cfm->icid);
5463 	result = le16_to_cpu(cfm->result);
5464 
5465 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5466 
5467 	chan = l2cap_get_chan_by_dcid(conn, icid);
5468 	if (!chan) {
5469 		/* Spec requires a response even if the icid was not found */
5470 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5471 		return 0;
5472 	}
5473 
5474 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5475 		if (result == L2CAP_MC_CONFIRMED) {
5476 			chan->local_amp_id = chan->move_id;
5477 			if (chan->local_amp_id == AMP_ID_BREDR)
5478 				__release_logical_link(chan);
5479 		} else {
5480 			chan->move_id = chan->local_amp_id;
5481 		}
5482 
5483 		l2cap_move_done(chan);
5484 	}
5485 
5486 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5487 
5488 	l2cap_chan_unlock(chan);
5489 
5490 	return 0;
5491 }
5492 
5493 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5494 						 struct l2cap_cmd_hdr *cmd,
5495 						 u16 cmd_len, void *data)
5496 {
5497 	struct l2cap_move_chan_cfm_rsp *rsp = data;
5498 	struct l2cap_chan *chan;
5499 	u16 icid;
5500 
5501 	if (cmd_len != sizeof(*rsp))
5502 		return -EPROTO;
5503 
5504 	icid = le16_to_cpu(rsp->icid);
5505 
5506 	BT_DBG("icid 0x%4.4x", icid);
5507 
5508 	chan = l2cap_get_chan_by_scid(conn, icid);
5509 	if (!chan)
5510 		return 0;
5511 
5512 	__clear_chan_timer(chan);
5513 
5514 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5515 		chan->local_amp_id = chan->move_id;
5516 
5517 		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5518 			__release_logical_link(chan);
5519 
5520 		l2cap_move_done(chan);
5521 	}
5522 
5523 	l2cap_chan_unlock(chan);
5524 
5525 	return 0;
5526 }
5527 
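/* Handle an LE Connection Parameter Update Request.  Only the central
 * (HCI_ROLE_MASTER) may accept it; the requested interval range, latency
 * and supervision timeout are validated with hci_check_conn_params() and,
 * if acceptable, applied via hci_le_conn_update() and reported through
 * mgmt_new_conn_param().
 */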
5528 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5529 					      struct l2cap_cmd_hdr *cmd,
5530 					      u16 cmd_len, u8 *data)
5531 {
5532 	struct hci_conn *hcon = conn->hcon;
5533 	struct l2cap_conn_param_update_req *req;
5534 	struct l2cap_conn_param_update_rsp rsp;
5535 	u16 min, max, latency, to_multiplier;
5536 	int err;
5537 
5538 	if (hcon->role != HCI_ROLE_MASTER)
5539 		return -EINVAL;
5540 
5541 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5542 		return -EPROTO;
5543 
5544 	req = (struct l2cap_conn_param_update_req *) data;
5545 	min		= __le16_to_cpu(req->min);
5546 	max		= __le16_to_cpu(req->max);
5547 	latency		= __le16_to_cpu(req->latency);
5548 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5549 
5550 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5551 	       min, max, latency, to_multiplier);
5552 
5553 	memset(&rsp, 0, sizeof(rsp));
5554 
5555 	err = hci_check_conn_params(min, max, latency, to_multiplier);
5556 	if (err)
5557 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5558 	else
5559 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5560 
5561 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5562 		       sizeof(rsp), &rsp);
5563 
5564 	if (!err) {
5565 		u8 store_hint;
5566 
5567 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5568 						to_multiplier);
5569 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5570 				    store_hint, min, max, latency,
5571 				    to_multiplier);
5572 
5573 	}
5574 
5575 	return 0;
5576 }
5577 
5578 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5579 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5580 				u8 *data)
5581 {
5582 	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5583 	struct hci_conn *hcon = conn->hcon;
5584 	u16 dcid, mtu, mps, credits, result;
5585 	struct l2cap_chan *chan;
5586 	int err, sec_level;
5587 
5588 	if (cmd_len < sizeof(*rsp))
5589 		return -EPROTO;
5590 
5591 	dcid    = __le16_to_cpu(rsp->dcid);
5592 	mtu     = __le16_to_cpu(rsp->mtu);
5593 	mps     = __le16_to_cpu(rsp->mps);
5594 	credits = __le16_to_cpu(rsp->credits);
5595 	result  = __le16_to_cpu(rsp->result);
5596 
5597 	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
5598 					   dcid < L2CAP_CID_DYN_START ||
5599 					   dcid > L2CAP_CID_LE_DYN_END))
5600 		return -EPROTO;
5601 
5602 	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5603 	       dcid, mtu, mps, credits, result);
5604 
5605 	mutex_lock(&conn->chan_lock);
5606 
5607 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5608 	if (!chan) {
5609 		err = -EBADSLT;
5610 		goto unlock;
5611 	}
5612 
5613 	err = 0;
5614 
5615 	l2cap_chan_lock(chan);
5616 
5617 	switch (result) {
5618 	case L2CAP_CR_LE_SUCCESS:
5619 		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5620 			err = -EBADSLT;
5621 			break;
5622 		}
5623 
5624 		chan->ident = 0;
5625 		chan->dcid = dcid;
5626 		chan->omtu = mtu;
5627 		chan->remote_mps = mps;
5628 		chan->tx_credits = credits;
5629 		l2cap_chan_ready(chan);
5630 		break;
5631 
5632 	case L2CAP_CR_LE_AUTHENTICATION:
5633 	case L2CAP_CR_LE_ENCRYPTION:
5634 		/* If we already have MITM protection we can't do
5635 		 * anything.
5636 		 */
5637 		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5638 			l2cap_chan_del(chan, ECONNREFUSED);
5639 			break;
5640 		}
5641 
5642 		sec_level = hcon->sec_level + 1;
5643 		if (chan->sec_level < sec_level)
5644 			chan->sec_level = sec_level;
5645 
5646 		/* We'll need to send a new Connect Request */
5647 		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5648 
5649 		smp_conn_security(hcon, chan->sec_level);
5650 		break;
5651 
5652 	default:
5653 		l2cap_chan_del(chan, ECONNREFUSED);
5654 		break;
5655 	}
5656 
5657 	l2cap_chan_unlock(chan);
5658 
5659 unlock:
5660 	mutex_unlock(&conn->chan_lock);
5661 
5662 	return err;
5663 }
5664 
5665 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5666 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5667 				      u8 *data)
5668 {
5669 	int err = 0;
5670 
5671 	switch (cmd->code) {
5672 	case L2CAP_COMMAND_REJ:
5673 		l2cap_command_rej(conn, cmd, cmd_len, data);
5674 		break;
5675 
5676 	case L2CAP_CONN_REQ:
5677 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5678 		break;
5679 
5680 	case L2CAP_CONN_RSP:
5681 	case L2CAP_CREATE_CHAN_RSP:
5682 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5683 		break;
5684 
5685 	case L2CAP_CONF_REQ:
5686 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5687 		break;
5688 
5689 	case L2CAP_CONF_RSP:
5690 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5691 		break;
5692 
5693 	case L2CAP_DISCONN_REQ:
5694 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5695 		break;
5696 
5697 	case L2CAP_DISCONN_RSP:
5698 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5699 		break;
5700 
5701 	case L2CAP_ECHO_REQ:
5702 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5703 		break;
5704 
5705 	case L2CAP_ECHO_RSP:
5706 		break;
5707 
5708 	case L2CAP_INFO_REQ:
5709 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5710 		break;
5711 
5712 	case L2CAP_INFO_RSP:
5713 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5714 		break;
5715 
5716 	case L2CAP_CREATE_CHAN_REQ:
5717 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5718 		break;
5719 
5720 	case L2CAP_MOVE_CHAN_REQ:
5721 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5722 		break;
5723 
5724 	case L2CAP_MOVE_CHAN_RSP:
5725 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5726 		break;
5727 
5728 	case L2CAP_MOVE_CHAN_CFM:
5729 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5730 		break;
5731 
5732 	case L2CAP_MOVE_CHAN_CFM_RSP:
5733 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5734 		break;
5735 
5736 	default:
5737 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5738 		err = -EINVAL;
5739 		break;
5740 	}
5741 
5742 	return err;
5743 }
5744 
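/* Handle an LE credit based Connection Request.  MTU and MPS must both
 * be at least 23, the SCID must lie in the LE dynamic range and be
 * unused, and the link must satisfy the listener's security level.  The
 * response advertises our local imtu/mps and initial rx credits, unless
 * FLAG_DEFER_SETUP postpones the response (see
 * __l2cap_le_connect_rsp_defer() above).
 */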
5745 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5746 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5747 				u8 *data)
5748 {
5749 	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5750 	struct l2cap_le_conn_rsp rsp;
5751 	struct l2cap_chan *chan, *pchan;
5752 	u16 dcid, scid, credits, mtu, mps;
5753 	__le16 psm;
5754 	u8 result;
5755 
5756 	if (cmd_len != sizeof(*req))
5757 		return -EPROTO;
5758 
5759 	scid = __le16_to_cpu(req->scid);
5760 	mtu  = __le16_to_cpu(req->mtu);
5761 	mps  = __le16_to_cpu(req->mps);
5762 	psm  = req->psm;
5763 	dcid = 0;
5764 	credits = 0;
5765 
5766 	if (mtu < 23 || mps < 23)
5767 		return -EPROTO;
5768 
5769 	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5770 	       scid, mtu, mps);
5771 
5772 	/* Check if we have socket listening on psm */
5773 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5774 					 &conn->hcon->dst, LE_LINK);
5775 	if (!pchan) {
5776 		result = L2CAP_CR_LE_BAD_PSM;
5777 		chan = NULL;
5778 		goto response;
5779 	}
5780 
5781 	mutex_lock(&conn->chan_lock);
5782 	l2cap_chan_lock(pchan);
5783 
5784 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5785 				     SMP_ALLOW_STK)) {
5786 		result = L2CAP_CR_LE_AUTHENTICATION;
5787 		chan = NULL;
5788 		goto response_unlock;
5789 	}
5790 
5791 	/* Check for valid dynamic CID range */
5792 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5793 		result = L2CAP_CR_LE_INVALID_SCID;
5794 		chan = NULL;
5795 		goto response_unlock;
5796 	}
5797 
5798 	/* Check if we already have channel with that dcid */
5799 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
5800 		result = L2CAP_CR_LE_SCID_IN_USE;
5801 		chan = NULL;
5802 		goto response_unlock;
5803 	}
5804 
5805 	chan = pchan->ops->new_connection(pchan);
5806 	if (!chan) {
5807 		result = L2CAP_CR_LE_NO_MEM;
5808 		goto response_unlock;
5809 	}
5810 
5811 	bacpy(&chan->src, &conn->hcon->src);
5812 	bacpy(&chan->dst, &conn->hcon->dst);
5813 	chan->src_type = bdaddr_src_type(conn->hcon);
5814 	chan->dst_type = bdaddr_dst_type(conn->hcon);
5815 	chan->psm  = psm;
5816 	chan->dcid = scid;
5817 	chan->omtu = mtu;
5818 	chan->remote_mps = mps;
5819 
5820 	__l2cap_chan_add(conn, chan);
5821 
5822 	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
5823 
5824 	dcid = chan->scid;
5825 	credits = chan->rx_credits;
5826 
5827 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5828 
5829 	chan->ident = cmd->ident;
5830 
5831 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5832 		l2cap_state_change(chan, BT_CONNECT2);
5833 		/* The following result value is actually not defined
5834 		 * for LE CoC but we use it to let the function know
5835 		 * that it should bail out after doing its cleanup
5836 		 * instead of sending a response.
5837 		 */
5838 		result = L2CAP_CR_PEND;
5839 		chan->ops->defer(chan);
5840 	} else {
5841 		l2cap_chan_ready(chan);
5842 		result = L2CAP_CR_LE_SUCCESS;
5843 	}
5844 
5845 response_unlock:
5846 	l2cap_chan_unlock(pchan);
5847 	mutex_unlock(&conn->chan_lock);
5848 	l2cap_chan_put(pchan);
5849 
5850 	if (result == L2CAP_CR_PEND)
5851 		return 0;
5852 
5853 response:
5854 	if (chan) {
5855 		rsp.mtu = cpu_to_le16(chan->imtu);
5856 		rsp.mps = cpu_to_le16(chan->mps);
5857 	} else {
5858 		rsp.mtu = 0;
5859 		rsp.mps = 0;
5860 	}
5861 
5862 	rsp.dcid    = cpu_to_le16(dcid);
5863 	rsp.credits = cpu_to_le16(credits);
5864 	rsp.result  = cpu_to_le16(result);
5865 
5866 	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5867 
5868 	return 0;
5869 }
5870 
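/* Handle an LE Flow Control Credit packet.  The peer may only raise the
 * outstanding credit count up to LE_FLOWCTL_MAX_CREDITS in total; for
 * example, if chan->tx_credits is already 100, any grant larger than
 * LE_FLOWCTL_MAX_CREDITS - 100 is treated as an overflow and the channel
 * is disconnected.  Otherwise the new credits resume stalled sending.
 */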
5871 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5872 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5873 				   u8 *data)
5874 {
5875 	struct l2cap_le_credits *pkt;
5876 	struct l2cap_chan *chan;
5877 	u16 cid, credits, max_credits;
5878 
5879 	if (cmd_len != sizeof(*pkt))
5880 		return -EPROTO;
5881 
5882 	pkt = (struct l2cap_le_credits *) data;
5883 	cid	= __le16_to_cpu(pkt->cid);
5884 	credits	= __le16_to_cpu(pkt->credits);
5885 
5886 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5887 
5888 	chan = l2cap_get_chan_by_dcid(conn, cid);
5889 	if (!chan)
5890 		return -EBADSLT;
5891 
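	/* Credits are cumulative and must never exceed LE_FLOWCTL_MAX_CREDITS,
	 * so an increment that would push the total past the limit is a
	 * protocol violation.
	 */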
5892 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5893 	if (credits > max_credits) {
5894 		BT_ERR("LE credits overflow");
5895 		l2cap_send_disconn_req(chan, ECONNRESET);
5896 		l2cap_chan_unlock(chan);
5897 
5898 		/* Return 0 so that we don't trigger an unnecessary
5899 		 * command reject packet.
5900 		 */
5901 		return 0;
5902 	}
5903 
5904 	chan->tx_credits += credits;
5905 
5906 	/* Resume sending */
5907 	l2cap_le_flowctl_send(chan);
5908 
5909 	if (chan->tx_credits)
5910 		chan->ops->resume(chan);
5911 
5912 	l2cap_chan_unlock(chan);
5913 
5914 	return 0;
5915 }
5916 
5917 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
5918 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5919 				       u8 *data)
5920 {
5921 	struct l2cap_ecred_conn_req *req = (void *) data;
5922 	struct {
5923 		struct l2cap_ecred_conn_rsp rsp;
5924 		__le16 dcid[5];
5925 	} __packed pdu;
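	/* An Enhanced Credit Based connection request carries at most five
	 * source CIDs, so the response needs room for up to five matching
	 * destination CIDs.
	 */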
5926 	struct l2cap_chan *chan, *pchan;
5927 	u16 mtu, mps;
5928 	__le16 psm;
5929 	u8 result, len = 0;
5930 	int i, num_scid;
5931 	bool defer = false;

	/* Zero the whole response up front so that early error paths do not
	 * send uninitialized stack data back to the peer.
	 */
	memset(&pdu, 0, sizeof(pdu));
5932 
5933 	if (!enable_ecred)
5934 		return -EINVAL;
5935 
5936 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
5937 		result = L2CAP_CR_LE_INVALID_PARAMS;
5938 		goto response;
5939 	}
5940 
5941 	mtu  = __le16_to_cpu(req->mtu);
5942 	mps  = __le16_to_cpu(req->mps);
5943 
5944 	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
5945 		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
5946 		goto response;
5947 	}
5948 
5949 	psm  = req->psm;
5950 
5951 	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
5952 
5955 	/* Check if we have a socket listening on the PSM */
5956 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5957 					 &conn->hcon->dst, LE_LINK);
5958 	if (!pchan) {
5959 		result = L2CAP_CR_LE_BAD_PSM;
5960 		goto response;
5961 	}
5962 
5963 	mutex_lock(&conn->chan_lock);
5964 	l2cap_chan_lock(pchan);
5965 
5966 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5967 				     SMP_ALLOW_STK)) {
5968 		result = L2CAP_CR_LE_AUTHENTICATION;
5969 		goto unlock;
5970 	}
5971 
5972 	result = L2CAP_CR_LE_SUCCESS;
5973 	cmd_len -= sizeof(*req);
5974 	num_scid = cmd_len / sizeof(u16);

	/* The response has room for at most ARRAY_SIZE(pdu.dcid) destination
	 * CIDs; reject requests listing more source CIDs so pdu.dcid[] is
	 * never written out of bounds.
	 */
	if (num_scid > ARRAY_SIZE(pdu.dcid)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto unlock;
	}
5975 
5976 	for (i = 0; i < num_scid; i++) {
5977 		u16 scid = __le16_to_cpu(req->scid[i]);
5978 
5979 		BT_DBG("scid[%d] 0x%4.4x", i, scid);
5980 
5981 		pdu.dcid[i] = 0x0000;
5982 		len += sizeof(*pdu.dcid);
5983 
5984 		/* Check for valid dynamic CID range */
5985 		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5986 			result = L2CAP_CR_LE_INVALID_SCID;
5987 			continue;
5988 		}
5989 
5990 		/* Check if we already have a channel with that dcid */
5991 		if (__l2cap_get_chan_by_dcid(conn, scid)) {
5992 			result = L2CAP_CR_LE_SCID_IN_USE;
5993 			continue;
5994 		}
5995 
5996 		chan = pchan->ops->new_connection(pchan);
5997 		if (!chan) {
5998 			result = L2CAP_CR_LE_NO_MEM;
5999 			continue;
6000 		}
6001 
6002 		bacpy(&chan->src, &conn->hcon->src);
6003 		bacpy(&chan->dst, &conn->hcon->dst);
6004 		chan->src_type = bdaddr_src_type(conn->hcon);
6005 		chan->dst_type = bdaddr_dst_type(conn->hcon);
6006 		chan->psm  = psm;
6007 		chan->dcid = scid;
6008 		chan->omtu = mtu;
6009 		chan->remote_mps = mps;
6010 
6011 		__l2cap_chan_add(conn, chan);
6012 
6013 		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
6014 
6015 		/* Init response: the same MTU, MPS and initial credits apply
		 * to every channel in this response, so fill them in only once.
		 */
6016 		if (!pdu.rsp.credits) {
6017 			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
6018 			pdu.rsp.mps = cpu_to_le16(chan->mps);
6019 			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
6020 		}
6021 
6022 		pdu.dcid[i] = cpu_to_le16(chan->scid);
6023 
6024 		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
6025 
6026 		chan->ident = cmd->ident;
6027 
6028 		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6029 			l2cap_state_change(chan, BT_CONNECT2);
6030 			defer = true;
6031 			chan->ops->defer(chan);
6032 		} else {
6033 			l2cap_chan_ready(chan);
6034 		}
6035 	}
6036 
6037 unlock:
6038 	l2cap_chan_unlock(pchan);
6039 	mutex_unlock(&conn->chan_lock);
6040 	l2cap_chan_put(pchan);
6041 
6042 response:
6043 	pdu.rsp.result = cpu_to_le16(result);
6044 
6045 	if (defer)
6046 		return 0;
6047 
6048 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
6049 		       sizeof(pdu.rsp) + len, &pdu);
6050 
6051 	return 0;
6052 }
6053 
6054 static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
6055 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6056 				       u8 *data)
6057 {
6058 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6059 	struct hci_conn *hcon = conn->hcon;
6060 	u16 mtu, mps, credits, result;
6061 	struct l2cap_chan *chan;
6062 	int err = 0, sec_level;
6063 	int i = 0;
6064 
6065 	if (cmd_len < sizeof(*rsp))
6066 		return -EPROTO;
6067 
6068 	mtu     = __le16_to_cpu(rsp->mtu);
6069 	mps     = __le16_to_cpu(rsp->mps);
6070 	credits = __le16_to_cpu(rsp->credits);
6071 	result  = __le16_to_cpu(rsp->result);
6072 
6073 	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
6074 	       result);
6075 
6076 	mutex_lock(&conn->chan_lock);
6077 
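	/* After the fixed part of the response, cmd_len covers only the list
	 * of returned destination CIDs.
	 */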
6078 	cmd_len -= sizeof(*rsp);
6079 
6080 	list_for_each_entry(chan, &conn->chan_l, list) {
6081 		u16 dcid;
6082 
6083 		if (chan->ident != cmd->ident ||
6084 		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
6085 		    chan->state == BT_CONNECTED)
6086 			continue;
6087 
6088 		l2cap_chan_lock(chan);
6089 
6090 		/* Check that there is a dcid for each pending channel */
6091 		if (cmd_len < sizeof(dcid)) {
6092 			l2cap_chan_del(chan, ECONNREFUSED);
6093 			l2cap_chan_unlock(chan);
6094 			continue;
6095 		}
6096 
6097 		dcid = __le16_to_cpu(rsp->dcid[i++]);
6098 		cmd_len -= sizeof(u16);
6099 
6100 		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);
6101 
6102 		/* Check if dcid is already in use */
6103 		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
6104 			/* If a device receives a
6105 			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
6106 			 * already-assigned Destination CID, then both the
6107 			 * original channel and the new channel shall be
6108 			 * immediately discarded and not used.
6109 			 */
6110 			l2cap_chan_del(chan, ECONNREFUSED);
6111 			l2cap_chan_unlock(chan);
6112 			chan = __l2cap_get_chan_by_dcid(conn, dcid);
6113 			l2cap_chan_lock(chan);
6114 			l2cap_chan_del(chan, ECONNRESET);
6115 			l2cap_chan_unlock(chan);
6116 			continue;
6117 		}
6118 
6119 		switch (result) {
6120 		case L2CAP_CR_LE_AUTHENTICATION:
6121 		case L2CAP_CR_LE_ENCRYPTION:
6122 			/* If we already have MITM protection we can't do
6123 			 * anything.
6124 			 */
6125 			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
6126 				l2cap_chan_del(chan, ECONNREFUSED);
6127 				break;
6128 			}
6129 
6130 			sec_level = hcon->sec_level + 1;
6131 			if (chan->sec_level < sec_level)
6132 				chan->sec_level = sec_level;
6133 
6134 			/* We'll need to send a new Connect Request */
6135 			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
6136 
6137 			smp_conn_security(hcon, chan->sec_level);
6138 			break;
6139 
6140 		case L2CAP_CR_LE_BAD_PSM:
6141 			l2cap_chan_del(chan, ECONNREFUSED);
6142 			break;
6143 
6144 		default:
6145 			/* If dcid was not set it means the channel was refused */
6146 			if (!dcid) {
6147 				l2cap_chan_del(chan, ECONNREFUSED);
6148 				break;
6149 			}
6150 
6151 			chan->ident = 0;
6152 			chan->dcid = dcid;
6153 			chan->omtu = mtu;
6154 			chan->remote_mps = mps;
6155 			chan->tx_credits = credits;
6156 			l2cap_chan_ready(chan);
6157 			break;
6158 		}
6159 
6160 		l2cap_chan_unlock(chan);
6161 	}
6162 
6163 	mutex_unlock(&conn->chan_lock);
6164 
6165 	return err;
6166 }
6167 
6168 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6169 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6170 					 u8 *data)
6171 {
6172 	struct l2cap_ecred_reconf_req *req = (void *) data;
6173 	struct l2cap_ecred_reconf_rsp rsp;
6174 	u16 mtu, mps, result;
6175 	struct l2cap_chan *chan;
6176 	int i, num_scid;
6177 
6178 	if (!enable_ecred)
6179 		return -EINVAL;
6180 
6181 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
6182 		result = L2CAP_CR_LE_INVALID_PARAMS;
6183 		goto respond;
6184 	}
6185 
6186 	mtu = __le16_to_cpu(req->mtu);
6187 	mps = __le16_to_cpu(req->mps);
6188 
6189 	BT_DBG("mtu %u mps %u", mtu, mps);
6190 
6191 	if (mtu < L2CAP_ECRED_MIN_MTU) {
6192 		result = L2CAP_RECONF_INVALID_MTU;
6193 		goto respond;
6194 	}
6195 
6196 	if (mps < L2CAP_ECRED_MIN_MPS) {
6197 		result = L2CAP_RECONF_INVALID_MPS;
6198 		goto respond;
6199 	}
6200 
6201 	cmd_len -= sizeof(*req);
6202 	num_scid = cmd_len / sizeof(u16);
6203 	result = L2CAP_RECONF_SUCCESS;
6204 
6205 	for (i = 0; i < num_scid; i++) {
6206 		u16 scid;
6207 
6208 		scid = __le16_to_cpu(req->scid[i]);
6209 		if (!scid)
6210 			return -EPROTO;
6211 
6212 		chan = __l2cap_get_chan_by_dcid(conn, scid);
6213 		if (!chan)
6214 			continue;
6215 
6216 		/* If the MTU value is decreased for any of the included
6217 		 * channels, then the receiver shall disconnect all
6218 		 * included channels.
6219 		 */
6220 		if (chan->omtu > mtu) {
6221 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
6222 			       chan->omtu, mtu);
6223 			result = L2CAP_RECONF_INVALID_MTU;
6224 		}
6225 
6226 		chan->omtu = mtu;
6227 		chan->remote_mps = mps;
6228 	}
6229 
6230 respond:
6231 	rsp.result = cpu_to_le16(result);
6232 
6233 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
6234 		       &rsp);
6235 
6236 	return 0;
6237 }
6238 
6239 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6240 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6241 					 u8 *data)
6242 {
6243 	struct l2cap_chan *chan;
6244 	struct l2cap_ecred_reconf_rsp *rsp = (void *) data;
6245 	u16 result;
6246 
6247 	if (cmd_len < sizeof(*rsp))
6248 		return -EPROTO;
6249 
6250 	result = __le16_to_cpu(rsp->result);
6251 
6252 	BT_DBG("result 0x%4.4x", result);
6253 
6254 	if (!result)
6255 		return 0;
6256 
6257 	list_for_each_entry(chan, &conn->chan_l, list) {
6258 		if (chan->ident != cmd->ident)
6259 			continue;
6260 
6261 		l2cap_chan_del(chan, ECONNRESET);
6262 	}
6263 
6264 	return 0;
6265 }
6266 
6267 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6268 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6269 				       u8 *data)
6270 {
6271 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6272 	struct l2cap_chan *chan;
6273 
6274 	if (cmd_len < sizeof(*rej))
6275 		return -EPROTO;
6276 
6277 	mutex_lock(&conn->chan_lock);
6278 
6279 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6280 	if (!chan)
6281 		goto done;
6282 
6283 	l2cap_chan_lock(chan);
6284 	l2cap_chan_del(chan, ECONNREFUSED);
6285 	l2cap_chan_unlock(chan);
6286 
6287 done:
6288 	mutex_unlock(&conn->chan_lock);
6289 	return 0;
6290 }
6291 
6292 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
6293 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6294 				   u8 *data)
6295 {
6296 	int err = 0;
6297 
6298 	switch (cmd->code) {
6299 	case L2CAP_COMMAND_REJ:
6300 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
6301 		break;
6302 
6303 	case L2CAP_CONN_PARAM_UPDATE_REQ:
6304 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
6305 		break;
6306 
6307 	case L2CAP_CONN_PARAM_UPDATE_RSP:
6308 		break;
6309 
6310 	case L2CAP_LE_CONN_RSP:
6311 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
6312 		break;
6313 
6314 	case L2CAP_LE_CONN_REQ:
6315 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
6316 		break;
6317 
6318 	case L2CAP_LE_CREDITS:
6319 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
6320 		break;
6321 
6322 	case L2CAP_ECRED_CONN_REQ:
6323 		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
6324 		break;
6325 
6326 	case L2CAP_ECRED_CONN_RSP:
6327 		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
6328 		break;
6329 
6330 	case L2CAP_ECRED_RECONF_REQ:
6331 		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
6332 		break;
6333 
6334 	case L2CAP_ECRED_RECONF_RSP:
6335 		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
6336 		break;
6337 
6338 	case L2CAP_DISCONN_REQ:
6339 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
6340 		break;
6341 
6342 	case L2CAP_DISCONN_RSP:
6343 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
6344 		break;
6345 
6346 	default:
6347 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
6348 		err = -EINVAL;
6349 		break;
6350 	}
6351 
6352 	return err;
6353 }
6354 
6355 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
6356 					struct sk_buff *skb)
6357 {
6358 	struct hci_conn *hcon = conn->hcon;
6359 	struct l2cap_cmd_hdr *cmd;
6360 	u16 len;
6361 	int err;
6362 
6363 	if (hcon->type != LE_LINK)
6364 		goto drop;
6365 
6366 	if (skb->len < L2CAP_CMD_HDR_SIZE)
6367 		goto drop;
6368 
6369 	cmd = (void *) skb->data;
6370 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6371 
6372 	len = le16_to_cpu(cmd->len);
6373 
6374 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
6375 
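	/* The LE signaling channel carries exactly one command per frame, so
	 * the length must match the remaining data and the identifier must be
	 * non-zero.
	 */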
6376 	if (len != skb->len || !cmd->ident) {
6377 		BT_DBG("corrupted command");
6378 		goto drop;
6379 	}
6380 
6381 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
6382 	if (err) {
6383 		struct l2cap_cmd_rej_unk rej;
6384 
6385 		BT_ERR("LE signaling command 0x%2.2x failed (%d)", cmd->code, err);
6386 
6387 		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6388 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
6389 			       sizeof(rej), &rej);
6390 	}
6391 
6392 drop:
6393 	kfree_skb(skb);
6394 }
6395 
6396 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
6397 				     struct sk_buff *skb)
6398 {
6399 	struct hci_conn *hcon = conn->hcon;
6400 	struct l2cap_cmd_hdr *cmd;
6401 	int err;
6402 
6403 	l2cap_raw_recv(conn, skb);
6404 
6405 	if (hcon->type != ACL_LINK)
6406 		goto drop;
6407 
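	/* A BR/EDR signaling frame may contain several concatenated commands,
	 * so process them one at a time.
	 */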
6408 	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
6409 		u16 len;
6410 
6411 		cmd = (void *) skb->data;
6412 		skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6413 
6414 		len = le16_to_cpu(cmd->len);
6415 
6416 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
6417 		       cmd->ident);
6418 
6419 		if (len > skb->len || !cmd->ident) {
6420 			BT_DBG("corrupted command");
6421 			break;
6422 		}
6423 
6424 		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
6425 		if (err) {
6426 			struct l2cap_cmd_rej_unk rej;
6427 
6428 			BT_ERR("Signaling command 0x%2.2x failed (%d)", cmd->code, err);
6429 
6430 			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6431 			l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
6432 				       sizeof(rej), &rej);
6433 		}
6434 
6435 		skb_pull(skb, len);
6436 	}
6437 
6438 drop:
6439 	kfree_skb(skb);
6440 }
6441 
6442 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
6443 {
6444 	u16 our_fcs, rcv_fcs;
6445 	int hdr_size;
6446 
6447 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6448 		hdr_size = L2CAP_EXT_HDR_SIZE;
6449 	else
6450 		hdr_size = L2CAP_ENH_HDR_SIZE;
6451 
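	/* The CRC-16 FCS covers the L2CAP header and payload: trim the
	 * trailing 2-byte FCS and compare it against a locally computed value.
	 */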
6452 	if (chan->fcs == L2CAP_FCS_CRC16) {
6453 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6454 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6455 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6456 
6457 		if (our_fcs != rcv_fcs)
6458 			return -EBADMSG;
6459 	}
6460 	return 0;
6461 }
6462 
6463 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
6464 {
6465 	struct l2cap_ctrl control;
6466 
6467 	BT_DBG("chan %p", chan);
6468 
6469 	memset(&control, 0, sizeof(control));
6470 	control.sframe = 1;
6471 	control.final = 1;
6472 	control.reqseq = chan->buffer_seq;
6473 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
6474 
6475 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6476 		control.super = L2CAP_SUPER_RNR;
6477 		l2cap_send_sframe(chan, &control);
6478 	}
6479 
6480 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
6481 	    chan->unacked_frames > 0)
6482 		__set_retrans_timer(chan);
6483 
6484 	/* Send pending iframes */
6485 	l2cap_ertm_send(chan);
6486 
6487 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
6488 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
6489 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
6490 		 * send it now.
6491 		 */
6492 		control.super = L2CAP_SUPER_RR;
6493 		l2cap_send_sframe(chan, &control);
6494 	}
6495 }
6496 
6497 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
6498 			    struct sk_buff **last_frag)
6499 {
6500 	/* skb->len reflects data in skb as well as all fragments
6501 	 * skb->data_len reflects only data in fragments
6502 	 */
6503 	if (!skb_has_frag_list(skb))
6504 		skb_shinfo(skb)->frag_list = new_frag;
6505 
6506 	new_frag->next = NULL;
6507 
6508 	(*last_frag)->next = new_frag;
6509 	*last_frag = new_frag;
6510 
6511 	skb->len += new_frag->len;
6512 	skb->data_len += new_frag->len;
6513 	skb->truesize += new_frag->truesize;
6514 }
6515 
6516 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
6517 				struct l2cap_ctrl *control)
6518 {
6519 	int err = -EINVAL;
6520 
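	/* err stays -EINVAL for SAR sequencing errors (e.g. a CONTINUE or END
	 * frame with no SDU in progress), which makes the cleanup below drop
	 * any partially reassembled SDU.
	 */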
6521 	switch (control->sar) {
6522 	case L2CAP_SAR_UNSEGMENTED:
6523 		if (chan->sdu)
6524 			break;
6525 
6526 		err = chan->ops->recv(chan, skb);
6527 		break;
6528 
6529 	case L2CAP_SAR_START:
6530 		if (chan->sdu)
6531 			break;
6532 
6533 		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
6534 			break;
6535 
6536 		chan->sdu_len = get_unaligned_le16(skb->data);
6537 		skb_pull(skb, L2CAP_SDULEN_SIZE);
6538 
6539 		if (chan->sdu_len > chan->imtu) {
6540 			err = -EMSGSIZE;
6541 			break;
6542 		}
6543 
6544 		if (skb->len >= chan->sdu_len)
6545 			break;
6546 
6547 		chan->sdu = skb;
6548 		chan->sdu_last_frag = skb;
6549 
6550 		skb = NULL;
6551 		err = 0;
6552 		break;
6553 
6554 	case L2CAP_SAR_CONTINUE:
6555 		if (!chan->sdu)
6556 			break;
6557 
6558 		append_skb_frag(chan->sdu, skb,
6559 				&chan->sdu_last_frag);
6560 		skb = NULL;
6561 
6562 		if (chan->sdu->len >= chan->sdu_len)
6563 			break;
6564 
6565 		err = 0;
6566 		break;
6567 
6568 	case L2CAP_SAR_END:
6569 		if (!chan->sdu)
6570 			break;
6571 
6572 		append_skb_frag(chan->sdu, skb,
6573 				&chan->sdu_last_frag);
6574 		skb = NULL;
6575 
6576 		if (chan->sdu->len != chan->sdu_len)
6577 			break;
6578 
6579 		err = chan->ops->recv(chan, chan->sdu);
6580 
6581 		if (!err) {
6582 			/* Reassembly complete */
6583 			chan->sdu = NULL;
6584 			chan->sdu_last_frag = NULL;
6585 			chan->sdu_len = 0;
6586 		}
6587 		break;
6588 	}
6589 
6590 	if (err) {
6591 		kfree_skb(skb);
6592 		kfree_skb(chan->sdu);
6593 		chan->sdu = NULL;
6594 		chan->sdu_last_frag = NULL;
6595 		chan->sdu_len = 0;
6596 	}
6597 
6598 	return err;
6599 }
6600 
6601 static int l2cap_resegment(struct l2cap_chan *chan)
6602 {
6603 	/* Placeholder */
6604 	return 0;
6605 }
6606 
6607 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6608 {
6609 	u8 event;
6610 
6611 	if (chan->mode != L2CAP_MODE_ERTM)
6612 		return;
6613 
6614 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6615 	l2cap_tx(chan, NULL, NULL, event);
6616 }
6617 
6618 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6619 {
6620 	int err = 0;
6621 	/* Pass sequential frames to l2cap_reassemble_sdu()
6622 	 * until a gap is encountered.
6623 	 */
6624 
6625 	BT_DBG("chan %p", chan);
6626 
6627 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6628 		struct sk_buff *skb;
6629 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
6630 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
6631 
6632 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6633 
6634 		if (!skb)
6635 			break;
6636 
6637 		skb_unlink(skb, &chan->srej_q);
6638 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6639 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6640 		if (err)
6641 			break;
6642 	}
6643 
6644 	if (skb_queue_empty(&chan->srej_q)) {
6645 		chan->rx_state = L2CAP_RX_STATE_RECV;
6646 		l2cap_send_ack(chan);
6647 	}
6648 
6649 	return err;
6650 }
6651 
6652 static void l2cap_handle_srej(struct l2cap_chan *chan,
6653 			      struct l2cap_ctrl *control)
6654 {
6655 	struct sk_buff *skb;
6656 
6657 	BT_DBG("chan %p, control %p", chan, control);
6658 
6659 	if (control->reqseq == chan->next_tx_seq) {
6660 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6661 		l2cap_send_disconn_req(chan, ECONNRESET);
6662 		return;
6663 	}
6664 
6665 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6666 
6667 	if (skb == NULL) {
6668 		BT_DBG("Seq %d not available for retransmission",
6669 		       control->reqseq);
6670 		return;
6671 	}
6672 
6673 	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6674 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6675 		l2cap_send_disconn_req(chan, ECONNRESET);
6676 		return;
6677 	}
6678 
6679 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6680 
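	/* An SREJ with the P bit set must be answered with the F bit set;
	 * remember which sequence was answered that way so the matching F=1
	 * SREJ does not trigger a second retransmission.
	 */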
6681 	if (control->poll) {
6682 		l2cap_pass_to_tx(chan, control);
6683 
6684 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
6685 		l2cap_retransmit(chan, control);
6686 		l2cap_ertm_send(chan);
6687 
6688 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6689 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
6690 			chan->srej_save_reqseq = control->reqseq;
6691 		}
6692 	} else {
6693 		l2cap_pass_to_tx_fbit(chan, control);
6694 
6695 		if (control->final) {
6696 			if (chan->srej_save_reqseq != control->reqseq ||
6697 			    !test_and_clear_bit(CONN_SREJ_ACT,
6698 						&chan->conn_state))
6699 				l2cap_retransmit(chan, control);
6700 		} else {
6701 			l2cap_retransmit(chan, control);
6702 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6703 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
6704 				chan->srej_save_reqseq = control->reqseq;
6705 			}
6706 		}
6707 	}
6708 }
6709 
6710 static void l2cap_handle_rej(struct l2cap_chan *chan,
6711 			     struct l2cap_ctrl *control)
6712 {
6713 	struct sk_buff *skb;
6714 
6715 	BT_DBG("chan %p, control %p", chan, control);
6716 
6717 	if (control->reqseq == chan->next_tx_seq) {
6718 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6719 		l2cap_send_disconn_req(chan, ECONNRESET);
6720 		return;
6721 	}
6722 
6723 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6724 
6725 	if (chan->max_tx && skb &&
6726 	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6727 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6728 		l2cap_send_disconn_req(chan, ECONNRESET);
6729 		return;
6730 	}
6731 
6732 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6733 
6734 	l2cap_pass_to_tx(chan, control);
6735 
6736 	if (control->final) {
6737 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6738 			l2cap_retransmit_all(chan, control);
6739 	} else {
6740 		l2cap_retransmit_all(chan, control);
6741 		l2cap_ertm_send(chan);
6742 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6743 			set_bit(CONN_REJ_ACT, &chan->conn_state);
6744 	}
6745 }
6746 
6747 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6748 {
6749 	BT_DBG("chan %p, txseq %d", chan, txseq);
6750 
6751 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6752 	       chan->expected_tx_seq);
6753 
6754 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6755 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6756 		    chan->tx_win) {
6757 			/* See notes below regarding "double poll" and
6758 			 * invalid packets.
6759 			 */
6760 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6761 				BT_DBG("Invalid/Ignore - after SREJ");
6762 				return L2CAP_TXSEQ_INVALID_IGNORE;
6763 			} else {
6764 				BT_DBG("Invalid - in window after SREJ sent");
6765 				return L2CAP_TXSEQ_INVALID;
6766 			}
6767 		}
6768 
6769 		if (chan->srej_list.head == txseq) {
6770 			BT_DBG("Expected SREJ");
6771 			return L2CAP_TXSEQ_EXPECTED_SREJ;
6772 		}
6773 
6774 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6775 			BT_DBG("Duplicate SREJ - txseq already stored");
6776 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
6777 		}
6778 
6779 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6780 			BT_DBG("Unexpected SREJ - not requested");
6781 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6782 		}
6783 	}
6784 
6785 	if (chan->expected_tx_seq == txseq) {
6786 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6787 		    chan->tx_win) {
6788 			BT_DBG("Invalid - txseq outside tx window");
6789 			return L2CAP_TXSEQ_INVALID;
6790 		} else {
6791 			BT_DBG("Expected");
6792 			return L2CAP_TXSEQ_EXPECTED;
6793 		}
6794 	}
6795 
6796 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6797 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6798 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
6799 		return L2CAP_TXSEQ_DUPLICATE;
6800 	}
6801 
6802 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6803 		/* A source of invalid packets is a "double poll" condition,
6804 		 * where delays cause us to send multiple poll packets.  If
6805 		 * the remote stack receives and processes both polls,
6806 		 * sequence numbers can wrap around in such a way that a
6807 		 * resent frame has a sequence number that looks like new data
6808 		 * with a sequence gap.  This would trigger an erroneous SREJ
6809 		 * request.
6810 		 *
6811 		 * Fortunately, this is impossible with a tx window that's
6812 		 * less than half of the maximum sequence number, which allows
6813 		 * invalid frames to be safely ignored.
6814 		 *
6815 		 * With tx window sizes greater than half of the tx window
6816 		 * maximum, the frame is invalid and cannot be ignored.  This
6817 		 * causes a disconnect.
6818 		 */
6819 
6820 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6821 			BT_DBG("Invalid/Ignore - txseq outside tx window");
6822 			return L2CAP_TXSEQ_INVALID_IGNORE;
6823 		} else {
6824 			BT_DBG("Invalid - txseq outside tx window");
6825 			return L2CAP_TXSEQ_INVALID;
6826 		}
6827 	} else {
6828 		BT_DBG("Unexpected - txseq indicates missing frames");
6829 		return L2CAP_TXSEQ_UNEXPECTED;
6830 	}
6831 }
6832 
6833 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6834 			       struct l2cap_ctrl *control,
6835 			       struct sk_buff *skb, u8 event)
6836 {
6837 	int err = 0;
6838 	bool skb_in_use = false;
6839 
6840 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6841 	       event);
6842 
6843 	switch (event) {
6844 	case L2CAP_EV_RECV_IFRAME:
6845 		switch (l2cap_classify_txseq(chan, control->txseq)) {
6846 		case L2CAP_TXSEQ_EXPECTED:
6847 			l2cap_pass_to_tx(chan, control);
6848 
6849 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6850 				BT_DBG("Busy, discarding expected seq %d",
6851 				       control->txseq);
6852 				break;
6853 			}
6854 
6855 			chan->expected_tx_seq = __next_seq(chan,
6856 							   control->txseq);
6857 
6858 			chan->buffer_seq = chan->expected_tx_seq;
6859 			skb_in_use = true;
6860 
6861 			err = l2cap_reassemble_sdu(chan, skb, control);
6862 			if (err)
6863 				break;
6864 
6865 			if (control->final) {
6866 				if (!test_and_clear_bit(CONN_REJ_ACT,
6867 							&chan->conn_state)) {
6868 					control->final = 0;
6869 					l2cap_retransmit_all(chan, control);
6870 					l2cap_ertm_send(chan);
6871 				}
6872 			}
6873 
6874 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6875 				l2cap_send_ack(chan);
6876 			break;
6877 		case L2CAP_TXSEQ_UNEXPECTED:
6878 			l2cap_pass_to_tx(chan, control);
6879 
6880 			/* Can't issue SREJ frames in the local busy state.
6881 			 * Drop this frame, it will be seen as missing
6882 			 * when local busy is exited.
6883 			 */
6884 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6885 				BT_DBG("Busy, discarding unexpected seq %d",
6886 				       control->txseq);
6887 				break;
6888 			}
6889 
6890 			/* There was a gap in the sequence, so an SREJ
6891 			 * must be sent for each missing frame.  The
6892 			 * current frame is stored for later use.
6893 			 */
6894 			skb_queue_tail(&chan->srej_q, skb);
6895 			skb_in_use = true;
6896 			BT_DBG("Queued %p (queue len %d)", skb,
6897 			       skb_queue_len(&chan->srej_q));
6898 
6899 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6900 			l2cap_seq_list_clear(&chan->srej_list);
6901 			l2cap_send_srej(chan, control->txseq);
6902 
6903 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6904 			break;
6905 		case L2CAP_TXSEQ_DUPLICATE:
6906 			l2cap_pass_to_tx(chan, control);
6907 			break;
6908 		case L2CAP_TXSEQ_INVALID_IGNORE:
6909 			break;
6910 		case L2CAP_TXSEQ_INVALID:
6911 		default:
6912 			l2cap_send_disconn_req(chan, ECONNRESET);
6913 			break;
6914 		}
6915 		break;
6916 	case L2CAP_EV_RECV_RR:
6917 		l2cap_pass_to_tx(chan, control);
6918 		if (control->final) {
6919 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6920 
6921 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6922 			    !__chan_is_moving(chan)) {
6923 				control->final = 0;
6924 				l2cap_retransmit_all(chan, control);
6925 			}
6926 
6927 			l2cap_ertm_send(chan);
6928 		} else if (control->poll) {
6929 			l2cap_send_i_or_rr_or_rnr(chan);
6930 		} else {
6931 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6932 					       &chan->conn_state) &&
6933 			    chan->unacked_frames)
6934 				__set_retrans_timer(chan);
6935 
6936 			l2cap_ertm_send(chan);
6937 		}
6938 		break;
6939 	case L2CAP_EV_RECV_RNR:
6940 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6941 		l2cap_pass_to_tx(chan, control);
6942 		if (control && control->poll) {
6943 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6944 			l2cap_send_rr_or_rnr(chan, 0);
6945 		}
6946 		__clear_retrans_timer(chan);
6947 		l2cap_seq_list_clear(&chan->retrans_list);
6948 		break;
6949 	case L2CAP_EV_RECV_REJ:
6950 		l2cap_handle_rej(chan, control);
6951 		break;
6952 	case L2CAP_EV_RECV_SREJ:
6953 		l2cap_handle_srej(chan, control);
6954 		break;
6955 	default:
6956 		break;
6957 	}
6958 
6959 	if (skb && !skb_in_use) {
6960 		BT_DBG("Freeing %p", skb);
6961 		kfree_skb(skb);
6962 	}
6963 
6964 	return err;
6965 }
6966 
6967 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6968 				    struct l2cap_ctrl *control,
6969 				    struct sk_buff *skb, u8 event)
6970 {
6971 	int err = 0;
6972 	u16 txseq = control->txseq;
6973 	bool skb_in_use = false;
6974 
6975 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6976 	       event);
6977 
6978 	switch (event) {
6979 	case L2CAP_EV_RECV_IFRAME:
6980 		switch (l2cap_classify_txseq(chan, txseq)) {
6981 		case L2CAP_TXSEQ_EXPECTED:
6982 			/* Keep frame for reassembly later */
6983 			l2cap_pass_to_tx(chan, control);
6984 			skb_queue_tail(&chan->srej_q, skb);
6985 			skb_in_use = true;
6986 			BT_DBG("Queued %p (queue len %d)", skb,
6987 			       skb_queue_len(&chan->srej_q));
6988 
6989 			chan->expected_tx_seq = __next_seq(chan, txseq);
6990 			break;
6991 		case L2CAP_TXSEQ_EXPECTED_SREJ:
6992 			l2cap_seq_list_pop(&chan->srej_list);
6993 
6994 			l2cap_pass_to_tx(chan, control);
6995 			skb_queue_tail(&chan->srej_q, skb);
6996 			skb_in_use = true;
6997 			BT_DBG("Queued %p (queue len %d)", skb,
6998 			       skb_queue_len(&chan->srej_q));
6999 
7000 			err = l2cap_rx_queued_iframes(chan);
7001 			if (err)
7002 				break;
7003 
7004 			break;
7005 		case L2CAP_TXSEQ_UNEXPECTED:
7006 			/* Got a frame that can't be reassembled yet.
7007 			 * Save it for later, and send SREJs to cover
7008 			 * the missing frames.
7009 			 */
7010 			skb_queue_tail(&chan->srej_q, skb);
7011 			skb_in_use = true;
7012 			BT_DBG("Queued %p (queue len %d)", skb,
7013 			       skb_queue_len(&chan->srej_q));
7014 
7015 			l2cap_pass_to_tx(chan, control);
7016 			l2cap_send_srej(chan, control->txseq);
7017 			break;
7018 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
7019 			/* This frame was requested with an SREJ, but
7020 			 * some expected retransmitted frames are
7021 			 * missing.  Request retransmission of missing
7022 			 * SREJ'd frames.
7023 			 */
7024 			skb_queue_tail(&chan->srej_q, skb);
7025 			skb_in_use = true;
7026 			BT_DBG("Queued %p (queue len %d)", skb,
7027 			       skb_queue_len(&chan->srej_q));
7028 
7029 			l2cap_pass_to_tx(chan, control);
7030 			l2cap_send_srej_list(chan, control->txseq);
7031 			break;
7032 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
7033 			/* We've already queued this frame.  Drop this copy. */
7034 			l2cap_pass_to_tx(chan, control);
7035 			break;
7036 		case L2CAP_TXSEQ_DUPLICATE:
7037 			/* Expecting a later sequence number, so this frame
7038 			 * was already received.  Ignore it completely.
7039 			 */
7040 			break;
7041 		case L2CAP_TXSEQ_INVALID_IGNORE:
7042 			break;
7043 		case L2CAP_TXSEQ_INVALID:
7044 		default:
7045 			l2cap_send_disconn_req(chan, ECONNRESET);
7046 			break;
7047 		}
7048 		break;
7049 	case L2CAP_EV_RECV_RR:
7050 		l2cap_pass_to_tx(chan, control);
7051 		if (control->final) {
7052 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7053 
7054 			if (!test_and_clear_bit(CONN_REJ_ACT,
7055 						&chan->conn_state)) {
7056 				control->final = 0;
7057 				l2cap_retransmit_all(chan, control);
7058 			}
7059 
7060 			l2cap_ertm_send(chan);
7061 		} else if (control->poll) {
7062 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7063 					       &chan->conn_state) &&
7064 			    chan->unacked_frames) {
7065 				__set_retrans_timer(chan);
7066 			}
7067 
7068 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
7069 			l2cap_send_srej_tail(chan);
7070 		} else {
7071 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7072 					       &chan->conn_state) &&
7073 			    chan->unacked_frames)
7074 				__set_retrans_timer(chan);
7075 
7076 			l2cap_send_ack(chan);
7077 		}
7078 		break;
7079 	case L2CAP_EV_RECV_RNR:
7080 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7081 		l2cap_pass_to_tx(chan, control);
7082 		if (control->poll) {
7083 			l2cap_send_srej_tail(chan);
7084 		} else {
7085 			struct l2cap_ctrl rr_control;
7086 			memset(&rr_control, 0, sizeof(rr_control));
7087 			rr_control.sframe = 1;
7088 			rr_control.super = L2CAP_SUPER_RR;
7089 			rr_control.reqseq = chan->buffer_seq;
7090 			l2cap_send_sframe(chan, &rr_control);
7091 		}
7092 
7093 		break;
7094 	case L2CAP_EV_RECV_REJ:
7095 		l2cap_handle_rej(chan, control);
7096 		break;
7097 	case L2CAP_EV_RECV_SREJ:
7098 		l2cap_handle_srej(chan, control);
7099 		break;
7100 	}
7101 
7102 	if (skb && !skb_in_use) {
7103 		BT_DBG("Freeing %p", skb);
7104 		kfree_skb(skb);
7105 	}
7106 
7107 	return err;
7108 }
7109 
7110 static int l2cap_finish_move(struct l2cap_chan *chan)
7111 {
7112 	BT_DBG("chan %p", chan);
7113 
7114 	chan->rx_state = L2CAP_RX_STATE_RECV;
7115 
7116 	if (chan->hs_hcon)
7117 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7118 	else
7119 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7120 
7121 	return l2cap_resegment(chan);
7122 }
7123 
7124 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
7125 				 struct l2cap_ctrl *control,
7126 				 struct sk_buff *skb, u8 event)
7127 {
7128 	int err;
7129 
7130 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7131 	       event);
7132 
7133 	if (!control->poll)
7134 		return -EPROTO;
7135 
7136 	l2cap_process_reqseq(chan, control->reqseq);
7137 
7138 	if (!skb_queue_empty(&chan->tx_q))
7139 		chan->tx_send_head = skb_peek(&chan->tx_q);
7140 	else
7141 		chan->tx_send_head = NULL;
7142 
7143 	/* Rewind next_tx_seq to the point expected
7144 	 * by the receiver.
7145 	 */
7146 	chan->next_tx_seq = control->reqseq;
7147 	chan->unacked_frames = 0;
7148 
7149 	err = l2cap_finish_move(chan);
7150 	if (err)
7151 		return err;
7152 
7153 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
7154 	l2cap_send_i_or_rr_or_rnr(chan);
7155 
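	/* Only S-frame events are passed on from here; an I-frame while
	 * waiting for a poll is a protocol error.
	 */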
7156 	if (event == L2CAP_EV_RECV_IFRAME)
7157 		return -EPROTO;
7158 
7159 	return l2cap_rx_state_recv(chan, control, NULL, event);
7160 }
7161 
7162 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
7163 				 struct l2cap_ctrl *control,
7164 				 struct sk_buff *skb, u8 event)
7165 {
7166 	int err;
7167 
7168 	if (!control->final)
7169 		return -EPROTO;
7170 
7171 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7172 
7173 	chan->rx_state = L2CAP_RX_STATE_RECV;
7174 	l2cap_process_reqseq(chan, control->reqseq);
7175 
7176 	if (!skb_queue_empty(&chan->tx_q))
7177 		chan->tx_send_head = skb_peek(&chan->tx_q);
7178 	else
7179 		chan->tx_send_head = NULL;
7180 
7181 	/* Rewind next_tx_seq to the point expected
7182 	 * by the receiver.
7183 	 */
7184 	chan->next_tx_seq = control->reqseq;
7185 	chan->unacked_frames = 0;
7186 
7187 	if (chan->hs_hcon)
7188 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7189 	else
7190 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7191 
7192 	err = l2cap_resegment(chan);
7193 
7194 	if (!err)
7195 		err = l2cap_rx_state_recv(chan, control, skb, event);
7196 
7197 	return err;
7198 }
7199 
7200 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7201 {
7202 	/* Make sure reqseq is for a packet that has been sent but not acked */
7203 	u16 unacked;
7204 
7205 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7206 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
7207 }
7208 
7209 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7210 		    struct sk_buff *skb, u8 event)
7211 {
7212 	int err = 0;
7213 
7214 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7215 	       control, skb, event, chan->rx_state);
7216 
7217 	if (__valid_reqseq(chan, control->reqseq)) {
7218 		switch (chan->rx_state) {
7219 		case L2CAP_RX_STATE_RECV:
7220 			err = l2cap_rx_state_recv(chan, control, skb, event);
7221 			break;
7222 		case L2CAP_RX_STATE_SREJ_SENT:
7223 			err = l2cap_rx_state_srej_sent(chan, control, skb,
7224 						       event);
7225 			break;
7226 		case L2CAP_RX_STATE_WAIT_P:
7227 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
7228 			break;
7229 		case L2CAP_RX_STATE_WAIT_F:
7230 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
7231 			break;
7232 		default:
7233 			/* shut it down */
7234 			break;
7235 		}
7236 	} else {
7237 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
7238 		       control->reqseq, chan->next_tx_seq,
7239 		       chan->expected_ack_seq);
7240 		l2cap_send_disconn_req(chan, ECONNRESET);
7241 	}
7242 
7243 	return err;
7244 }
7245 
7246 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7247 			   struct sk_buff *skb)
7248 {
7249 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
7250 	       chan->rx_state);
7251 
7252 	if (l2cap_classify_txseq(chan, control->txseq) ==
7253 	    L2CAP_TXSEQ_EXPECTED) {
7254 		l2cap_pass_to_tx(chan, control);
7255 
7256 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
7257 		       __next_seq(chan, chan->buffer_seq));
7258 
7259 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
7260 
7261 		l2cap_reassemble_sdu(chan, skb, control);
7262 	} else {
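		/* Streaming mode has no retransmissions: a sequence gap means
		 * data was lost, so discard any partial SDU and this frame.
		 */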
7263 		if (chan->sdu) {
7264 			kfree_skb(chan->sdu);
7265 			chan->sdu = NULL;
7266 		}
7267 		chan->sdu_last_frag = NULL;
7268 		chan->sdu_len = 0;
7269 
7270 		if (skb) {
7271 			BT_DBG("Freeing %p", skb);
7272 			kfree_skb(skb);
7273 		}
7274 	}
7275 
7276 	chan->last_acked_seq = control->txseq;
7277 	chan->expected_tx_seq = __next_seq(chan, control->txseq);
7278 
7279 	return 0;
7280 }
7281 
7282 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7283 {
7284 	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
7285 	u16 len;
7286 	u8 event;
7287 
7288 	__unpack_control(chan, skb);
7289 
7290 	len = skb->len;
7291 
7292 	/*
7293 	 * We can just drop the corrupted I-frame here.
7294 	 * Receiver will miss it and start proper recovery
7295 	 * procedures and ask for retransmission.
7296 	 */
7297 	if (l2cap_check_fcs(chan, skb))
7298 		goto drop;
7299 
7300 	if (!control->sframe && control->sar == L2CAP_SAR_START)
7301 		len -= L2CAP_SDULEN_SIZE;
7302 
7303 	if (chan->fcs == L2CAP_FCS_CRC16)
7304 		len -= L2CAP_FCS_SIZE;
7305 
7306 	if (len > chan->mps) {
7307 		l2cap_send_disconn_req(chan, ECONNRESET);
7308 		goto drop;
7309 	}
7310 
7311 	if (chan->ops->filter) {
7312 		if (chan->ops->filter(chan, skb))
7313 			goto drop;
7314 	}
7315 
7316 	if (!control->sframe) {
7317 		int err;
7318 
7319 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
7320 		       control->sar, control->reqseq, control->final,
7321 		       control->txseq);
7322 
7323 		/* Validate F-bit - F=0 always valid, F=1 only
7324 		 * valid in TX WAIT_F
7325 		 */
7326 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
7327 			goto drop;
7328 
7329 		if (chan->mode != L2CAP_MODE_STREAMING) {
7330 			event = L2CAP_EV_RECV_IFRAME;
7331 			err = l2cap_rx(chan, control, skb, event);
7332 		} else {
7333 			err = l2cap_stream_rx(chan, control, skb);
7334 		}
7335 
7336 		if (err)
7337 			l2cap_send_disconn_req(chan, ECONNRESET);
7338 	} else {
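		/* Map the S-frame super field (RR/REJ/RNR/SREJ) to the
		 * corresponding receive event.
		 */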
7339 		const u8 rx_func_to_event[4] = {
7340 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
7341 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
7342 		};
7343 
7344 		/* Only I-frames are expected in streaming mode */
7345 		if (chan->mode == L2CAP_MODE_STREAMING)
7346 			goto drop;
7347 
7348 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
7349 		       control->reqseq, control->final, control->poll,
7350 		       control->super);
7351 
7352 		if (len != 0) {
7353 			BT_ERR("Trailing bytes: %d in sframe", len);
7354 			l2cap_send_disconn_req(chan, ECONNRESET);
7355 			goto drop;
7356 		}
7357 
7358 		/* Validate F and P bits */
7359 		if (control->final && (control->poll ||
7360 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
7361 			goto drop;
7362 
7363 		event = rx_func_to_event[control->super];
7364 		if (l2cap_rx(chan, control, skb, event))
7365 			l2cap_send_disconn_req(chan, ECONNRESET);
7366 	}
7367 
7368 	return 0;
7369 
7370 drop:
7371 	kfree_skb(skb);
7372 	return 0;
7373 }
7374 
7375 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7376 {
7377 	struct l2cap_conn *conn = chan->conn;
7378 	struct l2cap_le_credits pkt;
7379 	u16 return_credits;
7380 
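	/* Keep the peer topped up to enough credits for one maximally
	 * fragmented SDU, i.e. (imtu / mps) + 1 PDUs, and only send a credits
	 * packet once its balance has dropped below that.
	 */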
7381 	return_credits = (chan->imtu / chan->mps) + 1;
7382 
7383 	if (chan->rx_credits >= return_credits)
7384 		return;
7385 
7386 	return_credits -= chan->rx_credits;
7387 
7388 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7389 
7390 	chan->rx_credits += return_credits;
7391 
7392 	pkt.cid     = cpu_to_le16(chan->scid);
7393 	pkt.credits = cpu_to_le16(return_credits);
7394 
7395 	chan->ident = l2cap_get_ident(conn);
7396 
7397 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
7398 }
7399 
7400 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7401 {
7402 	int err;
7403 
7404 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7405 
7406 	/* Wait for recv to confirm reception before updating the credits */
7407 	err = chan->ops->recv(chan, skb);
7408 
7409 	/* Update credits whenever an SDU is received */
7410 	l2cap_chan_le_send_credits(chan);
7411 
7412 	return err;
7413 }
7414 
7415 static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7416 {
7417 	int err;
7418 
7419 	if (!chan->rx_credits) {
7420 		BT_ERR("No credits to receive LE L2CAP data");
7421 		l2cap_send_disconn_req(chan, ECONNRESET);
7422 		return -ENOBUFS;
7423 	}
7424 
7425 	if (chan->imtu < skb->len) {
7426 		BT_ERR("Too big LE L2CAP PDU");
7427 		return -ENOBUFS;
7428 	}
7429 
7430 	chan->rx_credits--;
7431 	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
7432 
7433 	/* Replenish credits if the remote has run out; this should only happen
7434 	 * if the remote is not using the entire MPS.
7435 	 */
7436 	if (!chan->rx_credits)
7437 		l2cap_chan_le_send_credits(chan);
7438 
7439 	err = 0;
7440 
7441 	if (!chan->sdu) {
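		/* First PDU of a new SDU: the payload starts with a 2-byte SDU
		 * length field.
		 */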
7442 		u16 sdu_len;
7443 
7444 		sdu_len = get_unaligned_le16(skb->data);
7445 		skb_pull(skb, L2CAP_SDULEN_SIZE);
7446 
7447 		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
7448 		       sdu_len, skb->len, chan->imtu);
7449 
7450 		if (sdu_len > chan->imtu) {
7451 			BT_ERR("Too big LE L2CAP SDU length received");
7452 			err = -EMSGSIZE;
7453 			goto failed;
7454 		}
7455 
7456 		if (skb->len > sdu_len) {
7457 			BT_ERR("Too much LE L2CAP data received");
7458 			err = -EINVAL;
7459 			goto failed;
7460 		}
7461 
7462 		if (skb->len == sdu_len)
7463 			return l2cap_ecred_recv(chan, skb);
7464 
7465 		chan->sdu = skb;
7466 		chan->sdu_len = sdu_len;
7467 		chan->sdu_last_frag = skb;
7468 
7469 		/* Detect if remote is not able to use the selected MPS */
7470 		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
7471 			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
7472 
7473 			/* Adjust the number of credits */
7474 			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
7475 			chan->mps = mps_len;
7476 			l2cap_chan_le_send_credits(chan);
7477 		}
7478 
7479 		return 0;
7480 	}
7481 
7482 	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
7483 	       chan->sdu->len, skb->len, chan->sdu_len);
7484 
7485 	if (chan->sdu->len + skb->len > chan->sdu_len) {
7486 		BT_ERR("Too much LE L2CAP data received");
7487 		err = -EINVAL;
7488 		goto failed;
7489 	}
7490 
7491 	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
7492 	skb = NULL;
7493 
7494 	if (chan->sdu->len == chan->sdu_len) {
7495 		err = l2cap_ecred_recv(chan, chan->sdu);
7496 		if (!err) {
7497 			chan->sdu = NULL;
7498 			chan->sdu_last_frag = NULL;
7499 			chan->sdu_len = 0;
7500 		}
7501 	}
7502 
7503 failed:
7504 	if (err) {
7505 		kfree_skb(skb);
7506 		kfree_skb(chan->sdu);
7507 		chan->sdu = NULL;
7508 		chan->sdu_last_frag = NULL;
7509 		chan->sdu_len = 0;
7510 	}
7511 
7512 	/* We can't return an error here since we took care of the skb
7513 	 * freeing internally. An error return would cause the caller to
7514 	 * do a double-free of the skb.
7515 	 */
7516 	return 0;
7517 }
7518 
7519 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
7520 			       struct sk_buff *skb)
7521 {
7522 	struct l2cap_chan *chan;
7523 
7524 	chan = l2cap_get_chan_by_scid(conn, cid);
7525 	if (!chan) {
7526 		if (cid == L2CAP_CID_A2MP) {
7527 			chan = a2mp_channel_create(conn, skb);
7528 			if (!chan) {
7529 				kfree_skb(skb);
7530 				return;
7531 			}
7532 
7533 			l2cap_chan_lock(chan);
7534 		} else {
7535 			BT_DBG("unknown cid 0x%4.4x", cid);
7536 			/* Drop packet and return */
7537 			kfree_skb(skb);
7538 			return;
7539 		}
7540 	}
7541 
7542 	BT_DBG("chan %p, len %d", chan, skb->len);
7543 
7544 	/* If we receive data on a fixed channel before the info req/rsp
7545 	 * procedure is done, simply assume that the channel is supported
7546 	 * and mark it as ready.
7547 	 */
7548 	if (chan->chan_type == L2CAP_CHAN_FIXED)
7549 		l2cap_chan_ready(chan);
7550 
7551 	if (chan->state != BT_CONNECTED)
7552 		goto drop;
7553 
7554 	switch (chan->mode) {
7555 	case L2CAP_MODE_LE_FLOWCTL:
7556 	case L2CAP_MODE_EXT_FLOWCTL:
7557 		if (l2cap_ecred_data_rcv(chan, skb) < 0)
7558 			goto drop;
7559 
7560 		goto done;
7561 
7562 	case L2CAP_MODE_BASIC:
7563 		/* If the socket receive buffer overflows we drop data here,
7564 		 * which is *bad* because L2CAP has to be reliable.
7565 		 * But we don't have any other choice: L2CAP basic mode
7566 		 * doesn't provide a flow control mechanism. */
7567 
7568 		if (chan->imtu < skb->len) {
7569 			BT_ERR("Dropping L2CAP data: receive buffer overflow");
7570 			goto drop;
7571 		}
7572 
7573 		if (!chan->ops->recv(chan, skb))
7574 			goto done;
7575 		break;
7576 
7577 	case L2CAP_MODE_ERTM:
7578 	case L2CAP_MODE_STREAMING:
7579 		l2cap_data_rcv(chan, skb);
7580 		goto done;
7581 
7582 	default:
7583 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
7584 		break;
7585 	}
7586 
7587 drop:
7588 	kfree_skb(skb);
7589 
7590 done:
7591 	l2cap_chan_unlock(chan);
7592 }
7593 
7594 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7595 				  struct sk_buff *skb)
7596 {
7597 	struct hci_conn *hcon = conn->hcon;
7598 	struct l2cap_chan *chan;
7599 
7600 	if (hcon->type != ACL_LINK)
7601 		goto free_skb;
7602 
7603 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7604 					ACL_LINK);
7605 	if (!chan)
7606 		goto free_skb;
7607 
7608 	BT_DBG("chan %p, len %d", chan, skb->len);
7609 
7610 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7611 		goto drop;
7612 
7613 	if (chan->imtu < skb->len)
7614 		goto drop;
7615 
7616 	/* Store remote BD_ADDR and PSM for msg_name */
7617 	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
7618 	bt_cb(skb)->l2cap.psm = psm;
7619 
7620 	if (!chan->ops->recv(chan, skb)) {
7621 		l2cap_chan_put(chan);
7622 		return;
7623 	}
7624 
7625 drop:
7626 	l2cap_chan_put(chan);
7627 free_skb:
7628 	kfree_skb(skb);
7629 }
7630 
7631 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7632 {
7633 	struct l2cap_hdr *lh = (void *) skb->data;
7634 	struct hci_conn *hcon = conn->hcon;
7635 	u16 cid, len;
7636 	__le16 psm;
7637 
7638 	if (hcon->state != BT_CONNECTED) {
7639 		BT_DBG("queueing pending rx skb");
7640 		skb_queue_tail(&conn->pending_rx, skb);
7641 		return;
7642 	}
7643 
7644 	skb_pull(skb, L2CAP_HDR_SIZE);
7645 	cid = __le16_to_cpu(lh->cid);
7646 	len = __le16_to_cpu(lh->len);
7647 
7648 	if (len != skb->len) {
7649 		kfree_skb(skb);
7650 		return;
7651 	}
7652 
7653 	/* Since we can't actively block incoming LE connections we must
7654 	 * at least ensure that we ignore incoming data from them.
7655 	 */
7656 	if (hcon->type == LE_LINK &&
7657 	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
7658 				   bdaddr_dst_type(hcon))) {
7659 		kfree_skb(skb);
7660 		return;
7661 	}
7662 
7663 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
7664 
7665 	switch (cid) {
7666 	case L2CAP_CID_SIGNALING:
7667 		l2cap_sig_channel(conn, skb);
7668 		break;
7669 
7670 	case L2CAP_CID_CONN_LESS:
7671 		psm = get_unaligned((__le16 *) skb->data);
7672 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
7673 		l2cap_conless_channel(conn, psm, skb);
7674 		break;
7675 
7676 	case L2CAP_CID_LE_SIGNALING:
7677 		l2cap_le_sig_channel(conn, skb);
7678 		break;
7679 
7680 	default:
7681 		l2cap_data_channel(conn, cid, skb);
7682 		break;
7683 	}
7684 }
7685 
7686 static void process_pending_rx(struct work_struct *work)
7687 {
7688 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7689 					       pending_rx_work);
7690 	struct sk_buff *skb;
7691 
7692 	BT_DBG("");
7693 
7694 	while ((skb = skb_dequeue(&conn->pending_rx)))
7695 		l2cap_recv_frame(conn, skb);
7696 }
7697 
7698 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7699 {
7700 	struct l2cap_conn *conn = hcon->l2cap_data;
7701 	struct hci_chan *hchan;
7702 
7703 	if (conn)
7704 		return conn;
7705 
7706 	hchan = hci_chan_create(hcon);
7707 	if (!hchan)
7708 		return NULL;
7709 
7710 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
7711 	if (!conn) {
7712 		hci_chan_del(hchan);
7713 		return NULL;
7714 	}
7715 
7716 	kref_init(&conn->ref);
7717 	hcon->l2cap_data = conn;
7718 	conn->hcon = hci_conn_get(hcon);
7719 	conn->hchan = hchan;
7720 
7721 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7722 
7723 	switch (hcon->type) {
7724 	case LE_LINK:
7725 		if (hcon->hdev->le_mtu) {
7726 			conn->mtu = hcon->hdev->le_mtu;
7727 			break;
7728 		}
7729 		fallthrough;
7730 	default:
7731 		conn->mtu = hcon->hdev->acl_mtu;
7732 		break;
7733 	}
7734 
7735 	conn->feat_mask = 0;
7736 
7737 	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7738 
7739 	if (hcon->type == ACL_LINK &&
7740 	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7741 		conn->local_fixed_chan |= L2CAP_FC_A2MP;
7742 
7743 	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7744 	    (bredr_sc_enabled(hcon->hdev) ||
7745 	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7746 		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7747 
7748 	mutex_init(&conn->ident_lock);
7749 	mutex_init(&conn->chan_lock);
7750 
7751 	INIT_LIST_HEAD(&conn->chan_l);
7752 	INIT_LIST_HEAD(&conn->users);
7753 
7754 	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7755 
7756 	skb_queue_head_init(&conn->pending_rx);
7757 	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7758 	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7759 
7760 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7761 
7762 	return conn;
7763 }
7764 
7765 static bool is_valid_psm(u16 psm, u8 dst_type)
{
7766 	if (!psm)
7767 		return false;
7768 
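	/* LE PSMs are confined to the range 0x0001-0x00ff */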
7769 	if (bdaddr_type_is_le(dst_type))
7770 		return (psm <= 0x00ff);
7771 
7772 	/* PSM must be odd and lsb of upper byte must be 0 */
7773 	return ((psm & 0x0101) == 0x0001);
7774 }
7775 
7776 struct l2cap_chan_data {
7777 	struct l2cap_chan *chan;
7778 	struct pid *pid;
7779 	int count;
7780 };
7781 
7782 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7783 {
7784 	struct l2cap_chan_data *d = data;
7785 	struct pid *pid;
7786 
7787 	if (chan == d->chan)
7788 		return;
7789 
7790 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7791 		return;
7792 
7793 	pid = chan->ops->get_peer_pid(chan);
7794 
7795 	/* Only count deferred channels with the same PID/PSM */
7796 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7797 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7798 		return;
7799 
7800 	d->count++;
7801 }
7802 
7803 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7804 		       bdaddr_t *dst, u8 dst_type)
7805 {
7806 	struct l2cap_conn *conn;
7807 	struct hci_conn *hcon;
7808 	struct hci_dev *hdev;
7809 	int err;
7810 
7811 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
7812 	       dst, dst_type, __le16_to_cpu(psm), chan->mode);
7813 
7814 	hdev = hci_get_route(dst, &chan->src, chan->src_type);
7815 	if (!hdev)
7816 		return -EHOSTUNREACH;
7817 
7818 	hci_dev_lock(hdev);
7819 
7820 	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7821 	    chan->chan_type != L2CAP_CHAN_RAW) {
7822 		err = -EINVAL;
7823 		goto done;
7824 	}
7825 
7826 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7827 		err = -EINVAL;
7828 		goto done;
7829 	}
7830 
7831 	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7832 		err = -EINVAL;
7833 		goto done;
7834 	}
7835 
7836 	switch (chan->mode) {
7837 	case L2CAP_MODE_BASIC:
7838 		break;
7839 	case L2CAP_MODE_LE_FLOWCTL:
7840 		break;
7841 	case L2CAP_MODE_EXT_FLOWCTL:
7842 		if (!enable_ecred) {
7843 			err = -EOPNOTSUPP;
7844 			goto done;
7845 		}
7846 		break;
7847 	case L2CAP_MODE_ERTM:
7848 	case L2CAP_MODE_STREAMING:
7849 		if (!disable_ertm)
7850 			break;
7851 		fallthrough;
7852 	default:
7853 		err = -EOPNOTSUPP;
7854 		goto done;
7855 	}
7856 
7857 	switch (chan->state) {
7858 	case BT_CONNECT:
7859 	case BT_CONNECT2:
7860 	case BT_CONFIG:
7861 		/* Already connecting */
7862 		err = 0;
7863 		goto done;
7864 
7865 	case BT_CONNECTED:
7866 		/* Already connected */
7867 		err = -EISCONN;
7868 		goto done;
7869 
7870 	case BT_OPEN:
7871 	case BT_BOUND:
7872 		/* Can connect */
7873 		break;
7874 
7875 	default:
7876 		err = -EBADFD;
7877 		goto done;
7878 	}
7879 
7880 	/* Set destination address and psm */
7881 	bacpy(&chan->dst, dst);
7882 	chan->dst_type = dst_type;
7883 
7884 	chan->psm = psm;
7885 	chan->dcid = cid;
7886 
7887 	if (bdaddr_type_is_le(dst_type)) {
7888 		/* Convert from L2CAP channel address type to HCI address type
7889 		 */
7890 		if (dst_type == BDADDR_LE_PUBLIC)
7891 			dst_type = ADDR_LE_DEV_PUBLIC;
7892 		else
7893 			dst_type = ADDR_LE_DEV_RANDOM;
7894 
7895 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7896 			hcon = hci_connect_le(hdev, dst, dst_type,
7897 					      chan->sec_level,
7898 					      HCI_LE_CONN_TIMEOUT,
7899 					      HCI_ROLE_SLAVE, NULL);
7900 		else
7901 			hcon = hci_connect_le_scan(hdev, dst, dst_type,
7902 						   chan->sec_level,
7903 						   HCI_LE_CONN_TIMEOUT,
7904 						   CONN_REASON_L2CAP_CHAN);
7905 
7906 	} else {
7907 		u8 auth_type = l2cap_get_auth_type(chan);
7908 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
7909 				       CONN_REASON_L2CAP_CHAN);
7910 	}
7911 
7912 	if (IS_ERR(hcon)) {
7913 		err = PTR_ERR(hcon);
7914 		goto done;
7915 	}
7916 
7917 	conn = l2cap_conn_add(hcon);
7918 	if (!conn) {
7919 		hci_conn_drop(hcon);
7920 		err = -ENOMEM;
7921 		goto done;
7922 	}
7923 
7924 	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
7925 		struct l2cap_chan_data data;
7926 
7927 		data.chan = chan;
7928 		data.pid = chan->ops->get_peer_pid(chan);
7929 		data.count = 1;
7930 
7931 		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
7932 
7933 		/* Check that there aren't too many channels being connected */
7934 		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
7935 			hci_conn_drop(hcon);
7936 			err = -EPROTO;
7937 			goto done;
7938 		}
7939 	}
7940 
7941 	mutex_lock(&conn->chan_lock);
7942 	l2cap_chan_lock(chan);
7943 
7944 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7945 		hci_conn_drop(hcon);
7946 		err = -EBUSY;
7947 		goto chan_unlock;
7948 	}
7949 
7950 	/* Update source addr of the socket */
7951 	bacpy(&chan->src, &hcon->src);
7952 	chan->src_type = bdaddr_src_type(hcon);
7953 
7954 	__l2cap_chan_add(conn, chan);
7955 
7956 	/* l2cap_chan_add takes its own ref so we can drop this one */
7957 	hci_conn_drop(hcon);
7958 
7959 	l2cap_state_change(chan, BT_CONNECT);
7960 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7961 
7962 	/* Release chan->sport so that it can be reused by other
7963 	 * sockets (as it's only used for listening sockets).
7964 	 */
7965 	write_lock(&chan_list_lock);
7966 	chan->sport = 0;
7967 	write_unlock(&chan_list_lock);
7968 
7969 	if (hcon->state == BT_CONNECTED) {
7970 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7971 			__clear_chan_timer(chan);
7972 			if (l2cap_chan_check_security(chan, true))
7973 				l2cap_state_change(chan, BT_CONNECTED);
7974 		} else
7975 			l2cap_do_start(chan);
7976 	}
7977 
7978 	err = 0;
7979 
7980 chan_unlock:
7981 	l2cap_chan_unlock(chan);
7982 	mutex_unlock(&conn->chan_lock);
7983 done:
7984 	hci_dev_unlock(hdev);
7985 	hci_dev_put(hdev);
7986 	return err;
7987 }
7988 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
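
/* Illustrative sketch (not part of the original file): how an in-kernel
 * user of the channel API might set up and connect an LE CoC channel via
 * l2cap_chan_connect().  The PSM value and example_chan_ops are
 * hypothetical placeholders.
 *
 *	struct l2cap_chan *chan;
 *	int err;
 *
 *	chan = l2cap_chan_create();
 *	if (!chan)
 *		return -ENOMEM;
 *
 *	l2cap_chan_set_defaults(chan);
 *	chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
 *	chan->mode      = L2CAP_MODE_LE_FLOWCTL;
 *	chan->imtu      = 1280;
 *	chan->ops       = &example_chan_ops;
 *
 *	err = l2cap_chan_connect(chan, cpu_to_le16(0x0080), 0, dst,
 *				 BDADDR_LE_PUBLIC);
 *
 * (dst above is the peer bdaddr_t; the channel ops must be populated
 * before connecting.)
 */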
7989 
7990 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
7991 {
7992 	struct l2cap_conn *conn = chan->conn;
7993 	struct {
7994 		struct l2cap_ecred_reconf_req req;
7995 		__le16 scid;
7996 	} pdu;
7997 
7998 	pdu.req.mtu = cpu_to_le16(chan->imtu);
7999 	pdu.req.mps = cpu_to_le16(chan->mps);
8000 	pdu.scid    = cpu_to_le16(chan->scid);
8001 
8002 	chan->ident = l2cap_get_ident(conn);
8003 
8004 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
8005 		       sizeof(pdu), &pdu);
8006 }
8007 
8008 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8009 {
8010 	if (chan->imtu > mtu)
8011 		return -EINVAL;
8012 
8013 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
8014 
8015 	chan->imtu = mtu;
8016 
8017 	l2cap_ecred_reconfigure(chan);
8018 
8019 	return 0;
8020 }
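
/* Note on l2cap_chan_reconfigure(): the new MTU must not be smaller than
 * the current chan->imtu (otherwise -EINVAL), so the receive MTU can only
 * stay the same or grow.  A minimal, illustrative call:
 *
 *	err = l2cap_chan_reconfigure(chan, 2048);
 *
 * The L2CAP_ECRED_RECONF_REQ built by l2cap_ecred_reconfigure() above then
 * carries the new MTU and MPS followed by the list of source CIDs being
 * reconfigured (a single SCID here), each field 2 octets little endian:
 *
 *	+---------+---------+----------+
 *	|   MTU   |   MPS   |  SCID 1  |
 *	+---------+---------+----------+
 */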
8021 
8022 /* ---- L2CAP interface with lower layer (HCI) ---- */
8023 
8024 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8025 {
8026 	int exact = 0, lm1 = 0, lm2 = 0;
8027 	struct l2cap_chan *c;
8028 
8029 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8030 
8031 	/* Find listening sockets and check their link_mode */
8032 	read_lock(&chan_list_lock);
8033 	list_for_each_entry(c, &chan_list, global_l) {
8034 		if (c->state != BT_LISTEN)
8035 			continue;
8036 
8037 		if (!bacmp(&c->src, &hdev->bdaddr)) {
8038 			lm1 |= HCI_LM_ACCEPT;
8039 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8040 				lm1 |= HCI_LM_MASTER;
8041 			exact++;
8042 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
8043 			lm2 |= HCI_LM_ACCEPT;
8044 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8045 				lm2 |= HCI_LM_MASTER;
8046 		}
8047 	}
8048 	read_unlock(&chan_list_lock);
8049 
8050 	return exact ? lm1 : lm2;
8051 }
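
/* Worked example for l2cap_connect_ind() (illustrative): with one channel
 * listening on the local adapter address with FLAG_ROLE_SWITCH set and a
 * second one listening on BDADDR_ANY without it, the exact match wins and
 * the function returns HCI_LM_ACCEPT | HCI_LM_MASTER.  The wildcard mask
 * (lm2) is only returned when no listener is bound to the exact local
 * address.
 */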
8052 
8053 /* Find the next fixed channel in BT_LISTEN state, continuing the iteration
8054  * either from an existing channel in the list or from the beginning of the
8055  * global list (by passing NULL as the first parameter).
8056  */
8057 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
8058 						  struct hci_conn *hcon)
8059 {
8060 	u8 src_type = bdaddr_src_type(hcon);
8061 
8062 	read_lock(&chan_list_lock);
8063 
8064 	if (c)
8065 		c = list_next_entry(c, global_l);
8066 	else
8067 		c = list_entry(chan_list.next, typeof(*c), global_l);
8068 
8069 	list_for_each_entry_from(c, &chan_list, global_l) {
8070 		if (c->chan_type != L2CAP_CHAN_FIXED)
8071 			continue;
8072 		if (c->state != BT_LISTEN)
8073 			continue;
8074 		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
8075 			continue;
8076 		if (src_type != c->src_type)
8077 			continue;
8078 
8079 		l2cap_chan_hold(c);
8080 		read_unlock(&chan_list_lock);
8081 		return c;
8082 	}
8083 
8084 	read_unlock(&chan_list_lock);
8085 
8086 	return NULL;
8087 }
8088 
8089 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
8090 {
8091 	struct hci_dev *hdev = hcon->hdev;
8092 	struct l2cap_conn *conn;
8093 	struct l2cap_chan *pchan;
8094 	u8 dst_type;
8095 
8096 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8097 		return;
8098 
8099 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
8100 
8101 	if (status) {
8102 		l2cap_conn_del(hcon, bt_to_errno(status));
8103 		return;
8104 	}
8105 
8106 	conn = l2cap_conn_add(hcon);
8107 	if (!conn)
8108 		return;
8109 
8110 	dst_type = bdaddr_dst_type(hcon);
8111 
8112 	/* If device is blocked, do not create channels for it */
8113 	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
8114 		return;
8115 
8116 	/* Find fixed channels and notify them of the new connection. We
8117 	 * use multiple individual lookups, continuing each time where
8118 	 * we left off, because the list lock would prevent calling the
8119 	 * potentially sleeping l2cap_chan_lock() function.
8120 	 */
8121 	pchan = l2cap_global_fixed_chan(NULL, hcon);
8122 	while (pchan) {
8123 		struct l2cap_chan *chan, *next;
8124 
8125 		/* Client fixed channels should override server ones */
8126 		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
8127 			goto next;
8128 
8129 		l2cap_chan_lock(pchan);
8130 		chan = pchan->ops->new_connection(pchan);
8131 		if (chan) {
8132 			bacpy(&chan->src, &hcon->src);
8133 			bacpy(&chan->dst, &hcon->dst);
8134 			chan->src_type = bdaddr_src_type(hcon);
8135 			chan->dst_type = dst_type;
8136 
8137 			__l2cap_chan_add(conn, chan);
8138 		}
8139 
8140 		l2cap_chan_unlock(pchan);
8141 next:
8142 		next = l2cap_global_fixed_chan(pchan, hcon);
8143 		l2cap_chan_put(pchan);
8144 		pchan = next;
8145 	}
8146 
8147 	l2cap_conn_ready(conn);
8148 }
8149 
8150 int l2cap_disconn_ind(struct hci_conn *hcon)
8151 {
8152 	struct l2cap_conn *conn = hcon->l2cap_data;
8153 
8154 	BT_DBG("hcon %p", hcon);
8155 
8156 	if (!conn)
8157 		return HCI_ERROR_REMOTE_USER_TERM;
8158 	return conn->disc_reason;
8159 }
8160 
8161 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8162 {
8163 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8164 		return;
8165 
8166 	BT_DBG("hcon %p reason %d", hcon, reason);
8167 
8168 	l2cap_conn_del(hcon, bt_to_errno(reason));
8169 }
8170 
8171 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8172 {
8173 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8174 		return;
8175 
8176 	if (encrypt == 0x00) {
8177 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
8178 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8179 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
8180 			   chan->sec_level == BT_SECURITY_FIPS)
8181 			l2cap_chan_close(chan, ECONNREFUSED);
8182 	} else {
8183 		if (chan->sec_level == BT_SECURITY_MEDIUM)
8184 			__clear_chan_timer(chan);
8185 	}
8186 }
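
/* Summary of l2cap_check_encryption() for connection-oriented channels:
 *
 *	encrypt		sec_level		action
 *	0x00		BT_SECURITY_MEDIUM	arm L2CAP_ENC_TIMEOUT timer
 *	0x00		HIGH / FIPS		close channel (ECONNREFUSED)
 *	non-zero	BT_SECURITY_MEDIUM	clear the channel timer
 *
 * All other combinations leave the channel untouched.
 */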
8187 
8188 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
8189 {
8190 	struct l2cap_conn *conn = hcon->l2cap_data;
8191 	struct l2cap_chan *chan;
8192 
8193 	if (!conn)
8194 		return;
8195 
8196 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
8197 
8198 	mutex_lock(&conn->chan_lock);
8199 
8200 	list_for_each_entry(chan, &conn->chan_l, list) {
8201 		l2cap_chan_lock(chan);
8202 
8203 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
8204 		       state_to_string(chan->state));
8205 
8206 		if (chan->scid == L2CAP_CID_A2MP) {
8207 			l2cap_chan_unlock(chan);
8208 			continue;
8209 		}
8210 
8211 		if (!status && encrypt)
8212 			chan->sec_level = hcon->sec_level;
8213 
8214 		if (!__l2cap_no_conn_pending(chan)) {
8215 			l2cap_chan_unlock(chan);
8216 			continue;
8217 		}
8218 
8219 		if (!status && (chan->state == BT_CONNECTED ||
8220 				chan->state == BT_CONFIG)) {
8221 			chan->ops->resume(chan);
8222 			l2cap_check_encryption(chan, encrypt);
8223 			l2cap_chan_unlock(chan);
8224 			continue;
8225 		}
8226 
8227 		if (chan->state == BT_CONNECT) {
8228 			if (!status && l2cap_check_enc_key_size(hcon))
8229 				l2cap_start_connection(chan);
8230 			else
8231 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8232 		} else if (chan->state == BT_CONNECT2 &&
8233 			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
8234 			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
8235 			struct l2cap_conn_rsp rsp;
8236 			__u16 res, stat;
8237 
8238 			if (!status && l2cap_check_enc_key_size(hcon)) {
8239 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
8240 					res = L2CAP_CR_PEND;
8241 					stat = L2CAP_CS_AUTHOR_PEND;
8242 					chan->ops->defer(chan);
8243 				} else {
8244 					l2cap_state_change(chan, BT_CONFIG);
8245 					res = L2CAP_CR_SUCCESS;
8246 					stat = L2CAP_CS_NO_INFO;
8247 				}
8248 			} else {
8249 				l2cap_state_change(chan, BT_DISCONN);
8250 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8251 				res = L2CAP_CR_SEC_BLOCK;
8252 				stat = L2CAP_CS_NO_INFO;
8253 			}
8254 
8255 			rsp.scid   = cpu_to_le16(chan->dcid);
8256 			rsp.dcid   = cpu_to_le16(chan->scid);
8257 			rsp.result = cpu_to_le16(res);
8258 			rsp.status = cpu_to_le16(stat);
8259 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
8260 				       sizeof(rsp), &rsp);
8261 
8262 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
8263 			    res == L2CAP_CR_SUCCESS) {
8264 				char buf[128];
8265 				set_bit(CONF_REQ_SENT, &chan->conf_state);
8266 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
8267 					       L2CAP_CONF_REQ,
8268 					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
8269 					       buf);
8270 				chan->num_conf_req++;
8271 			}
8272 		}
8273 
8274 		l2cap_chan_unlock(chan);
8275 	}
8276 
8277 	mutex_unlock(&conn->chan_lock);
8278 }
8279 
8280 /* Append fragment into frame respecting the maximum length of rx_skb */
8281 static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
8282 			   u16 len)
8283 {
8284 	if (!conn->rx_skb) {
8285 		/* Allocate skb for the complete frame (with header) */
8286 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
8287 		if (!conn->rx_skb)
8288 			return -ENOMEM;
8289 		/* Init rx_len */
8290 		conn->rx_len = len;
8291 	}
8292 
8293 	/* Copy as much as the rx_skb can hold */
8294 	len = min_t(u16, len, skb->len);
8295 	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
8296 	skb_pull(skb, len);
8297 	conn->rx_len -= len;
8298 
8299 	return len;
8300 }
8301 
8302 static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
8303 {
8304 	struct sk_buff *rx_skb;
8305 	int len;
8306 
8307 	/* Append just enough to complete the header */
8308 	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);
8309 
8310 	/* If the header could not be read, just continue */
8311 	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
8312 		return len;
8313 
8314 	rx_skb = conn->rx_skb;
8315 	len = get_unaligned_le16(rx_skb->data);
8316 
8317 	/* Check if rx_skb has enough space to receive all fragments */
8318 	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
8319 		/* Update expected len */
8320 		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
8321 		return L2CAP_LEN_SIZE;
8322 	}
8323 
8324 	/* Reset conn->rx_skb since it will need to be reallocated in order to
8325 	 * fit all fragments.
8326 	 */
8327 	conn->rx_skb = NULL;
8328 
8329 	/* Reallocate rx_skb using the exact expected length */
8330 	len = l2cap_recv_frag(conn, rx_skb,
8331 			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
8332 	kfree_skb(rx_skb);
8333 
8334 	return len;
8335 }
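
/* Illustrative walk-through of l2cap_recv_len() (the short start fragment
 * case): if an ACL_START fragment is shorter than the 2-byte L2CAP length
 * field, l2cap_recv_acldata() below buffers it in an rx_skb sized for
 * conn->mtu.  The next ACL_CONT fragment then lands here, the length field
 * is completed, and either conn->rx_len is corrected to the real remaining
 * size (when the frame fits in the over-sized rx_skb) or the buffered
 * bytes are copied into a re-allocated rx_skb with the expected length and
 * the old one is freed.
 */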
8336 
8337 static void l2cap_recv_reset(struct l2cap_conn *conn)
8338 {
8339 	kfree_skb(conn->rx_skb);
8340 	conn->rx_skb = NULL;
8341 	conn->rx_len = 0;
8342 }
8343 
8344 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
8345 {
8346 	struct l2cap_conn *conn = hcon->l2cap_data;
8347 	int len;
8348 
8349 	/* For AMP controller do not create l2cap conn */
8350 	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
8351 		goto drop;
8352 
8353 	if (!conn)
8354 		conn = l2cap_conn_add(hcon);
8355 
8356 	if (!conn)
8357 		goto drop;
8358 
8359 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
8360 
8361 	switch (flags) {
8362 	case ACL_START:
8363 	case ACL_START_NO_FLUSH:
8364 	case ACL_COMPLETE:
8365 		if (conn->rx_skb) {
8366 			BT_ERR("Unexpected start frame (len %d)", skb->len);
8367 			l2cap_recv_reset(conn);
8368 			l2cap_conn_unreliable(conn, ECOMM);
8369 		}
8370 
8371 		/* The start fragment may not contain the L2CAP length, so just
8372 		 * copy the initial byte when that happens and use conn->mtu as
8373 		 * the expected length.
8374 		 */
8375 		if (skb->len < L2CAP_LEN_SIZE) {
8376 			if (l2cap_recv_frag(conn, skb, conn->mtu) < 0)
8377 				goto drop;
8378 			return;
8379 		}
8380 
8381 		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;
8382 
8383 		if (len == skb->len) {
8384 			/* Complete frame received */
8385 			l2cap_recv_frame(conn, skb);
8386 			return;
8387 		}
8388 
8389 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
8390 
8391 		if (skb->len > len) {
8392 			BT_ERR("Frame is too long (len %d, expected len %d)",
8393 			       skb->len, len);
8394 			l2cap_conn_unreliable(conn, ECOMM);
8395 			goto drop;
8396 		}
8397 
8398 		/* Append fragment into frame (with header) */
8399 		if (l2cap_recv_frag(conn, skb, len) < 0)
8400 			goto drop;
8401 
8402 		break;
8403 
8404 	case ACL_CONT:
8405 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
8406 
8407 		if (!conn->rx_skb) {
8408 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
8409 			l2cap_conn_unreliable(conn, ECOMM);
8410 			goto drop;
8411 		}
8412 
8413 		/* Complete the L2CAP length if it has not been read */
8414 		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
8415 			if (l2cap_recv_len(conn, skb) < 0) {
8416 				l2cap_conn_unreliable(conn, ECOMM);
8417 				goto drop;
8418 			}
8419 
8420 			/* If the header still could not be read, just continue */
8421 			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
8422 				return;
8423 		}
8424 
8425 		if (skb->len > conn->rx_len) {
8426 			BT_ERR("Fragment is too long (len %d, expected %d)",
8427 			       skb->len, conn->rx_len);
8428 			l2cap_recv_reset(conn);
8429 			l2cap_conn_unreliable(conn, ECOMM);
8430 			goto drop;
8431 		}
8432 
8433 		/* Append fragment into frame (with header) */
8434 		l2cap_recv_frag(conn, skb, skb->len);
8435 
8436 		if (!conn->rx_len) {
8437 			/* Complete frame received. l2cap_recv_frame
8438 			 * takes ownership of the skb so set the global
8439 			 * rx_skb pointer to NULL first.
8440 			 */
8441 			struct sk_buff *rx_skb = conn->rx_skb;
8442 			conn->rx_skb = NULL;
8443 			l2cap_recv_frame(conn, rx_skb);
8444 		}
8445 		break;
8446 	}
8447 
8448 drop:
8449 	kfree_skb(skb);
8450 }
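
/* Illustrative reassembly example for l2cap_recv_acldata() (numbers are
 * made up): an L2CAP frame with a length field of 100 is 104 bytes on the
 * wire including the 4-byte header.  If HCI delivers it as an ACL_START
 * fragment of 60 bytes followed by an ACL_CONT fragment of 44 bytes:
 *
 *	ACL_START: len = 100 + L2CAP_HDR_SIZE = 104 != 60, so an rx_skb of
 *		   104 bytes is allocated, the 60 bytes are copied and
 *		   conn->rx_len becomes 44.
 *	ACL_CONT:  the remaining 44 bytes are appended, conn->rx_len drops
 *		   to 0 and the complete frame is handed to
 *		   l2cap_recv_frame().
 */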
8451 
8452 static struct hci_cb l2cap_cb = {
8453 	.name		= "L2CAP",
8454 	.connect_cfm	= l2cap_connect_cfm,
8455 	.disconn_cfm	= l2cap_disconn_cfm,
8456 	.security_cfm	= l2cap_security_cfm,
8457 };
8458 
8459 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8460 {
8461 	struct l2cap_chan *c;
8462 
8463 	read_lock(&chan_list_lock);
8464 
8465 	list_for_each_entry(c, &chan_list, global_l) {
8466 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8467 			   &c->src, c->src_type, &c->dst, c->dst_type,
8468 			   c->state, __le16_to_cpu(c->psm),
8469 			   c->scid, c->dcid, c->imtu, c->omtu,
8470 			   c->sec_level, c->mode);
8471 	}
8472 
8473 	read_unlock(&chan_list_lock);
8474 
8475 	return 0;
8476 }
8477 
8478 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
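
/* With debugfs mounted at /sys/kernel/debug and the Bluetooth core's
 * "bluetooth" debugfs directory (bt_debugfs) in place, the file created in
 * l2cap_init() below can be read to dump every global channel, e.g.:
 *
 *	# cat /sys/kernel/debug/bluetooth/l2cap
 *
 * One line per channel, matching the seq_printf() above:
 *
 *	src (src_type) dst (dst_type) state psm scid dcid imtu omtu sec_level mode
 *
 * with scid/dcid printed in hex and the remaining fields in decimal.
 */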
8479 
8480 static struct dentry *l2cap_debugfs;
8481 
8482 int __init l2cap_init(void)
8483 {
8484 	int err;
8485 
8486 	err = l2cap_init_sockets();
8487 	if (err < 0)
8488 		return err;
8489 
8490 	hci_register_cb(&l2cap_cb);
8491 
8492 	if (IS_ERR_OR_NULL(bt_debugfs))
8493 		return 0;
8494 
8495 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8496 					    NULL, &l2cap_debugfs_fops);
8497 
8498 	return 0;
8499 }
8500 
8501 void l2cap_exit(void)
8502 {
8503 	debugfs_remove(l2cap_debugfs);
8504 	hci_unregister_cb(&l2cap_cb);
8505 	l2cap_cleanup_sockets();
8506 }
8507 
8508 module_param(disable_ertm, bool, 0644);
8509 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
8510 
8511 module_param(enable_ecred, bool, 0644);
8512 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
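
/* Both parameters live under the bluetooth module (l2cap_core.c is built
 * into bluetooth.ko in mainline), so they can be set on the kernel command
 * line or, thanks to the 0644 permissions, flipped at runtime.
 * Illustrative usage:
 *
 *	bluetooth.disable_ertm=1                   (kernel command line)
 *	echo 1 > /sys/module/bluetooth/parameters/enable_ecred
 *
 * Runtime changes generally only affect channels set up after the write.
 */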
8513