xref: /openbmc/linux/net/bluetooth/l2cap_core.c (revision 619775c3)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 bool enable_ecred;
49 
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
54 
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 				       u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
58 			   void *data);
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
61 
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 		     struct sk_buff_head *skbs, u8 event);
64 
65 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
66 {
67 	if (link_type == LE_LINK) {
68 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
69 			return BDADDR_LE_PUBLIC;
70 		else
71 			return BDADDR_LE_RANDOM;
72 	}
73 
74 	return BDADDR_BREDR;
75 }
76 
77 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
78 {
79 	return bdaddr_type(hcon->type, hcon->src_type);
80 }
81 
82 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
83 {
84 	return bdaddr_type(hcon->type, hcon->dst_type);
85 }
86 
87 /* ---- L2CAP channels ---- */
88 
89 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
90 						   u16 cid)
91 {
92 	struct l2cap_chan *c;
93 
94 	list_for_each_entry(c, &conn->chan_l, list) {
95 		if (c->dcid == cid)
96 			return c;
97 	}
98 	return NULL;
99 }
100 
101 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
102 						   u16 cid)
103 {
104 	struct l2cap_chan *c;
105 
106 	list_for_each_entry(c, &conn->chan_l, list) {
107 		if (c->scid == cid)
108 			return c;
109 	}
110 	return NULL;
111 }
112 
113 /* Find channel with given SCID.
114  * Returns locked channel. */
115 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
116 						 u16 cid)
117 {
118 	struct l2cap_chan *c;
119 
120 	mutex_lock(&conn->chan_lock);
121 	c = __l2cap_get_chan_by_scid(conn, cid);
122 	if (c)
123 		l2cap_chan_lock(c);
124 	mutex_unlock(&conn->chan_lock);
125 
126 	return c;
127 }
128 
129 /* Find channel with given DCID.
130  * Returns locked channel.
131  */
132 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
133 						 u16 cid)
134 {
135 	struct l2cap_chan *c;
136 
137 	mutex_lock(&conn->chan_lock);
138 	c = __l2cap_get_chan_by_dcid(conn, cid);
139 	if (c)
140 		l2cap_chan_lock(c);
141 	mutex_unlock(&conn->chan_lock);
142 
143 	return c;
144 }
145 
146 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
147 						    u8 ident)
148 {
149 	struct l2cap_chan *c;
150 
151 	list_for_each_entry(c, &conn->chan_l, list) {
152 		if (c->ident == ident)
153 			return c;
154 	}
155 	return NULL;
156 }
157 
158 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
159 						  u8 ident)
160 {
161 	struct l2cap_chan *c;
162 
163 	mutex_lock(&conn->chan_lock);
164 	c = __l2cap_get_chan_by_ident(conn, ident);
165 	if (c)
166 		l2cap_chan_lock(c);
167 	mutex_unlock(&conn->chan_lock);
168 
169 	return c;
170 }
171 
172 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
173 						      u8 src_type)
174 {
175 	struct l2cap_chan *c;
176 
177 	list_for_each_entry(c, &chan_list, global_l) {
178 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
179 			continue;
180 
181 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
182 			continue;
183 
184 		if (c->sport == psm && !bacmp(&c->src, src))
185 			return c;
186 	}
187 	return NULL;
188 }
189 
190 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
191 {
192 	int err;
193 
194 	write_lock(&chan_list_lock);
195 
196 	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
197 		err = -EADDRINUSE;
198 		goto done;
199 	}
200 
201 	if (psm) {
202 		chan->psm = psm;
203 		chan->sport = psm;
204 		err = 0;
205 	} else {
206 		u16 p, start, end, incr;
207 
208 		if (chan->src_type == BDADDR_BREDR) {
209 			start = L2CAP_PSM_DYN_START;
210 			end = L2CAP_PSM_AUTO_END;
211 			incr = 2;
212 		} else {
213 			start = L2CAP_PSM_LE_DYN_START;
214 			end = L2CAP_PSM_LE_DYN_END;
215 			incr = 1;
216 		}
217 
218 		err = -EINVAL;
219 		for (p = start; p <= end; p += incr)
220 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
221 							 chan->src_type)) {
222 				chan->psm   = cpu_to_le16(p);
223 				chan->sport = cpu_to_le16(p);
224 				err = 0;
225 				break;
226 			}
227 	}
228 
229 done:
230 	write_unlock(&chan_list_lock);
231 	return err;
232 }
233 EXPORT_SYMBOL_GPL(l2cap_add_psm);
234 
235 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
236 {
237 	write_lock(&chan_list_lock);
238 
239 	/* Override the defaults (which are for conn-oriented) */
240 	chan->omtu = L2CAP_DEFAULT_MTU;
241 	chan->chan_type = L2CAP_CHAN_FIXED;
242 
243 	chan->scid = scid;
244 
245 	write_unlock(&chan_list_lock);
246 
247 	return 0;
248 }
249 
250 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
251 {
252 	u16 cid, dyn_end;
253 
254 	if (conn->hcon->type == LE_LINK)
255 		dyn_end = L2CAP_CID_LE_DYN_END;
256 	else
257 		dyn_end = L2CAP_CID_DYN_END;
258 
259 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
260 		if (!__l2cap_get_chan_by_scid(conn, cid))
261 			return cid;
262 	}
263 
264 	return 0;
265 }
266 
267 static void l2cap_state_change(struct l2cap_chan *chan, int state)
268 {
269 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
270 	       state_to_string(state));
271 
272 	chan->state = state;
273 	chan->ops->state_change(chan, state, 0);
274 }
275 
276 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
277 						int state, int err)
278 {
279 	chan->state = state;
280 	chan->ops->state_change(chan, chan->state, err);
281 }
282 
283 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
284 {
285 	chan->ops->state_change(chan, chan->state, err);
286 }
287 
288 static void __set_retrans_timer(struct l2cap_chan *chan)
289 {
290 	if (!delayed_work_pending(&chan->monitor_timer) &&
291 	    chan->retrans_timeout) {
292 		l2cap_set_timer(chan, &chan->retrans_timer,
293 				msecs_to_jiffies(chan->retrans_timeout));
294 	}
295 }
296 
297 static void __set_monitor_timer(struct l2cap_chan *chan)
298 {
299 	__clear_retrans_timer(chan);
300 	if (chan->monitor_timeout) {
301 		l2cap_set_timer(chan, &chan->monitor_timer,
302 				msecs_to_jiffies(chan->monitor_timeout));
303 	}
304 }
305 
306 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
307 					       u16 seq)
308 {
309 	struct sk_buff *skb;
310 
311 	skb_queue_walk(head, skb) {
312 		if (bt_cb(skb)->l2cap.txseq == seq)
313 			return skb;
314 	}
315 
316 	return NULL;
317 }
318 
319 /* ---- L2CAP sequence number lists ---- */
320 
321 /* For ERTM, ordered lists of sequence numbers must be tracked for
322  * SREJ requests that are received and for frames that are to be
323  * retransmitted. These seq_list functions implement a singly-linked
324  * list in an array, where membership in the list can also be checked
325  * in constant time. Items can also be added to the tail of the list
326  * and removed from the head in constant time, without further memory
327  * allocs or frees.
328  */
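
/* Illustrative usage sketch (comment only, not compiled): how the
 * l2cap_seq_list_* helpers below fit together, e.g. for queueing SREJ'd
 * sequence numbers. Everything except the l2cap_seq_list_* calls is
 * hypothetical.
 *
 *	struct l2cap_seq_list list;
 *	u16 seq;
 *
 *	if (l2cap_seq_list_init(&list, tx_win))	// array sized to the TX window
 *		return -ENOMEM;
 *
 *	l2cap_seq_list_append(&list, 5);	// O(1) append at the tail
 *	l2cap_seq_list_append(&list, 9);
 *	l2cap_seq_list_append(&list, 5);	// duplicate, silently ignored
 *
 *	if (l2cap_seq_list_contains(&list, 9))	// O(1) membership test
 *		seq = l2cap_seq_list_pop(&list);	// returns 5; 9 on the next call
 *
 *	l2cap_seq_list_free(&list);
 */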
329 
330 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
331 {
332 	size_t alloc_size, i;
333 
334 	/* Allocated size is a power of 2 to map sequence numbers
335 	 * (which may be up to 14 bits) into a smaller array that is
336 	 * sized for the negotiated ERTM transmit windows.
337 	 */
338 	alloc_size = roundup_pow_of_two(size);
339 
340 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
341 	if (!seq_list->list)
342 		return -ENOMEM;
343 
344 	seq_list->mask = alloc_size - 1;
345 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
346 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
347 	for (i = 0; i < alloc_size; i++)
348 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
349 
350 	return 0;
351 }
352 
353 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
354 {
355 	kfree(seq_list->list);
356 }
357 
358 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
359 					   u16 seq)
360 {
361 	/* Constant-time check for list membership */
362 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
363 }
364 
365 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
366 {
367 	u16 seq = seq_list->head;
368 	u16 mask = seq_list->mask;
369 
370 	seq_list->head = seq_list->list[seq & mask];
371 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
372 
373 	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
374 		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
376 	}
377 
378 	return seq;
379 }
380 
381 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
382 {
383 	u16 i;
384 
385 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
386 		return;
387 
388 	for (i = 0; i <= seq_list->mask; i++)
389 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
390 
391 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
392 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
393 }
394 
395 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
396 {
397 	u16 mask = seq_list->mask;
398 
399 	/* All appends happen in constant time */
400 
401 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
402 		return;
403 
404 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
405 		seq_list->head = seq;
406 	else
407 		seq_list->list[seq_list->tail & mask] = seq;
408 
409 	seq_list->tail = seq;
410 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
411 }
412 
413 static void l2cap_chan_timeout(struct work_struct *work)
414 {
415 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
416 					       chan_timer.work);
417 	struct l2cap_conn *conn = chan->conn;
418 	int reason;
419 
420 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
421 
422 	mutex_lock(&conn->chan_lock);
423 	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
424 	 * this work. No need to call l2cap_chan_hold(chan) here again.
425 	 */
426 	l2cap_chan_lock(chan);
427 
428 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
429 		reason = ECONNREFUSED;
430 	else if (chan->state == BT_CONNECT &&
431 		 chan->sec_level != BT_SECURITY_SDP)
432 		reason = ECONNREFUSED;
433 	else
434 		reason = ETIMEDOUT;
435 
436 	l2cap_chan_close(chan, reason);
437 
438 	chan->ops->close(chan);
439 
440 	l2cap_chan_unlock(chan);
441 	l2cap_chan_put(chan);
442 
443 	mutex_unlock(&conn->chan_lock);
444 }
445 
446 struct l2cap_chan *l2cap_chan_create(void)
447 {
448 	struct l2cap_chan *chan;
449 
450 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
451 	if (!chan)
452 		return NULL;
453 
454 	mutex_init(&chan->lock);
455 
456 	/* Set default lock nesting level */
457 	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
458 
459 	write_lock(&chan_list_lock);
460 	list_add(&chan->global_l, &chan_list);
461 	write_unlock(&chan_list_lock);
462 
463 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
464 
465 	chan->state = BT_OPEN;
466 
467 	kref_init(&chan->kref);
468 
469 	/* This flag is cleared in l2cap_chan_ready() */
470 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
471 
472 	BT_DBG("chan %p", chan);
473 
474 	return chan;
475 }
476 EXPORT_SYMBOL_GPL(l2cap_chan_create);
477 
478 static void l2cap_chan_destroy(struct kref *kref)
479 {
480 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
481 
482 	BT_DBG("chan %p", chan);
483 
484 	write_lock(&chan_list_lock);
485 	list_del(&chan->global_l);
486 	write_unlock(&chan_list_lock);
487 
488 	kfree(chan);
489 }
490 
491 void l2cap_chan_hold(struct l2cap_chan *c)
492 {
493 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
494 
495 	kref_get(&c->kref);
496 }
497 
498 void l2cap_chan_put(struct l2cap_chan *c)
499 {
500 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
501 
502 	kref_put(&c->kref, l2cap_chan_destroy);
503 }
504 EXPORT_SYMBOL_GPL(l2cap_chan_put);
505 
506 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
507 {
508 	chan->fcs  = L2CAP_FCS_CRC16;
509 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
510 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
511 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
512 	chan->remote_max_tx = chan->max_tx;
513 	chan->remote_tx_win = chan->tx_win;
514 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
515 	chan->sec_level = BT_SECURITY_LOW;
516 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
517 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
518 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
519 	chan->conf_state = 0;
520 
521 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
522 }
523 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
524 
525 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
526 {
527 	chan->sdu = NULL;
528 	chan->sdu_last_frag = NULL;
529 	chan->sdu_len = 0;
530 	chan->tx_credits = tx_credits;
531 	/* Derive MPS from connection MTU to stop HCI fragmentation */
532 	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
533 	/* Give enough credits for a full packet */
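	/* Worked example with illustrative numbers (not mandated anywhere):
	 * imtu 512 on a link with conn->mtu 251 gives mps = min(512, 251 - 4)
	 * = 247 and rx_credits = 512 / 247 + 1 = 3, i.e. enough K-frames to
	 * reassemble one full SDU.
	 */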
534 	chan->rx_credits = (chan->imtu / chan->mps) + 1;
535 
536 	skb_queue_head_init(&chan->tx_q);
537 }
538 
539 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
540 {
541 	l2cap_le_flowctl_init(chan, tx_credits);
542 
543 	/* L2CAP implementations shall support a minimum MPS of 64 octets */
544 	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
545 		chan->mps = L2CAP_ECRED_MIN_MPS;
546 		chan->rx_credits = (chan->imtu / chan->mps) + 1;
547 	}
548 }
549 
550 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
551 {
552 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
553 	       __le16_to_cpu(chan->psm), chan->dcid);
554 
555 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
556 
557 	chan->conn = conn;
558 
559 	switch (chan->chan_type) {
560 	case L2CAP_CHAN_CONN_ORIENTED:
561 		/* Alloc CID for connection-oriented socket */
562 		chan->scid = l2cap_alloc_cid(conn);
563 		if (conn->hcon->type == ACL_LINK)
564 			chan->omtu = L2CAP_DEFAULT_MTU;
565 		break;
566 
567 	case L2CAP_CHAN_CONN_LESS:
568 		/* Connectionless socket */
569 		chan->scid = L2CAP_CID_CONN_LESS;
570 		chan->dcid = L2CAP_CID_CONN_LESS;
571 		chan->omtu = L2CAP_DEFAULT_MTU;
572 		break;
573 
574 	case L2CAP_CHAN_FIXED:
575 		/* Caller will set CID and CID specific MTU values */
576 		break;
577 
578 	default:
579 		/* Raw socket can send/recv signalling messages only */
580 		chan->scid = L2CAP_CID_SIGNALING;
581 		chan->dcid = L2CAP_CID_SIGNALING;
582 		chan->omtu = L2CAP_DEFAULT_MTU;
583 	}
584 
585 	chan->local_id		= L2CAP_BESTEFFORT_ID;
586 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
587 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
588 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
589 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
590 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
591 
592 	l2cap_chan_hold(chan);
593 
594 	/* Only keep a reference for fixed channels if they requested it */
595 	if (chan->chan_type != L2CAP_CHAN_FIXED ||
596 	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
597 		hci_conn_hold(conn->hcon);
598 
599 	list_add(&chan->list, &conn->chan_l);
600 }
601 
602 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
603 {
604 	mutex_lock(&conn->chan_lock);
605 	__l2cap_chan_add(conn, chan);
606 	mutex_unlock(&conn->chan_lock);
607 }
608 
609 void l2cap_chan_del(struct l2cap_chan *chan, int err)
610 {
611 	struct l2cap_conn *conn = chan->conn;
612 
613 	__clear_chan_timer(chan);
614 
615 	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
616 	       state_to_string(chan->state));
617 
618 	chan->ops->teardown(chan, err);
619 
620 	if (conn) {
621 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
622 		/* Delete from channel list */
623 		list_del(&chan->list);
624 
625 		l2cap_chan_put(chan);
626 
627 		chan->conn = NULL;
628 
629 		/* Reference was only held for non-fixed channels or
630 		 * fixed channels that explicitly requested it using the
631 		 * FLAG_HOLD_HCI_CONN flag.
632 		 */
633 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
634 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
635 			hci_conn_drop(conn->hcon);
636 
637 		if (mgr && mgr->bredr_chan == chan)
638 			mgr->bredr_chan = NULL;
639 	}
640 
641 	if (chan->hs_hchan) {
642 		struct hci_chan *hs_hchan = chan->hs_hchan;
643 
644 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
645 		amp_disconnect_logical_link(hs_hchan);
646 	}
647 
648 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
649 		return;
650 
651 	switch (chan->mode) {
652 	case L2CAP_MODE_BASIC:
653 		break;
654 
655 	case L2CAP_MODE_LE_FLOWCTL:
656 	case L2CAP_MODE_EXT_FLOWCTL:
657 		skb_queue_purge(&chan->tx_q);
658 		break;
659 
660 	case L2CAP_MODE_ERTM:
661 		__clear_retrans_timer(chan);
662 		__clear_monitor_timer(chan);
663 		__clear_ack_timer(chan);
664 
665 		skb_queue_purge(&chan->srej_q);
666 
667 		l2cap_seq_list_free(&chan->srej_list);
668 		l2cap_seq_list_free(&chan->retrans_list);
669 		fallthrough;
670 
671 	case L2CAP_MODE_STREAMING:
672 		skb_queue_purge(&chan->tx_q);
673 		break;
674 	}
675 
676 	return;
677 }
678 EXPORT_SYMBOL_GPL(l2cap_chan_del);
679 
680 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
681 			      void *data)
682 {
683 	struct l2cap_chan *chan;
684 
685 	list_for_each_entry(chan, &conn->chan_l, list) {
686 		func(chan, data);
687 	}
688 }
689 
690 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
691 		     void *data)
692 {
693 	if (!conn)
694 		return;
695 
696 	mutex_lock(&conn->chan_lock);
697 	__l2cap_chan_list(conn, func, data);
698 	mutex_unlock(&conn->chan_lock);
699 }
701 EXPORT_SYMBOL_GPL(l2cap_chan_list);
702 
703 static void l2cap_conn_update_id_addr(struct work_struct *work)
704 {
705 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
706 					       id_addr_update_work);
707 	struct hci_conn *hcon = conn->hcon;
708 	struct l2cap_chan *chan;
709 
710 	mutex_lock(&conn->chan_lock);
711 
712 	list_for_each_entry(chan, &conn->chan_l, list) {
713 		l2cap_chan_lock(chan);
714 		bacpy(&chan->dst, &hcon->dst);
715 		chan->dst_type = bdaddr_dst_type(hcon);
716 		l2cap_chan_unlock(chan);
717 	}
718 
719 	mutex_unlock(&conn->chan_lock);
720 }
721 
722 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
723 {
724 	struct l2cap_conn *conn = chan->conn;
725 	struct l2cap_le_conn_rsp rsp;
726 	u16 result;
727 
728 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
729 		result = L2CAP_CR_LE_AUTHORIZATION;
730 	else
731 		result = L2CAP_CR_LE_BAD_PSM;
732 
733 	l2cap_state_change(chan, BT_DISCONN);
734 
735 	rsp.dcid    = cpu_to_le16(chan->scid);
736 	rsp.mtu     = cpu_to_le16(chan->imtu);
737 	rsp.mps     = cpu_to_le16(chan->mps);
738 	rsp.credits = cpu_to_le16(chan->rx_credits);
739 	rsp.result  = cpu_to_le16(result);
740 
741 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
742 		       &rsp);
743 }
744 
745 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
746 {
747 	struct l2cap_conn *conn = chan->conn;
748 	struct l2cap_ecred_conn_rsp rsp;
749 	u16 result;
750 
751 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
752 		result = L2CAP_CR_LE_AUTHORIZATION;
753 	else
754 		result = L2CAP_CR_LE_BAD_PSM;
755 
756 	l2cap_state_change(chan, BT_DISCONN);
757 
758 	memset(&rsp, 0, sizeof(rsp));
759 
760 	rsp.result  = cpu_to_le16(result);
761 
762 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
763 		       &rsp);
764 }
765 
766 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
767 {
768 	struct l2cap_conn *conn = chan->conn;
769 	struct l2cap_conn_rsp rsp;
770 	u16 result;
771 
772 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
773 		result = L2CAP_CR_SEC_BLOCK;
774 	else
775 		result = L2CAP_CR_BAD_PSM;
776 
777 	l2cap_state_change(chan, BT_DISCONN);
778 
779 	rsp.scid   = cpu_to_le16(chan->dcid);
780 	rsp.dcid   = cpu_to_le16(chan->scid);
781 	rsp.result = cpu_to_le16(result);
782 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
783 
784 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
785 }
786 
787 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
788 {
789 	struct l2cap_conn *conn = chan->conn;
790 
791 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
792 
793 	switch (chan->state) {
794 	case BT_LISTEN:
795 		chan->ops->teardown(chan, 0);
796 		break;
797 
798 	case BT_CONNECTED:
799 	case BT_CONFIG:
800 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
801 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
802 			l2cap_send_disconn_req(chan, reason);
803 		} else
804 			l2cap_chan_del(chan, reason);
805 		break;
806 
807 	case BT_CONNECT2:
808 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
809 			if (conn->hcon->type == ACL_LINK)
810 				l2cap_chan_connect_reject(chan);
811 			else if (conn->hcon->type == LE_LINK) {
812 				switch (chan->mode) {
813 				case L2CAP_MODE_LE_FLOWCTL:
814 					l2cap_chan_le_connect_reject(chan);
815 					break;
816 				case L2CAP_MODE_EXT_FLOWCTL:
817 					l2cap_chan_ecred_connect_reject(chan);
818 					break;
819 				}
820 			}
821 		}
822 
823 		l2cap_chan_del(chan, reason);
824 		break;
825 
826 	case BT_CONNECT:
827 	case BT_DISCONN:
828 		l2cap_chan_del(chan, reason);
829 		break;
830 
831 	default:
832 		chan->ops->teardown(chan, 0);
833 		break;
834 	}
835 }
836 EXPORT_SYMBOL(l2cap_chan_close);
837 
838 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
839 {
840 	switch (chan->chan_type) {
841 	case L2CAP_CHAN_RAW:
842 		switch (chan->sec_level) {
843 		case BT_SECURITY_HIGH:
844 		case BT_SECURITY_FIPS:
845 			return HCI_AT_DEDICATED_BONDING_MITM;
846 		case BT_SECURITY_MEDIUM:
847 			return HCI_AT_DEDICATED_BONDING;
848 		default:
849 			return HCI_AT_NO_BONDING;
850 		}
851 		break;
852 	case L2CAP_CHAN_CONN_LESS:
853 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
854 			if (chan->sec_level == BT_SECURITY_LOW)
855 				chan->sec_level = BT_SECURITY_SDP;
856 		}
857 		if (chan->sec_level == BT_SECURITY_HIGH ||
858 		    chan->sec_level == BT_SECURITY_FIPS)
859 			return HCI_AT_NO_BONDING_MITM;
860 		else
861 			return HCI_AT_NO_BONDING;
862 		break;
863 	case L2CAP_CHAN_CONN_ORIENTED:
864 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
865 			if (chan->sec_level == BT_SECURITY_LOW)
866 				chan->sec_level = BT_SECURITY_SDP;
867 
868 			if (chan->sec_level == BT_SECURITY_HIGH ||
869 			    chan->sec_level == BT_SECURITY_FIPS)
870 				return HCI_AT_NO_BONDING_MITM;
871 			else
872 				return HCI_AT_NO_BONDING;
873 		}
874 		fallthrough;
875 
876 	default:
877 		switch (chan->sec_level) {
878 		case BT_SECURITY_HIGH:
879 		case BT_SECURITY_FIPS:
880 			return HCI_AT_GENERAL_BONDING_MITM;
881 		case BT_SECURITY_MEDIUM:
882 			return HCI_AT_GENERAL_BONDING;
883 		default:
884 			return HCI_AT_NO_BONDING;
885 		}
886 		break;
887 	}
888 }
889 
890 /* Service level security */
891 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
892 {
893 	struct l2cap_conn *conn = chan->conn;
894 	__u8 auth_type;
895 
896 	if (conn->hcon->type == LE_LINK)
897 		return smp_conn_security(conn->hcon, chan->sec_level);
898 
899 	auth_type = l2cap_get_auth_type(chan);
900 
901 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
902 				 initiator);
903 }
904 
905 static u8 l2cap_get_ident(struct l2cap_conn *conn)
906 {
907 	u8 id;
908 
909 	/* Get next available identifier.
910 	 *    1 - 128 are used by the kernel.
911 	 *  129 - 199 are reserved.
912 	 *  200 - 254 are used by utilities like l2ping, etc.
913 	 */
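	/* Consequence of the ranges above: once tx_ident has reached 128 the
	 * next call wraps back to 1, so this function never hands out idents
	 * from the reserved (129 - 199) or utility (200 - 254) ranges.
	 */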
914 
915 	mutex_lock(&conn->ident_lock);
916 
917 	if (++conn->tx_ident > 128)
918 		conn->tx_ident = 1;
919 
920 	id = conn->tx_ident;
921 
922 	mutex_unlock(&conn->ident_lock);
923 
924 	return id;
925 }
926 
927 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
928 			   void *data)
929 {
930 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
931 	u8 flags;
932 
933 	BT_DBG("code 0x%2.2x", code);
934 
935 	if (!skb)
936 		return;
937 
938 	/* Use NO_FLUSH if supported or we have an LE link (which does
939 	 * not support auto-flushing packets) */
940 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
941 	    conn->hcon->type == LE_LINK)
942 		flags = ACL_START_NO_FLUSH;
943 	else
944 		flags = ACL_START;
945 
946 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
947 	skb->priority = HCI_PRIO_MAX;
948 
949 	hci_send_acl(conn->hchan, skb, flags);
950 }
951 
952 static bool __chan_is_moving(struct l2cap_chan *chan)
953 {
954 	return chan->move_state != L2CAP_MOVE_STABLE &&
955 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
956 }
957 
958 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
959 {
960 	struct hci_conn *hcon = chan->conn->hcon;
961 	u16 flags;
962 
963 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
964 	       skb->priority);
965 
966 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
967 		if (chan->hs_hchan)
968 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
969 		else
970 			kfree_skb(skb);
971 
972 		return;
973 	}
974 
975 	/* Use NO_FLUSH for LE links (where this is the only option) or
976 	 * if the BR/EDR link supports it and flushing has not been
977 	 * explicitly requested (through FLAG_FLUSHABLE).
978 	 */
979 	if (hcon->type == LE_LINK ||
980 	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
981 	     lmp_no_flush_capable(hcon->hdev)))
982 		flags = ACL_START_NO_FLUSH;
983 	else
984 		flags = ACL_START;
985 
986 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
987 	hci_send_acl(chan->conn->hchan, skb, flags);
988 }
989 
990 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
991 {
992 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
993 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
994 
995 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
996 		/* S-Frame */
997 		control->sframe = 1;
998 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
999 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1000 
1001 		control->sar = 0;
1002 		control->txseq = 0;
1003 	} else {
1004 		/* I-Frame */
1005 		control->sframe = 0;
1006 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1007 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1008 
1009 		control->poll = 0;
1010 		control->super = 0;
1011 	}
1012 }
1013 
1014 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1015 {
1016 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1017 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1018 
1019 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1020 		/* S-Frame */
1021 		control->sframe = 1;
1022 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1023 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1024 
1025 		control->sar = 0;
1026 		control->txseq = 0;
1027 	} else {
1028 		/* I-Frame */
1029 		control->sframe = 0;
1030 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1031 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1032 
1033 		control->poll = 0;
1034 		control->super = 0;
1035 	}
1036 }
1037 
1038 static inline void __unpack_control(struct l2cap_chan *chan,
1039 				    struct sk_buff *skb)
1040 {
1041 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1042 		__unpack_extended_control(get_unaligned_le32(skb->data),
1043 					  &bt_cb(skb)->l2cap);
1044 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1045 	} else {
1046 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1047 					  &bt_cb(skb)->l2cap);
1048 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1049 	}
1050 }
1051 
1052 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1053 {
1054 	u32 packed;
1055 
1056 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1057 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1058 
1059 	if (control->sframe) {
1060 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1061 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1062 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1063 	} else {
1064 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1065 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1066 	}
1067 
1068 	return packed;
1069 }
1070 
1071 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1072 {
1073 	u16 packed;
1074 
1075 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1076 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1077 
1078 	if (control->sframe) {
1079 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1080 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1081 		packed |= L2CAP_CTRL_FRAME_TYPE;
1082 	} else {
1083 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1084 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1085 	}
1086 
1087 	return packed;
1088 }
1089 
1090 static inline void __pack_control(struct l2cap_chan *chan,
1091 				  struct l2cap_ctrl *control,
1092 				  struct sk_buff *skb)
1093 {
1094 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1095 		put_unaligned_le32(__pack_extended_control(control),
1096 				   skb->data + L2CAP_HDR_SIZE);
1097 	} else {
1098 		put_unaligned_le16(__pack_enhanced_control(control),
1099 				   skb->data + L2CAP_HDR_SIZE);
1100 	}
1101 }
1102 
1103 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1104 {
1105 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1106 		return L2CAP_EXT_HDR_SIZE;
1107 	else
1108 		return L2CAP_ENH_HDR_SIZE;
1109 }
1110 
1111 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1112 					       u32 control)
1113 {
1114 	struct sk_buff *skb;
1115 	struct l2cap_hdr *lh;
1116 	int hlen = __ertm_hdr_size(chan);
1117 
1118 	if (chan->fcs == L2CAP_FCS_CRC16)
1119 		hlen += L2CAP_FCS_SIZE;
1120 
1121 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
1122 
1123 	if (!skb)
1124 		return ERR_PTR(-ENOMEM);
1125 
1126 	lh = skb_put(skb, L2CAP_HDR_SIZE);
1127 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1128 	lh->cid = cpu_to_le16(chan->dcid);
1129 
1130 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1131 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1132 	else
1133 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1134 
1135 	if (chan->fcs == L2CAP_FCS_CRC16) {
1136 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1137 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1138 	}
1139 
1140 	skb->priority = HCI_PRIO_MAX;
1141 	return skb;
1142 }
1143 
1144 static void l2cap_send_sframe(struct l2cap_chan *chan,
1145 			      struct l2cap_ctrl *control)
1146 {
1147 	struct sk_buff *skb;
1148 	u32 control_field;
1149 
1150 	BT_DBG("chan %p, control %p", chan, control);
1151 
1152 	if (!control->sframe)
1153 		return;
1154 
1155 	if (__chan_is_moving(chan))
1156 		return;
1157 
1158 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1159 	    !control->poll)
1160 		control->final = 1;
1161 
1162 	if (control->super == L2CAP_SUPER_RR)
1163 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
1164 	else if (control->super == L2CAP_SUPER_RNR)
1165 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1166 
1167 	if (control->super != L2CAP_SUPER_SREJ) {
1168 		chan->last_acked_seq = control->reqseq;
1169 		__clear_ack_timer(chan);
1170 	}
1171 
1172 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1173 	       control->final, control->poll, control->super);
1174 
1175 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1176 		control_field = __pack_extended_control(control);
1177 	else
1178 		control_field = __pack_enhanced_control(control);
1179 
1180 	skb = l2cap_create_sframe_pdu(chan, control_field);
1181 	if (!IS_ERR(skb))
1182 		l2cap_do_send(chan, skb);
1183 }
1184 
1185 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1186 {
1187 	struct l2cap_ctrl control;
1188 
1189 	BT_DBG("chan %p, poll %d", chan, poll);
1190 
1191 	memset(&control, 0, sizeof(control));
1192 	control.sframe = 1;
1193 	control.poll = poll;
1194 
1195 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1196 		control.super = L2CAP_SUPER_RNR;
1197 	else
1198 		control.super = L2CAP_SUPER_RR;
1199 
1200 	control.reqseq = chan->buffer_seq;
1201 	l2cap_send_sframe(chan, &control);
1202 }
1203 
1204 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1205 {
1206 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1207 		return true;
1208 
1209 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1210 }
1211 
1212 static bool __amp_capable(struct l2cap_chan *chan)
1213 {
1214 	struct l2cap_conn *conn = chan->conn;
1215 	struct hci_dev *hdev;
1216 	bool amp_available = false;
1217 
1218 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1219 		return false;
1220 
1221 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1222 		return false;
1223 
1224 	read_lock(&hci_dev_list_lock);
1225 	list_for_each_entry(hdev, &hci_dev_list, list) {
1226 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1227 		    test_bit(HCI_UP, &hdev->flags)) {
1228 			amp_available = true;
1229 			break;
1230 		}
1231 	}
1232 	read_unlock(&hci_dev_list_lock);
1233 
1234 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1235 		return amp_available;
1236 
1237 	return false;
1238 }
1239 
1240 static bool l2cap_check_efs(struct l2cap_chan *chan)
1241 {
1242 	/* Check EFS parameters */
1243 	return true;
1244 }
1245 
1246 void l2cap_send_conn_req(struct l2cap_chan *chan)
1247 {
1248 	struct l2cap_conn *conn = chan->conn;
1249 	struct l2cap_conn_req req;
1250 
1251 	req.scid = cpu_to_le16(chan->scid);
1252 	req.psm  = chan->psm;
1253 
1254 	chan->ident = l2cap_get_ident(conn);
1255 
1256 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1257 
1258 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1259 }
1260 
1261 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1262 {
1263 	struct l2cap_create_chan_req req;
1264 	req.scid = cpu_to_le16(chan->scid);
1265 	req.psm  = chan->psm;
1266 	req.amp_id = amp_id;
1267 
1268 	chan->ident = l2cap_get_ident(chan->conn);
1269 
1270 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1271 		       sizeof(req), &req);
1272 }
1273 
1274 static void l2cap_move_setup(struct l2cap_chan *chan)
1275 {
1276 	struct sk_buff *skb;
1277 
1278 	BT_DBG("chan %p", chan);
1279 
1280 	if (chan->mode != L2CAP_MODE_ERTM)
1281 		return;
1282 
1283 	__clear_retrans_timer(chan);
1284 	__clear_monitor_timer(chan);
1285 	__clear_ack_timer(chan);
1286 
1287 	chan->retry_count = 0;
1288 	skb_queue_walk(&chan->tx_q, skb) {
1289 		if (bt_cb(skb)->l2cap.retries)
1290 			bt_cb(skb)->l2cap.retries = 1;
1291 		else
1292 			break;
1293 	}
1294 
1295 	chan->expected_tx_seq = chan->buffer_seq;
1296 
1297 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1298 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1299 	l2cap_seq_list_clear(&chan->retrans_list);
1300 	l2cap_seq_list_clear(&chan->srej_list);
1301 	skb_queue_purge(&chan->srej_q);
1302 
1303 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1304 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1305 
1306 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1307 }
1308 
1309 static void l2cap_move_done(struct l2cap_chan *chan)
1310 {
1311 	u8 move_role = chan->move_role;
1312 	BT_DBG("chan %p", chan);
1313 
1314 	chan->move_state = L2CAP_MOVE_STABLE;
1315 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1316 
1317 	if (chan->mode != L2CAP_MODE_ERTM)
1318 		return;
1319 
1320 	switch (move_role) {
1321 	case L2CAP_MOVE_ROLE_INITIATOR:
1322 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1323 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1324 		break;
1325 	case L2CAP_MOVE_ROLE_RESPONDER:
1326 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1327 		break;
1328 	}
1329 }
1330 
1331 static void l2cap_chan_ready(struct l2cap_chan *chan)
1332 {
1333 	/* The channel may have already been flagged as connected in
1334 	 * case of receiving data before the L2CAP info req/rsp
1335 	 * procedure is complete.
1336 	 */
1337 	if (chan->state == BT_CONNECTED)
1338 		return;
1339 
1340 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1341 	chan->conf_state = 0;
1342 	__clear_chan_timer(chan);
1343 
1344 	switch (chan->mode) {
1345 	case L2CAP_MODE_LE_FLOWCTL:
1346 	case L2CAP_MODE_EXT_FLOWCTL:
1347 		if (!chan->tx_credits)
1348 			chan->ops->suspend(chan);
1349 		break;
1350 	}
1351 
1352 	chan->state = BT_CONNECTED;
1353 
1354 	chan->ops->ready(chan);
1355 }
1356 
1357 static void l2cap_le_connect(struct l2cap_chan *chan)
1358 {
1359 	struct l2cap_conn *conn = chan->conn;
1360 	struct l2cap_le_conn_req req;
1361 
1362 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1363 		return;
1364 
1365 	if (!chan->imtu)
1366 		chan->imtu = chan->conn->mtu;
1367 
1368 	l2cap_le_flowctl_init(chan, 0);
1369 
1370 	req.psm     = chan->psm;
1371 	req.scid    = cpu_to_le16(chan->scid);
1372 	req.mtu     = cpu_to_le16(chan->imtu);
1373 	req.mps     = cpu_to_le16(chan->mps);
1374 	req.credits = cpu_to_le16(chan->rx_credits);
1375 
1376 	chan->ident = l2cap_get_ident(conn);
1377 
1378 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1379 		       sizeof(req), &req);
1380 }
1381 
1382 struct l2cap_ecred_conn_data {
1383 	struct {
1384 		struct l2cap_ecred_conn_req req;
1385 		__le16 scid[5];
1386 	} __packed pdu;
1387 	struct l2cap_chan *chan;
1388 	struct pid *pid;
1389 	int count;
1390 };
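/* Note on the struct above: the pdu member is what goes out as the payload of
 * an L2CAP_ECRED_CONN_REQ, a fixed request header followed by one __le16 SCID
 * per channel. With scid[5] a single request can carry at most five channels,
 * which is why l2cap_ecred_connect() below sends
 * sizeof(data.pdu.req) + data.count * sizeof(__le16) bytes.
 */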
1391 
1392 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1393 {
1394 	struct l2cap_ecred_conn_data *conn = data;
1395 	struct pid *pid;
1396 
1397 	if (chan == conn->chan)
1398 		return;
1399 
1400 	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1401 		return;
1402 
1403 	pid = chan->ops->get_peer_pid(chan);
1404 
1405 	/* Only add deferred channels with the same PID/PSM */
1406 	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1407 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1408 		return;
1409 
1410 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1411 		return;
1412 
1413 	l2cap_ecred_init(chan, 0);
1414 
1415 	/* Set the same ident so we can match on the rsp */
1416 	chan->ident = conn->chan->ident;
1417 
1418 	/* Include all deferred channels */
1419 	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1420 
1421 	conn->count++;
1422 }
1423 
1424 static void l2cap_ecred_connect(struct l2cap_chan *chan)
1425 {
1426 	struct l2cap_conn *conn = chan->conn;
1427 	struct l2cap_ecred_conn_data data;
1428 
1429 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
1430 		return;
1431 
1432 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1433 		return;
1434 
1435 	l2cap_ecred_init(chan, 0);
1436 
1437 	data.pdu.req.psm     = chan->psm;
1438 	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
1439 	data.pdu.req.mps     = cpu_to_le16(chan->mps);
1440 	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
1441 	data.pdu.scid[0]     = cpu_to_le16(chan->scid);
1442 
1443 	chan->ident = l2cap_get_ident(conn);
1445 
1446 	data.count = 1;
1447 	data.chan = chan;
1448 	data.pid = chan->ops->get_peer_pid(chan);
1449 
1450 	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
1451 
1452 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
1453 		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
1454 		       &data.pdu);
1455 }
1456 
1457 static void l2cap_le_start(struct l2cap_chan *chan)
1458 {
1459 	struct l2cap_conn *conn = chan->conn;
1460 
1461 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1462 		return;
1463 
1464 	if (!chan->psm) {
1465 		l2cap_chan_ready(chan);
1466 		return;
1467 	}
1468 
1469 	if (chan->state == BT_CONNECT) {
1470 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1471 			l2cap_ecred_connect(chan);
1472 		else
1473 			l2cap_le_connect(chan);
1474 	}
1475 }
1476 
1477 static void l2cap_start_connection(struct l2cap_chan *chan)
1478 {
1479 	if (__amp_capable(chan)) {
1480 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1481 		a2mp_discover_amp(chan);
1482 	} else if (chan->conn->hcon->type == LE_LINK) {
1483 		l2cap_le_start(chan);
1484 	} else {
1485 		l2cap_send_conn_req(chan);
1486 	}
1487 }
1488 
1489 static void l2cap_request_info(struct l2cap_conn *conn)
1490 {
1491 	struct l2cap_info_req req;
1492 
1493 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1494 		return;
1495 
1496 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1497 
1498 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1499 	conn->info_ident = l2cap_get_ident(conn);
1500 
1501 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1502 
1503 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1504 		       sizeof(req), &req);
1505 }
1506 
1507 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1508 {
1509 	/* The minimum encryption key size needs to be enforced by the
1510 	 * host stack before establishing any L2CAP connections. The
1511 	 * specification in theory allows a minimum of 1, but to align
1512 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1513 	 *
1514 	 * This check might also be called for unencrypted connections
1515 	 * that have no key size requirements. Ensure that the link is
1516 	 * actually encrypted before enforcing a key size.
1517 	 */
1518 	int min_key_size = hcon->hdev->min_enc_key_size;
1519 
1520 	/* On FIPS security level, key size must be 16 bytes */
1521 	if (hcon->sec_level == BT_SECURITY_FIPS)
1522 		min_key_size = 16;
1523 
1524 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1525 		hcon->enc_key_size >= min_key_size);
1526 }
1527 
1528 static void l2cap_do_start(struct l2cap_chan *chan)
1529 {
1530 	struct l2cap_conn *conn = chan->conn;
1531 
1532 	if (conn->hcon->type == LE_LINK) {
1533 		l2cap_le_start(chan);
1534 		return;
1535 	}
1536 
1537 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1538 		l2cap_request_info(conn);
1539 		return;
1540 	}
1541 
1542 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1543 		return;
1544 
1545 	if (!l2cap_chan_check_security(chan, true) ||
1546 	    !__l2cap_no_conn_pending(chan))
1547 		return;
1548 
1549 	if (l2cap_check_enc_key_size(conn->hcon))
1550 		l2cap_start_connection(chan);
1551 	else
1552 		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
1553 }
1554 
1555 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1556 {
1557 	u32 local_feat_mask = l2cap_feat_mask;
1558 	if (!disable_ertm)
1559 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1560 
1561 	switch (mode) {
1562 	case L2CAP_MODE_ERTM:
1563 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1564 	case L2CAP_MODE_STREAMING:
1565 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1566 	default:
1567 		return 0x00;
1568 	}
1569 }
1570 
1571 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1572 {
1573 	struct l2cap_conn *conn = chan->conn;
1574 	struct l2cap_disconn_req req;
1575 
1576 	if (!conn)
1577 		return;
1578 
1579 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1580 		__clear_retrans_timer(chan);
1581 		__clear_monitor_timer(chan);
1582 		__clear_ack_timer(chan);
1583 	}
1584 
1585 	if (chan->scid == L2CAP_CID_A2MP) {
1586 		l2cap_state_change(chan, BT_DISCONN);
1587 		return;
1588 	}
1589 
1590 	req.dcid = cpu_to_le16(chan->dcid);
1591 	req.scid = cpu_to_le16(chan->scid);
1592 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1593 		       sizeof(req), &req);
1594 
1595 	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1596 }
1597 
1598 /* ---- L2CAP connections ---- */
1599 static void l2cap_conn_start(struct l2cap_conn *conn)
1600 {
1601 	struct l2cap_chan *chan, *tmp;
1602 
1603 	BT_DBG("conn %p", conn);
1604 
1605 	mutex_lock(&conn->chan_lock);
1606 
1607 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1608 		l2cap_chan_lock(chan);
1609 
1610 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1611 			l2cap_chan_ready(chan);
1612 			l2cap_chan_unlock(chan);
1613 			continue;
1614 		}
1615 
1616 		if (chan->state == BT_CONNECT) {
1617 			if (!l2cap_chan_check_security(chan, true) ||
1618 			    !__l2cap_no_conn_pending(chan)) {
1619 				l2cap_chan_unlock(chan);
1620 				continue;
1621 			}
1622 
1623 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1624 			    && test_bit(CONF_STATE2_DEVICE,
1625 					&chan->conf_state)) {
1626 				l2cap_chan_close(chan, ECONNRESET);
1627 				l2cap_chan_unlock(chan);
1628 				continue;
1629 			}
1630 
1631 			if (l2cap_check_enc_key_size(conn->hcon))
1632 				l2cap_start_connection(chan);
1633 			else
1634 				l2cap_chan_close(chan, ECONNREFUSED);
1635 
1636 		} else if (chan->state == BT_CONNECT2) {
1637 			struct l2cap_conn_rsp rsp;
1638 			char buf[128];
1639 			rsp.scid = cpu_to_le16(chan->dcid);
1640 			rsp.dcid = cpu_to_le16(chan->scid);
1641 
1642 			if (l2cap_chan_check_security(chan, false)) {
1643 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1644 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1645 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1646 					chan->ops->defer(chan);
1647 
1648 				} else {
1649 					l2cap_state_change(chan, BT_CONFIG);
1650 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1651 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1652 				}
1653 			} else {
1654 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1655 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1656 			}
1657 
1658 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1659 				       sizeof(rsp), &rsp);
1660 
1661 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1662 			    rsp.result != L2CAP_CR_SUCCESS) {
1663 				l2cap_chan_unlock(chan);
1664 				continue;
1665 			}
1666 
1667 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1668 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1669 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1670 			chan->num_conf_req++;
1671 		}
1672 
1673 		l2cap_chan_unlock(chan);
1674 	}
1675 
1676 	mutex_unlock(&conn->chan_lock);
1677 }
1678 
1679 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1680 {
1681 	struct hci_conn *hcon = conn->hcon;
1682 	struct hci_dev *hdev = hcon->hdev;
1683 
1684 	BT_DBG("%s conn %p", hdev->name, conn);
1685 
1686 	/* For outgoing pairing which doesn't necessarily have an
1687 	 * associated socket (e.g. mgmt_pair_device).
1688 	 */
1689 	if (hcon->out)
1690 		smp_conn_security(hcon, hcon->pending_sec_level);
1691 
1692 	/* For LE slave connections, make sure the connection interval
1693 	 * is in the range of the minimum and maximum interval that has
1694 	 * been configured for this connection. If not, then trigger
1695 	 * the connection update procedure.
1696 	 */
1697 	if (hcon->role == HCI_ROLE_SLAVE &&
1698 	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1699 	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1700 		struct l2cap_conn_param_update_req req;
1701 
1702 		req.min = cpu_to_le16(hcon->le_conn_min_interval);
1703 		req.max = cpu_to_le16(hcon->le_conn_max_interval);
1704 		req.latency = cpu_to_le16(hcon->le_conn_latency);
1705 		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1706 
1707 		l2cap_send_cmd(conn, l2cap_get_ident(conn),
1708 			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1709 	}
1710 }
1711 
1712 static void l2cap_conn_ready(struct l2cap_conn *conn)
1713 {
1714 	struct l2cap_chan *chan;
1715 	struct hci_conn *hcon = conn->hcon;
1716 
1717 	BT_DBG("conn %p", conn);
1718 
1719 	if (hcon->type == ACL_LINK)
1720 		l2cap_request_info(conn);
1721 
1722 	mutex_lock(&conn->chan_lock);
1723 
1724 	list_for_each_entry(chan, &conn->chan_l, list) {
1725 
1726 		l2cap_chan_lock(chan);
1727 
1728 		if (chan->scid == L2CAP_CID_A2MP) {
1729 			l2cap_chan_unlock(chan);
1730 			continue;
1731 		}
1732 
1733 		if (hcon->type == LE_LINK) {
1734 			l2cap_le_start(chan);
1735 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1736 			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1737 				l2cap_chan_ready(chan);
1738 		} else if (chan->state == BT_CONNECT) {
1739 			l2cap_do_start(chan);
1740 		}
1741 
1742 		l2cap_chan_unlock(chan);
1743 	}
1744 
1745 	mutex_unlock(&conn->chan_lock);
1746 
1747 	if (hcon->type == LE_LINK)
1748 		l2cap_le_conn_ready(conn);
1749 
1750 	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1751 }
1752 
1753 /* Notify sockets that we cannot guarantee reliability anymore */
1754 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1755 {
1756 	struct l2cap_chan *chan;
1757 
1758 	BT_DBG("conn %p", conn);
1759 
1760 	mutex_lock(&conn->chan_lock);
1761 
1762 	list_for_each_entry(chan, &conn->chan_l, list) {
1763 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1764 			l2cap_chan_set_err(chan, err);
1765 	}
1766 
1767 	mutex_unlock(&conn->chan_lock);
1768 }
1769 
1770 static void l2cap_info_timeout(struct work_struct *work)
1771 {
1772 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1773 					       info_timer.work);
1774 
1775 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1776 	conn->info_ident = 0;
1777 
1778 	l2cap_conn_start(conn);
1779 }
1780 
1781 /*
1782  * l2cap_user
1783  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1784  * callback is called during registration. The ->remove callback is called
1785  * during unregistration.
1786  * An l2cap_user object is unregistered either explicitly or implicitly when
1787  * the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1788  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1789  * External modules must own a reference to the l2cap_conn object if they intend
1790  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1791  * any time if they don't.
1792  */
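
/* Minimal usage sketch for an external module (comment only): my_probe,
 * my_remove and my_attach are hypothetical; only the l2cap_* calls are the
 * real interfaces from this file.
 *
 *	static struct l2cap_user my_user = {
 *		.list   = LIST_HEAD_INIT(my_user.list),
 *		.probe  = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	static int my_attach(struct l2cap_conn *conn)
 *	{
 *		int err;
 *
 *		l2cap_conn_get(conn);	// hold conn while we are registered
 *		err = l2cap_register_user(conn, &my_user);
 *		if (err)
 *			l2cap_conn_put(conn);
 *		return err;
 *	}
 *
 * On teardown call l2cap_unregister_user(conn, &my_user) (or rely on the
 * ->remove callback firing when the connection dies) and then drop the
 * reference with l2cap_conn_put().
 */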
1793 
1794 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1795 {
1796 	struct hci_dev *hdev = conn->hcon->hdev;
1797 	int ret;
1798 
1799 	/* We need to check whether l2cap_conn is registered. If it is not, we
1800 	 * must not register the l2cap_user. l2cap_conn_del() unregisters
1801 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1802 	 * relies on the parent hci_conn object to be locked. This itself relies
1803 	 * on the hci_dev object to be locked. So we must lock the hci device
1804 	 * here, too. */
1805 
1806 	hci_dev_lock(hdev);
1807 
1808 	if (!list_empty(&user->list)) {
1809 		ret = -EINVAL;
1810 		goto out_unlock;
1811 	}
1812 
1813 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1814 	if (!conn->hchan) {
1815 		ret = -ENODEV;
1816 		goto out_unlock;
1817 	}
1818 
1819 	ret = user->probe(conn, user);
1820 	if (ret)
1821 		goto out_unlock;
1822 
1823 	list_add(&user->list, &conn->users);
1824 	ret = 0;
1825 
1826 out_unlock:
1827 	hci_dev_unlock(hdev);
1828 	return ret;
1829 }
1830 EXPORT_SYMBOL(l2cap_register_user);
1831 
1832 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1833 {
1834 	struct hci_dev *hdev = conn->hcon->hdev;
1835 
1836 	hci_dev_lock(hdev);
1837 
1838 	if (list_empty(&user->list))
1839 		goto out_unlock;
1840 
1841 	list_del_init(&user->list);
1842 	user->remove(conn, user);
1843 
1844 out_unlock:
1845 	hci_dev_unlock(hdev);
1846 }
1847 EXPORT_SYMBOL(l2cap_unregister_user);
1848 
1849 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1850 {
1851 	struct l2cap_user *user;
1852 
1853 	while (!list_empty(&conn->users)) {
1854 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1855 		list_del_init(&user->list);
1856 		user->remove(conn, user);
1857 	}
1858 }
1859 
1860 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1861 {
1862 	struct l2cap_conn *conn = hcon->l2cap_data;
1863 	struct l2cap_chan *chan, *l;
1864 
1865 	if (!conn)
1866 		return;
1867 
1868 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1869 
1870 	kfree_skb(conn->rx_skb);
1871 
1872 	skb_queue_purge(&conn->pending_rx);
1873 
1874 	/* We can not call flush_work(&conn->pending_rx_work) here since we
1875 	 * might block if we are running on a worker from the same workqueue
1876 	 * pending_rx_work is waiting on.
1877 	 */
1878 	if (work_pending(&conn->pending_rx_work))
1879 		cancel_work_sync(&conn->pending_rx_work);
1880 
1881 	if (work_pending(&conn->id_addr_update_work))
1882 		cancel_work_sync(&conn->id_addr_update_work);
1883 
1884 	l2cap_unregister_all_users(conn);
1885 
1886 	/* Force the connection to be immediately dropped */
1887 	hcon->disc_timeout = 0;
1888 
1889 	mutex_lock(&conn->chan_lock);
1890 
1891 	/* Kill channels */
1892 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1893 		l2cap_chan_hold(chan);
1894 		l2cap_chan_lock(chan);
1895 
1896 		l2cap_chan_del(chan, err);
1897 
1898 		chan->ops->close(chan);
1899 
1900 		l2cap_chan_unlock(chan);
1901 		l2cap_chan_put(chan);
1902 	}
1903 
1904 	mutex_unlock(&conn->chan_lock);
1905 
1906 	hci_chan_del(conn->hchan);
1907 
1908 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1909 		cancel_delayed_work_sync(&conn->info_timer);
1910 
1911 	hcon->l2cap_data = NULL;
1912 	conn->hchan = NULL;
1913 	l2cap_conn_put(conn);
1914 }
1915 
1916 static void l2cap_conn_free(struct kref *ref)
1917 {
1918 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1919 
1920 	hci_conn_put(conn->hcon);
1921 	kfree(conn);
1922 }
1923 
1924 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1925 {
1926 	kref_get(&conn->ref);
1927 	return conn;
1928 }
1929 EXPORT_SYMBOL(l2cap_conn_get);
1930 
1931 void l2cap_conn_put(struct l2cap_conn *conn)
1932 {
1933 	kref_put(&conn->ref, l2cap_conn_free);
1934 }
1935 EXPORT_SYMBOL(l2cap_conn_put);
1936 
1937 /* ---- Socket interface ---- */
1938 
1939 /* Find channel with psm and source / destination bdaddr.
1940  * Returns closest match.
1941  */
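/* Matching precedence, derived from the loop below: an exact (src, dst) match
 * returns immediately; otherwise the last candidate seen whose source and/or
 * destination is the BDADDR_ANY wildcard is returned as the closest match.
 */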
1942 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1943 						   bdaddr_t *src,
1944 						   bdaddr_t *dst,
1945 						   u8 link_type)
1946 {
1947 	struct l2cap_chan *c, *c1 = NULL;
1948 
1949 	read_lock(&chan_list_lock);
1950 
1951 	list_for_each_entry(c, &chan_list, global_l) {
1952 		if (state && c->state != state)
1953 			continue;
1954 
1955 		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1956 			continue;
1957 
1958 		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1959 			continue;
1960 
1961 		if (c->psm == psm) {
1962 			int src_match, dst_match;
1963 			int src_any, dst_any;
1964 
1965 			/* Exact match. */
1966 			src_match = !bacmp(&c->src, src);
1967 			dst_match = !bacmp(&c->dst, dst);
1968 			if (src_match && dst_match) {
1969 				l2cap_chan_hold(c);
1970 				read_unlock(&chan_list_lock);
1971 				return c;
1972 			}
1973 
1974 			/* Closest match */
1975 			src_any = !bacmp(&c->src, BDADDR_ANY);
1976 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
1977 			if ((src_match && dst_any) || (src_any && dst_match) ||
1978 			    (src_any && dst_any))
1979 				c1 = c;
1980 		}
1981 	}
1982 
1983 	if (c1)
1984 		l2cap_chan_hold(c1);
1985 
1986 	read_unlock(&chan_list_lock);
1987 
1988 	return c1;
1989 }
1990 
1991 static void l2cap_monitor_timeout(struct work_struct *work)
1992 {
1993 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1994 					       monitor_timer.work);
1995 
1996 	BT_DBG("chan %p", chan);
1997 
1998 	l2cap_chan_lock(chan);
1999 
2000 	if (!chan->conn) {
2001 		l2cap_chan_unlock(chan);
2002 		l2cap_chan_put(chan);
2003 		return;
2004 	}
2005 
2006 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2007 
2008 	l2cap_chan_unlock(chan);
2009 	l2cap_chan_put(chan);
2010 }
2011 
2012 static void l2cap_retrans_timeout(struct work_struct *work)
2013 {
2014 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2015 					       retrans_timer.work);
2016 
2017 	BT_DBG("chan %p", chan);
2018 
2019 	l2cap_chan_lock(chan);
2020 
2021 	if (!chan->conn) {
2022 		l2cap_chan_unlock(chan);
2023 		l2cap_chan_put(chan);
2024 		return;
2025 	}
2026 
2027 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2028 	l2cap_chan_unlock(chan);
2029 	l2cap_chan_put(chan);
2030 }
2031 
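/* Streaming mode TX: number each queued frame, append the FCS if enabled
 * and send it immediately; frames are never acknowledged or retransmitted.
 */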
2032 static void l2cap_streaming_send(struct l2cap_chan *chan,
2033 				 struct sk_buff_head *skbs)
2034 {
2035 	struct sk_buff *skb;
2036 	struct l2cap_ctrl *control;
2037 
2038 	BT_DBG("chan %p, skbs %p", chan, skbs);
2039 
2040 	if (__chan_is_moving(chan))
2041 		return;
2042 
2043 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
2044 
2045 	while (!skb_queue_empty(&chan->tx_q)) {
2046 
2047 		skb = skb_dequeue(&chan->tx_q);
2048 
2049 		bt_cb(skb)->l2cap.retries = 1;
2050 		control = &bt_cb(skb)->l2cap;
2051 
2052 		control->reqseq = 0;
2053 		control->txseq = chan->next_tx_seq;
2054 
2055 		__pack_control(chan, control, skb);
2056 
2057 		if (chan->fcs == L2CAP_FCS_CRC16) {
2058 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2059 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2060 		}
2061 
2062 		l2cap_do_send(chan, skb);
2063 
2064 		BT_DBG("Sent txseq %u", control->txseq);
2065 
2066 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2067 		chan->frames_sent++;
2068 	}
2069 }
2070 
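/* ERTM TX: transmit queued I-frames while the remote TX window has room,
 * cloning each skb so the original remains queued for retransmission.
 * Returns the number of frames sent.
 */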
2071 static int l2cap_ertm_send(struct l2cap_chan *chan)
2072 {
2073 	struct sk_buff *skb, *tx_skb;
2074 	struct l2cap_ctrl *control;
2075 	int sent = 0;
2076 
2077 	BT_DBG("chan %p", chan);
2078 
2079 	if (chan->state != BT_CONNECTED)
2080 		return -ENOTCONN;
2081 
2082 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2083 		return 0;
2084 
2085 	if (__chan_is_moving(chan))
2086 		return 0;
2087 
2088 	while (chan->tx_send_head &&
2089 	       chan->unacked_frames < chan->remote_tx_win &&
2090 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
2091 
2092 		skb = chan->tx_send_head;
2093 
2094 		bt_cb(skb)->l2cap.retries = 1;
2095 		control = &bt_cb(skb)->l2cap;
2096 
2097 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2098 			control->final = 1;
2099 
2100 		control->reqseq = chan->buffer_seq;
2101 		chan->last_acked_seq = chan->buffer_seq;
2102 		control->txseq = chan->next_tx_seq;
2103 
2104 		__pack_control(chan, control, skb);
2105 
2106 		if (chan->fcs == L2CAP_FCS_CRC16) {
2107 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2108 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2109 		}
2110 
2111 		/* Clone after data has been modified. Data is assumed to be
2112 		 * read-only (for locking purposes) on cloned sk_buffs.
2113 		 */
2114 		tx_skb = skb_clone(skb, GFP_KERNEL);
2115 
2116 		if (!tx_skb)
2117 			break;
2118 
2119 		__set_retrans_timer(chan);
2120 
2121 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2122 		chan->unacked_frames++;
2123 		chan->frames_sent++;
2124 		sent++;
2125 
2126 		if (skb_queue_is_last(&chan->tx_q, skb))
2127 			chan->tx_send_head = NULL;
2128 		else
2129 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2130 
2131 		l2cap_do_send(chan, tx_skb);
2132 		BT_DBG("Sent txseq %u", control->txseq);
2133 	}
2134 
2135 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2136 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
2137 
2138 	return sent;
2139 }
2140 
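/* Retransmit every frame whose sequence number sits on chan->retrans_list,
 * disconnecting the channel once a frame exceeds the max_tx retry limit.
 */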
2141 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2142 {
2143 	struct l2cap_ctrl control;
2144 	struct sk_buff *skb;
2145 	struct sk_buff *tx_skb;
2146 	u16 seq;
2147 
2148 	BT_DBG("chan %p", chan);
2149 
2150 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2151 		return;
2152 
2153 	if (__chan_is_moving(chan))
2154 		return;
2155 
2156 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2157 		seq = l2cap_seq_list_pop(&chan->retrans_list);
2158 
2159 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2160 		if (!skb) {
2161 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
2162 			       seq);
2163 			continue;
2164 		}
2165 
2166 		bt_cb(skb)->l2cap.retries++;
2167 		control = bt_cb(skb)->l2cap;
2168 
2169 		if (chan->max_tx != 0 &&
2170 		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
2171 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2172 			l2cap_send_disconn_req(chan, ECONNRESET);
2173 			l2cap_seq_list_clear(&chan->retrans_list);
2174 			break;
2175 		}
2176 
2177 		control.reqseq = chan->buffer_seq;
2178 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2179 			control.final = 1;
2180 		else
2181 			control.final = 0;
2182 
2183 		if (skb_cloned(skb)) {
2184 			/* Cloned sk_buffs are read-only, so we need a
2185 			 * writeable copy
2186 			 */
2187 			tx_skb = skb_copy(skb, GFP_KERNEL);
2188 		} else {
2189 			tx_skb = skb_clone(skb, GFP_KERNEL);
2190 		}
2191 
2192 		if (!tx_skb) {
2193 			l2cap_seq_list_clear(&chan->retrans_list);
2194 			break;
2195 		}
2196 
2197 		/* Update skb contents */
2198 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2199 			put_unaligned_le32(__pack_extended_control(&control),
2200 					   tx_skb->data + L2CAP_HDR_SIZE);
2201 		} else {
2202 			put_unaligned_le16(__pack_enhanced_control(&control),
2203 					   tx_skb->data + L2CAP_HDR_SIZE);
2204 		}
2205 
2206 		/* Update FCS */
2207 		if (chan->fcs == L2CAP_FCS_CRC16) {
2208 			u16 fcs = crc16(0, (u8 *) tx_skb->data,
2209 					tx_skb->len - L2CAP_FCS_SIZE);
2210 			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2211 						L2CAP_FCS_SIZE);
2212 		}
2213 
2214 		l2cap_do_send(chan, tx_skb);
2215 
2216 		BT_DBG("Resent txseq %d", control.txseq);
2217 
2218 		chan->last_acked_seq = chan->buffer_seq;
2219 	}
2220 }
2221 
2222 static void l2cap_retransmit(struct l2cap_chan *chan,
2223 			     struct l2cap_ctrl *control)
2224 {
2225 	BT_DBG("chan %p, control %p", chan, control);
2226 
2227 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2228 	l2cap_ertm_resend(chan);
2229 }
2230 
2231 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2232 				 struct l2cap_ctrl *control)
2233 {
2234 	struct sk_buff *skb;
2235 
2236 	BT_DBG("chan %p, control %p", chan, control);
2237 
2238 	if (control->poll)
2239 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2240 
2241 	l2cap_seq_list_clear(&chan->retrans_list);
2242 
2243 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2244 		return;
2245 
2246 	if (chan->unacked_frames) {
2247 		skb_queue_walk(&chan->tx_q, skb) {
2248 			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2249 			    skb == chan->tx_send_head)
2250 				break;
2251 		}
2252 
2253 		skb_queue_walk_from(&chan->tx_q, skb) {
2254 			if (skb == chan->tx_send_head)
2255 				break;
2256 
2257 			l2cap_seq_list_append(&chan->retrans_list,
2258 					      bt_cb(skb)->l2cap.txseq);
2259 		}
2260 
2261 		l2cap_ertm_resend(chan);
2262 	}
2263 }
2264 
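/* Acknowledge received I-frames: send an RNR while locally busy, piggyback
 * the ack on outgoing I-frames when possible, send an RR once about 3/4 of
 * the ack window is outstanding, otherwise (re)arm the ack timer.
 */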
2265 static void l2cap_send_ack(struct l2cap_chan *chan)
2266 {
2267 	struct l2cap_ctrl control;
2268 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2269 					 chan->last_acked_seq);
2270 	int threshold;
2271 
2272 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2273 	       chan, chan->last_acked_seq, chan->buffer_seq);
2274 
2275 	memset(&control, 0, sizeof(control));
2276 	control.sframe = 1;
2277 
2278 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2279 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2280 		__clear_ack_timer(chan);
2281 		control.super = L2CAP_SUPER_RNR;
2282 		control.reqseq = chan->buffer_seq;
2283 		l2cap_send_sframe(chan, &control);
2284 	} else {
2285 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2286 			l2cap_ertm_send(chan);
2287 			/* If any i-frames were sent, they included an ack */
2288 			if (chan->buffer_seq == chan->last_acked_seq)
2289 				frames_to_ack = 0;
2290 		}
2291 
2292 		/* Ack now if the window is 3/4ths full.
2293 		 * Calculate without mul or div
2294 		 */
2295 		threshold = chan->ack_win;
2296 		threshold += threshold << 1;
2297 		threshold >>= 2;
2298 
2299 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2300 		       threshold);
2301 
2302 		if (frames_to_ack >= threshold) {
2303 			__clear_ack_timer(chan);
2304 			control.super = L2CAP_SUPER_RR;
2305 			control.reqseq = chan->buffer_seq;
2306 			l2cap_send_sframe(chan, &control);
2307 			frames_to_ack = 0;
2308 		}
2309 
2310 		if (frames_to_ack)
2311 			__set_ack_timer(chan);
2312 	}
2313 }
2314 
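/* Copy payload from the msghdr iterator into skb: the first count bytes go
 * into skb itself, any remainder is chained as continuation fragments on
 * frag_list, each sized to the connection MTU. Returns the number of bytes
 * copied or a negative error.
 */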
2315 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2316 					 struct msghdr *msg, int len,
2317 					 int count, struct sk_buff *skb)
2318 {
2319 	struct l2cap_conn *conn = chan->conn;
2320 	struct sk_buff **frag;
2321 	int sent = 0;
2322 
2323 	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2324 		return -EFAULT;
2325 
2326 	sent += count;
2327 	len  -= count;
2328 
2329 	/* Continuation fragments (no L2CAP header) */
2330 	frag = &skb_shinfo(skb)->frag_list;
2331 	while (len) {
2332 		struct sk_buff *tmp;
2333 
2334 		count = min_t(unsigned int, conn->mtu, len);
2335 
2336 		tmp = chan->ops->alloc_skb(chan, 0, count,
2337 					   msg->msg_flags & MSG_DONTWAIT);
2338 		if (IS_ERR(tmp))
2339 			return PTR_ERR(tmp);
2340 
2341 		*frag = tmp;
2342 
2343 		if (!copy_from_iter_full(skb_put(*frag, count), count,
2344 				   &msg->msg_iter))
2345 			return -EFAULT;
2346 
2347 		sent += count;
2348 		len  -= count;
2349 
2350 		skb->len += (*frag)->len;
2351 		skb->data_len += (*frag)->len;
2352 
2353 		frag = &(*frag)->next;
2354 	}
2355 
2356 	return sent;
2357 }
2358 
2359 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2360 						 struct msghdr *msg, size_t len)
2361 {
2362 	struct l2cap_conn *conn = chan->conn;
2363 	struct sk_buff *skb;
2364 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2365 	struct l2cap_hdr *lh;
2366 
2367 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2368 	       __le16_to_cpu(chan->psm), len);
2369 
2370 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2371 
2372 	skb = chan->ops->alloc_skb(chan, hlen, count,
2373 				   msg->msg_flags & MSG_DONTWAIT);
2374 	if (IS_ERR(skb))
2375 		return skb;
2376 
2377 	/* Create L2CAP header */
2378 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2379 	lh->cid = cpu_to_le16(chan->dcid);
2380 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2381 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2382 
2383 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2384 	if (unlikely(err < 0)) {
2385 		kfree_skb(skb);
2386 		return ERR_PTR(err);
2387 	}
2388 	return skb;
2389 }
2390 
2391 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2392 					      struct msghdr *msg, size_t len)
2393 {
2394 	struct l2cap_conn *conn = chan->conn;
2395 	struct sk_buff *skb;
2396 	int err, count;
2397 	struct l2cap_hdr *lh;
2398 
2399 	BT_DBG("chan %p len %zu", chan, len);
2400 
2401 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2402 
2403 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2404 				   msg->msg_flags & MSG_DONTWAIT);
2405 	if (IS_ERR(skb))
2406 		return skb;
2407 
2408 	/* Create L2CAP header */
2409 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2410 	lh->cid = cpu_to_le16(chan->dcid);
2411 	lh->len = cpu_to_le16(len);
2412 
2413 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2414 	if (unlikely(err < 0)) {
2415 		kfree_skb(skb);
2416 		return ERR_PTR(err);
2417 	}
2418 	return skb;
2419 }
2420 
2421 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2422 					       struct msghdr *msg, size_t len,
2423 					       u16 sdulen)
2424 {
2425 	struct l2cap_conn *conn = chan->conn;
2426 	struct sk_buff *skb;
2427 	int err, count, hlen;
2428 	struct l2cap_hdr *lh;
2429 
2430 	BT_DBG("chan %p len %zu", chan, len);
2431 
2432 	if (!conn)
2433 		return ERR_PTR(-ENOTCONN);
2434 
2435 	hlen = __ertm_hdr_size(chan);
2436 
2437 	if (sdulen)
2438 		hlen += L2CAP_SDULEN_SIZE;
2439 
2440 	if (chan->fcs == L2CAP_FCS_CRC16)
2441 		hlen += L2CAP_FCS_SIZE;
2442 
2443 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2444 
2445 	skb = chan->ops->alloc_skb(chan, hlen, count,
2446 				   msg->msg_flags & MSG_DONTWAIT);
2447 	if (IS_ERR(skb))
2448 		return skb;
2449 
2450 	/* Create L2CAP header */
2451 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2452 	lh->cid = cpu_to_le16(chan->dcid);
2453 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2454 
2455 	/* Control header is populated later */
2456 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2457 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2458 	else
2459 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2460 
2461 	if (sdulen)
2462 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2463 
2464 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2465 	if (unlikely(err < 0)) {
2466 		kfree_skb(skb);
2467 		return ERR_PTR(err);
2468 	}
2469 
2470 	bt_cb(skb)->l2cap.fcs = chan->fcs;
2471 	bt_cb(skb)->l2cap.retries = 0;
2472 	return skb;
2473 }
2474 
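/* Segment an outgoing SDU into ERTM/streaming PDUs that each fit a single
 * HCI fragment, tagging every skb with the proper SAR value (unsegmented,
 * start, continue or end).
 */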
2475 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2476 			     struct sk_buff_head *seg_queue,
2477 			     struct msghdr *msg, size_t len)
2478 {
2479 	struct sk_buff *skb;
2480 	u16 sdu_len;
2481 	size_t pdu_len;
2482 	u8 sar;
2483 
2484 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2485 
2486 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2487 	 * so fragmented skbs are not used.  The HCI layer's handling
2488 	 * of fragmented skbs is not compatible with ERTM's queueing.
2489 	 */
2490 
2491 	/* PDU size is derived from the HCI MTU */
2492 	pdu_len = chan->conn->mtu;
2493 
2494 	/* Constrain PDU size for BR/EDR connections */
2495 	if (!chan->hs_hcon)
2496 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2497 
2498 	/* Adjust for largest possible L2CAP overhead. */
2499 	if (chan->fcs)
2500 		pdu_len -= L2CAP_FCS_SIZE;
2501 
2502 	pdu_len -= __ertm_hdr_size(chan);
2503 
2504 	/* Remote device may have requested smaller PDUs */
2505 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2506 
2507 	if (len <= pdu_len) {
2508 		sar = L2CAP_SAR_UNSEGMENTED;
2509 		sdu_len = 0;
2510 		pdu_len = len;
2511 	} else {
2512 		sar = L2CAP_SAR_START;
2513 		sdu_len = len;
2514 	}
2515 
2516 	while (len > 0) {
2517 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2518 
2519 		if (IS_ERR(skb)) {
2520 			__skb_queue_purge(seg_queue);
2521 			return PTR_ERR(skb);
2522 		}
2523 
2524 		bt_cb(skb)->l2cap.sar = sar;
2525 		__skb_queue_tail(seg_queue, skb);
2526 
2527 		len -= pdu_len;
2528 		if (sdu_len)
2529 			sdu_len = 0;
2530 
2531 		if (len <= pdu_len) {
2532 			sar = L2CAP_SAR_END;
2533 			pdu_len = len;
2534 		} else {
2535 			sar = L2CAP_SAR_CONTINUE;
2536 		}
2537 	}
2538 
2539 	return 0;
2540 }
2541 
2542 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2543 						   struct msghdr *msg,
2544 						   size_t len, u16 sdulen)
2545 {
2546 	struct l2cap_conn *conn = chan->conn;
2547 	struct sk_buff *skb;
2548 	int err, count, hlen;
2549 	struct l2cap_hdr *lh;
2550 
2551 	BT_DBG("chan %p len %zu", chan, len);
2552 
2553 	if (!conn)
2554 		return ERR_PTR(-ENOTCONN);
2555 
2556 	hlen = L2CAP_HDR_SIZE;
2557 
2558 	if (sdulen)
2559 		hlen += L2CAP_SDULEN_SIZE;
2560 
2561 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2562 
2563 	skb = chan->ops->alloc_skb(chan, hlen, count,
2564 				   msg->msg_flags & MSG_DONTWAIT);
2565 	if (IS_ERR(skb))
2566 		return skb;
2567 
2568 	/* Create L2CAP header */
2569 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2570 	lh->cid = cpu_to_le16(chan->dcid);
2571 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2572 
2573 	if (sdulen)
2574 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2575 
2576 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2577 	if (unlikely(err < 0)) {
2578 		kfree_skb(skb);
2579 		return ERR_PTR(err);
2580 	}
2581 
2582 	return skb;
2583 }
2584 
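/* Segment an outgoing SDU for LE/extended flow control: only the first PDU
 * carries the 2-byte SDU length field, later PDUs can use the full remote
 * MPS for payload.
 */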
2585 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2586 				struct sk_buff_head *seg_queue,
2587 				struct msghdr *msg, size_t len)
2588 {
2589 	struct sk_buff *skb;
2590 	size_t pdu_len;
2591 	u16 sdu_len;
2592 
2593 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2594 
2595 	sdu_len = len;
2596 	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2597 
2598 	while (len > 0) {
2599 		if (len <= pdu_len)
2600 			pdu_len = len;
2601 
2602 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2603 		if (IS_ERR(skb)) {
2604 			__skb_queue_purge(seg_queue);
2605 			return PTR_ERR(skb);
2606 		}
2607 
2608 		__skb_queue_tail(seg_queue, skb);
2609 
2610 		len -= pdu_len;
2611 
2612 		if (sdu_len) {
2613 			sdu_len = 0;
2614 			pdu_len += L2CAP_SDULEN_SIZE;
2615 		}
2616 	}
2617 
2618 	return 0;
2619 }
2620 
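/* Send queued LE/ECRED frames while transmit credits remain; anything left
 * on tx_q waits for the peer to grant more credits.
 */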
2621 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2622 {
2623 	int sent = 0;
2624 
2625 	BT_DBG("chan %p", chan);
2626 
2627 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2628 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2629 		chan->tx_credits--;
2630 		sent++;
2631 	}
2632 
2633 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2634 	       skb_queue_len(&chan->tx_q));
2635 }
2636 
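/* Main transmit entry point: build PDUs from the user message according to
 * the channel mode (connectionless, basic, LE/extended flow control, ERTM
 * or streaming) and hand them to the appropriate TX path. Returns the
 * number of bytes accepted or a negative error.
 */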
2637 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2638 {
2639 	struct sk_buff *skb;
2640 	int err;
2641 	struct sk_buff_head seg_queue;
2642 
2643 	if (!chan->conn)
2644 		return -ENOTCONN;
2645 
2646 	/* Connectionless channel */
2647 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2648 		skb = l2cap_create_connless_pdu(chan, msg, len);
2649 		if (IS_ERR(skb))
2650 			return PTR_ERR(skb);
2651 
2652 		/* Channel lock is released before requesting new skb and then
2653 		 * reacquired, so we need to recheck the channel state.
2654 		 */
2655 		if (chan->state != BT_CONNECTED) {
2656 			kfree_skb(skb);
2657 			return -ENOTCONN;
2658 		}
2659 
2660 		l2cap_do_send(chan, skb);
2661 		return len;
2662 	}
2663 
2664 	switch (chan->mode) {
2665 	case L2CAP_MODE_LE_FLOWCTL:
2666 	case L2CAP_MODE_EXT_FLOWCTL:
2667 		/* Check outgoing MTU */
2668 		if (len > chan->omtu)
2669 			return -EMSGSIZE;
2670 
2671 		__skb_queue_head_init(&seg_queue);
2672 
2673 		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2674 
2675 		if (chan->state != BT_CONNECTED) {
2676 			__skb_queue_purge(&seg_queue);
2677 			err = -ENOTCONN;
2678 		}
2679 
2680 		if (err)
2681 			return err;
2682 
2683 		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2684 
2685 		l2cap_le_flowctl_send(chan);
2686 
2687 		if (!chan->tx_credits)
2688 			chan->ops->suspend(chan);
2689 
2690 		err = len;
2691 
2692 		break;
2693 
2694 	case L2CAP_MODE_BASIC:
2695 		/* Check outgoing MTU */
2696 		if (len > chan->omtu)
2697 			return -EMSGSIZE;
2698 
2699 		/* Create a basic PDU */
2700 		skb = l2cap_create_basic_pdu(chan, msg, len);
2701 		if (IS_ERR(skb))
2702 			return PTR_ERR(skb);
2703 
2704 		/* Channel lock is released before requesting new skb and then
2705 		 * reacquired, so we need to recheck the channel state.
2706 		 */
2707 		if (chan->state != BT_CONNECTED) {
2708 			kfree_skb(skb);
2709 			return -ENOTCONN;
2710 		}
2711 
2712 		l2cap_do_send(chan, skb);
2713 		err = len;
2714 		break;
2715 
2716 	case L2CAP_MODE_ERTM:
2717 	case L2CAP_MODE_STREAMING:
2718 		/* Check outgoing MTU */
2719 		if (len > chan->omtu) {
2720 			err = -EMSGSIZE;
2721 			break;
2722 		}
2723 
2724 		__skb_queue_head_init(&seg_queue);
2725 
2726 		/* Do segmentation before calling in to the state machine,
2727 		 * since it's possible to block while waiting for memory
2728 		 * allocation.
2729 		 */
2730 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2731 
2732 		/* The channel could have been closed while segmenting,
2733 		 * check that it is still connected.
2734 		 */
2735 		if (chan->state != BT_CONNECTED) {
2736 			__skb_queue_purge(&seg_queue);
2737 			err = -ENOTCONN;
2738 		}
2739 
2740 		if (err)
2741 			break;
2742 
2743 		if (chan->mode == L2CAP_MODE_ERTM)
2744 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2745 		else
2746 			l2cap_streaming_send(chan, &seg_queue);
2747 
2748 		err = len;
2749 
2750 		/* If the skbs were not queued for sending, they'll still be in
2751 		 * seg_queue and need to be purged.
2752 		 */
2753 		__skb_queue_purge(&seg_queue);
2754 		break;
2755 
2756 	default:
2757 		BT_DBG("bad mode %1.1x", chan->mode);
2758 		err = -EBADFD;
2759 	}
2760 
2761 	return err;
2762 }
2763 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2764 
2765 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2766 {
2767 	struct l2cap_ctrl control;
2768 	u16 seq;
2769 
2770 	BT_DBG("chan %p, txseq %u", chan, txseq);
2771 
2772 	memset(&control, 0, sizeof(control));
2773 	control.sframe = 1;
2774 	control.super = L2CAP_SUPER_SREJ;
2775 
2776 	for (seq = chan->expected_tx_seq; seq != txseq;
2777 	     seq = __next_seq(chan, seq)) {
2778 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2779 			control.reqseq = seq;
2780 			l2cap_send_sframe(chan, &control);
2781 			l2cap_seq_list_append(&chan->srej_list, seq);
2782 		}
2783 	}
2784 
2785 	chan->expected_tx_seq = __next_seq(chan, txseq);
2786 }
2787 
2788 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2789 {
2790 	struct l2cap_ctrl control;
2791 
2792 	BT_DBG("chan %p", chan);
2793 
2794 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2795 		return;
2796 
2797 	memset(&control, 0, sizeof(control));
2798 	control.sframe = 1;
2799 	control.super = L2CAP_SUPER_SREJ;
2800 	control.reqseq = chan->srej_list.tail;
2801 	l2cap_send_sframe(chan, &control);
2802 }
2803 
2804 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2805 {
2806 	struct l2cap_ctrl control;
2807 	u16 initial_head;
2808 	u16 seq;
2809 
2810 	BT_DBG("chan %p, txseq %u", chan, txseq);
2811 
2812 	memset(&control, 0, sizeof(control));
2813 	control.sframe = 1;
2814 	control.super = L2CAP_SUPER_SREJ;
2815 
2816 	/* Capture initial list head to allow only one pass through the list. */
2817 	initial_head = chan->srej_list.head;
2818 
2819 	do {
2820 		seq = l2cap_seq_list_pop(&chan->srej_list);
2821 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2822 			break;
2823 
2824 		control.reqseq = seq;
2825 		l2cap_send_sframe(chan, &control);
2826 		l2cap_seq_list_append(&chan->srej_list, seq);
2827 	} while (chan->srej_list.head != initial_head);
2828 }
2829 
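/* Handle an incoming ReqSeq acknowledgement: free every newly acked frame
 * from the TX queue and stop the retransmission timer once no unacked
 * frames remain.
 */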
2830 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2831 {
2832 	struct sk_buff *acked_skb;
2833 	u16 ackseq;
2834 
2835 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2836 
2837 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2838 		return;
2839 
2840 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2841 	       chan->expected_ack_seq, chan->unacked_frames);
2842 
2843 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2844 	     ackseq = __next_seq(chan, ackseq)) {
2845 
2846 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2847 		if (acked_skb) {
2848 			skb_unlink(acked_skb, &chan->tx_q);
2849 			kfree_skb(acked_skb);
2850 			chan->unacked_frames--;
2851 		}
2852 	}
2853 
2854 	chan->expected_ack_seq = reqseq;
2855 
2856 	if (chan->unacked_frames == 0)
2857 		__clear_retrans_timer(chan);
2858 
2859 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2860 }
2861 
2862 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2863 {
2864 	BT_DBG("chan %p", chan);
2865 
2866 	chan->expected_tx_seq = chan->buffer_seq;
2867 	l2cap_seq_list_clear(&chan->srej_list);
2868 	skb_queue_purge(&chan->srej_q);
2869 	chan->rx_state = L2CAP_RX_STATE_RECV;
2870 }
2871 
2872 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2873 				struct l2cap_ctrl *control,
2874 				struct sk_buff_head *skbs, u8 event)
2875 {
2876 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2877 	       event);
2878 
2879 	switch (event) {
2880 	case L2CAP_EV_DATA_REQUEST:
2881 		if (chan->tx_send_head == NULL)
2882 			chan->tx_send_head = skb_peek(skbs);
2883 
2884 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2885 		l2cap_ertm_send(chan);
2886 		break;
2887 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2888 		BT_DBG("Enter LOCAL_BUSY");
2889 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2890 
2891 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2892 			/* The SREJ_SENT state must be aborted if we are to
2893 			 * enter the LOCAL_BUSY state.
2894 			 */
2895 			l2cap_abort_rx_srej_sent(chan);
2896 		}
2897 
2898 		l2cap_send_ack(chan);
2899 
2900 		break;
2901 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2902 		BT_DBG("Exit LOCAL_BUSY");
2903 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2904 
2905 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2906 			struct l2cap_ctrl local_control;
2907 
2908 			memset(&local_control, 0, sizeof(local_control));
2909 			local_control.sframe = 1;
2910 			local_control.super = L2CAP_SUPER_RR;
2911 			local_control.poll = 1;
2912 			local_control.reqseq = chan->buffer_seq;
2913 			l2cap_send_sframe(chan, &local_control);
2914 
2915 			chan->retry_count = 1;
2916 			__set_monitor_timer(chan);
2917 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2918 		}
2919 		break;
2920 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2921 		l2cap_process_reqseq(chan, control->reqseq);
2922 		break;
2923 	case L2CAP_EV_EXPLICIT_POLL:
2924 		l2cap_send_rr_or_rnr(chan, 1);
2925 		chan->retry_count = 1;
2926 		__set_monitor_timer(chan);
2927 		__clear_ack_timer(chan);
2928 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2929 		break;
2930 	case L2CAP_EV_RETRANS_TO:
2931 		l2cap_send_rr_or_rnr(chan, 1);
2932 		chan->retry_count = 1;
2933 		__set_monitor_timer(chan);
2934 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2935 		break;
2936 	case L2CAP_EV_RECV_FBIT:
2937 		/* Nothing to process */
2938 		break;
2939 	default:
2940 		break;
2941 	}
2942 }
2943 
2944 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2945 				  struct l2cap_ctrl *control,
2946 				  struct sk_buff_head *skbs, u8 event)
2947 {
2948 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2949 	       event);
2950 
2951 	switch (event) {
2952 	case L2CAP_EV_DATA_REQUEST:
2953 		if (chan->tx_send_head == NULL)
2954 			chan->tx_send_head = skb_peek(skbs);
2955 		/* Queue data, but don't send. */
2956 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2957 		break;
2958 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2959 		BT_DBG("Enter LOCAL_BUSY");
2960 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2961 
2962 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2963 			/* The SREJ_SENT state must be aborted if we are to
2964 			 * enter the LOCAL_BUSY state.
2965 			 */
2966 			l2cap_abort_rx_srej_sent(chan);
2967 		}
2968 
2969 		l2cap_send_ack(chan);
2970 
2971 		break;
2972 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2973 		BT_DBG("Exit LOCAL_BUSY");
2974 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2975 
2976 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2977 			struct l2cap_ctrl local_control;
2978 			memset(&local_control, 0, sizeof(local_control));
2979 			local_control.sframe = 1;
2980 			local_control.super = L2CAP_SUPER_RR;
2981 			local_control.poll = 1;
2982 			local_control.reqseq = chan->buffer_seq;
2983 			l2cap_send_sframe(chan, &local_control);
2984 
2985 			chan->retry_count = 1;
2986 			__set_monitor_timer(chan);
2987 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2988 		}
2989 		break;
2990 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2991 		l2cap_process_reqseq(chan, control->reqseq);
2992 		fallthrough;
2993 
2994 	case L2CAP_EV_RECV_FBIT:
2995 		if (control && control->final) {
2996 			__clear_monitor_timer(chan);
2997 			if (chan->unacked_frames > 0)
2998 				__set_retrans_timer(chan);
2999 			chan->retry_count = 0;
3000 			chan->tx_state = L2CAP_TX_STATE_XMIT;
3001 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
3002 		}
3003 		break;
3004 	case L2CAP_EV_EXPLICIT_POLL:
3005 		/* Ignore */
3006 		break;
3007 	case L2CAP_EV_MONITOR_TO:
3008 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3009 			l2cap_send_rr_or_rnr(chan, 1);
3010 			__set_monitor_timer(chan);
3011 			chan->retry_count++;
3012 		} else {
3013 			l2cap_send_disconn_req(chan, ECONNABORTED);
3014 		}
3015 		break;
3016 	default:
3017 		break;
3018 	}
3019 }
3020 
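/* ERTM TX state machine entry point: dispatch the event to the handler for
 * the current tx_state (XMIT or WAIT_F); other states ignore the event.
 */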
3021 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3022 		     struct sk_buff_head *skbs, u8 event)
3023 {
3024 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3025 	       chan, control, skbs, event, chan->tx_state);
3026 
3027 	switch (chan->tx_state) {
3028 	case L2CAP_TX_STATE_XMIT:
3029 		l2cap_tx_state_xmit(chan, control, skbs, event);
3030 		break;
3031 	case L2CAP_TX_STATE_WAIT_F:
3032 		l2cap_tx_state_wait_f(chan, control, skbs, event);
3033 		break;
3034 	default:
3035 		/* Ignore event */
3036 		break;
3037 	}
3038 }
3039 
3040 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3041 			     struct l2cap_ctrl *control)
3042 {
3043 	BT_DBG("chan %p, control %p", chan, control);
3044 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
3045 }
3046 
3047 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3048 				  struct l2cap_ctrl *control)
3049 {
3050 	BT_DBG("chan %p, control %p", chan, control);
3051 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3052 }
3053 
3054 /* Copy frame to all raw sockets on that connection */
3055 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3056 {
3057 	struct sk_buff *nskb;
3058 	struct l2cap_chan *chan;
3059 
3060 	BT_DBG("conn %p", conn);
3061 
3062 	mutex_lock(&conn->chan_lock);
3063 
3064 	list_for_each_entry(chan, &conn->chan_l, list) {
3065 		if (chan->chan_type != L2CAP_CHAN_RAW)
3066 			continue;
3067 
3068 		/* Don't send frame to the channel it came from */
3069 		if (bt_cb(skb)->l2cap.chan == chan)
3070 			continue;
3071 
3072 		nskb = skb_clone(skb, GFP_KERNEL);
3073 		if (!nskb)
3074 			continue;
3075 		if (chan->ops->recv(chan, nskb))
3076 			kfree_skb(nskb);
3077 	}
3078 
3079 	mutex_unlock(&conn->chan_lock);
3080 }
3081 
3082 /* ---- L2CAP signalling commands ---- */
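/* Allocate and fill a signalling PDU:
 *
 *   [ L2CAP hdr (len, cid) | cmd hdr (code, ident, len) | data ... ]
 *
 * Payload beyond the connection MTU is carried in continuation skbs
 * chained on frag_list.
 */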
3083 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3084 				       u8 ident, u16 dlen, void *data)
3085 {
3086 	struct sk_buff *skb, **frag;
3087 	struct l2cap_cmd_hdr *cmd;
3088 	struct l2cap_hdr *lh;
3089 	int len, count;
3090 
3091 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3092 	       conn, code, ident, dlen);
3093 
3094 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3095 		return NULL;
3096 
3097 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3098 	count = min_t(unsigned int, conn->mtu, len);
3099 
3100 	skb = bt_skb_alloc(count, GFP_KERNEL);
3101 	if (!skb)
3102 		return NULL;
3103 
3104 	lh = skb_put(skb, L2CAP_HDR_SIZE);
3105 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
3106 
3107 	if (conn->hcon->type == LE_LINK)
3108 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3109 	else
3110 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
3111 
3112 	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
3113 	cmd->code  = code;
3114 	cmd->ident = ident;
3115 	cmd->len   = cpu_to_le16(dlen);
3116 
3117 	if (dlen) {
3118 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3119 		skb_put_data(skb, data, count);
3120 		data += count;
3121 	}
3122 
3123 	len -= skb->len;
3124 
3125 	/* Continuation fragments (no L2CAP header) */
3126 	frag = &skb_shinfo(skb)->frag_list;
3127 	while (len) {
3128 		count = min_t(unsigned int, conn->mtu, len);
3129 
3130 		*frag = bt_skb_alloc(count, GFP_KERNEL);
3131 		if (!*frag)
3132 			goto fail;
3133 
3134 		skb_put_data(*frag, data, count);
3135 
3136 		len  -= count;
3137 		data += count;
3138 
3139 		frag = &(*frag)->next;
3140 	}
3141 
3142 	return skb;
3143 
3144 fail:
3145 	kfree_skb(skb);
3146 	return NULL;
3147 }
3148 
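/* Parse one configuration option in type/length/value form:
 *
 *   [ type (1 byte) | len (1 byte) | val (len bytes) ]
 *
 * Advances *ptr past the option and returns the total length consumed.
 */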
3149 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3150 				     unsigned long *val)
3151 {
3152 	struct l2cap_conf_opt *opt = *ptr;
3153 	int len;
3154 
3155 	len = L2CAP_CONF_OPT_SIZE + opt->len;
3156 	*ptr += len;
3157 
3158 	*type = opt->type;
3159 	*olen = opt->len;
3160 
3161 	switch (opt->len) {
3162 	case 1:
3163 		*val = *((u8 *) opt->val);
3164 		break;
3165 
3166 	case 2:
3167 		*val = get_unaligned_le16(opt->val);
3168 		break;
3169 
3170 	case 4:
3171 		*val = get_unaligned_le32(opt->val);
3172 		break;
3173 
3174 	default:
3175 		*val = (unsigned long) opt->val;
3176 		break;
3177 	}
3178 
3179 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3180 	return len;
3181 }
3182 
3183 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3184 {
3185 	struct l2cap_conf_opt *opt = *ptr;
3186 
3187 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3188 
3189 	if (size < L2CAP_CONF_OPT_SIZE + len)
3190 		return;
3191 
3192 	opt->type = type;
3193 	opt->len  = len;
3194 
3195 	switch (len) {
3196 	case 1:
3197 		*((u8 *) opt->val)  = val;
3198 		break;
3199 
3200 	case 2:
3201 		put_unaligned_le16(val, opt->val);
3202 		break;
3203 
3204 	case 4:
3205 		put_unaligned_le32(val, opt->val);
3206 		break;
3207 
3208 	default:
3209 		memcpy(opt->val, (void *) val, len);
3210 		break;
3211 	}
3212 
3213 	*ptr += L2CAP_CONF_OPT_SIZE + len;
3214 }
3215 
3216 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3217 {
3218 	struct l2cap_conf_efs efs;
3219 
3220 	switch (chan->mode) {
3221 	case L2CAP_MODE_ERTM:
3222 		efs.id		= chan->local_id;
3223 		efs.stype	= chan->local_stype;
3224 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3225 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3226 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3227 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3228 		break;
3229 
3230 	case L2CAP_MODE_STREAMING:
3231 		efs.id		= 1;
3232 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3233 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3234 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3235 		efs.acc_lat	= 0;
3236 		efs.flush_to	= 0;
3237 		break;
3238 
3239 	default:
3240 		return;
3241 	}
3242 
3243 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3244 			   (unsigned long) &efs, size);
3245 }
3246 
3247 static void l2cap_ack_timeout(struct work_struct *work)
3248 {
3249 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3250 					       ack_timer.work);
3251 	u16 frames_to_ack;
3252 
3253 	BT_DBG("chan %p", chan);
3254 
3255 	l2cap_chan_lock(chan);
3256 
3257 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3258 				     chan->last_acked_seq);
3259 
3260 	if (frames_to_ack)
3261 		l2cap_send_rr_or_rnr(chan, 0);
3262 
3263 	l2cap_chan_unlock(chan);
3264 	l2cap_chan_put(chan);
3265 }
3266 
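/* Reset the channel's sequence counters and TX queue and, for ERTM mode,
 * initialize the retransmission, monitor and ack timers together with the
 * SREJ and retransmit sequence lists.
 */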
3267 int l2cap_ertm_init(struct l2cap_chan *chan)
3268 {
3269 	int err;
3270 
3271 	chan->next_tx_seq = 0;
3272 	chan->expected_tx_seq = 0;
3273 	chan->expected_ack_seq = 0;
3274 	chan->unacked_frames = 0;
3275 	chan->buffer_seq = 0;
3276 	chan->frames_sent = 0;
3277 	chan->last_acked_seq = 0;
3278 	chan->sdu = NULL;
3279 	chan->sdu_last_frag = NULL;
3280 	chan->sdu_len = 0;
3281 
3282 	skb_queue_head_init(&chan->tx_q);
3283 
3284 	chan->local_amp_id = AMP_ID_BREDR;
3285 	chan->move_id = AMP_ID_BREDR;
3286 	chan->move_state = L2CAP_MOVE_STABLE;
3287 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3288 
3289 	if (chan->mode != L2CAP_MODE_ERTM)
3290 		return 0;
3291 
3292 	chan->rx_state = L2CAP_RX_STATE_RECV;
3293 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3294 
3295 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3296 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3297 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3298 
3299 	skb_queue_head_init(&chan->srej_q);
3300 
3301 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3302 	if (err < 0)
3303 		return err;
3304 
3305 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3306 	if (err < 0)
3307 		l2cap_seq_list_free(&chan->srej_list);
3308 
3309 	return err;
3310 }
3311 
3312 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3313 {
3314 	switch (mode) {
3315 	case L2CAP_MODE_STREAMING:
3316 	case L2CAP_MODE_ERTM:
3317 		if (l2cap_mode_supported(mode, remote_feat_mask))
3318 			return mode;
3319 		fallthrough;
3320 	default:
3321 		return L2CAP_MODE_BASIC;
3322 	}
3323 }
3324 
3325 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3326 {
3327 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3328 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3329 }
3330 
3331 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3332 {
3333 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3334 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3335 }
3336 
3337 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3338 				      struct l2cap_conf_rfc *rfc)
3339 {
3340 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3341 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3342 
3343 		/* Class 1 devices must have ERTM timeouts
3344 		 * exceeding the Link Supervision Timeout.  The
3345 		 * default Link Supervision Timeout for AMP
3346 		 * controllers is 10 seconds.
3347 		 *
3348 		 * Class 1 devices use 0xffffffff for their
3349 		 * best-effort flush timeout, so the clamping logic
3350 		 * will result in a timeout that meets the above
3351 		 * requirement.  ERTM timeouts are 16-bit values, so
3352 		 * the maximum timeout is 65.535 seconds.
3353 		 */
3354 
3355 		/* Convert timeout to milliseconds and round */
3356 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3357 
3358 		/* This is the recommended formula for class 2 devices
3359 		 * that start ERTM timers when packets are sent to the
3360 		 * controller.
3361 		 */
3362 		ertm_to = 3 * ertm_to + 500;
3363 
3364 		if (ertm_to > 0xffff)
3365 			ertm_to = 0xffff;
3366 
3367 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3368 		rfc->monitor_timeout = rfc->retrans_timeout;
3369 	} else {
3370 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3371 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3372 	}
3373 }
3374 
3375 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3376 {
3377 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3378 	    __l2cap_ews_supported(chan->conn)) {
3379 		/* use extended control field */
3380 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3381 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3382 	} else {
3383 		chan->tx_win = min_t(u16, chan->tx_win,
3384 				     L2CAP_DEFAULT_TX_WINDOW);
3385 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3386 	}
3387 	chan->ack_win = chan->tx_win;
3388 }
3389 
3390 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3391 {
3392 	struct hci_conn *conn = chan->conn->hcon;
3393 
3394 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3395 
3396 	/* The 2-DH1 packet has between 2 and 56 information bytes
3397 	 * (including the 2-byte payload header)
3398 	 */
3399 	if (!(conn->pkt_type & HCI_2DH1))
3400 		chan->imtu = 54;
3401 
3402 	/* The 3-DH1 packet has between 2 and 85 information bytes
3403 	 * (including the 2-byte payload header)
3404 	 */
3405 	if (!(conn->pkt_type & HCI_3DH1))
3406 		chan->imtu = 83;
3407 
3408 	/* The 2-DH3 packet has between 2 and 369 information bytes
3409 	 * (including the 2-byte payload header)
3410 	 */
3411 	if (!(conn->pkt_type & HCI_2DH3))
3412 		chan->imtu = 367;
3413 
3414 	/* The 3-DH3 packet has between 2 and 554 information bytes
3415 	 * (including the 2-byte payload header)
3416 	 */
3417 	if (!(conn->pkt_type & HCI_3DH3))
3418 		chan->imtu = 552;
3419 
3420 	/* The 2-DH5 packet has between 2 and 681 information bytes
3421 	 * (including the 2-byte payload header)
3422 	 */
3423 	if (!(conn->pkt_type & HCI_2DH5))
3424 		chan->imtu = 679;
3425 
3426 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3427 	 * (including the 2-byte payload header)
3428 	 */
3429 	if (!(conn->pkt_type & HCI_3DH5))
3430 		chan->imtu = 1021;
3431 }
3432 
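/* Build an outgoing Configuration Request: settle on a mode the remote
 * supports and append MTU, RFC, EFS, EWS and FCS options as applicable.
 * Returns the number of bytes written into data.
 */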
3433 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3434 {
3435 	struct l2cap_conf_req *req = data;
3436 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3437 	void *ptr = req->data;
3438 	void *endptr = data + data_size;
3439 	u16 size;
3440 
3441 	BT_DBG("chan %p", chan);
3442 
3443 	if (chan->num_conf_req || chan->num_conf_rsp)
3444 		goto done;
3445 
3446 	switch (chan->mode) {
3447 	case L2CAP_MODE_STREAMING:
3448 	case L2CAP_MODE_ERTM:
3449 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3450 			break;
3451 
3452 		if (__l2cap_efs_supported(chan->conn))
3453 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3454 
3455 		fallthrough;
3456 	default:
3457 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3458 		break;
3459 	}
3460 
3461 done:
3462 	if (chan->imtu != L2CAP_DEFAULT_MTU) {
3463 		if (!chan->imtu)
3464 			l2cap_mtu_auto(chan);
3465 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3466 				   endptr - ptr);
3467 	}
3468 
3469 	switch (chan->mode) {
3470 	case L2CAP_MODE_BASIC:
3471 		if (disable_ertm)
3472 			break;
3473 
3474 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3475 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3476 			break;
3477 
3478 		rfc.mode            = L2CAP_MODE_BASIC;
3479 		rfc.txwin_size      = 0;
3480 		rfc.max_transmit    = 0;
3481 		rfc.retrans_timeout = 0;
3482 		rfc.monitor_timeout = 0;
3483 		rfc.max_pdu_size    = 0;
3484 
3485 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3486 				   (unsigned long) &rfc, endptr - ptr);
3487 		break;
3488 
3489 	case L2CAP_MODE_ERTM:
3490 		rfc.mode            = L2CAP_MODE_ERTM;
3491 		rfc.max_transmit    = chan->max_tx;
3492 
3493 		__l2cap_set_ertm_timeouts(chan, &rfc);
3494 
3495 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3496 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3497 			     L2CAP_FCS_SIZE);
3498 		rfc.max_pdu_size = cpu_to_le16(size);
3499 
3500 		l2cap_txwin_setup(chan);
3501 
3502 		rfc.txwin_size = min_t(u16, chan->tx_win,
3503 				       L2CAP_DEFAULT_TX_WINDOW);
3504 
3505 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3506 				   (unsigned long) &rfc, endptr - ptr);
3507 
3508 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3509 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3510 
3511 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3512 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3513 					   chan->tx_win, endptr - ptr);
3514 
3515 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3516 			if (chan->fcs == L2CAP_FCS_NONE ||
3517 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3518 				chan->fcs = L2CAP_FCS_NONE;
3519 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3520 						   chan->fcs, endptr - ptr);
3521 			}
3522 		break;
3523 
3524 	case L2CAP_MODE_STREAMING:
3525 		l2cap_txwin_setup(chan);
3526 		rfc.mode            = L2CAP_MODE_STREAMING;
3527 		rfc.txwin_size      = 0;
3528 		rfc.max_transmit    = 0;
3529 		rfc.retrans_timeout = 0;
3530 		rfc.monitor_timeout = 0;
3531 
3532 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3533 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3534 			     L2CAP_FCS_SIZE);
3535 		rfc.max_pdu_size = cpu_to_le16(size);
3536 
3537 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3538 				   (unsigned long) &rfc, endptr - ptr);
3539 
3540 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3541 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3542 
3543 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3544 			if (chan->fcs == L2CAP_FCS_NONE ||
3545 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3546 				chan->fcs = L2CAP_FCS_NONE;
3547 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3548 						   chan->fcs, endptr - ptr);
3549 			}
3550 		break;
3551 	}
3552 
3553 	req->dcid  = cpu_to_le16(chan->dcid);
3554 	req->flags = cpu_to_le16(0);
3555 
3556 	return ptr - data;
3557 }
3558 
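/* Parse the peer's buffered Configuration Request (chan->conf_req) and
 * build our Configuration Response into data, accepting, adjusting or
 * rejecting the offered options. Returns the response length.
 */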
3559 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3560 {
3561 	struct l2cap_conf_rsp *rsp = data;
3562 	void *ptr = rsp->data;
3563 	void *endptr = data + data_size;
3564 	void *req = chan->conf_req;
3565 	int len = chan->conf_len;
3566 	int type, hint, olen;
3567 	unsigned long val;
3568 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3569 	struct l2cap_conf_efs efs;
3570 	u8 remote_efs = 0;
3571 	u16 mtu = L2CAP_DEFAULT_MTU;
3572 	u16 result = L2CAP_CONF_SUCCESS;
3573 	u16 size;
3574 
3575 	BT_DBG("chan %p", chan);
3576 
3577 	while (len >= L2CAP_CONF_OPT_SIZE) {
3578 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3579 		if (len < 0)
3580 			break;
3581 
3582 		hint  = type & L2CAP_CONF_HINT;
3583 		type &= L2CAP_CONF_MASK;
3584 
3585 		switch (type) {
3586 		case L2CAP_CONF_MTU:
3587 			if (olen != 2)
3588 				break;
3589 			mtu = val;
3590 			break;
3591 
3592 		case L2CAP_CONF_FLUSH_TO:
3593 			if (olen != 2)
3594 				break;
3595 			chan->flush_to = val;
3596 			break;
3597 
3598 		case L2CAP_CONF_QOS:
3599 			break;
3600 
3601 		case L2CAP_CONF_RFC:
3602 			if (olen != sizeof(rfc))
3603 				break;
3604 			memcpy(&rfc, (void *) val, olen);
3605 			break;
3606 
3607 		case L2CAP_CONF_FCS:
3608 			if (olen != 1)
3609 				break;
3610 			if (val == L2CAP_FCS_NONE)
3611 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3612 			break;
3613 
3614 		case L2CAP_CONF_EFS:
3615 			if (olen != sizeof(efs))
3616 				break;
3617 			remote_efs = 1;
3618 			memcpy(&efs, (void *) val, olen);
3619 			break;
3620 
3621 		case L2CAP_CONF_EWS:
3622 			if (olen != 2)
3623 				break;
3624 			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3625 				return -ECONNREFUSED;
3626 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3627 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3628 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3629 			chan->remote_tx_win = val;
3630 			break;
3631 
3632 		default:
3633 			if (hint)
3634 				break;
3635 			result = L2CAP_CONF_UNKNOWN;
3636 			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
3637 			break;
3638 		}
3639 	}
3640 
3641 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3642 		goto done;
3643 
3644 	switch (chan->mode) {
3645 	case L2CAP_MODE_STREAMING:
3646 	case L2CAP_MODE_ERTM:
3647 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3648 			chan->mode = l2cap_select_mode(rfc.mode,
3649 						       chan->conn->feat_mask);
3650 			break;
3651 		}
3652 
3653 		if (remote_efs) {
3654 			if (__l2cap_efs_supported(chan->conn))
3655 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3656 			else
3657 				return -ECONNREFUSED;
3658 		}
3659 
3660 		if (chan->mode != rfc.mode)
3661 			return -ECONNREFUSED;
3662 
3663 		break;
3664 	}
3665 
3666 done:
3667 	if (chan->mode != rfc.mode) {
3668 		result = L2CAP_CONF_UNACCEPT;
3669 		rfc.mode = chan->mode;
3670 
3671 		if (chan->num_conf_rsp == 1)
3672 			return -ECONNREFUSED;
3673 
3674 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3675 				   (unsigned long) &rfc, endptr - ptr);
3676 	}
3677 
3678 	if (result == L2CAP_CONF_SUCCESS) {
3679 		/* Configure output options and let the other side know
3680 		 * which ones we don't like. */
3681 
3682 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3683 			result = L2CAP_CONF_UNACCEPT;
3684 		else {
3685 			chan->omtu = mtu;
3686 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3687 		}
3688 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3689 
3690 		if (remote_efs) {
3691 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3692 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3693 			    efs.stype != chan->local_stype) {
3694 
3695 				result = L2CAP_CONF_UNACCEPT;
3696 
3697 				if (chan->num_conf_req >= 1)
3698 					return -ECONNREFUSED;
3699 
3700 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3701 						   sizeof(efs),
3702 						   (unsigned long) &efs, endptr - ptr);
3703 			} else {
3704 				/* Send PENDING Conf Rsp */
3705 				result = L2CAP_CONF_PENDING;
3706 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3707 			}
3708 		}
3709 
3710 		switch (rfc.mode) {
3711 		case L2CAP_MODE_BASIC:
3712 			chan->fcs = L2CAP_FCS_NONE;
3713 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3714 			break;
3715 
3716 		case L2CAP_MODE_ERTM:
3717 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3718 				chan->remote_tx_win = rfc.txwin_size;
3719 			else
3720 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3721 
3722 			chan->remote_max_tx = rfc.max_transmit;
3723 
3724 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3725 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3726 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3727 			rfc.max_pdu_size = cpu_to_le16(size);
3728 			chan->remote_mps = size;
3729 
3730 			__l2cap_set_ertm_timeouts(chan, &rfc);
3731 
3732 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3733 
3734 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3735 					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3736 
3737 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3738 				chan->remote_id = efs.id;
3739 				chan->remote_stype = efs.stype;
3740 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3741 				chan->remote_flush_to =
3742 					le32_to_cpu(efs.flush_to);
3743 				chan->remote_acc_lat =
3744 					le32_to_cpu(efs.acc_lat);
3745 				chan->remote_sdu_itime =
3746 					le32_to_cpu(efs.sdu_itime);
3747 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3748 						   sizeof(efs),
3749 						   (unsigned long) &efs, endptr - ptr);
3750 			}
3751 			break;
3752 
3753 		case L2CAP_MODE_STREAMING:
3754 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3755 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3756 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3757 			rfc.max_pdu_size = cpu_to_le16(size);
3758 			chan->remote_mps = size;
3759 
3760 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3761 
3762 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3763 					   (unsigned long) &rfc, endptr - ptr);
3764 
3765 			break;
3766 
3767 		default:
3768 			result = L2CAP_CONF_UNACCEPT;
3769 
3770 			memset(&rfc, 0, sizeof(rfc));
3771 			rfc.mode = chan->mode;
3772 		}
3773 
3774 		if (result == L2CAP_CONF_SUCCESS)
3775 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3776 	}
3777 	rsp->scid   = cpu_to_le16(chan->dcid);
3778 	rsp->result = cpu_to_le16(result);
3779 	rsp->flags  = cpu_to_le16(0);
3780 
3781 	return ptr - data;
3782 }
3783 
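/* Parse a Configuration Response and build the follow-up Configuration
 * Request into data, adopting the parameters proposed by the peer where
 * they are acceptable.
 */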
3784 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3785 				void *data, size_t size, u16 *result)
3786 {
3787 	struct l2cap_conf_req *req = data;
3788 	void *ptr = req->data;
3789 	void *endptr = data + size;
3790 	int type, olen;
3791 	unsigned long val;
3792 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3793 	struct l2cap_conf_efs efs;
3794 
3795 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3796 
3797 	while (len >= L2CAP_CONF_OPT_SIZE) {
3798 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3799 		if (len < 0)
3800 			break;
3801 
3802 		switch (type) {
3803 		case L2CAP_CONF_MTU:
3804 			if (olen != 2)
3805 				break;
3806 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3807 				*result = L2CAP_CONF_UNACCEPT;
3808 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3809 			} else
3810 				chan->imtu = val;
3811 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3812 					   endptr - ptr);
3813 			break;
3814 
3815 		case L2CAP_CONF_FLUSH_TO:
3816 			if (olen != 2)
3817 				break;
3818 			chan->flush_to = val;
3819 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3820 					   chan->flush_to, endptr - ptr);
3821 			break;
3822 
3823 		case L2CAP_CONF_RFC:
3824 			if (olen != sizeof(rfc))
3825 				break;
3826 			memcpy(&rfc, (void *)val, olen);
3827 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3828 			    rfc.mode != chan->mode)
3829 				return -ECONNREFUSED;
3830 			chan->fcs = 0;
3831 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3832 					   (unsigned long) &rfc, endptr - ptr);
3833 			break;
3834 
3835 		case L2CAP_CONF_EWS:
3836 			if (olen != 2)
3837 				break;
3838 			chan->ack_win = min_t(u16, val, chan->ack_win);
3839 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3840 					   chan->tx_win, endptr - ptr);
3841 			break;
3842 
3843 		case L2CAP_CONF_EFS:
3844 			if (olen != sizeof(efs))
3845 				break;
3846 			memcpy(&efs, (void *)val, olen);
3847 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3848 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3849 			    efs.stype != chan->local_stype)
3850 				return -ECONNREFUSED;
3851 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3852 					   (unsigned long) &efs, endptr - ptr);
3853 			break;
3854 
3855 		case L2CAP_CONF_FCS:
3856 			if (olen != 1)
3857 				break;
3858 			if (*result == L2CAP_CONF_PENDING)
3859 				if (val == L2CAP_FCS_NONE)
3860 					set_bit(CONF_RECV_NO_FCS,
3861 						&chan->conf_state);
3862 			break;
3863 		}
3864 	}
3865 
3866 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3867 		return -ECONNREFUSED;
3868 
3869 	chan->mode = rfc.mode;
3870 
3871 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3872 		switch (rfc.mode) {
3873 		case L2CAP_MODE_ERTM:
3874 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3875 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3876 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3877 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3878 				chan->ack_win = min_t(u16, chan->ack_win,
3879 						      rfc.txwin_size);
3880 
3881 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3882 				chan->local_msdu = le16_to_cpu(efs.msdu);
3883 				chan->local_sdu_itime =
3884 					le32_to_cpu(efs.sdu_itime);
3885 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3886 				chan->local_flush_to =
3887 					le32_to_cpu(efs.flush_to);
3888 			}
3889 			break;
3890 
3891 		case L2CAP_MODE_STREAMING:
3892 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3893 		}
3894 	}
3895 
3896 	req->dcid   = cpu_to_le16(chan->dcid);
3897 	req->flags  = cpu_to_le16(0);
3898 
3899 	return ptr - data;
3900 }
3901 
3902 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3903 				u16 result, u16 flags)
3904 {
3905 	struct l2cap_conf_rsp *rsp = data;
3906 	void *ptr = rsp->data;
3907 
3908 	BT_DBG("chan %p", chan);
3909 
3910 	rsp->scid   = cpu_to_le16(chan->dcid);
3911 	rsp->result = cpu_to_le16(result);
3912 	rsp->flags  = cpu_to_le16(flags);
3913 
3914 	return ptr - data;
3915 }
3916 
3917 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3918 {
3919 	struct l2cap_le_conn_rsp rsp;
3920 	struct l2cap_conn *conn = chan->conn;
3921 
3922 	BT_DBG("chan %p", chan);
3923 
3924 	rsp.dcid    = cpu_to_le16(chan->scid);
3925 	rsp.mtu     = cpu_to_le16(chan->imtu);
3926 	rsp.mps     = cpu_to_le16(chan->mps);
3927 	rsp.credits = cpu_to_le16(chan->rx_credits);
3928 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3929 
3930 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3931 		       &rsp);
3932 }
3933 
3934 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3935 {
3936 	struct {
3937 		struct l2cap_ecred_conn_rsp rsp;
3938 		__le16 dcid[5];
3939 	} __packed pdu;
3940 	struct l2cap_conn *conn = chan->conn;
3941 	u16 ident = chan->ident;
3942 	int i = 0;
3943 
3944 	if (!ident)
3945 		return;
3946 
3947 	BT_DBG("chan %p ident %d", chan, ident);
3948 
3949 	pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
3950 	pdu.rsp.mps     = cpu_to_le16(chan->mps);
3951 	pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
3952 	pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3953 
3954 	mutex_lock(&conn->chan_lock);
3955 
3956 	list_for_each_entry(chan, &conn->chan_l, list) {
3957 		if (chan->ident != ident)
3958 			continue;
3959 
3960 		/* Reset ident so only one response is sent */
3961 		chan->ident = 0;
3962 
3963 		/* Include all channels pending with the same ident */
3964 		pdu.dcid[i++] = cpu_to_le16(chan->scid);
3965 	}
3966 
3967 	mutex_unlock(&conn->chan_lock);
3968 
3969 	l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
3970 			sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
3971 }
3972 
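/* Send the deferred BR/EDR connect (or AMP create channel) response and, if
 * no Configure Request has gone out yet, start configuration of the channel.
 */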
3973 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3974 {
3975 	struct l2cap_conn_rsp rsp;
3976 	struct l2cap_conn *conn = chan->conn;
3977 	u8 buf[128];
3978 	u8 rsp_code;
3979 
3980 	rsp.scid   = cpu_to_le16(chan->dcid);
3981 	rsp.dcid   = cpu_to_le16(chan->scid);
3982 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3983 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3984 
3985 	if (chan->hs_hcon)
3986 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3987 	else
3988 		rsp_code = L2CAP_CONN_RSP;
3989 
3990 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3991 
3992 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3993 
3994 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3995 		return;
3996 
3997 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3998 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3999 	chan->num_conf_req++;
4000 }
4001 
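/* Extract the RFC and extended window size options from a successful
 * Configure Response and apply the negotiated ERTM/streaming parameters.
 */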
4002 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
4003 {
4004 	int type, olen;
4005 	unsigned long val;
4006 	/* Use sane default values in case a misbehaving remote device
4007 	 * did not send an RFC or extended window size option.
4008 	 */
4009 	u16 txwin_ext = chan->ack_win;
4010 	struct l2cap_conf_rfc rfc = {
4011 		.mode = chan->mode,
4012 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
4013 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
4014 		.max_pdu_size = cpu_to_le16(chan->imtu),
4015 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
4016 	};
4017 
4018 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
4019 
4020 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
4021 		return;
4022 
4023 	while (len >= L2CAP_CONF_OPT_SIZE) {
4024 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
4025 		if (len < 0)
4026 			break;
4027 
4028 		switch (type) {
4029 		case L2CAP_CONF_RFC:
4030 			if (olen != sizeof(rfc))
4031 				break;
4032 			memcpy(&rfc, (void *)val, olen);
4033 			break;
4034 		case L2CAP_CONF_EWS:
4035 			if (olen != 2)
4036 				break;
4037 			txwin_ext = val;
4038 			break;
4039 		}
4040 	}
4041 
4042 	switch (rfc.mode) {
4043 	case L2CAP_MODE_ERTM:
4044 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
4045 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
4046 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
4047 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4048 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
4049 		else
4050 			chan->ack_win = min_t(u16, chan->ack_win,
4051 					      rfc.txwin_size);
4052 		break;
4053 	case L2CAP_MODE_STREAMING:
4054 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
4055 	}
4056 }
4057 
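/* If the rejected command was our outstanding Information Request, treat the
 * feature mask exchange as done and let any waiting channels proceed.
 */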
4058 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4059 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4060 				    u8 *data)
4061 {
4062 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
4063 
4064 	if (cmd_len < sizeof(*rej))
4065 		return -EPROTO;
4066 
4067 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4068 		return 0;
4069 
4070 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4071 	    cmd->ident == conn->info_ident) {
4072 		cancel_delayed_work(&conn->info_timer);
4073 
4074 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4075 		conn->info_ident = 0;
4076 
4077 		l2cap_conn_start(conn);
4078 	}
4079 
4080 	return 0;
4081 }
4082 
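/* Handle an incoming Connection Request or AMP Create Channel Request: look
 * up the listening channel for the PSM, create a new channel for the link
 * and reply with the appropriate result and status.
 */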
4083 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
4084 					struct l2cap_cmd_hdr *cmd,
4085 					u8 *data, u8 rsp_code, u8 amp_id)
4086 {
4087 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
4088 	struct l2cap_conn_rsp rsp;
4089 	struct l2cap_chan *chan = NULL, *pchan;
4090 	int result, status = L2CAP_CS_NO_INFO;
4091 
4092 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
4093 	__le16 psm = req->psm;
4094 
4095 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
4096 
4097 	/* Check if we have a socket listening on this psm */
4098 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
4099 					 &conn->hcon->dst, ACL_LINK);
4100 	if (!pchan) {
4101 		result = L2CAP_CR_BAD_PSM;
4102 		goto sendresp;
4103 	}
4104 
4105 	mutex_lock(&conn->chan_lock);
4106 	l2cap_chan_lock(pchan);
4107 
4108 	/* Check if the ACL is secure enough (if not SDP) */
4109 	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
4110 	    !hci_conn_check_link_mode(conn->hcon)) {
4111 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
4112 		result = L2CAP_CR_SEC_BLOCK;
4113 		goto response;
4114 	}
4115 
4116 	result = L2CAP_CR_NO_MEM;
4117 
4118 	/* Check for valid dynamic CID range (as per Erratum 3253) */
4119 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
4120 		result = L2CAP_CR_INVALID_SCID;
4121 		goto response;
4122 	}
4123 
4124 	/* Check if we already have a channel with that dcid */
4125 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
4126 		result = L2CAP_CR_SCID_IN_USE;
4127 		goto response;
4128 	}
4129 
4130 	chan = pchan->ops->new_connection(pchan);
4131 	if (!chan)
4132 		goto response;
4133 
4134 	/* For certain devices (e.g. a HID mouse), support for authentication,
4135 	 * pairing and bonding is optional. For such devices, in order to avoid
4136 	 * keeping the ACL alive for too long after L2CAP disconnection, reset
4137 	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4138 	 */
4139 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4140 
4141 	bacpy(&chan->src, &conn->hcon->src);
4142 	bacpy(&chan->dst, &conn->hcon->dst);
4143 	chan->src_type = bdaddr_src_type(conn->hcon);
4144 	chan->dst_type = bdaddr_dst_type(conn->hcon);
4145 	chan->psm  = psm;
4146 	chan->dcid = scid;
4147 	chan->local_amp_id = amp_id;
4148 
4149 	__l2cap_chan_add(conn, chan);
4150 
4151 	dcid = chan->scid;
4152 
4153 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4154 
4155 	chan->ident = cmd->ident;
4156 
4157 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4158 		if (l2cap_chan_check_security(chan, false)) {
4159 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4160 				l2cap_state_change(chan, BT_CONNECT2);
4161 				result = L2CAP_CR_PEND;
4162 				status = L2CAP_CS_AUTHOR_PEND;
4163 				chan->ops->defer(chan);
4164 			} else {
4165 				/* Force pending result for AMP controllers.
4166 				 * The connection will succeed after the
4167 				 * physical link is up.
4168 				 */
4169 				if (amp_id == AMP_ID_BREDR) {
4170 					l2cap_state_change(chan, BT_CONFIG);
4171 					result = L2CAP_CR_SUCCESS;
4172 				} else {
4173 					l2cap_state_change(chan, BT_CONNECT2);
4174 					result = L2CAP_CR_PEND;
4175 				}
4176 				status = L2CAP_CS_NO_INFO;
4177 			}
4178 		} else {
4179 			l2cap_state_change(chan, BT_CONNECT2);
4180 			result = L2CAP_CR_PEND;
4181 			status = L2CAP_CS_AUTHEN_PEND;
4182 		}
4183 	} else {
4184 		l2cap_state_change(chan, BT_CONNECT2);
4185 		result = L2CAP_CR_PEND;
4186 		status = L2CAP_CS_NO_INFO;
4187 	}
4188 
4189 response:
4190 	l2cap_chan_unlock(pchan);
4191 	mutex_unlock(&conn->chan_lock);
4192 	l2cap_chan_put(pchan);
4193 
4194 sendresp:
4195 	rsp.scid   = cpu_to_le16(scid);
4196 	rsp.dcid   = cpu_to_le16(dcid);
4197 	rsp.result = cpu_to_le16(result);
4198 	rsp.status = cpu_to_le16(status);
4199 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
4200 
4201 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4202 		struct l2cap_info_req info;
4203 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4204 
4205 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4206 		conn->info_ident = l2cap_get_ident(conn);
4207 
4208 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4209 
4210 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4211 			       sizeof(info), &info);
4212 	}
4213 
4214 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4215 	    result == L2CAP_CR_SUCCESS) {
4216 		u8 buf[128];
4217 		set_bit(CONF_REQ_SENT, &chan->conf_state);
4218 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4219 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4220 		chan->num_conf_req++;
4221 	}
4222 
4223 	return chan;
4224 }
4225 
4226 static int l2cap_connect_req(struct l2cap_conn *conn,
4227 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4228 {
4229 	struct hci_dev *hdev = conn->hcon->hdev;
4230 	struct hci_conn *hcon = conn->hcon;
4231 
4232 	if (cmd_len < sizeof(struct l2cap_conn_req))
4233 		return -EPROTO;
4234 
4235 	hci_dev_lock(hdev);
4236 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4237 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4238 		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
4239 	hci_dev_unlock(hdev);
4240 
4241 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4242 	return 0;
4243 }
4244 
4245 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4246 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4247 				    u8 *data)
4248 {
4249 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4250 	u16 scid, dcid, result, status;
4251 	struct l2cap_chan *chan;
4252 	u8 req[128];
4253 	int err;
4254 
4255 	if (cmd_len < sizeof(*rsp))
4256 		return -EPROTO;
4257 
4258 	scid   = __le16_to_cpu(rsp->scid);
4259 	dcid   = __le16_to_cpu(rsp->dcid);
4260 	result = __le16_to_cpu(rsp->result);
4261 	status = __le16_to_cpu(rsp->status);
4262 
4263 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4264 	       dcid, scid, result, status);
4265 
4266 	mutex_lock(&conn->chan_lock);
4267 
4268 	if (scid) {
4269 		chan = __l2cap_get_chan_by_scid(conn, scid);
4270 		if (!chan) {
4271 			err = -EBADSLT;
4272 			goto unlock;
4273 		}
4274 	} else {
4275 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4276 		if (!chan) {
4277 			err = -EBADSLT;
4278 			goto unlock;
4279 		}
4280 	}
4281 
4282 	err = 0;
4283 
4284 	l2cap_chan_lock(chan);
4285 
4286 	switch (result) {
4287 	case L2CAP_CR_SUCCESS:
4288 		l2cap_state_change(chan, BT_CONFIG);
4289 		chan->ident = 0;
4290 		chan->dcid = dcid;
4291 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4292 
4293 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4294 			break;
4295 
4296 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4297 			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
4298 		chan->num_conf_req++;
4299 		break;
4300 
4301 	case L2CAP_CR_PEND:
4302 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4303 		break;
4304 
4305 	default:
4306 		l2cap_chan_del(chan, ECONNREFUSED);
4307 		break;
4308 	}
4309 
4310 	l2cap_chan_unlock(chan);
4311 
4312 unlock:
4313 	mutex_unlock(&conn->chan_lock);
4314 
4315 	return err;
4316 }
4317 
4318 static inline void set_default_fcs(struct l2cap_chan *chan)
4319 {
4320 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4321 	 * sides request it.
4322 	 */
4323 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4324 		chan->fcs = L2CAP_FCS_NONE;
4325 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4326 		chan->fcs = L2CAP_FCS_CRC16;
4327 }
4328 
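/* Finish the local side of an EFS configuration by sending a successful
 * Configure Response for the channel.
 */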
4329 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4330 				    u8 ident, u16 flags)
4331 {
4332 	struct l2cap_conn *conn = chan->conn;
4333 
4334 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4335 	       flags);
4336 
4337 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4338 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4339 
4340 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4341 		       l2cap_build_conf_rsp(chan, data,
4342 					    L2CAP_CONF_SUCCESS, flags), data);
4343 }
4344 
4345 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4346 				   u16 scid, u16 dcid)
4347 {
4348 	struct l2cap_cmd_rej_cid rej;
4349 
4350 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4351 	rej.scid = __cpu_to_le16(scid);
4352 	rej.dcid = __cpu_to_le16(dcid);
4353 
4354 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4355 }
4356 
4357 static inline int l2cap_config_req(struct l2cap_conn *conn,
4358 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4359 				   u8 *data)
4360 {
4361 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4362 	u16 dcid, flags;
4363 	u8 rsp[64];
4364 	struct l2cap_chan *chan;
4365 	int len, err = 0;
4366 
4367 	if (cmd_len < sizeof(*req))
4368 		return -EPROTO;
4369 
4370 	dcid  = __le16_to_cpu(req->dcid);
4371 	flags = __le16_to_cpu(req->flags);
4372 
4373 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4374 
4375 	chan = l2cap_get_chan_by_scid(conn, dcid);
4376 	if (!chan) {
4377 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4378 		return 0;
4379 	}
4380 
4381 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4382 	    chan->state != BT_CONNECTED) {
4383 		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4384 				       chan->dcid);
4385 		goto unlock;
4386 	}
4387 
4388 	/* Reject if config buffer is too small. */
4389 	len = cmd_len - sizeof(*req);
4390 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
4391 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4392 			       l2cap_build_conf_rsp(chan, rsp,
4393 			       L2CAP_CONF_REJECT, flags), rsp);
4394 		goto unlock;
4395 	}
4396 
4397 	/* Store config. */
4398 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
4399 	chan->conf_len += len;
4400 
4401 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4402 		/* Incomplete config. Send empty response. */
4403 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4404 			       l2cap_build_conf_rsp(chan, rsp,
4405 			       L2CAP_CONF_SUCCESS, flags), rsp);
4406 		goto unlock;
4407 	}
4408 
4409 	/* Complete config. */
4410 	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4411 	if (len < 0) {
4412 		l2cap_send_disconn_req(chan, ECONNRESET);
4413 		goto unlock;
4414 	}
4415 
4416 	chan->ident = cmd->ident;
4417 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4418 	chan->num_conf_rsp++;
4419 
4420 	/* Reset config buffer. */
4421 	chan->conf_len = 0;
4422 
4423 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4424 		goto unlock;
4425 
4426 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4427 		set_default_fcs(chan);
4428 
4429 		if (chan->mode == L2CAP_MODE_ERTM ||
4430 		    chan->mode == L2CAP_MODE_STREAMING)
4431 			err = l2cap_ertm_init(chan);
4432 
4433 		if (err < 0)
4434 			l2cap_send_disconn_req(chan, -err);
4435 		else
4436 			l2cap_chan_ready(chan);
4437 
4438 		goto unlock;
4439 	}
4440 
4441 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4442 		u8 buf[64];
4443 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4444 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4445 		chan->num_conf_req++;
4446 	}
4447 
4448 	/* Got Conf Rsp PENDING from the remote side and assume we sent
4449 	 * Conf Rsp PENDING in the code above */
4450 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4451 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4452 
4453 		/* check compatibility */
4454 
4455 		/* Send rsp for BR/EDR channel */
4456 		if (!chan->hs_hcon)
4457 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4458 		else
4459 			chan->ident = cmd->ident;
4460 	}
4461 
4462 unlock:
4463 	l2cap_chan_unlock(chan);
4464 	return err;
4465 }
4466 
4467 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4468 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4469 				   u8 *data)
4470 {
4471 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4472 	u16 scid, flags, result;
4473 	struct l2cap_chan *chan;
4474 	int len = cmd_len - sizeof(*rsp);
4475 	int err = 0;
4476 
4477 	if (cmd_len < sizeof(*rsp))
4478 		return -EPROTO;
4479 
4480 	scid   = __le16_to_cpu(rsp->scid);
4481 	flags  = __le16_to_cpu(rsp->flags);
4482 	result = __le16_to_cpu(rsp->result);
4483 
4484 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4485 	       result, len);
4486 
4487 	chan = l2cap_get_chan_by_scid(conn, scid);
4488 	if (!chan)
4489 		return 0;
4490 
4491 	switch (result) {
4492 	case L2CAP_CONF_SUCCESS:
4493 		l2cap_conf_rfc_get(chan, rsp->data, len);
4494 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4495 		break;
4496 
4497 	case L2CAP_CONF_PENDING:
4498 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4499 
4500 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4501 			char buf[64];
4502 
4503 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4504 						   buf, sizeof(buf), &result);
4505 			if (len < 0) {
4506 				l2cap_send_disconn_req(chan, ECONNRESET);
4507 				goto done;
4508 			}
4509 
4510 			if (!chan->hs_hcon) {
4511 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4512 							0);
4513 			} else {
4514 				if (l2cap_check_efs(chan)) {
4515 					amp_create_logical_link(chan);
4516 					chan->ident = cmd->ident;
4517 				}
4518 			}
4519 		}
4520 		goto done;
4521 
4522 	case L2CAP_CONF_UNACCEPT:
4523 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4524 			char req[64];
4525 
4526 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4527 				l2cap_send_disconn_req(chan, ECONNRESET);
4528 				goto done;
4529 			}
4530 
4531 			/* throw out any old stored conf requests */
4532 			result = L2CAP_CONF_SUCCESS;
4533 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4534 						   req, sizeof(req), &result);
4535 			if (len < 0) {
4536 				l2cap_send_disconn_req(chan, ECONNRESET);
4537 				goto done;
4538 			}
4539 
4540 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4541 				       L2CAP_CONF_REQ, len, req);
4542 			chan->num_conf_req++;
4543 			if (result != L2CAP_CONF_SUCCESS)
4544 				goto done;
4545 			break;
4546 		}
4547 		fallthrough;
4548 
4549 	default:
4550 		l2cap_chan_set_err(chan, ECONNRESET);
4551 
4552 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4553 		l2cap_send_disconn_req(chan, ECONNRESET);
4554 		goto done;
4555 	}
4556 
4557 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4558 		goto done;
4559 
4560 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4561 
4562 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4563 		set_default_fcs(chan);
4564 
4565 		if (chan->mode == L2CAP_MODE_ERTM ||
4566 		    chan->mode == L2CAP_MODE_STREAMING)
4567 			err = l2cap_ertm_init(chan);
4568 
4569 		if (err < 0)
4570 			l2cap_send_disconn_req(chan, -err);
4571 		else
4572 			l2cap_chan_ready(chan);
4573 	}
4574 
4575 done:
4576 	l2cap_chan_unlock(chan);
4577 	return err;
4578 }
4579 
4580 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4581 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4582 				       u8 *data)
4583 {
4584 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4585 	struct l2cap_disconn_rsp rsp;
4586 	u16 dcid, scid;
4587 	struct l2cap_chan *chan;
4588 
4589 	if (cmd_len != sizeof(*req))
4590 		return -EPROTO;
4591 
4592 	scid = __le16_to_cpu(req->scid);
4593 	dcid = __le16_to_cpu(req->dcid);
4594 
4595 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4596 
4597 	mutex_lock(&conn->chan_lock);
4598 
4599 	chan = __l2cap_get_chan_by_scid(conn, dcid);
4600 	if (!chan) {
4601 		mutex_unlock(&conn->chan_lock);
4602 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4603 		return 0;
4604 	}
4605 
4606 	l2cap_chan_hold(chan);
4607 	l2cap_chan_lock(chan);
4608 
4609 	rsp.dcid = cpu_to_le16(chan->scid);
4610 	rsp.scid = cpu_to_le16(chan->dcid);
4611 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4612 
4613 	chan->ops->set_shutdown(chan);
4614 
4615 	l2cap_chan_del(chan, ECONNRESET);
4616 
4617 	chan->ops->close(chan);
4618 
4619 	l2cap_chan_unlock(chan);
4620 	l2cap_chan_put(chan);
4621 
4622 	mutex_unlock(&conn->chan_lock);
4623 
4624 	return 0;
4625 }
4626 
4627 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4628 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4629 				       u8 *data)
4630 {
4631 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4632 	u16 dcid, scid;
4633 	struct l2cap_chan *chan;
4634 
4635 	if (cmd_len != sizeof(*rsp))
4636 		return -EPROTO;
4637 
4638 	scid = __le16_to_cpu(rsp->scid);
4639 	dcid = __le16_to_cpu(rsp->dcid);
4640 
4641 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4642 
4643 	mutex_lock(&conn->chan_lock);
4644 
4645 	chan = __l2cap_get_chan_by_scid(conn, scid);
4646 	if (!chan) {
4647 		mutex_unlock(&conn->chan_lock);
4648 		return 0;
4649 	}
4650 
4651 	l2cap_chan_hold(chan);
4652 	l2cap_chan_lock(chan);
4653 
4654 	if (chan->state != BT_DISCONN) {
4655 		l2cap_chan_unlock(chan);
4656 		l2cap_chan_put(chan);
4657 		mutex_unlock(&conn->chan_lock);
4658 		return 0;
4659 	}
4660 
4661 	l2cap_chan_del(chan, 0);
4662 
4663 	chan->ops->close(chan);
4664 
4665 	l2cap_chan_unlock(chan);
4666 	l2cap_chan_put(chan);
4667 
4668 	mutex_unlock(&conn->chan_lock);
4669 
4670 	return 0;
4671 }
4672 
4673 static inline int l2cap_information_req(struct l2cap_conn *conn,
4674 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4675 					u8 *data)
4676 {
4677 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4678 	u16 type;
4679 
4680 	if (cmd_len != sizeof(*req))
4681 		return -EPROTO;
4682 
4683 	type = __le16_to_cpu(req->type);
4684 
4685 	BT_DBG("type 0x%4.4x", type);
4686 
4687 	if (type == L2CAP_IT_FEAT_MASK) {
4688 		u8 buf[8];
4689 		u32 feat_mask = l2cap_feat_mask;
4690 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4691 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4692 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4693 		if (!disable_ertm)
4694 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4695 				| L2CAP_FEAT_FCS;
4696 		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4697 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4698 				| L2CAP_FEAT_EXT_WINDOW;
4699 
4700 		put_unaligned_le32(feat_mask, rsp->data);
4701 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4702 			       buf);
4703 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4704 		u8 buf[12];
4705 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4706 
4707 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4708 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4709 		rsp->data[0] = conn->local_fixed_chan;
4710 		memset(rsp->data + 1, 0, 7);
4711 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4712 			       buf);
4713 	} else {
4714 		struct l2cap_info_rsp rsp;
4715 		rsp.type   = cpu_to_le16(type);
4716 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4717 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4718 			       &rsp);
4719 	}
4720 
4721 	return 0;
4722 }
4723 
4724 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4725 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4726 					u8 *data)
4727 {
4728 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4729 	u16 type, result;
4730 
4731 	if (cmd_len < sizeof(*rsp))
4732 		return -EPROTO;
4733 
4734 	type   = __le16_to_cpu(rsp->type);
4735 	result = __le16_to_cpu(rsp->result);
4736 
4737 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4738 
4739 	/* L2CAP Info req/rsp are not bound to channels, so add extra checks */
4740 	if (cmd->ident != conn->info_ident ||
4741 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4742 		return 0;
4743 
4744 	cancel_delayed_work(&conn->info_timer);
4745 
4746 	if (result != L2CAP_IR_SUCCESS) {
4747 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4748 		conn->info_ident = 0;
4749 
4750 		l2cap_conn_start(conn);
4751 
4752 		return 0;
4753 	}
4754 
4755 	switch (type) {
4756 	case L2CAP_IT_FEAT_MASK:
4757 		conn->feat_mask = get_unaligned_le32(rsp->data);
4758 
4759 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4760 			struct l2cap_info_req req;
4761 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4762 
4763 			conn->info_ident = l2cap_get_ident(conn);
4764 
4765 			l2cap_send_cmd(conn, conn->info_ident,
4766 				       L2CAP_INFO_REQ, sizeof(req), &req);
4767 		} else {
4768 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4769 			conn->info_ident = 0;
4770 
4771 			l2cap_conn_start(conn);
4772 		}
4773 		break;
4774 
4775 	case L2CAP_IT_FIXED_CHAN:
4776 		conn->remote_fixed_chan = rsp->data[0];
4777 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4778 		conn->info_ident = 0;
4779 
4780 		l2cap_conn_start(conn);
4781 		break;
4782 	}
4783 
4784 	return 0;
4785 }
4786 
4787 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4788 				    struct l2cap_cmd_hdr *cmd,
4789 				    u16 cmd_len, void *data)
4790 {
4791 	struct l2cap_create_chan_req *req = data;
4792 	struct l2cap_create_chan_rsp rsp;
4793 	struct l2cap_chan *chan;
4794 	struct hci_dev *hdev;
4795 	u16 psm, scid;
4796 
4797 	if (cmd_len != sizeof(*req))
4798 		return -EPROTO;
4799 
4800 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4801 		return -EINVAL;
4802 
4803 	psm = le16_to_cpu(req->psm);
4804 	scid = le16_to_cpu(req->scid);
4805 
4806 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4807 
4808 	/* For controller id 0 make BR/EDR connection */
4809 	if (req->amp_id == AMP_ID_BREDR) {
4810 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4811 			      req->amp_id);
4812 		return 0;
4813 	}
4814 
4815 	/* Validate AMP controller id */
4816 	hdev = hci_dev_get(req->amp_id);
4817 	if (!hdev)
4818 		goto error;
4819 
4820 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4821 		hci_dev_put(hdev);
4822 		goto error;
4823 	}
4824 
4825 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4826 			     req->amp_id);
4827 	if (chan) {
4828 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4829 		struct hci_conn *hs_hcon;
4830 
4831 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4832 						  &conn->hcon->dst);
4833 		if (!hs_hcon) {
4834 			hci_dev_put(hdev);
4835 			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4836 					       chan->dcid);
4837 			return 0;
4838 		}
4839 
4840 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4841 
4842 		mgr->bredr_chan = chan;
4843 		chan->hs_hcon = hs_hcon;
4844 		chan->fcs = L2CAP_FCS_NONE;
4845 		conn->mtu = hdev->block_mtu;
4846 	}
4847 
4848 	hci_dev_put(hdev);
4849 
4850 	return 0;
4851 
4852 error:
4853 	rsp.dcid = 0;
4854 	rsp.scid = cpu_to_le16(scid);
4855 	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4856 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4857 
4858 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4859 		       sizeof(rsp), &rsp);
4860 
4861 	return 0;
4862 }
4863 
4864 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4865 {
4866 	struct l2cap_move_chan_req req;
4867 	u8 ident;
4868 
4869 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4870 
4871 	ident = l2cap_get_ident(chan->conn);
4872 	chan->ident = ident;
4873 
4874 	req.icid = cpu_to_le16(chan->scid);
4875 	req.dest_amp_id = dest_amp_id;
4876 
4877 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4878 		       &req);
4879 
4880 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4881 }
4882 
4883 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4884 {
4885 	struct l2cap_move_chan_rsp rsp;
4886 
4887 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4888 
4889 	rsp.icid = cpu_to_le16(chan->dcid);
4890 	rsp.result = cpu_to_le16(result);
4891 
4892 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4893 		       sizeof(rsp), &rsp);
4894 }
4895 
4896 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4897 {
4898 	struct l2cap_move_chan_cfm cfm;
4899 
4900 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4901 
4902 	chan->ident = l2cap_get_ident(chan->conn);
4903 
4904 	cfm.icid = cpu_to_le16(chan->scid);
4905 	cfm.result = cpu_to_le16(result);
4906 
4907 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4908 		       sizeof(cfm), &cfm);
4909 
4910 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4911 }
4912 
4913 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4914 {
4915 	struct l2cap_move_chan_cfm cfm;
4916 
4917 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4918 
4919 	cfm.icid = cpu_to_le16(icid);
4920 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4921 
4922 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4923 		       sizeof(cfm), &cfm);
4924 }
4925 
4926 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4927 					 u16 icid)
4928 {
4929 	struct l2cap_move_chan_cfm_rsp rsp;
4930 
4931 	BT_DBG("icid 0x%4.4x", icid);
4932 
4933 	rsp.icid = cpu_to_le16(icid);
4934 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4935 }
4936 
4937 static void __release_logical_link(struct l2cap_chan *chan)
4938 {
4939 	chan->hs_hchan = NULL;
4940 	chan->hs_hcon = NULL;
4941 
4942 	/* Placeholder - release the logical link */
4943 }
4944 
4945 static void l2cap_logical_fail(struct l2cap_chan *chan)
4946 {
4947 	/* Logical link setup failed */
4948 	if (chan->state != BT_CONNECTED) {
4949 		/* Create channel failure, disconnect */
4950 		l2cap_send_disconn_req(chan, ECONNRESET);
4951 		return;
4952 	}
4953 
4954 	switch (chan->move_role) {
4955 	case L2CAP_MOVE_ROLE_RESPONDER:
4956 		l2cap_move_done(chan);
4957 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4958 		break;
4959 	case L2CAP_MOVE_ROLE_INITIATOR:
4960 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4961 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4962 			/* Remote has only sent pending or
4963 			 * success responses, clean up
4964 			 */
4965 			l2cap_move_done(chan);
4966 		}
4967 
4968 		/* Other amp move states imply that the move
4969 		 * has already aborted
4970 		 */
4971 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4972 		break;
4973 	}
4974 }
4975 
4976 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4977 					struct hci_chan *hchan)
4978 {
4979 	struct l2cap_conf_rsp rsp;
4980 
4981 	chan->hs_hchan = hchan;
4982 	chan->hs_hcon->l2cap_data = chan->conn;
4983 
4984 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4985 
4986 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4987 		int err;
4988 
4989 		set_default_fcs(chan);
4990 
4991 		err = l2cap_ertm_init(chan);
4992 		if (err < 0)
4993 			l2cap_send_disconn_req(chan, -err);
4994 		else
4995 			l2cap_chan_ready(chan);
4996 	}
4997 }
4998 
4999 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
5000 				      struct hci_chan *hchan)
5001 {
5002 	chan->hs_hcon = hchan->conn;
5003 	chan->hs_hcon->l2cap_data = chan->conn;
5004 
5005 	BT_DBG("move_state %d", chan->move_state);
5006 
5007 	switch (chan->move_state) {
5008 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5009 		/* Move confirm will be sent after a success
5010 		 * response is received
5011 		 */
5012 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5013 		break;
5014 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
5015 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5016 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5017 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5018 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5019 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5020 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5021 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5022 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5023 		}
5024 		break;
5025 	default:
5026 		/* Move was not in expected state, free the channel */
5027 		__release_logical_link(chan);
5028 
5029 		chan->move_state = L2CAP_MOVE_STABLE;
5030 	}
5031 }
5032 
5033 /* Call with chan locked */
5034 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
5035 		       u8 status)
5036 {
5037 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
5038 
5039 	if (status) {
5040 		l2cap_logical_fail(chan);
5041 		__release_logical_link(chan);
5042 		return;
5043 	}
5044 
5045 	if (chan->state != BT_CONNECTED) {
5046 		/* Ignore logical link if channel is on BR/EDR */
5047 		if (chan->local_amp_id != AMP_ID_BREDR)
5048 			l2cap_logical_finish_create(chan, hchan);
5049 	} else {
5050 		l2cap_logical_finish_move(chan, hchan);
5051 	}
5052 }
5053 
5054 void l2cap_move_start(struct l2cap_chan *chan)
5055 {
5056 	BT_DBG("chan %p", chan);
5057 
5058 	if (chan->local_amp_id == AMP_ID_BREDR) {
5059 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
5060 			return;
5061 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5062 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5063 		/* Placeholder - start physical link setup */
5064 	} else {
5065 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5066 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5067 		chan->move_id = 0;
5068 		l2cap_move_setup(chan);
5069 		l2cap_send_move_chan_req(chan, 0);
5070 	}
5071 }
5072 
5073 static void l2cap_do_create(struct l2cap_chan *chan, int result,
5074 			    u8 local_amp_id, u8 remote_amp_id)
5075 {
5076 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
5077 	       local_amp_id, remote_amp_id);
5078 
5079 	chan->fcs = L2CAP_FCS_NONE;
5080 
5081 	/* Outgoing channel on AMP */
5082 	if (chan->state == BT_CONNECT) {
5083 		if (result == L2CAP_CR_SUCCESS) {
5084 			chan->local_amp_id = local_amp_id;
5085 			l2cap_send_create_chan_req(chan, remote_amp_id);
5086 		} else {
5087 			/* Revert to BR/EDR connect */
5088 			l2cap_send_conn_req(chan);
5089 		}
5090 
5091 		return;
5092 	}
5093 
5094 	/* Incoming channel on AMP */
5095 	if (__l2cap_no_conn_pending(chan)) {
5096 		struct l2cap_conn_rsp rsp;
5097 		char buf[128];
5098 		rsp.scid = cpu_to_le16(chan->dcid);
5099 		rsp.dcid = cpu_to_le16(chan->scid);
5100 
5101 		if (result == L2CAP_CR_SUCCESS) {
5102 			/* Send successful response */
5103 			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
5104 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5105 		} else {
5106 			/* Send negative response */
5107 			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
5108 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5109 		}
5110 
5111 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
5112 			       sizeof(rsp), &rsp);
5113 
5114 		if (result == L2CAP_CR_SUCCESS) {
5115 			l2cap_state_change(chan, BT_CONFIG);
5116 			set_bit(CONF_REQ_SENT, &chan->conf_state);
5117 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
5118 				       L2CAP_CONF_REQ,
5119 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
5120 			chan->num_conf_req++;
5121 		}
5122 	}
5123 }
5124 
5125 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5126 				   u8 remote_amp_id)
5127 {
5128 	l2cap_move_setup(chan);
5129 	chan->move_id = local_amp_id;
5130 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
5131 
5132 	l2cap_send_move_chan_req(chan, remote_amp_id);
5133 }
5134 
5135 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
5136 {
5137 	struct hci_chan *hchan = NULL;
5138 
5139 	/* Placeholder - get hci_chan for logical link */
5140 
5141 	if (hchan) {
5142 		if (hchan->state == BT_CONNECTED) {
5143 			/* Logical link is ready to go */
5144 			chan->hs_hcon = hchan->conn;
5145 			chan->hs_hcon->l2cap_data = chan->conn;
5146 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5147 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5148 
5149 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5150 		} else {
5151 			/* Wait for logical link to be ready */
5152 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5153 		}
5154 	} else {
5155 		/* Logical link not available */
5156 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
5157 	}
5158 }
5159 
5160 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5161 {
5162 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5163 		u8 rsp_result;
5164 		if (result == -EINVAL)
5165 			rsp_result = L2CAP_MR_BAD_ID;
5166 		else
5167 			rsp_result = L2CAP_MR_NOT_ALLOWED;
5168 
5169 		l2cap_send_move_chan_rsp(chan, rsp_result);
5170 	}
5171 
5172 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
5173 	chan->move_state = L2CAP_MOVE_STABLE;
5174 
5175 	/* Restart data transmission */
5176 	l2cap_ertm_send(chan);
5177 }
5178 
5179 /* Invoke with locked chan */
5180 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5181 {
5182 	u8 local_amp_id = chan->local_amp_id;
5183 	u8 remote_amp_id = chan->remote_amp_id;
5184 
5185 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5186 	       chan, result, local_amp_id, remote_amp_id);
5187 
5188 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
5189 		return;
5190 
5191 	if (chan->state != BT_CONNECTED) {
5192 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5193 	} else if (result != L2CAP_MR_SUCCESS) {
5194 		l2cap_do_move_cancel(chan, result);
5195 	} else {
5196 		switch (chan->move_role) {
5197 		case L2CAP_MOVE_ROLE_INITIATOR:
5198 			l2cap_do_move_initiate(chan, local_amp_id,
5199 					       remote_amp_id);
5200 			break;
5201 		case L2CAP_MOVE_ROLE_RESPONDER:
5202 			l2cap_do_move_respond(chan, result);
5203 			break;
5204 		default:
5205 			l2cap_do_move_cancel(chan, result);
5206 			break;
5207 		}
5208 	}
5209 }
5210 
5211 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5212 					 struct l2cap_cmd_hdr *cmd,
5213 					 u16 cmd_len, void *data)
5214 {
5215 	struct l2cap_move_chan_req *req = data;
5216 	struct l2cap_move_chan_rsp rsp;
5217 	struct l2cap_chan *chan;
5218 	u16 icid = 0;
5219 	u16 result = L2CAP_MR_NOT_ALLOWED;
5220 
5221 	if (cmd_len != sizeof(*req))
5222 		return -EPROTO;
5223 
5224 	icid = le16_to_cpu(req->icid);
5225 
5226 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
5227 
5228 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
5229 		return -EINVAL;
5230 
5231 	chan = l2cap_get_chan_by_dcid(conn, icid);
5232 	if (!chan) {
5233 		rsp.icid = cpu_to_le16(icid);
5234 		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5235 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5236 			       sizeof(rsp), &rsp);
5237 		return 0;
5238 	}
5239 
5240 	chan->ident = cmd->ident;
5241 
5242 	if (chan->scid < L2CAP_CID_DYN_START ||
5243 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5244 	    (chan->mode != L2CAP_MODE_ERTM &&
5245 	     chan->mode != L2CAP_MODE_STREAMING)) {
5246 		result = L2CAP_MR_NOT_ALLOWED;
5247 		goto send_move_response;
5248 	}
5249 
5250 	if (chan->local_amp_id == req->dest_amp_id) {
5251 		result = L2CAP_MR_SAME_ID;
5252 		goto send_move_response;
5253 	}
5254 
5255 	if (req->dest_amp_id != AMP_ID_BREDR) {
5256 		struct hci_dev *hdev;
5257 		hdev = hci_dev_get(req->dest_amp_id);
5258 		if (!hdev || hdev->dev_type != HCI_AMP ||
5259 		    !test_bit(HCI_UP, &hdev->flags)) {
5260 			if (hdev)
5261 				hci_dev_put(hdev);
5262 
5263 			result = L2CAP_MR_BAD_ID;
5264 			goto send_move_response;
5265 		}
5266 		hci_dev_put(hdev);
5267 	}
5268 
5269 	/* Detect a move collision.  Only send a collision response
5270 	 * if this side has "lost", otherwise proceed with the move.
5271 	 * The winner has the larger bd_addr.
5272 	 */
5273 	if ((__chan_is_moving(chan) ||
5274 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5275 	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5276 		result = L2CAP_MR_COLLISION;
5277 		goto send_move_response;
5278 	}
5279 
5280 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5281 	l2cap_move_setup(chan);
5282 	chan->move_id = req->dest_amp_id;
5283 
5284 	if (req->dest_amp_id == AMP_ID_BREDR) {
5285 		/* Moving to BR/EDR */
5286 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5287 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5288 			result = L2CAP_MR_PEND;
5289 		} else {
5290 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5291 			result = L2CAP_MR_SUCCESS;
5292 		}
5293 	} else {
5294 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5295 		/* Placeholder - uncomment when amp functions are available */
5296 		/*amp_accept_physical(chan, req->dest_amp_id);*/
5297 		result = L2CAP_MR_PEND;
5298 	}
5299 
5300 send_move_response:
5301 	l2cap_send_move_chan_rsp(chan, result);
5302 
5303 	l2cap_chan_unlock(chan);
5304 
5305 	return 0;
5306 }
5307 
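/* The remote reported success or pending for our Move Channel Request;
 * advance the move state machine accordingly.
 */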
5308 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5309 {
5310 	struct l2cap_chan *chan;
5311 	struct hci_chan *hchan = NULL;
5312 
5313 	chan = l2cap_get_chan_by_scid(conn, icid);
5314 	if (!chan) {
5315 		l2cap_send_move_chan_cfm_icid(conn, icid);
5316 		return;
5317 	}
5318 
5319 	__clear_chan_timer(chan);
5320 	if (result == L2CAP_MR_PEND)
5321 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5322 
5323 	switch (chan->move_state) {
5324 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5325 		/* Move confirm will be sent when logical link
5326 		 * is complete.
5327 		 */
5328 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5329 		break;
5330 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5331 		if (result == L2CAP_MR_PEND) {
5332 			break;
5333 		} else if (test_bit(CONN_LOCAL_BUSY,
5334 				    &chan->conn_state)) {
5335 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5336 		} else {
5337 			/* Logical link is up or moving to BR/EDR,
5338 			 * proceed with move
5339 			 */
5340 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5341 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5342 		}
5343 		break;
5344 	case L2CAP_MOVE_WAIT_RSP:
5345 		/* Moving to AMP */
5346 		if (result == L2CAP_MR_SUCCESS) {
5347 			/* Remote is ready, send confirm immediately
5348 			 * after logical link is ready
5349 			 */
5350 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5351 		} else {
5352 			/* Both logical link and move success
5353 			 * are required to confirm
5354 			 */
5355 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5356 		}
5357 
5358 		/* Placeholder - get hci_chan for logical link */
5359 		if (!hchan) {
5360 			/* Logical link not available */
5361 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5362 			break;
5363 		}
5364 
5365 		/* If the logical link is not yet connected, do not
5366 		 * send confirmation.
5367 		 */
5368 		if (hchan->state != BT_CONNECTED)
5369 			break;
5370 
5371 		/* Logical link is already ready to go */
5372 
5373 		chan->hs_hcon = hchan->conn;
5374 		chan->hs_hcon->l2cap_data = chan->conn;
5375 
5376 		if (result == L2CAP_MR_SUCCESS) {
5377 			/* Can confirm now */
5378 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5379 		} else {
5380 			/* Now only need move success
5381 			 * to confirm
5382 			 */
5383 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5384 		}
5385 
5386 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5387 		break;
5388 	default:
5389 		/* Any other amp move state means the move failed. */
5390 		chan->move_id = chan->local_amp_id;
5391 		l2cap_move_done(chan);
5392 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5393 	}
5394 
5395 	l2cap_chan_unlock(chan);
5396 }
5397 
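/* The remote rejected our Move Channel Request. On a collision this side
 * becomes the responder, otherwise the move is cancelled; in both cases an
 * unconfirmed Move Channel Confirm is sent.
 */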
5398 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5399 			    u16 result)
5400 {
5401 	struct l2cap_chan *chan;
5402 
5403 	chan = l2cap_get_chan_by_ident(conn, ident);
5404 	if (!chan) {
5405 		/* Could not locate channel, icid is best guess */
5406 		l2cap_send_move_chan_cfm_icid(conn, icid);
5407 		return;
5408 	}
5409 
5410 	__clear_chan_timer(chan);
5411 
5412 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5413 		if (result == L2CAP_MR_COLLISION) {
5414 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5415 		} else {
5416 			/* Cleanup - cancel move */
5417 			chan->move_id = chan->local_amp_id;
5418 			l2cap_move_done(chan);
5419 		}
5420 	}
5421 
5422 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5423 
5424 	l2cap_chan_unlock(chan);
5425 }
5426 
5427 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5428 				  struct l2cap_cmd_hdr *cmd,
5429 				  u16 cmd_len, void *data)
5430 {
5431 	struct l2cap_move_chan_rsp *rsp = data;
5432 	u16 icid, result;
5433 
5434 	if (cmd_len != sizeof(*rsp))
5435 		return -EPROTO;
5436 
5437 	icid = le16_to_cpu(rsp->icid);
5438 	result = le16_to_cpu(rsp->result);
5439 
5440 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5441 
5442 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5443 		l2cap_move_continue(conn, icid, result);
5444 	else
5445 		l2cap_move_fail(conn, cmd->ident, icid, result);
5446 
5447 	return 0;
5448 }
5449 
5450 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5451 				      struct l2cap_cmd_hdr *cmd,
5452 				      u16 cmd_len, void *data)
5453 {
5454 	struct l2cap_move_chan_cfm *cfm = data;
5455 	struct l2cap_chan *chan;
5456 	u16 icid, result;
5457 
5458 	if (cmd_len != sizeof(*cfm))
5459 		return -EPROTO;
5460 
5461 	icid = le16_to_cpu(cfm->icid);
5462 	result = le16_to_cpu(cfm->result);
5463 
5464 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5465 
5466 	chan = l2cap_get_chan_by_dcid(conn, icid);
5467 	if (!chan) {
5468 		/* Spec requires a response even if the icid was not found */
5469 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5470 		return 0;
5471 	}
5472 
5473 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5474 		if (result == L2CAP_MC_CONFIRMED) {
5475 			chan->local_amp_id = chan->move_id;
5476 			if (chan->local_amp_id == AMP_ID_BREDR)
5477 				__release_logical_link(chan);
5478 		} else {
5479 			chan->move_id = chan->local_amp_id;
5480 		}
5481 
5482 		l2cap_move_done(chan);
5483 	}
5484 
5485 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5486 
5487 	l2cap_chan_unlock(chan);
5488 
5489 	return 0;
5490 }
5491 
5492 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5493 						 struct l2cap_cmd_hdr *cmd,
5494 						 u16 cmd_len, void *data)
5495 {
5496 	struct l2cap_move_chan_cfm_rsp *rsp = data;
5497 	struct l2cap_chan *chan;
5498 	u16 icid;
5499 
5500 	if (cmd_len != sizeof(*rsp))
5501 		return -EPROTO;
5502 
5503 	icid = le16_to_cpu(rsp->icid);
5504 
5505 	BT_DBG("icid 0x%4.4x", icid);
5506 
5507 	chan = l2cap_get_chan_by_scid(conn, icid);
5508 	if (!chan)
5509 		return 0;
5510 
5511 	__clear_chan_timer(chan);
5512 
5513 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5514 		chan->local_amp_id = chan->move_id;
5515 
5516 		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5517 			__release_logical_link(chan);
5518 
5519 		l2cap_move_done(chan);
5520 	}
5521 
5522 	l2cap_chan_unlock(chan);
5523 
5524 	return 0;
5525 }
5526 
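/* Handle a Connection Parameter Update Request. Only the LE master accepts
 * these; accepted parameters are applied to the controller and reported to
 * the management interface.
 */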
5527 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5528 					      struct l2cap_cmd_hdr *cmd,
5529 					      u16 cmd_len, u8 *data)
5530 {
5531 	struct hci_conn *hcon = conn->hcon;
5532 	struct l2cap_conn_param_update_req *req;
5533 	struct l2cap_conn_param_update_rsp rsp;
5534 	u16 min, max, latency, to_multiplier;
5535 	int err;
5536 
5537 	if (hcon->role != HCI_ROLE_MASTER)
5538 		return -EINVAL;
5539 
5540 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5541 		return -EPROTO;
5542 
5543 	req = (struct l2cap_conn_param_update_req *) data;
5544 	min		= __le16_to_cpu(req->min);
5545 	max		= __le16_to_cpu(req->max);
5546 	latency		= __le16_to_cpu(req->latency);
5547 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5548 
5549 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5550 	       min, max, latency, to_multiplier);
5551 
5552 	memset(&rsp, 0, sizeof(rsp));
5553 
5554 	err = hci_check_conn_params(min, max, latency, to_multiplier);
5555 	if (err)
5556 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5557 	else
5558 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5559 
5560 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5561 		       sizeof(rsp), &rsp);
5562 
5563 	if (!err) {
5564 		u8 store_hint;
5565 
5566 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5567 						to_multiplier);
5568 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5569 				    store_hint, min, max, latency,
5570 				    to_multiplier);
5571 
5572 	}
5573 
5574 	return 0;
5575 }
5576 
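/* Handle the response to our LE Credit Based Connection Request. A result of
 * insufficient authentication or encryption triggers an SMP security upgrade,
 * after which a new Connection Request will be sent.
 */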
5577 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5578 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5579 				u8 *data)
5580 {
5581 	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5582 	struct hci_conn *hcon = conn->hcon;
5583 	u16 dcid, mtu, mps, credits, result;
5584 	struct l2cap_chan *chan;
5585 	int err, sec_level;
5586 
5587 	if (cmd_len < sizeof(*rsp))
5588 		return -EPROTO;
5589 
5590 	dcid    = __le16_to_cpu(rsp->dcid);
5591 	mtu     = __le16_to_cpu(rsp->mtu);
5592 	mps     = __le16_to_cpu(rsp->mps);
5593 	credits = __le16_to_cpu(rsp->credits);
5594 	result  = __le16_to_cpu(rsp->result);
5595 
5596 	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
5597 					   dcid < L2CAP_CID_DYN_START ||
5598 					   dcid > L2CAP_CID_LE_DYN_END))
5599 		return -EPROTO;
5600 
5601 	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5602 	       dcid, mtu, mps, credits, result);
5603 
5604 	mutex_lock(&conn->chan_lock);
5605 
5606 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5607 	if (!chan) {
5608 		err = -EBADSLT;
5609 		goto unlock;
5610 	}
5611 
5612 	err = 0;
5613 
5614 	l2cap_chan_lock(chan);
5615 
5616 	switch (result) {
5617 	case L2CAP_CR_LE_SUCCESS:
5618 		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5619 			err = -EBADSLT;
5620 			break;
5621 		}
5622 
5623 		chan->ident = 0;
5624 		chan->dcid = dcid;
5625 		chan->omtu = mtu;
5626 		chan->remote_mps = mps;
5627 		chan->tx_credits = credits;
5628 		l2cap_chan_ready(chan);
5629 		break;
5630 
5631 	case L2CAP_CR_LE_AUTHENTICATION:
5632 	case L2CAP_CR_LE_ENCRYPTION:
5633 		/* If we already have MITM protection we can't do
5634 		 * anything.
5635 		 */
5636 		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5637 			l2cap_chan_del(chan, ECONNREFUSED);
5638 			break;
5639 		}
5640 
5641 		sec_level = hcon->sec_level + 1;
5642 		if (chan->sec_level < sec_level)
5643 			chan->sec_level = sec_level;
5644 
5645 		/* We'll need to send a new Connect Request */
5646 		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5647 
5648 		smp_conn_security(hcon, chan->sec_level);
5649 		break;
5650 
5651 	default:
5652 		l2cap_chan_del(chan, ECONNREFUSED);
5653 		break;
5654 	}
5655 
5656 	l2cap_chan_unlock(chan);
5657 
5658 unlock:
5659 	mutex_unlock(&conn->chan_lock);
5660 
5661 	return err;
5662 }
5663 
5664 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5665 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5666 				      u8 *data)
5667 {
5668 	int err = 0;
5669 
5670 	switch (cmd->code) {
5671 	case L2CAP_COMMAND_REJ:
5672 		l2cap_command_rej(conn, cmd, cmd_len, data);
5673 		break;
5674 
5675 	case L2CAP_CONN_REQ:
5676 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5677 		break;
5678 
5679 	case L2CAP_CONN_RSP:
5680 	case L2CAP_CREATE_CHAN_RSP:
5681 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5682 		break;
5683 
5684 	case L2CAP_CONF_REQ:
5685 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5686 		break;
5687 
5688 	case L2CAP_CONF_RSP:
5689 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5690 		break;
5691 
5692 	case L2CAP_DISCONN_REQ:
5693 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5694 		break;
5695 
5696 	case L2CAP_DISCONN_RSP:
5697 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5698 		break;
5699 
5700 	case L2CAP_ECHO_REQ:
5701 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5702 		break;
5703 
5704 	case L2CAP_ECHO_RSP:
5705 		break;
5706 
5707 	case L2CAP_INFO_REQ:
5708 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5709 		break;
5710 
5711 	case L2CAP_INFO_RSP:
5712 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5713 		break;
5714 
5715 	case L2CAP_CREATE_CHAN_REQ:
5716 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5717 		break;
5718 
5719 	case L2CAP_MOVE_CHAN_REQ:
5720 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5721 		break;
5722 
5723 	case L2CAP_MOVE_CHAN_RSP:
5724 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5725 		break;
5726 
5727 	case L2CAP_MOVE_CHAN_CFM:
5728 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5729 		break;
5730 
5731 	case L2CAP_MOVE_CHAN_CFM_RSP:
5732 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5733 		break;
5734 
5735 	default:
5736 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5737 		err = -EINVAL;
5738 		break;
5739 	}
5740 
5741 	return err;
5742 }
5743 
5744 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5745 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5746 				u8 *data)
5747 {
5748 	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5749 	struct l2cap_le_conn_rsp rsp;
5750 	struct l2cap_chan *chan, *pchan;
5751 	u16 dcid, scid, credits, mtu, mps;
5752 	__le16 psm;
5753 	u8 result;
5754 
5755 	if (cmd_len != sizeof(*req))
5756 		return -EPROTO;
5757 
5758 	scid = __le16_to_cpu(req->scid);
5759 	mtu  = __le16_to_cpu(req->mtu);
5760 	mps  = __le16_to_cpu(req->mps);
5761 	psm  = req->psm;
5762 	dcid = 0;
5763 	credits = 0;
5764 
5765 	if (mtu < 23 || mps < 23)
5766 		return -EPROTO;
5767 
5768 	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5769 	       scid, mtu, mps);
5770 
5771 	/* Check if we have a socket listening on this psm */
5772 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5773 					 &conn->hcon->dst, LE_LINK);
5774 	if (!pchan) {
5775 		result = L2CAP_CR_LE_BAD_PSM;
5776 		chan = NULL;
5777 		goto response;
5778 	}
5779 
5780 	mutex_lock(&conn->chan_lock);
5781 	l2cap_chan_lock(pchan);
5782 
5783 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5784 				     SMP_ALLOW_STK)) {
5785 		result = L2CAP_CR_LE_AUTHENTICATION;
5786 		chan = NULL;
5787 		goto response_unlock;
5788 	}
5789 
5790 	/* Check for valid dynamic CID range */
5791 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5792 		result = L2CAP_CR_LE_INVALID_SCID;
5793 		chan = NULL;
5794 		goto response_unlock;
5795 	}
5796 
5797 	/* Check if we already have a channel with that dcid */
5798 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
5799 		result = L2CAP_CR_LE_SCID_IN_USE;
5800 		chan = NULL;
5801 		goto response_unlock;
5802 	}
5803 
5804 	chan = pchan->ops->new_connection(pchan);
5805 	if (!chan) {
5806 		result = L2CAP_CR_LE_NO_MEM;
5807 		goto response_unlock;
5808 	}
5809 
5810 	bacpy(&chan->src, &conn->hcon->src);
5811 	bacpy(&chan->dst, &conn->hcon->dst);
5812 	chan->src_type = bdaddr_src_type(conn->hcon);
5813 	chan->dst_type = bdaddr_dst_type(conn->hcon);
5814 	chan->psm  = psm;
5815 	chan->dcid = scid;
5816 	chan->omtu = mtu;
5817 	chan->remote_mps = mps;
5818 
5819 	__l2cap_chan_add(conn, chan);
5820 
5821 	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
5822 
5823 	dcid = chan->scid;
5824 	credits = chan->rx_credits;
5825 
5826 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5827 
5828 	chan->ident = cmd->ident;
5829 
5830 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5831 		l2cap_state_change(chan, BT_CONNECT2);
5832 		/* The following result value is actually not defined
5833 		 * for LE CoC but we use it to let the function know
5834 		 * that it should bail out after doing its cleanup
5835 		 * instead of sending a response.
5836 		 */
5837 		result = L2CAP_CR_PEND;
5838 		chan->ops->defer(chan);
5839 	} else {
5840 		l2cap_chan_ready(chan);
5841 		result = L2CAP_CR_LE_SUCCESS;
5842 	}
5843 
5844 response_unlock:
5845 	l2cap_chan_unlock(pchan);
5846 	mutex_unlock(&conn->chan_lock);
5847 	l2cap_chan_put(pchan);
5848 
5849 	if (result == L2CAP_CR_PEND)
5850 		return 0;
5851 
5852 response:
5853 	if (chan) {
5854 		rsp.mtu = cpu_to_le16(chan->imtu);
5855 		rsp.mps = cpu_to_le16(chan->mps);
5856 	} else {
5857 		rsp.mtu = 0;
5858 		rsp.mps = 0;
5859 	}
5860 
5861 	rsp.dcid    = cpu_to_le16(dcid);
5862 	rsp.credits = cpu_to_le16(credits);
5863 	rsp.result  = cpu_to_le16(result);
5864 
5865 	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5866 
5867 	return 0;
5868 }
5869 
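/* Handle an LE Flow Control Credit packet: add the credits to the channel
 * and resume sending, or disconnect if the remote overflows the credit count.
 */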
5870 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5871 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5872 				   u8 *data)
5873 {
5874 	struct l2cap_le_credits *pkt;
5875 	struct l2cap_chan *chan;
5876 	u16 cid, credits, max_credits;
5877 
5878 	if (cmd_len != sizeof(*pkt))
5879 		return -EPROTO;
5880 
5881 	pkt = (struct l2cap_le_credits *) data;
5882 	cid	= __le16_to_cpu(pkt->cid);
5883 	credits	= __le16_to_cpu(pkt->credits);
5884 
5885 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5886 
5887 	chan = l2cap_get_chan_by_dcid(conn, cid);
5888 	if (!chan)
5889 		return -EBADSLT;
5890 
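	/* The total number of credits outstanding on the channel must never
	 * exceed LE_FLOWCTL_MAX_CREDITS (65535).  A peer granting more than
	 * the remaining headroom is violating the protocol, so the channel
	 * is disconnected instead of letting the counter wrap.
	 */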
5891 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5892 	if (credits > max_credits) {
5893 		BT_ERR("LE credits overflow");
5894 		l2cap_send_disconn_req(chan, ECONNRESET);
5895 		l2cap_chan_unlock(chan);
5896 
5897 		/* Return 0 so that we don't trigger an unnecessary
5898 		 * command reject packet.
5899 		 */
5900 		return 0;
5901 	}
5902 
5903 	chan->tx_credits += credits;
5904 
5905 	/* Resume sending */
5906 	l2cap_le_flowctl_send(chan);
5907 
5908 	if (chan->tx_credits)
5909 		chan->ops->resume(chan);
5910 
5911 	l2cap_chan_unlock(chan);
5912 
5913 	return 0;
5914 }
5915 
5916 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
5917 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5918 				       u8 *data)
5919 {
5920 	struct l2cap_ecred_conn_req *req = (void *) data;
5921 	struct {
5922 		struct l2cap_ecred_conn_rsp rsp;
5923 		__le16 dcid[5];
5924 	} __packed pdu;
5925 	struct l2cap_chan *chan, *pchan;
5926 	u16 mtu, mps;
5927 	__le16 psm;
5928 	u8 result, len = 0;
5929 	int i, num_scid;
5930 	bool defer = false;
5931 
5932 	if (!enable_ecred)
5933 		return -EINVAL;
5934 
	memset(&pdu, 0, sizeof(pdu));

5935 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
5936 		result = L2CAP_CR_LE_INVALID_PARAMS;
5937 		goto response;
5938 	}
5939 
5940 	mtu  = __le16_to_cpu(req->mtu);
5941 	mps  = __le16_to_cpu(req->mps);
5942 
5943 	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
5944 		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
5945 		goto response;
5946 	}
5947 
5948 	psm  = req->psm;
5949 
5950 	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
5951 
5954 	/* Check if we have a socket listening on this PSM */
5955 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5956 					 &conn->hcon->dst, LE_LINK);
5957 	if (!pchan) {
5958 		result = L2CAP_CR_LE_BAD_PSM;
5959 		goto response;
5960 	}
5961 
5962 	mutex_lock(&conn->chan_lock);
5963 	l2cap_chan_lock(pchan);
5964 
5965 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5966 				     SMP_ALLOW_STK)) {
5967 		result = L2CAP_CR_LE_AUTHENTICATION;
5968 		goto unlock;
5969 	}
5970 
5971 	result = L2CAP_CR_LE_SUCCESS;
5972 	cmd_len -= sizeof(*req);
5973 	num_scid = cmd_len / sizeof(u16);
5974 
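	/* Each requested source CID is validated on its own: an invalid or
	 * already used scid leaves its dcid entry set to 0 and updates the
	 * result, while an accepted one gets a fresh channel whose scid is
	 * returned as the corresponding dcid.
	 */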
5975 	for (i = 0; i < num_scid; i++) {
5976 		u16 scid = __le16_to_cpu(req->scid[i]);
5977 
5978 		BT_DBG("scid[%d] 0x%4.4x", i, scid);
5979 
5980 		pdu.dcid[i] = 0x0000;
5981 		len += sizeof(*pdu.dcid);
5982 
5983 		/* Check for valid dynamic CID range */
5984 		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5985 			result = L2CAP_CR_LE_INVALID_SCID;
5986 			continue;
5987 		}
5988 
5989 		/* Check if we already have a channel with that dcid */
5990 		if (__l2cap_get_chan_by_dcid(conn, scid)) {
5991 			result = L2CAP_CR_LE_SCID_IN_USE;
5992 			continue;
5993 		}
5994 
5995 		chan = pchan->ops->new_connection(pchan);
5996 		if (!chan) {
5997 			result = L2CAP_CR_LE_NO_MEM;
5998 			continue;
5999 		}
6000 
6001 		bacpy(&chan->src, &conn->hcon->src);
6002 		bacpy(&chan->dst, &conn->hcon->dst);
6003 		chan->src_type = bdaddr_src_type(conn->hcon);
6004 		chan->dst_type = bdaddr_dst_type(conn->hcon);
6005 		chan->psm  = psm;
6006 		chan->dcid = scid;
6007 		chan->omtu = mtu;
6008 		chan->remote_mps = mps;
6009 
6010 		__l2cap_chan_add(conn, chan);
6011 
6012 		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
6013 
6014 		/* Init response */
6015 		if (!pdu.rsp.credits) {
6016 			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
6017 			pdu.rsp.mps = cpu_to_le16(chan->mps);
6018 			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
6019 		}
6020 
6021 		pdu.dcid[i] = cpu_to_le16(chan->scid);
6022 
6023 		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
6024 
6025 		chan->ident = cmd->ident;
6026 
6027 		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6028 			l2cap_state_change(chan, BT_CONNECT2);
6029 			defer = true;
6030 			chan->ops->defer(chan);
6031 		} else {
6032 			l2cap_chan_ready(chan);
6033 		}
6034 	}
6035 
6036 unlock:
6037 	l2cap_chan_unlock(pchan);
6038 	mutex_unlock(&conn->chan_lock);
6039 	l2cap_chan_put(pchan);
6040 
6041 response:
6042 	pdu.rsp.result = cpu_to_le16(result);
6043 
6044 	if (defer)
6045 		return 0;
6046 
6047 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
6048 		       sizeof(pdu.rsp) + len, &pdu);
6049 
6050 	return 0;
6051 }
6052 
6053 static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
6054 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6055 				       u8 *data)
6056 {
6057 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6058 	struct hci_conn *hcon = conn->hcon;
6059 	u16 mtu, mps, credits, result;
6060 	struct l2cap_chan *chan;
6061 	int err = 0, sec_level;
6062 	int i = 0;
6063 
6064 	if (cmd_len < sizeof(*rsp))
6065 		return -EPROTO;
6066 
6067 	mtu     = __le16_to_cpu(rsp->mtu);
6068 	mps     = __le16_to_cpu(rsp->mps);
6069 	credits = __le16_to_cpu(rsp->credits);
6070 	result  = __le16_to_cpu(rsp->result);
6071 
6072 	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
6073 	       result);
6074 
6075 	mutex_lock(&conn->chan_lock);
6076 
6077 	cmd_len -= sizeof(*rsp);
6078 
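	/* Match the channels created by our earlier request via the command
	 * ident and consume the returned dcids in order; channels that did
	 * not get a dcid are refused.
	 */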
6079 	list_for_each_entry(chan, &conn->chan_l, list) {
6080 		u16 dcid;
6081 
6082 		if (chan->ident != cmd->ident ||
6083 		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
6084 		    chan->state == BT_CONNECTED)
6085 			continue;
6086 
6087 		l2cap_chan_lock(chan);
6088 
6089 		/* Check that there is a dcid for each pending channel */
6090 		if (cmd_len < sizeof(dcid)) {
6091 			l2cap_chan_del(chan, ECONNREFUSED);
6092 			l2cap_chan_unlock(chan);
6093 			continue;
6094 		}
6095 
6096 		dcid = __le16_to_cpu(rsp->dcid[i++]);
6097 		cmd_len -= sizeof(u16);
6098 
6099 		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);
6100 
6101 		/* Check if dcid is already in use */
6102 		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
6103 			/* If a device receives a
6104 			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
6105 			 * already-assigned Destination CID, then both the
6106 			 * original channel and the new channel shall be
6107 			 * immediately discarded and not used.
6108 			 */
6109 			l2cap_chan_del(chan, ECONNREFUSED);
6110 			l2cap_chan_unlock(chan);
6111 			chan = __l2cap_get_chan_by_dcid(conn, dcid);
6112 			l2cap_chan_lock(chan);
6113 			l2cap_chan_del(chan, ECONNRESET);
6114 			l2cap_chan_unlock(chan);
6115 			continue;
6116 		}
6117 
6118 		switch (result) {
6119 		case L2CAP_CR_LE_AUTHENTICATION:
6120 		case L2CAP_CR_LE_ENCRYPTION:
6121 			/* If we already have MITM protection we can't do
6122 			 * anything.
6123 			 */
6124 			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
6125 				l2cap_chan_del(chan, ECONNREFUSED);
6126 				break;
6127 			}
6128 
6129 			sec_level = hcon->sec_level + 1;
6130 			if (chan->sec_level < sec_level)
6131 				chan->sec_level = sec_level;
6132 
6133 			/* We'll need to send a new Connect Request */
6134 			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
6135 
6136 			smp_conn_security(hcon, chan->sec_level);
6137 			break;
6138 
6139 		case L2CAP_CR_LE_BAD_PSM:
6140 			l2cap_chan_del(chan, ECONNREFUSED);
6141 			break;
6142 
6143 		default:
6144 			/* If dcid was not set it means the channel was refused */
6145 			if (!dcid) {
6146 				l2cap_chan_del(chan, ECONNREFUSED);
6147 				break;
6148 			}
6149 
6150 			chan->ident = 0;
6151 			chan->dcid = dcid;
6152 			chan->omtu = mtu;
6153 			chan->remote_mps = mps;
6154 			chan->tx_credits = credits;
6155 			l2cap_chan_ready(chan);
6156 			break;
6157 		}
6158 
6159 		l2cap_chan_unlock(chan);
6160 	}
6161 
6162 	mutex_unlock(&conn->chan_lock);
6163 
6164 	return err;
6165 }
6166 
6167 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6168 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6169 					 u8 *data)
6170 {
6171 	struct l2cap_ecred_reconf_req *req = (void *) data;
6172 	struct l2cap_ecred_reconf_rsp rsp;
6173 	u16 mtu, mps, result;
6174 	struct l2cap_chan *chan;
6175 	int i, num_scid;
6176 
6177 	if (!enable_ecred)
6178 		return -EINVAL;
6179 
6180 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
6181 		result = L2CAP_CR_LE_INVALID_PARAMS;
6182 		goto respond;
6183 	}
6184 
6185 	mtu = __le16_to_cpu(req->mtu);
6186 	mps = __le16_to_cpu(req->mps);
6187 
6188 	BT_DBG("mtu %u mps %u", mtu, mps);
6189 
6190 	if (mtu < L2CAP_ECRED_MIN_MTU) {
6191 		result = L2CAP_RECONF_INVALID_MTU;
6192 		goto respond;
6193 	}
6194 
6195 	if (mps < L2CAP_ECRED_MIN_MPS) {
6196 		result = L2CAP_RECONF_INVALID_MPS;
6197 		goto respond;
6198 	}
6199 
6200 	cmd_len -= sizeof(*req);
6201 	num_scid = cmd_len / sizeof(u16);
6202 	result = L2CAP_RECONF_SUCCESS;
6203 
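	/* Apply the new MTU/MPS to every channel addressed by the request;
	 * an attempt to decrease the MTU of any of them is flagged in the
	 * result while the remaining scids are still processed.
	 */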
6204 	for (i = 0; i < num_scid; i++) {
6205 		u16 scid;
6206 
6207 		scid = __le16_to_cpu(req->scid[i]);
6208 		if (!scid)
6209 			return -EPROTO;
6210 
6211 		chan = __l2cap_get_chan_by_dcid(conn, scid);
6212 		if (!chan)
6213 			continue;
6214 
6215 		/* If the MTU value is decreased for any of the included
6216 		 * channels, then the receiver shall disconnect all
6217 		 * included channels.
6218 		 */
6219 		if (chan->omtu > mtu) {
6220 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
6221 			       chan->omtu, mtu);
6222 			result = L2CAP_RECONF_INVALID_MTU;
6223 		}
6224 
6225 		chan->omtu = mtu;
6226 		chan->remote_mps = mps;
6227 	}
6228 
6229 respond:
6230 	rsp.result = cpu_to_le16(result);
6231 
6232 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
6233 		       &rsp);
6234 
6235 	return 0;
6236 }
6237 
6238 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6239 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6240 					 u8 *data)
6241 {
6242 	struct l2cap_chan *chan;
6243 	struct l2cap_ecred_reconf_rsp *rsp = (void *) data;
6244 	u16 result;
6245 
6246 	if (cmd_len < sizeof(*rsp))
6247 		return -EPROTO;
6248 
6249 	result = __le16_to_cpu(rsp->result);
6250 
6251 	BT_DBG("result 0x%4.4x", result);
6252 
6253 	if (!result)
6254 		return 0;
6255 
6256 	list_for_each_entry(chan, &conn->chan_l, list) {
6257 		if (chan->ident != cmd->ident)
6258 			continue;
6259 
6260 		l2cap_chan_del(chan, ECONNRESET);
6261 	}
6262 
6263 	return 0;
6264 }
6265 
6266 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6267 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6268 				       u8 *data)
6269 {
6270 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6271 	struct l2cap_chan *chan;
6272 
6273 	if (cmd_len < sizeof(*rej))
6274 		return -EPROTO;
6275 
6276 	mutex_lock(&conn->chan_lock);
6277 
6278 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6279 	if (!chan)
6280 		goto done;
6281 
6282 	l2cap_chan_lock(chan);
6283 	l2cap_chan_del(chan, ECONNREFUSED);
6284 	l2cap_chan_unlock(chan);
6285 
6286 done:
6287 	mutex_unlock(&conn->chan_lock);
6288 	return 0;
6289 }
6290 
6291 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
6292 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6293 				   u8 *data)
6294 {
6295 	int err = 0;
6296 
6297 	switch (cmd->code) {
6298 	case L2CAP_COMMAND_REJ:
6299 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
6300 		break;
6301 
6302 	case L2CAP_CONN_PARAM_UPDATE_REQ:
6303 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
6304 		break;
6305 
6306 	case L2CAP_CONN_PARAM_UPDATE_RSP:
6307 		break;
6308 
6309 	case L2CAP_LE_CONN_RSP:
6310 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
6311 		break;
6312 
6313 	case L2CAP_LE_CONN_REQ:
6314 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
6315 		break;
6316 
6317 	case L2CAP_LE_CREDITS:
6318 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
6319 		break;
6320 
6321 	case L2CAP_ECRED_CONN_REQ:
6322 		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
6323 		break;
6324 
6325 	case L2CAP_ECRED_CONN_RSP:
6326 		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
6327 		break;
6328 
6329 	case L2CAP_ECRED_RECONF_REQ:
6330 		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
6331 		break;
6332 
6333 	case L2CAP_ECRED_RECONF_RSP:
6334 		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
6335 		break;
6336 
6337 	case L2CAP_DISCONN_REQ:
6338 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
6339 		break;
6340 
6341 	case L2CAP_DISCONN_RSP:
6342 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
6343 		break;
6344 
6345 	default:
6346 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
6347 		err = -EINVAL;
6348 		break;
6349 	}
6350 
6351 	return err;
6352 }
6353 
6354 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
6355 					struct sk_buff *skb)
6356 {
6357 	struct hci_conn *hcon = conn->hcon;
6358 	struct l2cap_cmd_hdr *cmd;
6359 	u16 len;
6360 	int err;
6361 
6362 	if (hcon->type != LE_LINK)
6363 		goto drop;
6364 
6365 	if (skb->len < L2CAP_CMD_HDR_SIZE)
6366 		goto drop;
6367 
6368 	cmd = (void *) skb->data;
6369 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6370 
6371 	len = le16_to_cpu(cmd->len);
6372 
6373 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
6374 
6375 	if (len != skb->len || !cmd->ident) {
6376 		BT_DBG("corrupted command");
6377 		goto drop;
6378 	}
6379 
6380 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
6381 	if (err) {
6382 		struct l2cap_cmd_rej_unk rej;
6383 
6384 		BT_ERR("Wrong link type (%d)", err);
6385 
6386 		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6387 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
6388 			       sizeof(rej), &rej);
6389 	}
6390 
6391 drop:
6392 	kfree_skb(skb);
6393 }
6394 
6395 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
6396 				     struct sk_buff *skb)
6397 {
6398 	struct hci_conn *hcon = conn->hcon;
6399 	struct l2cap_cmd_hdr *cmd;
6400 	int err;
6401 
6402 	l2cap_raw_recv(conn, skb);
6403 
6404 	if (hcon->type != ACL_LINK)
6405 		goto drop;
6406 
6407 	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
6408 		u16 len;
6409 
6410 		cmd = (void *) skb->data;
6411 		skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6412 
6413 		len = le16_to_cpu(cmd->len);
6414 
6415 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
6416 		       cmd->ident);
6417 
6418 		if (len > skb->len || !cmd->ident) {
6419 			BT_DBG("corrupted command");
6420 			break;
6421 		}
6422 
6423 		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
6424 		if (err) {
6425 			struct l2cap_cmd_rej_unk rej;
6426 
6427 			BT_ERR("Wrong link type (%d)", err);
6428 
6429 			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6430 			l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
6431 				       sizeof(rej), &rej);
6432 		}
6433 
6434 		skb_pull(skb, len);
6435 	}
6436 
6437 drop:
6438 	kfree_skb(skb);
6439 }
6440 
6441 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
6442 {
6443 	u16 our_fcs, rcv_fcs;
6444 	int hdr_size;
6445 
6446 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6447 		hdr_size = L2CAP_EXT_HDR_SIZE;
6448 	else
6449 		hdr_size = L2CAP_ENH_HDR_SIZE;
6450 
6451 	if (chan->fcs == L2CAP_FCS_CRC16) {
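		/* skb_trim() drops the FCS from skb->len, so the received
		 * FCS can be read right past the trimmed payload while our
		 * own FCS is computed over the header and the payload.
		 */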
6452 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6453 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6454 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6455 
6456 		if (our_fcs != rcv_fcs)
6457 			return -EBADMSG;
6458 	}
6459 	return 0;
6460 }
6461 
6462 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
6463 {
6464 	struct l2cap_ctrl control;
6465 
6466 	BT_DBG("chan %p", chan);
6467 
6468 	memset(&control, 0, sizeof(control));
6469 	control.sframe = 1;
6470 	control.final = 1;
6471 	control.reqseq = chan->buffer_seq;
6472 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
6473 
6474 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6475 		control.super = L2CAP_SUPER_RNR;
6476 		l2cap_send_sframe(chan, &control);
6477 	}
6478 
6479 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
6480 	    chan->unacked_frames > 0)
6481 		__set_retrans_timer(chan);
6482 
6483 	/* Send pending iframes */
6484 	l2cap_ertm_send(chan);
6485 
6486 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
6487 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
6488 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
6489 		 * send it now.
6490 		 */
6491 		control.super = L2CAP_SUPER_RR;
6492 		l2cap_send_sframe(chan, &control);
6493 	}
6494 }
6495 
6496 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
6497 			    struct sk_buff **last_frag)
6498 {
6499 	/* skb->len reflects data in skb as well as all fragments
6500 	 * skb->data_len reflects only data in fragments
6501 	 */
6502 	if (!skb_has_frag_list(skb))
6503 		skb_shinfo(skb)->frag_list = new_frag;
6504 
6505 	new_frag->next = NULL;
6506 
6507 	(*last_frag)->next = new_frag;
6508 	*last_frag = new_frag;
6509 
6510 	skb->len += new_frag->len;
6511 	skb->data_len += new_frag->len;
6512 	skb->truesize += new_frag->truesize;
6513 }
6514 
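/* Reassemble an SDU from ERTM/streaming I-frames based on the SAR bits:
 * unsegmented frames are delivered directly, a start frame carries the
 * total SDU length, and continuation/end fragments are appended until
 * that length is reached.  Any error frees the partially assembled SDU.
 */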
6515 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
6516 				struct l2cap_ctrl *control)
6517 {
6518 	int err = -EINVAL;
6519 
6520 	switch (control->sar) {
6521 	case L2CAP_SAR_UNSEGMENTED:
6522 		if (chan->sdu)
6523 			break;
6524 
6525 		err = chan->ops->recv(chan, skb);
6526 		break;
6527 
6528 	case L2CAP_SAR_START:
6529 		if (chan->sdu)
6530 			break;
6531 
6532 		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
6533 			break;
6534 
6535 		chan->sdu_len = get_unaligned_le16(skb->data);
6536 		skb_pull(skb, L2CAP_SDULEN_SIZE);
6537 
6538 		if (chan->sdu_len > chan->imtu) {
6539 			err = -EMSGSIZE;
6540 			break;
6541 		}
6542 
6543 		if (skb->len >= chan->sdu_len)
6544 			break;
6545 
6546 		chan->sdu = skb;
6547 		chan->sdu_last_frag = skb;
6548 
6549 		skb = NULL;
6550 		err = 0;
6551 		break;
6552 
6553 	case L2CAP_SAR_CONTINUE:
6554 		if (!chan->sdu)
6555 			break;
6556 
6557 		append_skb_frag(chan->sdu, skb,
6558 				&chan->sdu_last_frag);
6559 		skb = NULL;
6560 
6561 		if (chan->sdu->len >= chan->sdu_len)
6562 			break;
6563 
6564 		err = 0;
6565 		break;
6566 
6567 	case L2CAP_SAR_END:
6568 		if (!chan->sdu)
6569 			break;
6570 
6571 		append_skb_frag(chan->sdu, skb,
6572 				&chan->sdu_last_frag);
6573 		skb = NULL;
6574 
6575 		if (chan->sdu->len != chan->sdu_len)
6576 			break;
6577 
6578 		err = chan->ops->recv(chan, chan->sdu);
6579 
6580 		if (!err) {
6581 			/* Reassembly complete */
6582 			chan->sdu = NULL;
6583 			chan->sdu_last_frag = NULL;
6584 			chan->sdu_len = 0;
6585 		}
6586 		break;
6587 	}
6588 
6589 	if (err) {
6590 		kfree_skb(skb);
6591 		kfree_skb(chan->sdu);
6592 		chan->sdu = NULL;
6593 		chan->sdu_last_frag = NULL;
6594 		chan->sdu_len = 0;
6595 	}
6596 
6597 	return err;
6598 }
6599 
6600 static int l2cap_resegment(struct l2cap_chan *chan)
6601 {
6602 	/* Placeholder */
6603 	return 0;
6604 }
6605 
6606 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6607 {
6608 	u8 event;
6609 
6610 	if (chan->mode != L2CAP_MODE_ERTM)
6611 		return;
6612 
6613 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6614 	l2cap_tx(chan, NULL, NULL, event);
6615 }
6616 
6617 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6618 {
6619 	int err = 0;
6620 	/* Pass sequential frames to l2cap_reassemble_sdu()
6621 	 * until a gap is encountered.
6622 	 */
6623 
6624 	BT_DBG("chan %p", chan);
6625 
6626 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6627 		struct sk_buff *skb;
6628 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
6629 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
6630 
6631 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6632 
6633 		if (!skb)
6634 			break;
6635 
6636 		skb_unlink(skb, &chan->srej_q);
6637 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6638 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6639 		if (err)
6640 			break;
6641 	}
6642 
6643 	if (skb_queue_empty(&chan->srej_q)) {
6644 		chan->rx_state = L2CAP_RX_STATE_RECV;
6645 		l2cap_send_ack(chan);
6646 	}
6647 
6648 	return err;
6649 }
6650 
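/* Handle an incoming SREJ S-frame: the peer selectively asks for
 * retransmission of the frame at reqseq.  A reqseq we have not
 * transmitted yet, or a frame that already reached the max_tx retry
 * limit, causes the channel to be disconnected.
 */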
6651 static void l2cap_handle_srej(struct l2cap_chan *chan,
6652 			      struct l2cap_ctrl *control)
6653 {
6654 	struct sk_buff *skb;
6655 
6656 	BT_DBG("chan %p, control %p", chan, control);
6657 
6658 	if (control->reqseq == chan->next_tx_seq) {
6659 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6660 		l2cap_send_disconn_req(chan, ECONNRESET);
6661 		return;
6662 	}
6663 
6664 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6665 
6666 	if (skb == NULL) {
6667 		BT_DBG("Seq %d not available for retransmission",
6668 		       control->reqseq);
6669 		return;
6670 	}
6671 
6672 	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6673 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6674 		l2cap_send_disconn_req(chan, ECONNRESET);
6675 		return;
6676 	}
6677 
6678 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6679 
6680 	if (control->poll) {
6681 		l2cap_pass_to_tx(chan, control);
6682 
6683 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
6684 		l2cap_retransmit(chan, control);
6685 		l2cap_ertm_send(chan);
6686 
6687 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6688 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
6689 			chan->srej_save_reqseq = control->reqseq;
6690 		}
6691 	} else {
6692 		l2cap_pass_to_tx_fbit(chan, control);
6693 
6694 		if (control->final) {
6695 			if (chan->srej_save_reqseq != control->reqseq ||
6696 			    !test_and_clear_bit(CONN_SREJ_ACT,
6697 						&chan->conn_state))
6698 				l2cap_retransmit(chan, control);
6699 		} else {
6700 			l2cap_retransmit(chan, control);
6701 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6702 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
6703 				chan->srej_save_reqseq = control->reqseq;
6704 			}
6705 		}
6706 	}
6707 }
6708 
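/* Handle an incoming REJ S-frame: everything from reqseq onwards is
 * retransmitted, with CONN_REJ_ACT making sure a retransmission
 * triggered by the P/F-bit exchange is not repeated.
 */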
6709 static void l2cap_handle_rej(struct l2cap_chan *chan,
6710 			     struct l2cap_ctrl *control)
6711 {
6712 	struct sk_buff *skb;
6713 
6714 	BT_DBG("chan %p, control %p", chan, control);
6715 
6716 	if (control->reqseq == chan->next_tx_seq) {
6717 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6718 		l2cap_send_disconn_req(chan, ECONNRESET);
6719 		return;
6720 	}
6721 
6722 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6723 
6724 	if (chan->max_tx && skb &&
6725 	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6726 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6727 		l2cap_send_disconn_req(chan, ECONNRESET);
6728 		return;
6729 	}
6730 
6731 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6732 
6733 	l2cap_pass_to_tx(chan, control);
6734 
6735 	if (control->final) {
6736 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6737 			l2cap_retransmit_all(chan, control);
6738 	} else {
6739 		l2cap_retransmit_all(chan, control);
6740 		l2cap_ertm_send(chan);
6741 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6742 			set_bit(CONN_REJ_ACT, &chan->conn_state);
6743 	}
6744 }
6745 
6746 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6747 {
6748 	BT_DBG("chan %p, txseq %d", chan, txseq);
6749 
6750 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6751 	       chan->expected_tx_seq);
6752 
6753 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6754 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6755 		    chan->tx_win) {
6756 			/* See notes below regarding "double poll" and
6757 			 * invalid packets.
6758 			 */
6759 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6760 				BT_DBG("Invalid/Ignore - after SREJ");
6761 				return L2CAP_TXSEQ_INVALID_IGNORE;
6762 			} else {
6763 				BT_DBG("Invalid - in window after SREJ sent");
6764 				return L2CAP_TXSEQ_INVALID;
6765 			}
6766 		}
6767 
6768 		if (chan->srej_list.head == txseq) {
6769 			BT_DBG("Expected SREJ");
6770 			return L2CAP_TXSEQ_EXPECTED_SREJ;
6771 		}
6772 
6773 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6774 			BT_DBG("Duplicate SREJ - txseq already stored");
6775 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
6776 		}
6777 
6778 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6779 			BT_DBG("Unexpected SREJ - not requested");
6780 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6781 		}
6782 	}
6783 
6784 	if (chan->expected_tx_seq == txseq) {
6785 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6786 		    chan->tx_win) {
6787 			BT_DBG("Invalid - txseq outside tx window");
6788 			return L2CAP_TXSEQ_INVALID;
6789 		} else {
6790 			BT_DBG("Expected");
6791 			return L2CAP_TXSEQ_EXPECTED;
6792 		}
6793 	}
6794 
6795 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6796 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6797 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
6798 		return L2CAP_TXSEQ_DUPLICATE;
6799 	}
6800 
6801 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6802 		/* A source of invalid packets is a "double poll" condition,
6803 		 * where delays cause us to send multiple poll packets.  If
6804 		 * the remote stack receives and processes both polls,
6805 		 * sequence numbers can wrap around in such a way that a
6806 		 * resent frame has a sequence number that looks like new data
6807 		 * with a sequence gap.  This would trigger an erroneous SREJ
6808 		 * request.
6809 		 *
6810 		 * Fortunately, this is impossible with a tx window that's
6811 		 * less than half of the maximum sequence number, which allows
6812 		 * invalid frames to be safely ignored.
6813 		 *
6814 		 * With tx window sizes greater than half of the tx window
6815 		 * maximum, the frame is invalid and cannot be ignored.  This
6816 		 * causes a disconnect.
6817 		 */
6818 
6819 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6820 			BT_DBG("Invalid/Ignore - txseq outside tx window");
6821 			return L2CAP_TXSEQ_INVALID_IGNORE;
6822 		} else {
6823 			BT_DBG("Invalid - txseq outside tx window");
6824 			return L2CAP_TXSEQ_INVALID;
6825 		}
6826 	} else {
6827 		BT_DBG("Unexpected - txseq indicates missing frames");
6828 		return L2CAP_TXSEQ_UNEXPECTED;
6829 	}
6830 }
6831 
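/* ERTM receive state machine handler for the default RECV state:
 * expected I-frames are reassembled and acked, a sequence gap queues the
 * frame and moves the channel to SREJ_SENT, and RR/RNR/REJ/SREJ S-frames
 * drive the transmit side.
 */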
6832 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6833 			       struct l2cap_ctrl *control,
6834 			       struct sk_buff *skb, u8 event)
6835 {
6836 	int err = 0;
6837 	bool skb_in_use = false;
6838 
6839 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6840 	       event);
6841 
6842 	switch (event) {
6843 	case L2CAP_EV_RECV_IFRAME:
6844 		switch (l2cap_classify_txseq(chan, control->txseq)) {
6845 		case L2CAP_TXSEQ_EXPECTED:
6846 			l2cap_pass_to_tx(chan, control);
6847 
6848 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6849 				BT_DBG("Busy, discarding expected seq %d",
6850 				       control->txseq);
6851 				break;
6852 			}
6853 
6854 			chan->expected_tx_seq = __next_seq(chan,
6855 							   control->txseq);
6856 
6857 			chan->buffer_seq = chan->expected_tx_seq;
6858 			skb_in_use = true;
6859 
6860 			err = l2cap_reassemble_sdu(chan, skb, control);
6861 			if (err)
6862 				break;
6863 
6864 			if (control->final) {
6865 				if (!test_and_clear_bit(CONN_REJ_ACT,
6866 							&chan->conn_state)) {
6867 					control->final = 0;
6868 					l2cap_retransmit_all(chan, control);
6869 					l2cap_ertm_send(chan);
6870 				}
6871 			}
6872 
6873 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6874 				l2cap_send_ack(chan);
6875 			break;
6876 		case L2CAP_TXSEQ_UNEXPECTED:
6877 			l2cap_pass_to_tx(chan, control);
6878 
6879 			/* Can't issue SREJ frames in the local busy state.
6880 			 * Drop this frame, it will be seen as missing
6881 			 * when local busy is exited.
6882 			 */
6883 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6884 				BT_DBG("Busy, discarding unexpected seq %d",
6885 				       control->txseq);
6886 				break;
6887 			}
6888 
6889 			/* There was a gap in the sequence, so an SREJ
6890 			 * must be sent for each missing frame.  The
6891 			 * current frame is stored for later use.
6892 			 */
6893 			skb_queue_tail(&chan->srej_q, skb);
6894 			skb_in_use = true;
6895 			BT_DBG("Queued %p (queue len %d)", skb,
6896 			       skb_queue_len(&chan->srej_q));
6897 
6898 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6899 			l2cap_seq_list_clear(&chan->srej_list);
6900 			l2cap_send_srej(chan, control->txseq);
6901 
6902 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6903 			break;
6904 		case L2CAP_TXSEQ_DUPLICATE:
6905 			l2cap_pass_to_tx(chan, control);
6906 			break;
6907 		case L2CAP_TXSEQ_INVALID_IGNORE:
6908 			break;
6909 		case L2CAP_TXSEQ_INVALID:
6910 		default:
6911 			l2cap_send_disconn_req(chan, ECONNRESET);
6912 			break;
6913 		}
6914 		break;
6915 	case L2CAP_EV_RECV_RR:
6916 		l2cap_pass_to_tx(chan, control);
6917 		if (control->final) {
6918 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6919 
6920 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6921 			    !__chan_is_moving(chan)) {
6922 				control->final = 0;
6923 				l2cap_retransmit_all(chan, control);
6924 			}
6925 
6926 			l2cap_ertm_send(chan);
6927 		} else if (control->poll) {
6928 			l2cap_send_i_or_rr_or_rnr(chan);
6929 		} else {
6930 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6931 					       &chan->conn_state) &&
6932 			    chan->unacked_frames)
6933 				__set_retrans_timer(chan);
6934 
6935 			l2cap_ertm_send(chan);
6936 		}
6937 		break;
6938 	case L2CAP_EV_RECV_RNR:
6939 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6940 		l2cap_pass_to_tx(chan, control);
6941 		if (control && control->poll) {
6942 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6943 			l2cap_send_rr_or_rnr(chan, 0);
6944 		}
6945 		__clear_retrans_timer(chan);
6946 		l2cap_seq_list_clear(&chan->retrans_list);
6947 		break;
6948 	case L2CAP_EV_RECV_REJ:
6949 		l2cap_handle_rej(chan, control);
6950 		break;
6951 	case L2CAP_EV_RECV_SREJ:
6952 		l2cap_handle_srej(chan, control);
6953 		break;
6954 	default:
6955 		break;
6956 	}
6957 
6958 	if (skb && !skb_in_use) {
6959 		BT_DBG("Freeing %p", skb);
6960 		kfree_skb(skb);
6961 	}
6962 
6963 	return err;
6964 }
6965 
6966 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6967 				    struct l2cap_ctrl *control,
6968 				    struct sk_buff *skb, u8 event)
6969 {
6970 	int err = 0;
6971 	u16 txseq = control->txseq;
6972 	bool skb_in_use = false;
6973 
6974 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6975 	       event);
6976 
6977 	switch (event) {
6978 	case L2CAP_EV_RECV_IFRAME:
6979 		switch (l2cap_classify_txseq(chan, txseq)) {
6980 		case L2CAP_TXSEQ_EXPECTED:
6981 			/* Keep frame for reassembly later */
6982 			l2cap_pass_to_tx(chan, control);
6983 			skb_queue_tail(&chan->srej_q, skb);
6984 			skb_in_use = true;
6985 			BT_DBG("Queued %p (queue len %d)", skb,
6986 			       skb_queue_len(&chan->srej_q));
6987 
6988 			chan->expected_tx_seq = __next_seq(chan, txseq);
6989 			break;
6990 		case L2CAP_TXSEQ_EXPECTED_SREJ:
6991 			l2cap_seq_list_pop(&chan->srej_list);
6992 
6993 			l2cap_pass_to_tx(chan, control);
6994 			skb_queue_tail(&chan->srej_q, skb);
6995 			skb_in_use = true;
6996 			BT_DBG("Queued %p (queue len %d)", skb,
6997 			       skb_queue_len(&chan->srej_q));
6998 
6999 			err = l2cap_rx_queued_iframes(chan);
7000 			if (err)
7001 				break;
7002 
7003 			break;
7004 		case L2CAP_TXSEQ_UNEXPECTED:
7005 			/* Got a frame that can't be reassembled yet.
7006 			 * Save it for later, and send SREJs to cover
7007 			 * the missing frames.
7008 			 */
7009 			skb_queue_tail(&chan->srej_q, skb);
7010 			skb_in_use = true;
7011 			BT_DBG("Queued %p (queue len %d)", skb,
7012 			       skb_queue_len(&chan->srej_q));
7013 
7014 			l2cap_pass_to_tx(chan, control);
7015 			l2cap_send_srej(chan, control->txseq);
7016 			break;
7017 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
7018 			/* This frame was requested with an SREJ, but
7019 			 * some expected retransmitted frames are
7020 			 * missing.  Request retransmission of missing
7021 			 * SREJ'd frames.
7022 			 */
7023 			skb_queue_tail(&chan->srej_q, skb);
7024 			skb_in_use = true;
7025 			BT_DBG("Queued %p (queue len %d)", skb,
7026 			       skb_queue_len(&chan->srej_q));
7027 
7028 			l2cap_pass_to_tx(chan, control);
7029 			l2cap_send_srej_list(chan, control->txseq);
7030 			break;
7031 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
7032 			/* We've already queued this frame.  Drop this copy. */
7033 			l2cap_pass_to_tx(chan, control);
7034 			break;
7035 		case L2CAP_TXSEQ_DUPLICATE:
7036 			/* Expecting a later sequence number, so this frame
7037 			 * was already received.  Ignore it completely.
7038 			 */
7039 			break;
7040 		case L2CAP_TXSEQ_INVALID_IGNORE:
7041 			break;
7042 		case L2CAP_TXSEQ_INVALID:
7043 		default:
7044 			l2cap_send_disconn_req(chan, ECONNRESET);
7045 			break;
7046 		}
7047 		break;
7048 	case L2CAP_EV_RECV_RR:
7049 		l2cap_pass_to_tx(chan, control);
7050 		if (control->final) {
7051 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7052 
7053 			if (!test_and_clear_bit(CONN_REJ_ACT,
7054 						&chan->conn_state)) {
7055 				control->final = 0;
7056 				l2cap_retransmit_all(chan, control);
7057 			}
7058 
7059 			l2cap_ertm_send(chan);
7060 		} else if (control->poll) {
7061 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7062 					       &chan->conn_state) &&
7063 			    chan->unacked_frames) {
7064 				__set_retrans_timer(chan);
7065 			}
7066 
7067 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
7068 			l2cap_send_srej_tail(chan);
7069 		} else {
7070 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7071 					       &chan->conn_state) &&
7072 			    chan->unacked_frames)
7073 				__set_retrans_timer(chan);
7074 
7075 			l2cap_send_ack(chan);
7076 		}
7077 		break;
7078 	case L2CAP_EV_RECV_RNR:
7079 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7080 		l2cap_pass_to_tx(chan, control);
7081 		if (control->poll) {
7082 			l2cap_send_srej_tail(chan);
7083 		} else {
7084 			struct l2cap_ctrl rr_control;
7085 			memset(&rr_control, 0, sizeof(rr_control));
7086 			rr_control.sframe = 1;
7087 			rr_control.super = L2CAP_SUPER_RR;
7088 			rr_control.reqseq = chan->buffer_seq;
7089 			l2cap_send_sframe(chan, &rr_control);
7090 		}
7091 
7092 		break;
7093 	case L2CAP_EV_RECV_REJ:
7094 		l2cap_handle_rej(chan, control);
7095 		break;
7096 	case L2CAP_EV_RECV_SREJ:
7097 		l2cap_handle_srej(chan, control);
7098 		break;
7099 	}
7100 
7101 	if (skb && !skb_in_use) {
7102 		BT_DBG("Freeing %p", skb);
7103 		kfree_skb(skb);
7104 	}
7105 
7106 	return err;
7107 }
7108 
7109 static int l2cap_finish_move(struct l2cap_chan *chan)
7110 {
7111 	BT_DBG("chan %p", chan);
7112 
7113 	chan->rx_state = L2CAP_RX_STATE_RECV;
7114 
7115 	if (chan->hs_hcon)
7116 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7117 	else
7118 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7119 
7120 	return l2cap_resegment(chan);
7121 }
7122 
7123 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
7124 				 struct l2cap_ctrl *control,
7125 				 struct sk_buff *skb, u8 event)
7126 {
7127 	int err;
7128 
7129 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7130 	       event);
7131 
7132 	if (!control->poll)
7133 		return -EPROTO;
7134 
7135 	l2cap_process_reqseq(chan, control->reqseq);
7136 
7137 	if (!skb_queue_empty(&chan->tx_q))
7138 		chan->tx_send_head = skb_peek(&chan->tx_q);
7139 	else
7140 		chan->tx_send_head = NULL;
7141 
7142 	/* Rewind next_tx_seq to the point expected
7143 	 * by the receiver.
7144 	 */
7145 	chan->next_tx_seq = control->reqseq;
7146 	chan->unacked_frames = 0;
7147 
7148 	err = l2cap_finish_move(chan);
7149 	if (err)
7150 		return err;
7151 
7152 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
7153 	l2cap_send_i_or_rr_or_rnr(chan);
7154 
7155 	if (event == L2CAP_EV_RECV_IFRAME)
7156 		return -EPROTO;
7157 
7158 	return l2cap_rx_state_recv(chan, control, NULL, event);
7159 }
7160 
7161 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
7162 				 struct l2cap_ctrl *control,
7163 				 struct sk_buff *skb, u8 event)
7164 {
7165 	int err;
7166 
7167 	if (!control->final)
7168 		return -EPROTO;
7169 
7170 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7171 
7172 	chan->rx_state = L2CAP_RX_STATE_RECV;
7173 	l2cap_process_reqseq(chan, control->reqseq);
7174 
7175 	if (!skb_queue_empty(&chan->tx_q))
7176 		chan->tx_send_head = skb_peek(&chan->tx_q);
7177 	else
7178 		chan->tx_send_head = NULL;
7179 
7180 	/* Rewind next_tx_seq to the point expected
7181 	 * by the receiver.
7182 	 */
7183 	chan->next_tx_seq = control->reqseq;
7184 	chan->unacked_frames = 0;
7185 
7186 	if (chan->hs_hcon)
7187 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7188 	else
7189 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7190 
7191 	err = l2cap_resegment(chan);
7192 
7193 	if (!err)
7194 		err = l2cap_rx_state_recv(chan, control, skb, event);
7195 
7196 	return err;
7197 }
7198 
7199 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7200 {
7201 	/* Make sure reqseq is for a packet that has been sent but not acked */
7202 	u16 unacked;
7203 
7204 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7205 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
7206 }
7207 
7208 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7209 		    struct sk_buff *skb, u8 event)
7210 {
7211 	int err = 0;
7212 
7213 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7214 	       control, skb, event, chan->rx_state);
7215 
7216 	if (__valid_reqseq(chan, control->reqseq)) {
7217 		switch (chan->rx_state) {
7218 		case L2CAP_RX_STATE_RECV:
7219 			err = l2cap_rx_state_recv(chan, control, skb, event);
7220 			break;
7221 		case L2CAP_RX_STATE_SREJ_SENT:
7222 			err = l2cap_rx_state_srej_sent(chan, control, skb,
7223 						       event);
7224 			break;
7225 		case L2CAP_RX_STATE_WAIT_P:
7226 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
7227 			break;
7228 		case L2CAP_RX_STATE_WAIT_F:
7229 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
7230 			break;
7231 		default:
7232 			/* shut it down */
7233 			break;
7234 		}
7235 	} else {
7236 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
7237 		       control->reqseq, chan->next_tx_seq,
7238 		       chan->expected_ack_seq);
7239 		l2cap_send_disconn_req(chan, ECONNRESET);
7240 	}
7241 
7242 	return err;
7243 }
7244 
7245 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7246 			   struct sk_buff *skb)
7247 {
7248 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
7249 	       chan->rx_state);
7250 
7251 	if (l2cap_classify_txseq(chan, control->txseq) ==
7252 	    L2CAP_TXSEQ_EXPECTED) {
7253 		l2cap_pass_to_tx(chan, control);
7254 
7255 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
7256 		       __next_seq(chan, chan->buffer_seq));
7257 
7258 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
7259 
7260 		l2cap_reassemble_sdu(chan, skb, control);
7261 	} else {
7262 		if (chan->sdu) {
7263 			kfree_skb(chan->sdu);
7264 			chan->sdu = NULL;
7265 		}
7266 		chan->sdu_last_frag = NULL;
7267 		chan->sdu_len = 0;
7268 
7269 		if (skb) {
7270 			BT_DBG("Freeing %p", skb);
7271 			kfree_skb(skb);
7272 		}
7273 	}
7274 
7275 	chan->last_acked_seq = control->txseq;
7276 	chan->expected_tx_seq = __next_seq(chan, control->txseq);
7277 
7278 	return 0;
7279 }
7280 
7281 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7282 {
7283 	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
7284 	u16 len;
7285 	u8 event;
7286 
7287 	__unpack_control(chan, skb);
7288 
7289 	len = skb->len;
7290 
7291 	/*
7292 	 * We can just drop the corrupted I-frame here.
7293 	 * The receive side will notice the gap and start the proper
7294 	 * recovery procedure, asking for retransmission.
7295 	 */
7296 	if (l2cap_check_fcs(chan, skb))
7297 		goto drop;
7298 
7299 	if (!control->sframe && control->sar == L2CAP_SAR_START)
7300 		len -= L2CAP_SDULEN_SIZE;
7301 
7302 	if (chan->fcs == L2CAP_FCS_CRC16)
7303 		len -= L2CAP_FCS_SIZE;
7304 
7305 	if (len > chan->mps) {
7306 		l2cap_send_disconn_req(chan, ECONNRESET);
7307 		goto drop;
7308 	}
7309 
7310 	if (chan->ops->filter) {
7311 		if (chan->ops->filter(chan, skb))
7312 			goto drop;
7313 	}
7314 
7315 	if (!control->sframe) {
7316 		int err;
7317 
7318 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
7319 		       control->sar, control->reqseq, control->final,
7320 		       control->txseq);
7321 
7322 		/* Validate F-bit - F=0 always valid, F=1 only
7323 		 * valid in TX WAIT_F
7324 		 */
7325 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
7326 			goto drop;
7327 
7328 		if (chan->mode != L2CAP_MODE_STREAMING) {
7329 			event = L2CAP_EV_RECV_IFRAME;
7330 			err = l2cap_rx(chan, control, skb, event);
7331 		} else {
7332 			err = l2cap_stream_rx(chan, control, skb);
7333 		}
7334 
7335 		if (err)
7336 			l2cap_send_disconn_req(chan, ECONNRESET);
7337 	} else {
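		/* Map the S-frame supervisory function directly onto the
		 * receive state machine event; the array is indexed by
		 * control->super.
		 */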
7338 		const u8 rx_func_to_event[4] = {
7339 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
7340 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
7341 		};
7342 
7343 		/* Only I-frames are expected in streaming mode */
7344 		if (chan->mode == L2CAP_MODE_STREAMING)
7345 			goto drop;
7346 
7347 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
7348 		       control->reqseq, control->final, control->poll,
7349 		       control->super);
7350 
7351 		if (len != 0) {
7352 			BT_ERR("Trailing bytes: %d in sframe", len);
7353 			l2cap_send_disconn_req(chan, ECONNRESET);
7354 			goto drop;
7355 		}
7356 
7357 		/* Validate F and P bits */
7358 		if (control->final && (control->poll ||
7359 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
7360 			goto drop;
7361 
7362 		event = rx_func_to_event[control->super];
7363 		if (l2cap_rx(chan, control, skb, event))
7364 			l2cap_send_disconn_req(chan, ECONNRESET);
7365 	}
7366 
7367 	return 0;
7368 
7369 drop:
7370 	kfree_skb(skb);
7371 	return 0;
7372 }
7373 
7374 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7375 {
7376 	struct l2cap_conn *conn = chan->conn;
7377 	struct l2cap_le_credits pkt;
7378 	u16 return_credits;
7379 
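	/* Aim to keep the remote supplied with enough credits for one
	 * maximally fragmented SDU (imtu / mps + 1); credits are only
	 * returned once the remote drops below that target.
	 */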
7380 	return_credits = (chan->imtu / chan->mps) + 1;
7381 
7382 	if (chan->rx_credits >= return_credits)
7383 		return;
7384 
7385 	return_credits -= chan->rx_credits;
7386 
7387 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7388 
7389 	chan->rx_credits += return_credits;
7390 
7391 	pkt.cid     = cpu_to_le16(chan->scid);
7392 	pkt.credits = cpu_to_le16(return_credits);
7393 
7394 	chan->ident = l2cap_get_ident(conn);
7395 
7396 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
7397 }
7398 
7399 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7400 {
7401 	int err;
7402 
7403 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7404 
7405 	/* Wait for recv to confirm reception before updating the credits */
7406 	err = chan->ops->recv(chan, skb);
7407 
7408 	/* Update credits whenever an SDU is received */
7409 	l2cap_chan_le_send_credits(chan);
7410 
7411 	return err;
7412 }
7413 
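/* Receive path for LE and enhanced credit based flow control modes: each
 * PDU consumes one of our rx credits, the first PDU of an SDU starts with
 * the SDU length, and further PDUs are appended until that length is
 * reached.
 */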
7414 static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7415 {
7416 	int err;
7417 
7418 	if (!chan->rx_credits) {
7419 		BT_ERR("No credits to receive LE L2CAP data");
7420 		l2cap_send_disconn_req(chan, ECONNRESET);
7421 		return -ENOBUFS;
7422 	}
7423 
7424 	if (chan->imtu < skb->len) {
7425 		BT_ERR("Too big LE L2CAP PDU");
7426 		return -ENOBUFS;
7427 	}
7428 
7429 	chan->rx_credits--;
7430 	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
7431 
7432 	/* Update if the remote had run out of credits; this should only happen
7433 	 * if the remote is not using the entire MPS.
7434 	 */
7435 	if (!chan->rx_credits)
7436 		l2cap_chan_le_send_credits(chan);
7437 
7438 	err = 0;
7439 
7440 	if (!chan->sdu) {
7441 		u16 sdu_len;
7442 
7443 		sdu_len = get_unaligned_le16(skb->data);
7444 		skb_pull(skb, L2CAP_SDULEN_SIZE);
7445 
7446 		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
7447 		       sdu_len, skb->len, chan->imtu);
7448 
7449 		if (sdu_len > chan->imtu) {
7450 			BT_ERR("Too big LE L2CAP SDU length received");
7451 			err = -EMSGSIZE;
7452 			goto failed;
7453 		}
7454 
7455 		if (skb->len > sdu_len) {
7456 			BT_ERR("Too much LE L2CAP data received");
7457 			err = -EINVAL;
7458 			goto failed;
7459 		}
7460 
7461 		if (skb->len == sdu_len)
7462 			return l2cap_ecred_recv(chan, skb);
7463 
7464 		chan->sdu = skb;
7465 		chan->sdu_len = sdu_len;
7466 		chan->sdu_last_frag = skb;
7467 
7468 		/* Detect if remote is not able to use the selected MPS */
7469 		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
7470 			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
7471 
7472 			/* Adjust the number of credits */
7473 			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
7474 			chan->mps = mps_len;
7475 			l2cap_chan_le_send_credits(chan);
7476 		}
7477 
7478 		return 0;
7479 	}
7480 
7481 	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
7482 	       chan->sdu->len, skb->len, chan->sdu_len);
7483 
7484 	if (chan->sdu->len + skb->len > chan->sdu_len) {
7485 		BT_ERR("Too much LE L2CAP data received");
7486 		err = -EINVAL;
7487 		goto failed;
7488 	}
7489 
7490 	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
7491 	skb = NULL;
7492 
7493 	if (chan->sdu->len == chan->sdu_len) {
7494 		err = l2cap_ecred_recv(chan, chan->sdu);
7495 		if (!err) {
7496 			chan->sdu = NULL;
7497 			chan->sdu_last_frag = NULL;
7498 			chan->sdu_len = 0;
7499 		}
7500 	}
7501 
7502 failed:
7503 	if (err) {
7504 		kfree_skb(skb);
7505 		kfree_skb(chan->sdu);
7506 		chan->sdu = NULL;
7507 		chan->sdu_last_frag = NULL;
7508 		chan->sdu_len = 0;
7509 	}
7510 
7511 	/* We can't return an error here since we took care of the skb
7512 	 * freeing internally. An error return would cause the caller to
7513 	 * do a double-free of the skb.
7514 	 */
7515 	return 0;
7516 }
7517 
7518 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
7519 			       struct sk_buff *skb)
7520 {
7521 	struct l2cap_chan *chan;
7522 
7523 	chan = l2cap_get_chan_by_scid(conn, cid);
7524 	if (!chan) {
7525 		if (cid == L2CAP_CID_A2MP) {
7526 			chan = a2mp_channel_create(conn, skb);
7527 			if (!chan) {
7528 				kfree_skb(skb);
7529 				return;
7530 			}
7531 
7532 			l2cap_chan_lock(chan);
7533 		} else {
7534 			BT_DBG("unknown cid 0x%4.4x", cid);
7535 			/* Drop packet and return */
7536 			kfree_skb(skb);
7537 			return;
7538 		}
7539 	}
7540 
7541 	BT_DBG("chan %p, len %d", chan, skb->len);
7542 
7543 	/* If we receive data on a fixed channel before the info req/rsp
7544 	 * procedure is done, simply assume that the channel is supported
7545 	 * and mark it as ready.
7546 	 */
7547 	if (chan->chan_type == L2CAP_CHAN_FIXED)
7548 		l2cap_chan_ready(chan);
7549 
7550 	if (chan->state != BT_CONNECTED)
7551 		goto drop;
7552 
7553 	switch (chan->mode) {
7554 	case L2CAP_MODE_LE_FLOWCTL:
7555 	case L2CAP_MODE_EXT_FLOWCTL:
7556 		if (l2cap_ecred_data_rcv(chan, skb) < 0)
7557 			goto drop;
7558 
7559 		goto done;
7560 
7561 	case L2CAP_MODE_BASIC:
7562 		/* If the socket recv buffer overflows we drop data here,
7563 		 * which is *bad* because L2CAP has to be reliable.
7564 		 * But we don't have any other choice: basic mode L2CAP
7565 		 * doesn't provide a flow control mechanism. */
7566 
7567 		if (chan->imtu < skb->len) {
7568 			BT_ERR("Dropping L2CAP data: receive buffer overflow");
7569 			goto drop;
7570 		}
7571 
7572 		if (!chan->ops->recv(chan, skb))
7573 			goto done;
7574 		break;
7575 
7576 	case L2CAP_MODE_ERTM:
7577 	case L2CAP_MODE_STREAMING:
7578 		l2cap_data_rcv(chan, skb);
7579 		goto done;
7580 
7581 	default:
7582 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
7583 		break;
7584 	}
7585 
7586 drop:
7587 	kfree_skb(skb);
7588 
7589 done:
7590 	l2cap_chan_unlock(chan);
7591 }
7592 
7593 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7594 				  struct sk_buff *skb)
7595 {
7596 	struct hci_conn *hcon = conn->hcon;
7597 	struct l2cap_chan *chan;
7598 
7599 	if (hcon->type != ACL_LINK)
7600 		goto free_skb;
7601 
7602 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7603 					ACL_LINK);
7604 	if (!chan)
7605 		goto free_skb;
7606 
7607 	BT_DBG("chan %p, len %d", chan, skb->len);
7608 
7609 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7610 		goto drop;
7611 
7612 	if (chan->imtu < skb->len)
7613 		goto drop;
7614 
7615 	/* Store remote BD_ADDR and PSM for msg_name */
7616 	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
7617 	bt_cb(skb)->l2cap.psm = psm;
7618 
7619 	if (!chan->ops->recv(chan, skb)) {
7620 		l2cap_chan_put(chan);
7621 		return;
7622 	}
7623 
7624 drop:
7625 	l2cap_chan_put(chan);
7626 free_skb:
7627 	kfree_skb(skb);
7628 }
7629 
7630 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7631 {
7632 	struct l2cap_hdr *lh = (void *) skb->data;
7633 	struct hci_conn *hcon = conn->hcon;
7634 	u16 cid, len;
7635 	__le16 psm;
7636 
7637 	if (hcon->state != BT_CONNECTED) {
7638 		BT_DBG("queueing pending rx skb");
7639 		skb_queue_tail(&conn->pending_rx, skb);
7640 		return;
7641 	}
7642 
7643 	skb_pull(skb, L2CAP_HDR_SIZE);
7644 	cid = __le16_to_cpu(lh->cid);
7645 	len = __le16_to_cpu(lh->len);
7646 
7647 	if (len != skb->len) {
7648 		kfree_skb(skb);
7649 		return;
7650 	}
7651 
7652 	/* Since we can't actively block incoming LE connections we must
7653 	 * at least ensure that we ignore incoming data from them.
7654 	 */
7655 	if (hcon->type == LE_LINK &&
7656 	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
7657 				   bdaddr_dst_type(hcon))) {
7658 		kfree_skb(skb);
7659 		return;
7660 	}
7661 
7662 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
7663 
7664 	switch (cid) {
7665 	case L2CAP_CID_SIGNALING:
7666 		l2cap_sig_channel(conn, skb);
7667 		break;
7668 
7669 	case L2CAP_CID_CONN_LESS:
7670 		psm = get_unaligned((__le16 *) skb->data);
7671 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
7672 		l2cap_conless_channel(conn, psm, skb);
7673 		break;
7674 
7675 	case L2CAP_CID_LE_SIGNALING:
7676 		l2cap_le_sig_channel(conn, skb);
7677 		break;
7678 
7679 	default:
7680 		l2cap_data_channel(conn, cid, skb);
7681 		break;
7682 	}
7683 }
7684 
7685 static void process_pending_rx(struct work_struct *work)
7686 {
7687 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7688 					       pending_rx_work);
7689 	struct sk_buff *skb;
7690 
7691 	BT_DBG("");
7692 
7693 	while ((skb = skb_dequeue(&conn->pending_rx)))
7694 		l2cap_recv_frame(conn, skb);
7695 }
7696 
7697 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7698 {
7699 	struct l2cap_conn *conn = hcon->l2cap_data;
7700 	struct hci_chan *hchan;
7701 
7702 	if (conn)
7703 		return conn;
7704 
7705 	hchan = hci_chan_create(hcon);
7706 	if (!hchan)
7707 		return NULL;
7708 
7709 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
7710 	if (!conn) {
7711 		hci_chan_del(hchan);
7712 		return NULL;
7713 	}
7714 
7715 	kref_init(&conn->ref);
7716 	hcon->l2cap_data = conn;
7717 	conn->hcon = hci_conn_get(hcon);
7718 	conn->hchan = hchan;
7719 
7720 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7721 
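	/* Prefer the controller's LE specific MTU for LE links and fall
	 * back to the ACL MTU otherwise.
	 */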
7722 	switch (hcon->type) {
7723 	case LE_LINK:
7724 		if (hcon->hdev->le_mtu) {
7725 			conn->mtu = hcon->hdev->le_mtu;
7726 			break;
7727 		}
7728 		fallthrough;
7729 	default:
7730 		conn->mtu = hcon->hdev->acl_mtu;
7731 		break;
7732 	}
7733 
7734 	conn->feat_mask = 0;
7735 
7736 	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7737 
7738 	if (hcon->type == ACL_LINK &&
7739 	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7740 		conn->local_fixed_chan |= L2CAP_FC_A2MP;
7741 
7742 	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7743 	    (bredr_sc_enabled(hcon->hdev) ||
7744 	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7745 		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7746 
7747 	mutex_init(&conn->ident_lock);
7748 	mutex_init(&conn->chan_lock);
7749 
7750 	INIT_LIST_HEAD(&conn->chan_l);
7751 	INIT_LIST_HEAD(&conn->users);
7752 
7753 	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7754 
7755 	skb_queue_head_init(&conn->pending_rx);
7756 	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7757 	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7758 
7759 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7760 
7761 	return conn;
7762 }
7763 
7764 static bool is_valid_psm(u16 psm, u8 dst_type) {
7765 	if (!psm)
7766 		return false;
7767 
7768 	if (bdaddr_type_is_le(dst_type))
7769 		return (psm <= 0x00ff);
7770 
7771 	/* PSM must be odd and lsb of upper byte must be 0 */
7772 	return ((psm & 0x0101) == 0x0001);
7773 }
7774 
7775 struct l2cap_chan_data {
7776 	struct l2cap_chan *chan;
7777 	struct pid *pid;
7778 	int count;
7779 };
7780 
7781 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7782 {
7783 	struct l2cap_chan_data *d = data;
7784 	struct pid *pid;
7785 
7786 	if (chan == d->chan)
7787 		return;
7788 
7789 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7790 		return;
7791 
7792 	pid = chan->ops->get_peer_pid(chan);
7793 
7794 	/* Only count deferred channels with the same PID/PSM */
7795 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7796 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7797 		return;
7798 
7799 	d->count++;
7800 }
7801 
7802 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7803 		       bdaddr_t *dst, u8 dst_type)
7804 {
7805 	struct l2cap_conn *conn;
7806 	struct hci_conn *hcon;
7807 	struct hci_dev *hdev;
7808 	int err;
7809 
7810 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
7811 	       dst, dst_type, __le16_to_cpu(psm), chan->mode);
7812 
7813 	hdev = hci_get_route(dst, &chan->src, chan->src_type);
7814 	if (!hdev)
7815 		return -EHOSTUNREACH;
7816 
7817 	hci_dev_lock(hdev);
7818 
7819 	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7820 	    chan->chan_type != L2CAP_CHAN_RAW) {
7821 		err = -EINVAL;
7822 		goto done;
7823 	}
7824 
7825 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7826 		err = -EINVAL;
7827 		goto done;
7828 	}
7829 
7830 	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7831 		err = -EINVAL;
7832 		goto done;
7833 	}
7834 
7835 	switch (chan->mode) {
7836 	case L2CAP_MODE_BASIC:
7837 		break;
7838 	case L2CAP_MODE_LE_FLOWCTL:
7839 		break;
7840 	case L2CAP_MODE_EXT_FLOWCTL:
7841 		if (!enable_ecred) {
7842 			err = -EOPNOTSUPP;
7843 			goto done;
7844 		}
7845 		break;
7846 	case L2CAP_MODE_ERTM:
7847 	case L2CAP_MODE_STREAMING:
7848 		if (!disable_ertm)
7849 			break;
7850 		fallthrough;
7851 	default:
7852 		err = -EOPNOTSUPP;
7853 		goto done;
7854 	}
7855 
7856 	switch (chan->state) {
7857 	case BT_CONNECT:
7858 	case BT_CONNECT2:
7859 	case BT_CONFIG:
7860 		/* Already connecting */
7861 		err = 0;
7862 		goto done;
7863 
7864 	case BT_CONNECTED:
7865 		/* Already connected */
7866 		err = -EISCONN;
7867 		goto done;
7868 
7869 	case BT_OPEN:
7870 	case BT_BOUND:
7871 		/* Can connect */
7872 		break;
7873 
7874 	default:
7875 		err = -EBADFD;
7876 		goto done;
7877 	}
7878 
7879 	/* Set destination address and psm */
7880 	bacpy(&chan->dst, dst);
7881 	chan->dst_type = dst_type;
7882 
7883 	chan->psm = psm;
7884 	chan->dcid = cid;
7885 
7886 	if (bdaddr_type_is_le(dst_type)) {
7887 		/* Convert from L2CAP channel address type to HCI address type
7888 		 */
7889 		if (dst_type == BDADDR_LE_PUBLIC)
7890 			dst_type = ADDR_LE_DEV_PUBLIC;
7891 		else
7892 			dst_type = ADDR_LE_DEV_RANDOM;
7893 
7894 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7895 			hcon = hci_connect_le(hdev, dst, dst_type,
7896 					      chan->sec_level,
7897 					      HCI_LE_CONN_TIMEOUT,
7898 					      HCI_ROLE_SLAVE, NULL);
7899 		else
7900 			hcon = hci_connect_le_scan(hdev, dst, dst_type,
7901 						   chan->sec_level,
7902 						   HCI_LE_CONN_TIMEOUT,
7903 						   CONN_REASON_L2CAP_CHAN);
7904 
7905 	} else {
7906 		u8 auth_type = l2cap_get_auth_type(chan);
7907 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
7908 				       CONN_REASON_L2CAP_CHAN);
7909 	}
7910 
7911 	if (IS_ERR(hcon)) {
7912 		err = PTR_ERR(hcon);
7913 		goto done;
7914 	}
7915 
7916 	conn = l2cap_conn_add(hcon);
7917 	if (!conn) {
7918 		hci_conn_drop(hcon);
7919 		err = -ENOMEM;
7920 		goto done;
7921 	}
7922 
7923 	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
7924 		struct l2cap_chan_data data;
7925 
7926 		data.chan = chan;
7927 		data.pid = chan->ops->get_peer_pid(chan);
7928 		data.count = 1;
7929 
7930 		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
7931 
7932 		/* Check that there aren't too many channels being connected */
7933 		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
7934 			hci_conn_drop(hcon);
7935 			err = -EPROTO;
7936 			goto done;
7937 		}
7938 	}
7939 
7940 	mutex_lock(&conn->chan_lock);
7941 	l2cap_chan_lock(chan);
7942 
7943 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7944 		hci_conn_drop(hcon);
7945 		err = -EBUSY;
7946 		goto chan_unlock;
7947 	}
7948 
7949 	/* Update the source address of the socket */
7950 	bacpy(&chan->src, &hcon->src);
7951 	chan->src_type = bdaddr_src_type(hcon);
7952 
7953 	__l2cap_chan_add(conn, chan);
7954 
7955 	/* l2cap_chan_add takes its own ref so we can drop this one */
7956 	hci_conn_drop(hcon);
7957 
7958 	l2cap_state_change(chan, BT_CONNECT);
7959 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7960 
7961 	/* Release chan->sport so that it can be reused by other
7962 	 * sockets (as it's only used for listening sockets).
7963 	 */
7964 	write_lock(&chan_list_lock);
7965 	chan->sport = 0;
7966 	write_unlock(&chan_list_lock);
7967 
7968 	if (hcon->state == BT_CONNECTED) {
7969 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7970 			__clear_chan_timer(chan);
7971 			if (l2cap_chan_check_security(chan, true))
7972 				l2cap_state_change(chan, BT_CONNECTED);
7973 		} else
7974 			l2cap_do_start(chan);
7975 	}
7976 
7977 	err = 0;
7978 
7979 chan_unlock:
7980 	l2cap_chan_unlock(chan);
7981 	mutex_unlock(&conn->chan_lock);
7982 done:
7983 	hci_dev_unlock(hdev);
7984 	hci_dev_put(hdev);
7985 	return err;
7986 }
7987 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
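/* A hedged usage sketch (not taken from this file): in-kernel users such as
 * the L2CAP socket layer allocate and configure a channel (src, src_type,
 * sec_level, mode) and then call, with their own psm/cid/dst values:
 *
 *	err = l2cap_chan_connect(chan, cpu_to_le16(psm), cid, dst, dst_type);
 *
 * The return value only covers the synchronous steps above; completion or
 * failure of the connection itself is reported through the channel ops
 * callbacks (ready/close).
 */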
7988 
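/* Send an L2CAP_ECRED_RECONF_REQ for a single channel, advertising its
 * current imtu and mps.  The PDU is the fixed reconfigure header followed by
 * one __le16 source CID.
 */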
7989 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
7990 {
7991 	struct l2cap_conn *conn = chan->conn;
7992 	struct {
7993 		struct l2cap_ecred_reconf_req req;
7994 		__le16 scid;
7995 	} pdu;
7996 
7997 	pdu.req.mtu = cpu_to_le16(chan->imtu);
7998 	pdu.req.mps = cpu_to_le16(chan->mps);
7999 	pdu.scid    = cpu_to_le16(chan->scid);
8000 
8001 	chan->ident = l2cap_get_ident(conn);
8002 
8003 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
8004 		       sizeof(pdu), &pdu);
8005 }
8006 
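/* Reconfigure the receive MTU of an enhanced credit based channel.  The new
 * MTU may not be smaller than the current imtu; such requests are rejected
 * with -EINVAL before any PDU is sent, since an ECRED reconfiguration is not
 * allowed to reduce the MTU.
 */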
8007 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8008 {
8009 	if (chan->imtu > mtu)
8010 		return -EINVAL;
8011 
8012 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
8013 
8014 	chan->imtu = mtu;
8015 
8016 	l2cap_ecred_reconfigure(chan);
8017 
8018 	return 0;
8019 }
8020 
8021 /* ---- L2CAP interface with lower layer (HCI) ---- */
8022 
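/* Incoming BR/EDR connection indication from HCI.  Scan the global channel
 * list for listeners and report whether the connection should be accepted
 * and whether a role switch is allowed; listeners bound to this adapter's
 * own address take precedence over BDADDR_ANY wildcard listeners.
 */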
8023 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8024 {
8025 	int exact = 0, lm1 = 0, lm2 = 0;
8026 	struct l2cap_chan *c;
8027 
8028 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8029 
8030 	/* Find listening sockets and check their link_mode */
8031 	read_lock(&chan_list_lock);
8032 	list_for_each_entry(c, &chan_list, global_l) {
8033 		if (c->state != BT_LISTEN)
8034 			continue;
8035 
8036 		if (!bacmp(&c->src, &hdev->bdaddr)) {
8037 			lm1 |= HCI_LM_ACCEPT;
8038 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8039 				lm1 |= HCI_LM_MASTER;
8040 			exact++;
8041 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
8042 			lm2 |= HCI_LM_ACCEPT;
8043 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8044 				lm2 |= HCI_LM_MASTER;
8045 		}
8046 	}
8047 	read_unlock(&chan_list_lock);
8048 
8049 	return exact ? lm1 : lm2;
8050 }
8051 
8052 /* Find the next fixed channel in BT_LISTEN state, continuing the
8053  * iteration either from an existing channel in the list or from the
8054  * beginning of the global list (by passing NULL as the first parameter).
8055  */
8056 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
8057 						  struct hci_conn *hcon)
8058 {
8059 	u8 src_type = bdaddr_src_type(hcon);
8060 
8061 	read_lock(&chan_list_lock);
8062 
8063 	if (c)
8064 		c = list_next_entry(c, global_l);
8065 	else
8066 		c = list_entry(chan_list.next, typeof(*c), global_l);
8067 
8068 	list_for_each_entry_from(c, &chan_list, global_l) {
8069 		if (c->chan_type != L2CAP_CHAN_FIXED)
8070 			continue;
8071 		if (c->state != BT_LISTEN)
8072 			continue;
8073 		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
8074 			continue;
8075 		if (src_type != c->src_type)
8076 			continue;
8077 
8078 		l2cap_chan_hold(c);
8079 		read_unlock(&chan_list_lock);
8080 		return c;
8081 	}
8082 
8083 	read_unlock(&chan_list_lock);
8084 
8085 	return NULL;
8086 }
8087 
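/* HCI connect-complete callback.  A failed link tears the l2cap_conn down.
 * On success, create (or look up) the l2cap_conn and, unless the peer is on
 * the blocked-device list, instantiate a channel for every matching
 * fixed-channel listener (existing client channels keep their CID) before
 * calling l2cap_conn_ready() to kick off pending channel setup.
 */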
8088 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
8089 {
8090 	struct hci_dev *hdev = hcon->hdev;
8091 	struct l2cap_conn *conn;
8092 	struct l2cap_chan *pchan;
8093 	u8 dst_type;
8094 
8095 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8096 		return;
8097 
8098 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
8099 
8100 	if (status) {
8101 		l2cap_conn_del(hcon, bt_to_errno(status));
8102 		return;
8103 	}
8104 
8105 	conn = l2cap_conn_add(hcon);
8106 	if (!conn)
8107 		return;
8108 
8109 	dst_type = bdaddr_dst_type(hcon);
8110 
8111 	/* If device is blocked, do not create channels for it */
8112 	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
8113 		return;
8114 
8115 	/* Find fixed channels and notify them of the new connection. We
8116 	 * use multiple individual lookups, continuing each time where
8117 	 * we left off, because the list lock would prevent calling the
8118 	 * potentially sleeping l2cap_chan_lock() function.
8119 	 */
8120 	pchan = l2cap_global_fixed_chan(NULL, hcon);
8121 	while (pchan) {
8122 		struct l2cap_chan *chan, *next;
8123 
8124 		/* Client fixed channels should override server ones */
8125 		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
8126 			goto next;
8127 
8128 		l2cap_chan_lock(pchan);
8129 		chan = pchan->ops->new_connection(pchan);
8130 		if (chan) {
8131 			bacpy(&chan->src, &hcon->src);
8132 			bacpy(&chan->dst, &hcon->dst);
8133 			chan->src_type = bdaddr_src_type(hcon);
8134 			chan->dst_type = dst_type;
8135 
8136 			__l2cap_chan_add(conn, chan);
8137 		}
8138 
8139 		l2cap_chan_unlock(pchan);
8140 next:
8141 		next = l2cap_global_fixed_chan(pchan, hcon);
8142 		l2cap_chan_put(pchan);
8143 		pchan = next;
8144 	}
8145 
8146 	l2cap_conn_ready(conn);
8147 }
8148 
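/* Return the disconnect reason recorded on the l2cap_conn, defaulting to
 * "remote user terminated connection" when no conn exists, so the HCI core
 * can use it when tearing the link down.
 */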
8149 int l2cap_disconn_ind(struct hci_conn *hcon)
8150 {
8151 	struct l2cap_conn *conn = hcon->l2cap_data;
8152 
8153 	BT_DBG("hcon %p", hcon);
8154 
8155 	if (!conn)
8156 		return HCI_ERROR_REMOTE_USER_TERM;
8157 	return conn->disc_reason;
8158 }
8159 
8160 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8161 {
8162 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8163 		return;
8164 
8165 	BT_DBG("hcon %p reason %d", hcon, reason);
8166 
8167 	l2cap_conn_del(hcon, bt_to_errno(reason));
8168 }
8169 
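/* React to an encryption change on a connection oriented channel: when
 * encryption is lost, BT_SECURITY_MEDIUM channels are given the
 * L2CAP_ENC_TIMEOUT timer while HIGH/FIPS channels are closed immediately;
 * when encryption is (re)established the pending timer is cleared.
 */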
8170 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8171 {
8172 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8173 		return;
8174 
8175 	if (encrypt == 0x00) {
8176 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
8177 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8178 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
8179 			   chan->sec_level == BT_SECURITY_FIPS)
8180 			l2cap_chan_close(chan, ECONNREFUSED);
8181 	} else {
8182 		if (chan->sec_level == BT_SECURITY_MEDIUM)
8183 			__clear_chan_timer(chan);
8184 	}
8185 }
8186 
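/* Security (authentication/encryption) change callback from HCI.  Walk all
 * channels on the connection: resume channels that were already connected or
 * in configuration, restart or time out channels waiting in BT_CONNECT, and
 * answer deferred BT_CONNECT2 requests with success, "authorization pending"
 * or a security block depending on the status and the encryption key size.
 */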
8187 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
8188 {
8189 	struct l2cap_conn *conn = hcon->l2cap_data;
8190 	struct l2cap_chan *chan;
8191 
8192 	if (!conn)
8193 		return;
8194 
8195 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
8196 
8197 	mutex_lock(&conn->chan_lock);
8198 
8199 	list_for_each_entry(chan, &conn->chan_l, list) {
8200 		l2cap_chan_lock(chan);
8201 
8202 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
8203 		       state_to_string(chan->state));
8204 
8205 		if (chan->scid == L2CAP_CID_A2MP) {
8206 			l2cap_chan_unlock(chan);
8207 			continue;
8208 		}
8209 
8210 		if (!status && encrypt)
8211 			chan->sec_level = hcon->sec_level;
8212 
8213 		if (!__l2cap_no_conn_pending(chan)) {
8214 			l2cap_chan_unlock(chan);
8215 			continue;
8216 		}
8217 
8218 		if (!status && (chan->state == BT_CONNECTED ||
8219 				chan->state == BT_CONFIG)) {
8220 			chan->ops->resume(chan);
8221 			l2cap_check_encryption(chan, encrypt);
8222 			l2cap_chan_unlock(chan);
8223 			continue;
8224 		}
8225 
8226 		if (chan->state == BT_CONNECT) {
8227 			if (!status && l2cap_check_enc_key_size(hcon))
8228 				l2cap_start_connection(chan);
8229 			else
8230 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8231 		} else if (chan->state == BT_CONNECT2 &&
8232 			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
8233 			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
8234 			struct l2cap_conn_rsp rsp;
8235 			__u16 res, stat;
8236 
8237 			if (!status && l2cap_check_enc_key_size(hcon)) {
8238 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
8239 					res = L2CAP_CR_PEND;
8240 					stat = L2CAP_CS_AUTHOR_PEND;
8241 					chan->ops->defer(chan);
8242 				} else {
8243 					l2cap_state_change(chan, BT_CONFIG);
8244 					res = L2CAP_CR_SUCCESS;
8245 					stat = L2CAP_CS_NO_INFO;
8246 				}
8247 			} else {
8248 				l2cap_state_change(chan, BT_DISCONN);
8249 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8250 				res = L2CAP_CR_SEC_BLOCK;
8251 				stat = L2CAP_CS_NO_INFO;
8252 			}
8253 
8254 			rsp.scid   = cpu_to_le16(chan->dcid);
8255 			rsp.dcid   = cpu_to_le16(chan->scid);
8256 			rsp.result = cpu_to_le16(res);
8257 			rsp.status = cpu_to_le16(stat);
8258 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
8259 				       sizeof(rsp), &rsp);
8260 
8261 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
8262 			    res == L2CAP_CR_SUCCESS) {
8263 				char buf[128];
8264 				set_bit(CONF_REQ_SENT, &chan->conf_state);
8265 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
8266 					       L2CAP_CONF_REQ,
8267 					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
8268 					       buf);
8269 				chan->num_conf_req++;
8270 			}
8271 		}
8272 
8273 		l2cap_chan_unlock(chan);
8274 	}
8275 
8276 	mutex_unlock(&conn->chan_lock);
8277 }
8278 
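/* Reassemble incoming ACL data into complete L2CAP frames.  A start fragment
 * must carry at least the basic L2CAP header so the total frame length is
 * known; if it already holds the whole frame it is delivered directly,
 * otherwise an rx_skb of the final size is allocated and continuation
 * fragments are appended until rx_len reaches zero.  Unexpected, short or
 * oversized fragments mark the connection unreliable and are dropped.
 */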
8279 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
8280 {
8281 	struct l2cap_conn *conn = hcon->l2cap_data;
8282 	struct l2cap_hdr *hdr;
8283 	int len;
8284 
8285 	/* For AMP controller do not create l2cap conn */
8286 	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
8287 		goto drop;
8288 
8289 	if (!conn)
8290 		conn = l2cap_conn_add(hcon);
8291 
8292 	if (!conn)
8293 		goto drop;
8294 
8295 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
8296 
8297 	switch (flags) {
8298 	case ACL_START:
8299 	case ACL_START_NO_FLUSH:
8300 	case ACL_COMPLETE:
8301 		if (conn->rx_len) {
8302 			BT_ERR("Unexpected start frame (len %d)", skb->len);
8303 			kfree_skb(conn->rx_skb);
8304 			conn->rx_skb = NULL;
8305 			conn->rx_len = 0;
8306 			l2cap_conn_unreliable(conn, ECOMM);
8307 		}
8308 
8309 		/* A start fragment always begins with the Basic L2CAP header */
8310 		if (skb->len < L2CAP_HDR_SIZE) {
8311 			BT_ERR("Frame is too short (len %d)", skb->len);
8312 			l2cap_conn_unreliable(conn, ECOMM);
8313 			goto drop;
8314 		}
8315 
8316 		hdr = (struct l2cap_hdr *) skb->data;
8317 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
8318 
8319 		if (len == skb->len) {
8320 			/* Complete frame received */
8321 			l2cap_recv_frame(conn, skb);
8322 			return;
8323 		}
8324 
8325 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
8326 
8327 		if (skb->len > len) {
8328 			BT_ERR("Frame is too long (len %d, expected len %d)",
8329 			       skb->len, len);
8330 			l2cap_conn_unreliable(conn, ECOMM);
8331 			goto drop;
8332 		}
8333 
8334 		/* Allocate skb for the complete frame (with header) */
8335 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
8336 		if (!conn->rx_skb)
8337 			goto drop;
8338 
8339 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
8340 					  skb->len);
8341 		conn->rx_len = len - skb->len;
8342 		break;
8343 
8344 	case ACL_CONT:
8345 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
8346 
8347 		if (!conn->rx_len) {
8348 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
8349 			l2cap_conn_unreliable(conn, ECOMM);
8350 			goto drop;
8351 		}
8352 
8353 		if (skb->len > conn->rx_len) {
8354 			BT_ERR("Fragment is too long (len %d, expected %d)",
8355 			       skb->len, conn->rx_len);
8356 			kfree_skb(conn->rx_skb);
8357 			conn->rx_skb = NULL;
8358 			conn->rx_len = 0;
8359 			l2cap_conn_unreliable(conn, ECOMM);
8360 			goto drop;
8361 		}
8362 
8363 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
8364 					  skb->len);
8365 		conn->rx_len -= skb->len;
8366 
8367 		if (!conn->rx_len) {
8368 			/* Complete frame received. l2cap_recv_frame
8369 			 * takes ownership of the skb, so clear the
8370 			 * connection's rx_skb pointer first.
8371 			 */
8372 			struct sk_buff *rx_skb = conn->rx_skb;
8373 			conn->rx_skb = NULL;
8374 			l2cap_recv_frame(conn, rx_skb);
8375 		}
8376 		break;
8377 	}
8378 
8379 drop:
8380 	kfree_skb(skb);
8381 }
8382 
8383 static struct hci_cb l2cap_cb = {
8384 	.name		= "L2CAP",
8385 	.connect_cfm	= l2cap_connect_cfm,
8386 	.disconn_cfm	= l2cap_disconn_cfm,
8387 	.security_cfm	= l2cap_security_cfm,
8388 };
8389 
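/* debugfs: dump one line per channel on the global chan_list with the
 * endpoint addresses and types, state, PSM, CIDs, MTUs, security level and
 * mode.  The file is created by l2cap_init() as "l2cap" under bt_debugfs.
 */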
8390 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8391 {
8392 	struct l2cap_chan *c;
8393 
8394 	read_lock(&chan_list_lock);
8395 
8396 	list_for_each_entry(c, &chan_list, global_l) {
8397 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8398 			   &c->src, c->src_type, &c->dst, c->dst_type,
8399 			   c->state, __le16_to_cpu(c->psm),
8400 			   c->scid, c->dcid, c->imtu, c->omtu,
8401 			   c->sec_level, c->mode);
8402 	}
8403 
8404 	read_unlock(&chan_list_lock);
8405 
8406 	return 0;
8407 }
8408 
8409 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
8410 
8411 static struct dentry *l2cap_debugfs;
8412 
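/* Module init: bring up the L2CAP socket layer first, then hook into HCI via
 * l2cap_cb; the debugfs file is optional and skipped when bt_debugfs is not
 * available.
 */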
8413 int __init l2cap_init(void)
8414 {
8415 	int err;
8416 
8417 	err = l2cap_init_sockets();
8418 	if (err < 0)
8419 		return err;
8420 
8421 	hci_register_cb(&l2cap_cb);
8422 
8423 	if (IS_ERR_OR_NULL(bt_debugfs))
8424 		return 0;
8425 
8426 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8427 					    NULL, &l2cap_debugfs_fops);
8428 
8429 	return 0;
8430 }
8431 
8432 void l2cap_exit(void)
8433 {
8434 	debugfs_remove(l2cap_debugfs);
8435 	hci_unregister_cb(&l2cap_cb);
8436 	l2cap_cleanup_sockets();
8437 }
8438 
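/* Both ERTM/streaming and enhanced credit based flow control can be toggled
 * via module parameters (typically under /sys/module/bluetooth/parameters/);
 * the mode checks in l2cap_chan_connect() honour these switches.
 */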
8439 module_param(disable_ertm, bool, 0644);
8440 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
8441 
8442 module_param(enable_ecred, bool, 0644);
8443 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
8444