xref: /openbmc/linux/net/bluetooth/l2cap_core.c (revision f59a3ee6)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 bool enable_ecred;
49 
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
54 
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 				       u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
58 			   void *data);
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
61 
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 		     struct sk_buff_head *skbs, u8 event);
64 static void l2cap_retrans_timeout(struct work_struct *work);
65 static void l2cap_monitor_timeout(struct work_struct *work);
66 static void l2cap_ack_timeout(struct work_struct *work);
67 
68 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
69 {
70 	if (link_type == LE_LINK) {
71 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
72 			return BDADDR_LE_PUBLIC;
73 		else
74 			return BDADDR_LE_RANDOM;
75 	}
76 
77 	return BDADDR_BREDR;
78 }
79 
80 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
81 {
82 	return bdaddr_type(hcon->type, hcon->src_type);
83 }
84 
85 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
86 {
87 	return bdaddr_type(hcon->type, hcon->dst_type);
88 }
89 
90 /* ---- L2CAP channels ---- */
91 
92 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
93 						   u16 cid)
94 {
95 	struct l2cap_chan *c;
96 
97 	list_for_each_entry(c, &conn->chan_l, list) {
98 		if (c->dcid == cid)
99 			return c;
100 	}
101 	return NULL;
102 }
103 
104 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 						   u16 cid)
106 {
107 	struct l2cap_chan *c;
108 
109 	list_for_each_entry(c, &conn->chan_l, list) {
110 		if (c->scid == cid)
111 			return c;
112 	}
113 	return NULL;
114 }
115 
116 /* Find channel with given SCID.
117  * Returns a reference locked channel.
118  */
119 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
120 						 u16 cid)
121 {
122 	struct l2cap_chan *c;
123 
124 	mutex_lock(&conn->chan_lock);
125 	c = __l2cap_get_chan_by_scid(conn, cid);
126 	if (c) {
127 		/* Only lock if chan reference is not 0 */
128 		c = l2cap_chan_hold_unless_zero(c);
129 		if (c)
130 			l2cap_chan_lock(c);
131 	}
132 	mutex_unlock(&conn->chan_lock);
133 
134 	return c;
135 }
136 
137 /* Find channel with given DCID.
138  * Returns a reference locked channel.
139  */
140 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
141 						 u16 cid)
142 {
143 	struct l2cap_chan *c;
144 
145 	mutex_lock(&conn->chan_lock);
146 	c = __l2cap_get_chan_by_dcid(conn, cid);
147 	if (c) {
148 		/* Only lock if chan reference is not 0 */
149 		c = l2cap_chan_hold_unless_zero(c);
150 		if (c)
151 			l2cap_chan_lock(c);
152 	}
153 	mutex_unlock(&conn->chan_lock);
154 
155 	return c;
156 }
157 
158 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
159 						    u8 ident)
160 {
161 	struct l2cap_chan *c;
162 
163 	list_for_each_entry(c, &conn->chan_l, list) {
164 		if (c->ident == ident)
165 			return c;
166 	}
167 	return NULL;
168 }
169 
170 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
171 						  u8 ident)
172 {
173 	struct l2cap_chan *c;
174 
175 	mutex_lock(&conn->chan_lock);
176 	c = __l2cap_get_chan_by_ident(conn, ident);
177 	if (c) {
178 		/* Only lock if chan reference is not 0 */
179 		c = l2cap_chan_hold_unless_zero(c);
180 		if (c)
181 			l2cap_chan_lock(c);
182 	}
183 	mutex_unlock(&conn->chan_lock);
184 
185 	return c;
186 }
187 
188 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
189 						      u8 src_type)
190 {
191 	struct l2cap_chan *c;
192 
193 	list_for_each_entry(c, &chan_list, global_l) {
194 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
195 			continue;
196 
197 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
198 			continue;
199 
200 		if (c->sport == psm && !bacmp(&c->src, src))
201 			return c;
202 	}
203 	return NULL;
204 }
205 
206 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
207 {
208 	int err;
209 
210 	write_lock(&chan_list_lock);
211 
212 	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
213 		err = -EADDRINUSE;
214 		goto done;
215 	}
216 
217 	if (psm) {
218 		chan->psm = psm;
219 		chan->sport = psm;
220 		err = 0;
221 	} else {
222 		u16 p, start, end, incr;
223 
224 		if (chan->src_type == BDADDR_BREDR) {
225 			start = L2CAP_PSM_DYN_START;
226 			end = L2CAP_PSM_AUTO_END;
227 			incr = 2;
228 		} else {
229 			start = L2CAP_PSM_LE_DYN_START;
230 			end = L2CAP_PSM_LE_DYN_END;
231 			incr = 1;
232 		}
233 
234 		err = -EINVAL;
235 		for (p = start; p <= end; p += incr)
236 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
237 							 chan->src_type)) {
238 				chan->psm   = cpu_to_le16(p);
239 				chan->sport = cpu_to_le16(p);
240 				err = 0;
241 				break;
242 			}
243 	}
244 
245 done:
246 	write_unlock(&chan_list_lock);
247 	return err;
248 }
249 EXPORT_SYMBOL_GPL(l2cap_add_psm);
250 
251 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
252 {
253 	write_lock(&chan_list_lock);
254 
255 	/* Override the defaults (which are for conn-oriented) */
256 	chan->omtu = L2CAP_DEFAULT_MTU;
257 	chan->chan_type = L2CAP_CHAN_FIXED;
258 
259 	chan->scid = scid;
260 
261 	write_unlock(&chan_list_lock);
262 
263 	return 0;
264 }
265 
266 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
267 {
268 	u16 cid, dyn_end;
269 
270 	if (conn->hcon->type == LE_LINK)
271 		dyn_end = L2CAP_CID_LE_DYN_END;
272 	else
273 		dyn_end = L2CAP_CID_DYN_END;
274 
275 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
276 		if (!__l2cap_get_chan_by_scid(conn, cid))
277 			return cid;
278 	}
279 
280 	return 0;
281 }
282 
283 static void l2cap_state_change(struct l2cap_chan *chan, int state)
284 {
285 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
286 	       state_to_string(state));
287 
288 	chan->state = state;
289 	chan->ops->state_change(chan, state, 0);
290 }
291 
292 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
293 						int state, int err)
294 {
295 	chan->state = state;
296 	chan->ops->state_change(chan, chan->state, err);
297 }
298 
299 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
300 {
301 	chan->ops->state_change(chan, chan->state, err);
302 }
303 
304 static void __set_retrans_timer(struct l2cap_chan *chan)
305 {
306 	if (!delayed_work_pending(&chan->monitor_timer) &&
307 	    chan->retrans_timeout) {
308 		l2cap_set_timer(chan, &chan->retrans_timer,
309 				msecs_to_jiffies(chan->retrans_timeout));
310 	}
311 }
312 
313 static void __set_monitor_timer(struct l2cap_chan *chan)
314 {
315 	__clear_retrans_timer(chan);
316 	if (chan->monitor_timeout) {
317 		l2cap_set_timer(chan, &chan->monitor_timer,
318 				msecs_to_jiffies(chan->monitor_timeout));
319 	}
320 }
321 
322 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
323 					       u16 seq)
324 {
325 	struct sk_buff *skb;
326 
327 	skb_queue_walk(head, skb) {
328 		if (bt_cb(skb)->l2cap.txseq == seq)
329 			return skb;
330 	}
331 
332 	return NULL;
333 }
334 
335 /* ---- L2CAP sequence number lists ---- */
336 
337 /* For ERTM, ordered lists of sequence numbers must be tracked for
338  * SREJ requests that are received and for frames that are to be
339  * retransmitted. These seq_list functions implement a singly-linked
340  * list in an array, where membership in the list can also be checked
341  * in constant time. Items can also be added to the tail of the list
342  * and removed from the head in constant time, without further memory
343  * allocs or frees.
344  */
345 
346 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
347 {
348 	size_t alloc_size, i;
349 
350 	/* Allocated size is a power of 2 to map sequence numbers
351 	 * (which may be up to 14 bits) in to a smaller array that is
352 	 * sized for the negotiated ERTM transmit windows.
353 	 */
354 	alloc_size = roundup_pow_of_two(size);
355 
356 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
357 	if (!seq_list->list)
358 		return -ENOMEM;
359 
360 	seq_list->mask = alloc_size - 1;
361 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
363 	for (i = 0; i < alloc_size; i++)
364 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
365 
366 	return 0;
367 }
368 
369 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
370 {
371 	kfree(seq_list->list);
372 }
373 
374 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
375 					   u16 seq)
376 {
377 	/* Constant-time check for list membership */
378 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
379 }
380 
381 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
382 {
383 	u16 seq = seq_list->head;
384 	u16 mask = seq_list->mask;
385 
386 	seq_list->head = seq_list->list[seq & mask];
387 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
388 
389 	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
390 		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
391 		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
392 	}
393 
394 	return seq;
395 }
396 
397 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
398 {
399 	u16 i;
400 
401 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
402 		return;
403 
404 	for (i = 0; i <= seq_list->mask; i++)
405 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
406 
407 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
408 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
409 }
410 
411 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
412 {
413 	u16 mask = seq_list->mask;
414 
415 	/* All appends happen in constant time */
416 
417 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
418 		return;
419 
420 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
421 		seq_list->head = seq;
422 	else
423 		seq_list->list[seq_list->tail & mask] = seq;
424 
425 	seq_list->tail = seq;
426 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
427 }
428 
429 static void l2cap_chan_timeout(struct work_struct *work)
430 {
431 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
432 					       chan_timer.work);
433 	struct l2cap_conn *conn = chan->conn;
434 	int reason;
435 
436 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
437 
438 	mutex_lock(&conn->chan_lock);
439 	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
440 	 * this work. No need to call l2cap_chan_hold(chan) here again.
441 	 */
442 	l2cap_chan_lock(chan);
443 
444 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
445 		reason = ECONNREFUSED;
446 	else if (chan->state == BT_CONNECT &&
447 		 chan->sec_level != BT_SECURITY_SDP)
448 		reason = ECONNREFUSED;
449 	else
450 		reason = ETIMEDOUT;
451 
452 	l2cap_chan_close(chan, reason);
453 
454 	chan->ops->close(chan);
455 
456 	l2cap_chan_unlock(chan);
457 	l2cap_chan_put(chan);
458 
459 	mutex_unlock(&conn->chan_lock);
460 }
461 
462 struct l2cap_chan *l2cap_chan_create(void)
463 {
464 	struct l2cap_chan *chan;
465 
466 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
467 	if (!chan)
468 		return NULL;
469 
470 	skb_queue_head_init(&chan->tx_q);
471 	skb_queue_head_init(&chan->srej_q);
472 	mutex_init(&chan->lock);
473 
474 	/* Set default lock nesting level */
475 	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
476 
477 	write_lock(&chan_list_lock);
478 	list_add(&chan->global_l, &chan_list);
479 	write_unlock(&chan_list_lock);
480 
481 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
482 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
483 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
484 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
485 
486 	chan->state = BT_OPEN;
487 
488 	kref_init(&chan->kref);
489 
490 	/* This flag is cleared in l2cap_chan_ready() */
491 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
492 
493 	BT_DBG("chan %p", chan);
494 
495 	return chan;
496 }
497 EXPORT_SYMBOL_GPL(l2cap_chan_create);
498 
499 static void l2cap_chan_destroy(struct kref *kref)
500 {
501 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
502 
503 	BT_DBG("chan %p", chan);
504 
505 	write_lock(&chan_list_lock);
506 	list_del(&chan->global_l);
507 	write_unlock(&chan_list_lock);
508 
509 	kfree(chan);
510 }
511 
512 void l2cap_chan_hold(struct l2cap_chan *c)
513 {
514 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
515 
516 	kref_get(&c->kref);
517 }
518 
519 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
520 {
521 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
522 
523 	if (!kref_get_unless_zero(&c->kref))
524 		return NULL;
525 
526 	return c;
527 }
528 
529 void l2cap_chan_put(struct l2cap_chan *c)
530 {
531 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
532 
533 	kref_put(&c->kref, l2cap_chan_destroy);
534 }
535 EXPORT_SYMBOL_GPL(l2cap_chan_put);
536 
537 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
538 {
539 	chan->fcs  = L2CAP_FCS_CRC16;
540 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
541 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
542 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
543 	chan->remote_max_tx = chan->max_tx;
544 	chan->remote_tx_win = chan->tx_win;
545 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
546 	chan->sec_level = BT_SECURITY_LOW;
547 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
548 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
549 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
550 
551 	chan->conf_state = 0;
552 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
553 
554 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
555 }
556 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
557 
558 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
559 {
560 	chan->sdu = NULL;
561 	chan->sdu_last_frag = NULL;
562 	chan->sdu_len = 0;
563 	chan->tx_credits = tx_credits;
564 	/* Derive MPS from connection MTU to stop HCI fragmentation */
565 	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
566 	/* Give enough credits for a full packet */
567 	chan->rx_credits = (chan->imtu / chan->mps) + 1;
568 
569 	skb_queue_head_init(&chan->tx_q);
570 }
571 
572 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
573 {
574 	l2cap_le_flowctl_init(chan, tx_credits);
575 
576 	/* L2CAP implementations shall support a minimum MPS of 64 octets */
577 	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
578 		chan->mps = L2CAP_ECRED_MIN_MPS;
579 		chan->rx_credits = (chan->imtu / chan->mps) + 1;
580 	}
581 }
582 
583 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
584 {
585 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
586 	       __le16_to_cpu(chan->psm), chan->dcid);
587 
588 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
589 
590 	chan->conn = conn;
591 
592 	switch (chan->chan_type) {
593 	case L2CAP_CHAN_CONN_ORIENTED:
594 		/* Alloc CID for connection-oriented socket */
595 		chan->scid = l2cap_alloc_cid(conn);
596 		if (conn->hcon->type == ACL_LINK)
597 			chan->omtu = L2CAP_DEFAULT_MTU;
598 		break;
599 
600 	case L2CAP_CHAN_CONN_LESS:
601 		/* Connectionless socket */
602 		chan->scid = L2CAP_CID_CONN_LESS;
603 		chan->dcid = L2CAP_CID_CONN_LESS;
604 		chan->omtu = L2CAP_DEFAULT_MTU;
605 		break;
606 
607 	case L2CAP_CHAN_FIXED:
608 		/* Caller will set CID and CID specific MTU values */
609 		break;
610 
611 	default:
612 		/* Raw socket can send/recv signalling messages only */
613 		chan->scid = L2CAP_CID_SIGNALING;
614 		chan->dcid = L2CAP_CID_SIGNALING;
615 		chan->omtu = L2CAP_DEFAULT_MTU;
616 	}
617 
618 	chan->local_id		= L2CAP_BESTEFFORT_ID;
619 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
620 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
621 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
622 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
623 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
624 
625 	l2cap_chan_hold(chan);
626 
627 	/* Only keep a reference for fixed channels if they requested it */
628 	if (chan->chan_type != L2CAP_CHAN_FIXED ||
629 	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
630 		hci_conn_hold(conn->hcon);
631 
632 	list_add(&chan->list, &conn->chan_l);
633 }
634 
635 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
636 {
637 	mutex_lock(&conn->chan_lock);
638 	__l2cap_chan_add(conn, chan);
639 	mutex_unlock(&conn->chan_lock);
640 }
641 
642 void l2cap_chan_del(struct l2cap_chan *chan, int err)
643 {
644 	struct l2cap_conn *conn = chan->conn;
645 
646 	__clear_chan_timer(chan);
647 
648 	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
649 	       state_to_string(chan->state));
650 
651 	chan->ops->teardown(chan, err);
652 
653 	if (conn) {
654 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
655 		/* Delete from channel list */
656 		list_del(&chan->list);
657 
658 		l2cap_chan_put(chan);
659 
660 		chan->conn = NULL;
661 
662 		/* Reference was only held for non-fixed channels or
663 		 * fixed channels that explicitly requested it using the
664 		 * FLAG_HOLD_HCI_CONN flag.
665 		 */
666 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
667 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
668 			hci_conn_drop(conn->hcon);
669 
670 		if (mgr && mgr->bredr_chan == chan)
671 			mgr->bredr_chan = NULL;
672 	}
673 
674 	if (chan->hs_hchan) {
675 		struct hci_chan *hs_hchan = chan->hs_hchan;
676 
677 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
678 		amp_disconnect_logical_link(hs_hchan);
679 	}
680 
681 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
682 		return;
683 
684 	switch (chan->mode) {
685 	case L2CAP_MODE_BASIC:
686 		break;
687 
688 	case L2CAP_MODE_LE_FLOWCTL:
689 	case L2CAP_MODE_EXT_FLOWCTL:
690 		skb_queue_purge(&chan->tx_q);
691 		break;
692 
693 	case L2CAP_MODE_ERTM:
694 		__clear_retrans_timer(chan);
695 		__clear_monitor_timer(chan);
696 		__clear_ack_timer(chan);
697 
698 		skb_queue_purge(&chan->srej_q);
699 
700 		l2cap_seq_list_free(&chan->srej_list);
701 		l2cap_seq_list_free(&chan->retrans_list);
702 		fallthrough;
703 
704 	case L2CAP_MODE_STREAMING:
705 		skb_queue_purge(&chan->tx_q);
706 		break;
707 	}
708 }
709 EXPORT_SYMBOL_GPL(l2cap_chan_del);
710 
711 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
712 			      void *data)
713 {
714 	struct l2cap_chan *chan;
715 
716 	list_for_each_entry(chan, &conn->chan_l, list) {
717 		func(chan, data);
718 	}
719 }
720 
721 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
722 		     void *data)
723 {
724 	if (!conn)
725 		return;
726 
727 	mutex_lock(&conn->chan_lock);
728 	__l2cap_chan_list(conn, func, data);
729 	mutex_unlock(&conn->chan_lock);
730 }
731 
732 EXPORT_SYMBOL_GPL(l2cap_chan_list);
733 
734 static void l2cap_conn_update_id_addr(struct work_struct *work)
735 {
736 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
737 					       id_addr_update_work);
738 	struct hci_conn *hcon = conn->hcon;
739 	struct l2cap_chan *chan;
740 
741 	mutex_lock(&conn->chan_lock);
742 
743 	list_for_each_entry(chan, &conn->chan_l, list) {
744 		l2cap_chan_lock(chan);
745 		bacpy(&chan->dst, &hcon->dst);
746 		chan->dst_type = bdaddr_dst_type(hcon);
747 		l2cap_chan_unlock(chan);
748 	}
749 
750 	mutex_unlock(&conn->chan_lock);
751 }
752 
753 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
754 {
755 	struct l2cap_conn *conn = chan->conn;
756 	struct l2cap_le_conn_rsp rsp;
757 	u16 result;
758 
759 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
760 		result = L2CAP_CR_LE_AUTHORIZATION;
761 	else
762 		result = L2CAP_CR_LE_BAD_PSM;
763 
764 	l2cap_state_change(chan, BT_DISCONN);
765 
766 	rsp.dcid    = cpu_to_le16(chan->scid);
767 	rsp.mtu     = cpu_to_le16(chan->imtu);
768 	rsp.mps     = cpu_to_le16(chan->mps);
769 	rsp.credits = cpu_to_le16(chan->rx_credits);
770 	rsp.result  = cpu_to_le16(result);
771 
772 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
773 		       &rsp);
774 }
775 
776 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
777 {
778 	struct l2cap_conn *conn = chan->conn;
779 	struct l2cap_ecred_conn_rsp rsp;
780 	u16 result;
781 
782 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
783 		result = L2CAP_CR_LE_AUTHORIZATION;
784 	else
785 		result = L2CAP_CR_LE_BAD_PSM;
786 
787 	l2cap_state_change(chan, BT_DISCONN);
788 
789 	memset(&rsp, 0, sizeof(rsp));
790 
791 	rsp.result  = cpu_to_le16(result);
792 
793 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
794 		       &rsp);
795 }
796 
797 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
798 {
799 	struct l2cap_conn *conn = chan->conn;
800 	struct l2cap_conn_rsp rsp;
801 	u16 result;
802 
803 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
804 		result = L2CAP_CR_SEC_BLOCK;
805 	else
806 		result = L2CAP_CR_BAD_PSM;
807 
808 	l2cap_state_change(chan, BT_DISCONN);
809 
810 	rsp.scid   = cpu_to_le16(chan->dcid);
811 	rsp.dcid   = cpu_to_le16(chan->scid);
812 	rsp.result = cpu_to_le16(result);
813 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
814 
815 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
816 }
817 
818 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
819 {
820 	struct l2cap_conn *conn = chan->conn;
821 
822 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
823 
824 	switch (chan->state) {
825 	case BT_LISTEN:
826 		chan->ops->teardown(chan, 0);
827 		break;
828 
829 	case BT_CONNECTED:
830 	case BT_CONFIG:
831 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
832 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
833 			l2cap_send_disconn_req(chan, reason);
834 		} else
835 			l2cap_chan_del(chan, reason);
836 		break;
837 
838 	case BT_CONNECT2:
839 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
840 			if (conn->hcon->type == ACL_LINK)
841 				l2cap_chan_connect_reject(chan);
842 			else if (conn->hcon->type == LE_LINK) {
843 				switch (chan->mode) {
844 				case L2CAP_MODE_LE_FLOWCTL:
845 					l2cap_chan_le_connect_reject(chan);
846 					break;
847 				case L2CAP_MODE_EXT_FLOWCTL:
848 					l2cap_chan_ecred_connect_reject(chan);
849 					break;
850 				}
851 			}
852 		}
853 
854 		l2cap_chan_del(chan, reason);
855 		break;
856 
857 	case BT_CONNECT:
858 	case BT_DISCONN:
859 		l2cap_chan_del(chan, reason);
860 		break;
861 
862 	default:
863 		chan->ops->teardown(chan, 0);
864 		break;
865 	}
866 }
867 EXPORT_SYMBOL(l2cap_chan_close);
868 
869 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
870 {
871 	switch (chan->chan_type) {
872 	case L2CAP_CHAN_RAW:
873 		switch (chan->sec_level) {
874 		case BT_SECURITY_HIGH:
875 		case BT_SECURITY_FIPS:
876 			return HCI_AT_DEDICATED_BONDING_MITM;
877 		case BT_SECURITY_MEDIUM:
878 			return HCI_AT_DEDICATED_BONDING;
879 		default:
880 			return HCI_AT_NO_BONDING;
881 		}
882 		break;
883 	case L2CAP_CHAN_CONN_LESS:
884 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
885 			if (chan->sec_level == BT_SECURITY_LOW)
886 				chan->sec_level = BT_SECURITY_SDP;
887 		}
888 		if (chan->sec_level == BT_SECURITY_HIGH ||
889 		    chan->sec_level == BT_SECURITY_FIPS)
890 			return HCI_AT_NO_BONDING_MITM;
891 		else
892 			return HCI_AT_NO_BONDING;
893 		break;
894 	case L2CAP_CHAN_CONN_ORIENTED:
895 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
896 			if (chan->sec_level == BT_SECURITY_LOW)
897 				chan->sec_level = BT_SECURITY_SDP;
898 
899 			if (chan->sec_level == BT_SECURITY_HIGH ||
900 			    chan->sec_level == BT_SECURITY_FIPS)
901 				return HCI_AT_NO_BONDING_MITM;
902 			else
903 				return HCI_AT_NO_BONDING;
904 		}
905 		fallthrough;
906 
907 	default:
908 		switch (chan->sec_level) {
909 		case BT_SECURITY_HIGH:
910 		case BT_SECURITY_FIPS:
911 			return HCI_AT_GENERAL_BONDING_MITM;
912 		case BT_SECURITY_MEDIUM:
913 			return HCI_AT_GENERAL_BONDING;
914 		default:
915 			return HCI_AT_NO_BONDING;
916 		}
917 		break;
918 	}
919 }
920 
921 /* Service level security */
922 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
923 {
924 	struct l2cap_conn *conn = chan->conn;
925 	__u8 auth_type;
926 
927 	if (conn->hcon->type == LE_LINK)
928 		return smp_conn_security(conn->hcon, chan->sec_level);
929 
930 	auth_type = l2cap_get_auth_type(chan);
931 
932 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
933 				 initiator);
934 }
935 
936 static u8 l2cap_get_ident(struct l2cap_conn *conn)
937 {
938 	u8 id;
939 
940 	/* Get next available identificator.
941 	 *    1 - 128 are used by kernel.
942 	 *  129 - 199 are reserved.
943 	 *  200 - 254 are used by utilities like l2ping, etc.
944 	 */
945 
946 	mutex_lock(&conn->ident_lock);
947 
948 	if (++conn->tx_ident > 128)
949 		conn->tx_ident = 1;
950 
951 	id = conn->tx_ident;
952 
953 	mutex_unlock(&conn->ident_lock);
954 
955 	return id;
956 }
957 
958 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
959 			   void *data)
960 {
961 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
962 	u8 flags;
963 
964 	BT_DBG("code 0x%2.2x", code);
965 
966 	if (!skb)
967 		return;
968 
969 	/* Use NO_FLUSH if supported or we have an LE link (which does
970 	 * not support auto-flushing packets) */
971 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
972 	    conn->hcon->type == LE_LINK)
973 		flags = ACL_START_NO_FLUSH;
974 	else
975 		flags = ACL_START;
976 
977 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
978 	skb->priority = HCI_PRIO_MAX;
979 
980 	hci_send_acl(conn->hchan, skb, flags);
981 }
982 
983 static bool __chan_is_moving(struct l2cap_chan *chan)
984 {
985 	return chan->move_state != L2CAP_MOVE_STABLE &&
986 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
987 }
988 
989 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
990 {
991 	struct hci_conn *hcon = chan->conn->hcon;
992 	u16 flags;
993 
994 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
995 	       skb->priority);
996 
997 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
998 		if (chan->hs_hchan)
999 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
1000 		else
1001 			kfree_skb(skb);
1002 
1003 		return;
1004 	}
1005 
1006 	/* Use NO_FLUSH for LE links (where this is the only option) or
1007 	 * if the BR/EDR link supports it and flushing has not been
1008 	 * explicitly requested (through FLAG_FLUSHABLE).
1009 	 */
1010 	if (hcon->type == LE_LINK ||
1011 	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
1012 	     lmp_no_flush_capable(hcon->hdev)))
1013 		flags = ACL_START_NO_FLUSH;
1014 	else
1015 		flags = ACL_START;
1016 
1017 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
1018 	hci_send_acl(chan->conn->hchan, skb, flags);
1019 }
1020 
1021 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
1022 {
1023 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
1024 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
1025 
1026 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
1027 		/* S-Frame */
1028 		control->sframe = 1;
1029 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1030 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1031 
1032 		control->sar = 0;
1033 		control->txseq = 0;
1034 	} else {
1035 		/* I-Frame */
1036 		control->sframe = 0;
1037 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1038 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1039 
1040 		control->poll = 0;
1041 		control->super = 0;
1042 	}
1043 }
1044 
1045 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1046 {
1047 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1048 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1049 
1050 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1051 		/* S-Frame */
1052 		control->sframe = 1;
1053 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1054 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1055 
1056 		control->sar = 0;
1057 		control->txseq = 0;
1058 	} else {
1059 		/* I-Frame */
1060 		control->sframe = 0;
1061 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1062 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1063 
1064 		control->poll = 0;
1065 		control->super = 0;
1066 	}
1067 }
1068 
1069 static inline void __unpack_control(struct l2cap_chan *chan,
1070 				    struct sk_buff *skb)
1071 {
1072 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1073 		__unpack_extended_control(get_unaligned_le32(skb->data),
1074 					  &bt_cb(skb)->l2cap);
1075 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1076 	} else {
1077 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1078 					  &bt_cb(skb)->l2cap);
1079 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1080 	}
1081 }
1082 
1083 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1084 {
1085 	u32 packed;
1086 
1087 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1088 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1089 
1090 	if (control->sframe) {
1091 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1092 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1093 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1094 	} else {
1095 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1096 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1097 	}
1098 
1099 	return packed;
1100 }
1101 
1102 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1103 {
1104 	u16 packed;
1105 
1106 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1107 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1108 
1109 	if (control->sframe) {
1110 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1111 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1112 		packed |= L2CAP_CTRL_FRAME_TYPE;
1113 	} else {
1114 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1115 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1116 	}
1117 
1118 	return packed;
1119 }
1120 
1121 static inline void __pack_control(struct l2cap_chan *chan,
1122 				  struct l2cap_ctrl *control,
1123 				  struct sk_buff *skb)
1124 {
1125 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1126 		put_unaligned_le32(__pack_extended_control(control),
1127 				   skb->data + L2CAP_HDR_SIZE);
1128 	} else {
1129 		put_unaligned_le16(__pack_enhanced_control(control),
1130 				   skb->data + L2CAP_HDR_SIZE);
1131 	}
1132 }
1133 
1134 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1135 {
1136 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1137 		return L2CAP_EXT_HDR_SIZE;
1138 	else
1139 		return L2CAP_ENH_HDR_SIZE;
1140 }
1141 
/* Allocate and build a complete S-frame PDU for @chan: basic header,
 * pre-packed @control field (16- or 32-bit per channel flags) and an
 * optional CRC16 FCS. Returns the skb, or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	/* S-frames carry no payload: length is control field (+ FCS) only */
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS is computed over everything written so far */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1174 
/* Build and transmit one S-frame described by @control. Has ERTM
 * bookkeeping side effects: consumes a pending F-bit (CONN_SEND_FBIT),
 * tracks RNR state, and for non-SREJ frames records the reqseq as the
 * latest acknowledgment and cancels the ack timer.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* Hold off while a channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	/* A queued F-bit is piggybacked on the next non-poll frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		/* This frame acknowledges everything up to reqseq */
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1215 
1216 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1217 {
1218 	struct l2cap_ctrl control;
1219 
1220 	BT_DBG("chan %p, poll %d", chan, poll);
1221 
1222 	memset(&control, 0, sizeof(control));
1223 	control.sframe = 1;
1224 	control.poll = poll;
1225 
1226 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1227 		control.super = L2CAP_SUPER_RNR;
1228 	else
1229 		control.super = L2CAP_SUPER_RR;
1230 
1231 	control.reqseq = chan->buffer_seq;
1232 	l2cap_send_sframe(chan, &control);
1233 }
1234 
1235 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1236 {
1237 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1238 		return true;
1239 
1240 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1241 }
1242 
/* Decide whether connection setup for @chan should go via AMP
 * discovery: both sides must advertise the A2MP fixed channel, some
 * non-BR/EDR (AMP) controller must be up locally, and the channel
 * policy must prefer AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return false;

	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
		return false;

	/* Scan registered controllers for any powered-up AMP */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1270 
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* EFS option validation is not implemented yet; accept the
	 * parameters as proposed.
	 */
	return true;
}
1276 
1277 void l2cap_send_conn_req(struct l2cap_chan *chan)
1278 {
1279 	struct l2cap_conn *conn = chan->conn;
1280 	struct l2cap_conn_req req;
1281 
1282 	req.scid = cpu_to_le16(chan->scid);
1283 	req.psm  = chan->psm;
1284 
1285 	chan->ident = l2cap_get_ident(conn);
1286 
1287 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1288 
1289 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1290 }
1291 
1292 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1293 {
1294 	struct l2cap_create_chan_req req;
1295 	req.scid = cpu_to_le16(chan->scid);
1296 	req.psm  = chan->psm;
1297 	req.amp_id = amp_id;
1298 
1299 	chan->ident = l2cap_get_ident(chan->conn);
1300 
1301 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1302 		       sizeof(req), &req);
1303 }
1304 
/* Prepare an ERTM channel for a channel move: stop all ERTM timers,
 * flag already-transmitted frames for retransmission, reset the
 * reject/SREJ state and park the TX/RX state machines for the move.
 * No-op for non-ERTM channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Reset the retry count of every frame that has been sent at
	 * least once (retries != 0); stop at the first never-sent frame.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->l2cap.retries)
			bt_cb(skb)->l2cap.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Treat the remote as busy so nothing is transmitted until the
	 * move completes (presumably cleared by the move logic — see
	 * l2cap_move_done()).
	 */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1339 
1340 static void l2cap_move_done(struct l2cap_chan *chan)
1341 {
1342 	u8 move_role = chan->move_role;
1343 	BT_DBG("chan %p", chan);
1344 
1345 	chan->move_state = L2CAP_MOVE_STABLE;
1346 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1347 
1348 	if (chan->mode != L2CAP_MODE_ERTM)
1349 		return;
1350 
1351 	switch (move_role) {
1352 	case L2CAP_MOVE_ROLE_INITIATOR:
1353 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1354 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1355 		break;
1356 	case L2CAP_MOVE_ROLE_RESPONDER:
1357 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1358 		break;
1359 	}
1360 }
1361 
/* Transition @chan to BT_CONNECTED and notify the upper layer.
 * Idempotent: does nothing if the channel is already connected.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* No credits granted by the peer yet: ask the upper
		 * layer to hold off sending until credits arrive.
		 */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1387 
/* Send an LE Credit Based Connection Request for @chan. Only sent
 * once per channel (guarded by FLAG_LE_CONN_REQ_SENT); falls back to
 * the link MTU when the channel has no configured incoming MTU.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	/* Initialize MPS and initial RX credits before advertising them */
	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1413 
/* Scratch state for building an Enhanced Credit Based connect request
 * that aggregates several channels into one PDU.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req req;
		__le16 scid[5];	/* up to 5 source CIDs in one request */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel initiating the request */
	struct pid *pid;		/* owner PID used to match deferred chans */
	int count;			/* number of scid[] slots filled */
};
1423 
/* __l2cap_chan_list() callback used by l2cap_ecred_connect(): fold
 * other deferred channels with the same owner PID and PSM into the
 * ECRED connect request currently being built in @data.
 *
 * NOTE(review): conn->count is not bounded here against the 5-entry
 * scid[] array in l2cap_ecred_conn_data — verify that callers cannot
 * defer more than 5 matching channels.
 */
static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_conn_data *conn = data;
	struct pid *pid;

	/* Skip the initiating channel itself */
	if (chan == conn->chan)
		return;

	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only add deferred channels with the same PID/PSM */
	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	/* Set the same ident so we can match on the rsp */
	chan->ident = conn->chan->ident;

	/* Include all channels deferred */
	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);

	conn->count++;
}
1455 
/* Send an Enhanced Credit Based Connection Request for @chan,
 * batching in any deferred channels that share the same PID/PSM
 * (collected via l2cap_ecred_defer_connect()). Only sent once per
 * channel, and never for channels still in deferred setup.
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm     = chan->psm;
	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.req.mps     = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0]     = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	/* Pull matching deferred channels into this same request */
	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	/* PDU length depends on how many SCIDs were collected */
	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1488 
1489 static void l2cap_le_start(struct l2cap_chan *chan)
1490 {
1491 	struct l2cap_conn *conn = chan->conn;
1492 
1493 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1494 		return;
1495 
1496 	if (!chan->psm) {
1497 		l2cap_chan_ready(chan);
1498 		return;
1499 	}
1500 
1501 	if (chan->state == BT_CONNECT) {
1502 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1503 			l2cap_ecred_connect(chan);
1504 		else
1505 			l2cap_le_connect(chan);
1506 	}
1507 }
1508 
1509 static void l2cap_start_connection(struct l2cap_chan *chan)
1510 {
1511 	if (__amp_capable(chan)) {
1512 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1513 		a2mp_discover_amp(chan);
1514 	} else if (chan->conn->hcon->type == LE_LINK) {
1515 		l2cap_le_start(chan);
1516 	} else {
1517 		l2cap_send_conn_req(chan);
1518 	}
1519 }
1520 
/* Start the information request procedure (feature mask query) on
 * @conn. Sent at most once per connection; a response timeout is
 * armed so channel setup can proceed even if the peer never answers
 * (see l2cap_info_timeout()).
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1538 
1539 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1540 {
1541 	/* The minimum encryption key size needs to be enforced by the
1542 	 * host stack before establishing any L2CAP connections. The
1543 	 * specification in theory allows a minimum of 1, but to align
1544 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1545 	 *
1546 	 * This check might also be called for unencrypted connections
1547 	 * that have no key size requirements. Ensure that the link is
1548 	 * actually encrypted before enforcing a key size.
1549 	 */
1550 	int min_key_size = hcon->hdev->min_enc_key_size;
1551 
1552 	/* On FIPS security level, key size must be 16 bytes */
1553 	if (hcon->sec_level == BT_SECURITY_FIPS)
1554 		min_key_size = 16;
1555 
1556 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1557 		hcon->enc_key_size >= min_key_size);
1558 }
1559 
/* Drive channel establishment for @chan on its connection. On LE this
 * defers to l2cap_le_start(); on BR/EDR the info req/rsp procedure
 * must finish first, then security and the encryption key size are
 * checked before actually connecting.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* Start (or wait for) the feature-mask exchange; connection
	 * setup resumes once it completes or times out.
	 */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	/* Weak encryption key: arm the disconnect timer instead of
	 * proceeding with the connection.
	 */
	if (l2cap_check_enc_key_size(conn->hcon))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1586 
1587 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1588 {
1589 	u32 local_feat_mask = l2cap_feat_mask;
1590 	if (!disable_ertm)
1591 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1592 
1593 	switch (mode) {
1594 	case L2CAP_MODE_ERTM:
1595 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1596 	case L2CAP_MODE_STREAMING:
1597 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1598 	default:
1599 		return 0x00;
1600 	}
1601 }
1602 
/* Initiate disconnection of @chan with error @err: stop ERTM timers,
 * send a Disconnection Request and move the channel to BT_DISCONN.
 * A2MP channels have no signaling disconnect — only the state change
 * is performed for them.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1629 
1630 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its connection state
 * machine: connection-less channels become ready, BT_CONNECT channels
 * (re)issue connect requests once security permits, and BT_CONNECT2
 * channels answer the peer's pending Connection Request. Called with
 * no locks held; takes conn->chan_lock and each channel lock in turn.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close if the channel insists on a mode neither
			 * side's feature masks allow.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				/* Defer the accept to the upper layer when
				 * requested, otherwise move to BT_CONFIG.
				 */
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Configuration starts only on success and only once */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1710 
/* Post-connection work specific to LE links: kick off pending
 * security for outgoing pairing and, as peripheral, request a
 * connection parameter update if the current interval is out of the
 * configured range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1743 
/* Called when the underlying link is up: start the BR/EDR info
 * exchange, advance each existing channel (LE start, ready, or
 * connect), run LE-specific post-connect work, and release any RX
 * frames that were queued while the connection was being set up.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels are managed separately */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connection-less channels only need the feature
			 * exchange to be over before becoming ready.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process frames received before the connection was ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1784 
1785 /* Notify sockets that we cannot guaranty reliability anymore */
1786 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1787 {
1788 	struct l2cap_chan *chan;
1789 
1790 	BT_DBG("conn %p", conn);
1791 
1792 	mutex_lock(&conn->chan_lock);
1793 
1794 	list_for_each_entry(chan, &conn->chan_l, list) {
1795 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1796 			l2cap_chan_set_err(chan, err);
1797 	}
1798 
1799 	mutex_unlock(&conn->chan_lock);
1800 }
1801 
/* Info req/rsp timed out: mark the feature exchange as done anyway so
 * that channel establishment can proceed (see l2cap_conn_start()).
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1812 
1813 /*
1814  * l2cap_user
1815  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1816  * callback is called during registration. The ->remove callback is called
1817  * during unregistration.
1818  * An l2cap_user object can either be explicitly unregistered or when the
1819  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1820  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1821  * External modules must own a reference to the l2cap_conn object if they intend
1822  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1823  * any time if they don't.
1824  */
1825 
/* Register @user on @conn. Returns 0 on success, -EINVAL if the user
 * is already registered, -ENODEV if the connection is already being
 * torn down, or the error from the user's ->probe callback.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* A non-empty list node means the user is already registered */
	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1863 
1864 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1865 {
1866 	struct hci_dev *hdev = conn->hcon->hdev;
1867 
1868 	hci_dev_lock(hdev);
1869 
1870 	if (list_empty(&user->list))
1871 		goto out_unlock;
1872 
1873 	list_del_init(&user->list);
1874 	user->remove(conn, user);
1875 
1876 out_unlock:
1877 	hci_dev_unlock(hdev);
1878 }
1879 EXPORT_SYMBOL(l2cap_unregister_user);
1880 
1881 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1882 {
1883 	struct l2cap_user *user;
1884 
1885 	while (!list_empty(&conn->users)) {
1886 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1887 		list_del_init(&user->list);
1888 		user->remove(conn, user);
1889 	}
1890 }
1891 
/* Tear down the L2CAP state of @hcon with error @err: cancel pending
 * work, unregister users, close every channel, release the HCI
 * channel and drop the l2cap_conn reference. Relies on the parent
 * hci_conn being locked (see the comment in l2cap_register_user()).
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so it survives l2cap_chan_del() until
		 * the close callback has run.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* Clearing hchan marks the conn as unregistered for
	 * l2cap_register_user().
	 */
	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1947 
/* kref release callback: drop the hci_conn reference held by the
 * l2cap_conn and free it. Invoked from l2cap_conn_put() on last put.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1955 
/* Take a reference on @conn; returns @conn for call-chaining. */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1962 
/* Drop a reference on @conn; frees it via l2cap_conn_free() on the
 * final put.
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1968 
1969 /* ---- Socket interface ---- */
1970 
1971 /* Find socket with psm and source / destination bdaddr.
1972  * Returns closest match.
1973  */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		/* state == 0 means "any state" */
		if (state && c->state != state)
			continue;

		/* The channel's source address type must match the link */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Skip channels already being freed */
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	/* Fall back to the best wildcard match, if it is still alive */
	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
2024 
2025 static void l2cap_monitor_timeout(struct work_struct *work)
2026 {
2027 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2028 					       monitor_timer.work);
2029 
2030 	BT_DBG("chan %p", chan);
2031 
2032 	l2cap_chan_lock(chan);
2033 
2034 	if (!chan->conn) {
2035 		l2cap_chan_unlock(chan);
2036 		l2cap_chan_put(chan);
2037 		return;
2038 	}
2039 
2040 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2041 
2042 	l2cap_chan_unlock(chan);
2043 	l2cap_chan_put(chan);
2044 }
2045 
2046 static void l2cap_retrans_timeout(struct work_struct *work)
2047 {
2048 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2049 					       retrans_timer.work);
2050 
2051 	BT_DBG("chan %p", chan);
2052 
2053 	l2cap_chan_lock(chan);
2054 
2055 	if (!chan->conn) {
2056 		l2cap_chan_unlock(chan);
2057 		l2cap_chan_put(chan);
2058 		return;
2059 	}
2060 
2061 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2062 	l2cap_chan_unlock(chan);
2063 	l2cap_chan_put(chan);
2064 }
2065 
/* Transmit @skbs in streaming mode: append them to tx_q, then number,
 * pack and send every queued frame immediately. Streaming mode keeps
 * no retransmission state — frames leave the queue as they are sent.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Hold off while a channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming frames carry no acknowledgment */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
2104 
/* Transmit new I-frames from tx_send_head while the remote TX window
 * has room and the TX state machine allows it. Frames remain on tx_q
 * for possible retransmission; a clone is what actually goes out.
 * Returns the number of frames sent, 0 when blocked (remote busy or
 * channel moving), or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggyback a pending F-bit on this frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Sending an I-frame also acknowledges up to buffer_seq */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; NULL once the queue is drained */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2174 
/* Retransmit every sequence number queued on chan->retrans_list.
 * Enforces chan->max_tx, disconnecting the channel when the retry
 * limit for a frame is exceeded. The control field and FCS of each
 * frame are rewritten in place before sending (on a writable copy if
 * the skb is shared).
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		/* max_tx == 0 means retry forever */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the acknowledgment and F-bit for this attempt */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2255 
/* Retransmit a single I-frame: queue the sequence number requested by
 * the peer (the reqseq of a received SREJ) and run the resend engine.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2264 
/* Retransmit all unacked I-frames starting at control->reqseq.  The
 * retransmission list is rebuilt from the tx queue and then drained by
 * l2cap_ertm_resend().
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll (P=1) must be answered with the F-bit set on a
	 * subsequent frame.
	 */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* This request supersedes any sequence numbers already queued */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame to retransmit: the requested seq, or
		 * tx_send_head if that seq is not in the queue before it.
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything from there up to (not including)
		 * tx_send_head; frames at or past tx_send_head have never
		 * been sent and need no retransmission.
		 */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2298 
/* Acknowledge received I-frames.  When the local side is busy an RNR
 * S-frame is sent immediately.  Otherwise pending I-frames are sent
 * first (they carry the ack for free); an explicit RR is only emitted
 * once 3/4 of the ack window is outstanding, and anything less just
 * (re)arms the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Tell the peer to stop sending: RNR with the current ack */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;	/* threshold = 3 * ack_win */
		threshold >>= 2;		/* threshold = 3/4 * ack_win */

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Below threshold: defer the ack in the hope of
		 * piggy-backing it on outgoing data.
		 */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2348 
/* Copy 'len' bytes of user data from msg into skb.  The first 'count'
 * bytes go into the linear part of skb (after any headers the caller
 * already wrote); the remainder is split into conn->mtu sized
 * continuation fragments chained on skb's frag_list.
 *
 * Returns the number of bytes copied or a negative errno.  On error
 * the caller frees skb, which also releases any fragments already
 * attached to it.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
				   &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* The parent skb's accounting must cover fragment data */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2392 
/* Build a connectionless-channel PDU: basic L2CAP header followed by
 * the 2-byte PSM, then the user data from msg.  Returns the skb or an
 * ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Linear part carries at most one link-MTU worth of data; the
	 * rest goes into frag_list fragments.
	 */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* Payload length includes the PSM that follows the header */
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2424 
/* Build a basic-mode PDU: plain L2CAP header followed by the user data
 * from msg.  Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	/* Linear part holds at most one link-MTU worth of payload */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2454 
/* Build one ERTM/streaming I-frame.  Room is budgeted for the enhanced
 * or extended control field, the optional SDU length field (sdulen != 0
 * marks the first fragment of a segmented SDU) and, when CRC16 is in
 * use, the trailing FCS.  The control field is written as zero here and
 * populated at transmit time; the FCS is appended at send time as well.
 * Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Overhead: basic header + control field size for this channel */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* Length covers control field, optional SDU length, data and FCS */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Stash per-frame tx state used by the ERTM send/resend paths */
	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2508 
/* Segment an SDU into one or more I-frame PDUs and queue them on
 * seg_queue.  A single-PDU SDU is marked UNSEGMENTED; otherwise the
 * first PDU is a START frame carrying the total SDU length, followed by
 * CONTINUE frames and a final END frame.  Returns 0 or a negative
 * errno; on error seg_queue is purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Fits in one PDU: no SAR, no SDU length field */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the START frame carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2575 
/* Build one LE/enhanced credit based flow control PDU: basic L2CAP
 * header, optional SDU length field (sdulen != 0 marks the first PDU of
 * an SDU), then the user data.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* Length covers the optional SDU length field plus the data */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2618 
/* Segment an SDU into credit-based-mode PDUs and queue them on
 * seg_queue.  The first PDU carries the total SDU length; later PDUs do
 * not, so they can carry L2CAP_SDULEN_SIZE more payload.  Returns 0 or
 * a negative errno; on error seg_queue is purged.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	/* First PDU: peer's MPS minus the SDU length field it carries */
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		/* After the first PDU there is no SDU length field, so
		 * subsequent PDUs may use the full remote MPS.
		 */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2654 
2655 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2656 {
2657 	int sent = 0;
2658 
2659 	BT_DBG("chan %p", chan);
2660 
2661 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2662 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2663 		chan->tx_credits--;
2664 		sent++;
2665 	}
2666 
2667 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2668 	       skb_queue_len(&chan->tx_q));
2669 }
2670 
/* Send a complete SDU on a channel.  Dispatches on channel type and
 * mode: connectionless channels send a single PDU; LE/enhanced credit
 * based channels segment and transmit as credits allow; basic mode
 * sends one PDU; ERTM/streaming segment first and then hand the queue
 * to the tx state machine.  Returns the number of bytes accepted or a
 * negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Segmentation may have dropped the channel lock; a
		 * disconnect in the meantime trumps any segmentation error
		 * (the queue is purged in either case).
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: ask the channel owner to stop feeding us
		 * data until the peer returns credits.
		 */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2798 
/* Send an SREJ S-frame for every sequence number missing between
 * expected_tx_seq and txseq, skipping frames already buffered in the
 * srej queue.  Each requested seq is remembered on srej_list so it can
 * be re-requested if needed.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Frames already received out of order need no SREJ */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	/* The frame that triggered this (txseq) was received */
	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2821 
/* Re-send an SREJ for the most recently requested missing frame (the
 * tail of srej_list).  Does nothing if no SREJs are outstanding.
 */
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
		return;

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
}
2837 
/* Re-send SREJ frames for every outstanding missing sequence number on
 * srej_list up to (but not including) txseq.  Each popped seq is
 * re-appended after sending, so the list rotates; iteration stops after
 * one full pass or when txseq is reached.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* Stop at the requested seq or when the list runs out */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Still missing: keep it queued for another round */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2863 
/* Process an acknowledgment: free every tx-queued frame with a sequence
 * number before reqseq, advance expected_ack_seq, and stop the
 * retransmission timer once nothing remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack adds no new information */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	/* No frames in flight: the retransmission timer has no work */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2895 
/* Abandon SREJ-based recovery: rewind expected_tx_seq to the last
 * in-order frame, forget outstanding SREJs, drop any buffered
 * out-of-order frames, and return the receiver to plain RECV state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2905 
/* ERTM transmit state machine: handle 'event' while in the XMIT state
 * (normal transmission).  Poll events and retransmission timeouts move
 * the channel to WAIT_F, where it waits for the peer's F-bit.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Append the new frames to the tx queue and send what the
		 * window currently allows.
		 */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* l2cap_send_ack() sends RNR while CONN_LOCAL_BUSY is set */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the peer we were busy; poll (RR with P=1)
			 * and wait for the F-bit before resuming.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Poll the peer (P=1) and wait for its F-bit reply */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: probe the peer before resending */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2977 
2978 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2979 				  struct l2cap_ctrl *control,
2980 				  struct sk_buff_head *skbs, u8 event)
2981 {
2982 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2983 	       event);
2984 
2985 	switch (event) {
2986 	case L2CAP_EV_DATA_REQUEST:
2987 		if (chan->tx_send_head == NULL)
2988 			chan->tx_send_head = skb_peek(skbs);
2989 		/* Queue data, but don't send. */
2990 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2991 		break;
2992 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2993 		BT_DBG("Enter LOCAL_BUSY");
2994 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2995 
2996 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2997 			/* The SREJ_SENT state must be aborted if we are to
2998 			 * enter the LOCAL_BUSY state.
2999 			 */
3000 			l2cap_abort_rx_srej_sent(chan);
3001 		}
3002 
3003 		l2cap_send_ack(chan);
3004 
3005 		break;
3006 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
3007 		BT_DBG("Exit LOCAL_BUSY");
3008 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3009 
3010 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
3011 			struct l2cap_ctrl local_control;
3012 			memset(&local_control, 0, sizeof(local_control));
3013 			local_control.sframe = 1;
3014 			local_control.super = L2CAP_SUPER_RR;
3015 			local_control.poll = 1;
3016 			local_control.reqseq = chan->buffer_seq;
3017 			l2cap_send_sframe(chan, &local_control);
3018 
3019 			chan->retry_count = 1;
3020 			__set_monitor_timer(chan);
3021 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
3022 		}
3023 		break;
3024 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
3025 		l2cap_process_reqseq(chan, control->reqseq);
3026 		fallthrough;
3027 
3028 	case L2CAP_EV_RECV_FBIT:
3029 		if (control && control->final) {
3030 			__clear_monitor_timer(chan);
3031 			if (chan->unacked_frames > 0)
3032 				__set_retrans_timer(chan);
3033 			chan->retry_count = 0;
3034 			chan->tx_state = L2CAP_TX_STATE_XMIT;
3035 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
3036 		}
3037 		break;
3038 	case L2CAP_EV_EXPLICIT_POLL:
3039 		/* Ignore */
3040 		break;
3041 	case L2CAP_EV_MONITOR_TO:
3042 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3043 			l2cap_send_rr_or_rnr(chan, 1);
3044 			__set_monitor_timer(chan);
3045 			chan->retry_count++;
3046 		} else {
3047 			l2cap_send_disconn_req(chan, ECONNABORTED);
3048 		}
3049 		break;
3050 	default:
3051 		break;
3052 	}
3053 }
3054 
3055 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3056 		     struct sk_buff_head *skbs, u8 event)
3057 {
3058 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3059 	       chan, control, skbs, event, chan->tx_state);
3060 
3061 	switch (chan->tx_state) {
3062 	case L2CAP_TX_STATE_XMIT:
3063 		l2cap_tx_state_xmit(chan, control, skbs, event);
3064 		break;
3065 	case L2CAP_TX_STATE_WAIT_F:
3066 		l2cap_tx_state_wait_f(chan, control, skbs, event);
3067 		break;
3068 	default:
3069 		/* Ignore event */
3070 		break;
3071 	}
3072 }
3073 
/* Feed the reqseq (and F-bit) of a received frame into the tx state
 * machine so acked frames can be released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
3080 
/* Feed only the F-bit of a received frame into the tx state machine
 * (no ack processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
3087 
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->l2cap.chan == chan)
			continue;

		/* Each raw channel gets its own clone; on allocation
		 * failure that channel simply misses this frame.
		 */
		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		/* recv() takes ownership on success; free on failure */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
3115 
/* ---- L2CAP signalling commands ---- */

/* Build a signalling PDU: L2CAP header on the (LE or BR/EDR) signalling
 * CID, command header (code/ident/dlen), then dlen bytes of payload.
 * Payload beyond the first conn->mtu bytes is chained as continuation
 * fragments on frag_list.  Returns the skb or NULL on failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* The MTU must at least fit the fixed headers */
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the first skb with payload */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the parent skb and all fragments attached so far */
	kfree_skb(skb);
	return NULL;
}
3182 
/* Read one configuration option at *ptr, advancing *ptr past it.
 * Options of length 1, 2 or 4 are returned by value in *val; any other
 * length returns a pointer to the option payload instead.  Returns the
 * total number of bytes consumed (header + payload).  Buffer bounds are
 * the caller's responsibility.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer to the data */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3216 
/* Append one configuration option at *ptr, advancing *ptr past it.
 * 'val' is written as a 1/2/4 byte little-endian value, or for other
 * lengths treated as a pointer to 'len' bytes to copy.  If the option
 * does not fit within 'size' remaining bytes it is silently dropped.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Not enough room: drop the option rather than overflow */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: val is a pointer to the data */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3249 
/* Append an Extended Flow Specification option built from the channel's
 * local parameters.  Only ERTM and streaming modes carry an EFS; any
 * other mode adds nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		/* Streaming mode uses fixed best-effort parameters */
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3280 
/* Delayed-work handler for the ERTM ack timer: if frames are still
 * unacknowledged to the peer when the timer fires, send an explicit
 * RR/RNR ack.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* NOTE(review): presumably balances a reference taken when the
	 * ack timer was scheduled — verify against __set_ack_timer.
	 */
	l2cap_chan_put(chan);
}
3300 
/* Reset a channel's sequence counters, SDU reassembly state and tx
 * queue, and for ERTM mode additionally initialize the rx/tx state
 * machines and allocate the SREJ and retransmission sequence lists.
 * Returns 0 on success or a negative errno.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP channel-move state starts out stable on the BR/EDR radio */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* The remaining setup applies to ERTM only */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	/* Sequence lists are sized to the respective tx windows */
	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Unwind the first allocation on failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3341 
3342 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3343 {
3344 	switch (mode) {
3345 	case L2CAP_MODE_STREAMING:
3346 	case L2CAP_MODE_ERTM:
3347 		if (l2cap_mode_supported(mode, remote_feat_mask))
3348 			return mode;
3349 		fallthrough;
3350 	default:
3351 		return L2CAP_MODE_BASIC;
3352 	}
3353 }
3354 
3355 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3356 {
3357 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3358 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3359 }
3360 
3361 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3362 {
3363 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3364 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3365 }
3366 
3367 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3368 				      struct l2cap_conf_rfc *rfc)
3369 {
3370 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3371 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3372 
3373 		/* Class 1 devices have must have ERTM timeouts
3374 		 * exceeding the Link Supervision Timeout.  The
3375 		 * default Link Supervision Timeout for AMP
3376 		 * controllers is 10 seconds.
3377 		 *
3378 		 * Class 1 devices use 0xffffffff for their
3379 		 * best-effort flush timeout, so the clamping logic
3380 		 * will result in a timeout that meets the above
3381 		 * requirement.  ERTM timeouts are 16-bit values, so
3382 		 * the maximum timeout is 65.535 seconds.
3383 		 */
3384 
3385 		/* Convert timeout to milliseconds and round */
3386 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3387 
3388 		/* This is the recommended formula for class 2 devices
3389 		 * that start ERTM timers when packets are sent to the
3390 		 * controller.
3391 		 */
3392 		ertm_to = 3 * ertm_to + 500;
3393 
3394 		if (ertm_to > 0xffff)
3395 			ertm_to = 0xffff;
3396 
3397 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3398 		rfc->monitor_timeout = rfc->retrans_timeout;
3399 	} else {
3400 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3401 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3402 	}
3403 }
3404 
3405 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3406 {
3407 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3408 	    __l2cap_ews_supported(chan->conn)) {
3409 		/* use extended control field */
3410 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3411 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3412 	} else {
3413 		chan->tx_win = min_t(u16, chan->tx_win,
3414 				     L2CAP_DEFAULT_TX_WINDOW);
3415 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3416 	}
3417 	chan->ack_win = chan->tx_win;
3418 }
3419 
3420 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3421 {
3422 	struct hci_conn *conn = chan->conn->hcon;
3423 
3424 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3425 
3426 	/* The 2-DH1 packet has between 2 and 56 information bytes
3427 	 * (including the 2-byte payload header)
3428 	 */
3429 	if (!(conn->pkt_type & HCI_2DH1))
3430 		chan->imtu = 54;
3431 
3432 	/* The 3-DH1 packet has between 2 and 85 information bytes
3433 	 * (including the 2-byte payload header)
3434 	 */
3435 	if (!(conn->pkt_type & HCI_3DH1))
3436 		chan->imtu = 83;
3437 
3438 	/* The 2-DH3 packet has between 2 and 369 information bytes
3439 	 * (including the 2-byte payload header)
3440 	 */
3441 	if (!(conn->pkt_type & HCI_2DH3))
3442 		chan->imtu = 367;
3443 
3444 	/* The 3-DH3 packet has between 2 and 554 information bytes
3445 	 * (including the 2-byte payload header)
3446 	 */
3447 	if (!(conn->pkt_type & HCI_3DH3))
3448 		chan->imtu = 552;
3449 
3450 	/* The 2-DH5 packet has between 2 and 681 information bytes
3451 	 * (including the 2-byte payload header)
3452 	 */
3453 	if (!(conn->pkt_type & HCI_2DH5))
3454 		chan->imtu = 679;
3455 
3456 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3457 	 * (including the 2-byte payload header)
3458 	 */
3459 	if (!(conn->pkt_type & HCI_3DH5))
3460 		chan->imtu = 1021;
3461 }
3462 
/* Build an L2CAP Configure Request for @chan into @data (capacity
 * @data_size) and return the number of bytes written.  Every option is
 * appended through l2cap_add_conf_opt(), which is bounds-checked via
 * the remaining space (endptr - ptr).
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens on the first config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* "State 2" devices keep their requested mode as-is */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		/* Fall back to a mode the remote actually supports */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		/* imtu == 0 means "auto-select the best MTU for this link" */
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only send an RFC option if the peer understands one */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Cap the PDU size so a max-size PDU plus extended
		 * header, SDU length field and FCS still fits in the
		 * link MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* Windows larger than the RFC field allows travel in a
		 * separate extended window size option.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		/* Offer to drop the FCS if both sides agree */
		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* Same PDU size clamp as in the ERTM case above */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* Offer to drop the FCS if both sides agree */
		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3588 
/* Parse the remote's Configure Request (accumulated in chan->conf_req,
 * chan->conf_len bytes) and build our Configure Response into @data
 * (capacity @data_size).  Returns the number of response bytes written,
 * or a negative errno when the connection must be refused.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: walk all options and remember the values we care
	 * about.  Options with an unexpected length are silently skipped.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			/* Remember that the peer wants to drop the FCS */
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended windows require local A2MP support */
			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
				return -ECONNREFUSED;
			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			/* Hint options may be ignored; any other unknown
			 * option is echoed back with CONF_UNKNOWN.
			 */
			if (hint)
				break;
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	/* Mode negotiation only on the first request/response round */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State 2 devices cannot change mode: refuse a mismatch */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Mode cannot be renegotiated after the first response */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			/* Service type must match ours unless one side is
			 * "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* When an extended window option was received it
			 * supplies the real window; the RFC field is then
			 * capped at the standard maximum.
			 */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote's PDU size to what fits in our
			 * link MTU with headers and FCS.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			/* Same PDU size clamp as in the ERTM case */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3813 
/* Parse a Configure Response (@rsp, @len bytes) and build the follow-up
 * Configure Request into @data (capacity @size).  *result carries the
 * peer's result code and may be updated (e.g. to UNACCEPT for a too
 * small MTU).  Returns the number of request bytes built, or a negative
 * errno when the connection must be refused.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			/* Enforce the minimum MTU; mark smaller values
			 * as unacceptable.
			 */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* State 2 devices may not switch modes */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Shrink the ack window to the peer's value */
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Service type must match ours unless one side is
			 * "no traffic".
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Basic mode cannot be upgraded by the peer's response */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			/* Without extended control the RFC window caps
			 * the ack window.
			 */
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3931 
3932 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3933 				u16 result, u16 flags)
3934 {
3935 	struct l2cap_conf_rsp *rsp = data;
3936 	void *ptr = rsp->data;
3937 
3938 	BT_DBG("chan %p", chan);
3939 
3940 	rsp->scid   = cpu_to_le16(chan->dcid);
3941 	rsp->result = cpu_to_le16(result);
3942 	rsp->flags  = cpu_to_le16(flags);
3943 
3944 	return ptr - data;
3945 }
3946 
3947 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3948 {
3949 	struct l2cap_le_conn_rsp rsp;
3950 	struct l2cap_conn *conn = chan->conn;
3951 
3952 	BT_DBG("chan %p", chan);
3953 
3954 	rsp.dcid    = cpu_to_le16(chan->scid);
3955 	rsp.mtu     = cpu_to_le16(chan->imtu);
3956 	rsp.mps     = cpu_to_le16(chan->mps);
3957 	rsp.credits = cpu_to_le16(chan->rx_credits);
3958 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3959 
3960 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3961 		       &rsp);
3962 }
3963 
3964 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3965 {
3966 	struct {
3967 		struct l2cap_ecred_conn_rsp rsp;
3968 		__le16 dcid[5];
3969 	} __packed pdu;
3970 	struct l2cap_conn *conn = chan->conn;
3971 	u16 ident = chan->ident;
3972 	int i = 0;
3973 
3974 	if (!ident)
3975 		return;
3976 
3977 	BT_DBG("chan %p ident %d", chan, ident);
3978 
3979 	pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
3980 	pdu.rsp.mps     = cpu_to_le16(chan->mps);
3981 	pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
3982 	pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3983 
3984 	mutex_lock(&conn->chan_lock);
3985 
3986 	list_for_each_entry(chan, &conn->chan_l, list) {
3987 		if (chan->ident != ident)
3988 			continue;
3989 
3990 		/* Reset ident so only one response is sent */
3991 		chan->ident = 0;
3992 
3993 		/* Include all channels pending with the same ident */
3994 		pdu.dcid[i++] = cpu_to_le16(chan->scid);
3995 	}
3996 
3997 	mutex_unlock(&conn->chan_lock);
3998 
3999 	l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
4000 			sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
4001 }
4002 
4003 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
4004 {
4005 	struct l2cap_conn_rsp rsp;
4006 	struct l2cap_conn *conn = chan->conn;
4007 	u8 buf[128];
4008 	u8 rsp_code;
4009 
4010 	rsp.scid   = cpu_to_le16(chan->dcid);
4011 	rsp.dcid   = cpu_to_le16(chan->scid);
4012 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4013 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4014 
4015 	if (chan->hs_hcon)
4016 		rsp_code = L2CAP_CREATE_CHAN_RSP;
4017 	else
4018 		rsp_code = L2CAP_CONN_RSP;
4019 
4020 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
4021 
4022 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
4023 
4024 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4025 		return;
4026 
4027 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4028 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4029 	chan->num_conf_req++;
4030 }
4031 
/* Extract the negotiated RFC (and extended window size) values from a
 * Configure Response buffer @rsp of @len bytes and apply them to @chan.
 * Sane defaults are used for any option the remote omitted.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	/* Only ERTM and streaming mode carry RFC state worth applying */
	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			/* Skip options with an unexpected length */
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* The ack window can only shrink from its current value */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
4087 
4088 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4089 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4090 				    u8 *data)
4091 {
4092 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
4093 
4094 	if (cmd_len < sizeof(*rej))
4095 		return -EPROTO;
4096 
4097 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4098 		return 0;
4099 
4100 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4101 	    cmd->ident == conn->info_ident) {
4102 		cancel_delayed_work(&conn->info_timer);
4103 
4104 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4105 		conn->info_ident = 0;
4106 
4107 		l2cap_conn_start(conn);
4108 	}
4109 
4110 	return 0;
4111 }
4112 
/* Handle an incoming Connection Request: look up a listening channel
 * for the requested PSM, validate security and CIDs, create the new
 * channel and send the response (@rsp_code).  @amp_id selects whether
 * the channel starts on BR/EDR (AMP_ID_BREDR) or an AMP controller.
 *
 * Returns the newly created channel, or NULL when the request was
 * rejected.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Bind the new channel to the underlying ACL addresses */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Remember the request ident for the deferred response path */
	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Userspace must authorize; answer PENDING */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange not finished yet */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* If we answered PEND because features are unknown, start the
	 * information request now.
	 */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, move straight to configuration */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	return chan;
}
4255 
4256 static int l2cap_connect_req(struct l2cap_conn *conn,
4257 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4258 {
4259 	struct hci_dev *hdev = conn->hcon->hdev;
4260 	struct hci_conn *hcon = conn->hcon;
4261 
4262 	if (cmd_len < sizeof(struct l2cap_conn_req))
4263 		return -EPROTO;
4264 
4265 	hci_dev_lock(hdev);
4266 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4267 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4268 		mgmt_device_connected(hdev, hcon, NULL, 0);
4269 	hci_dev_unlock(hdev);
4270 
4271 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4272 	return 0;
4273 }
4274 
/* Handle a Connection Response (or AMP Create Channel Response): find
 * the matching channel by SCID or request ident, and on success move it
 * to BT_CONFIG and start configuration; on refusal delete it.
 *
 * Returns 0 on success or -EBADSLT if no matching channel exists.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* A non-zero SCID identifies the channel directly; otherwise
	 * fall back to matching by the signalling ident.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	/* Take a reference; bail if the channel is already being freed */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the Configure Request once per channel */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result code is a refusal */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4354 
4355 static inline void set_default_fcs(struct l2cap_chan *chan)
4356 {
4357 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4358 	 * sides request it.
4359 	 */
4360 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4361 		chan->fcs = L2CAP_FCS_NONE;
4362 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4363 		chan->fcs = L2CAP_FCS_CRC16;
4364 }
4365 
4366 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4367 				    u8 ident, u16 flags)
4368 {
4369 	struct l2cap_conn *conn = chan->conn;
4370 
4371 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4372 	       flags);
4373 
4374 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4375 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4376 
4377 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4378 		       l2cap_build_conf_rsp(chan, data,
4379 					    L2CAP_CONF_SUCCESS, flags), data);
4380 }
4381 
4382 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4383 				   u16 scid, u16 dcid)
4384 {
4385 	struct l2cap_cmd_rej_cid rej;
4386 
4387 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4388 	rej.scid = __cpu_to_le16(scid);
4389 	rej.dcid = __cpu_to_le16(dcid);
4390 
4391 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4392 }
4393 
/* Handle an incoming L2CAP Configure Request.  A request may arrive in
 * several fragments (continuation flag set); option data is accumulated
 * in chan->conf_req until the final fragment, then parsed and answered.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	/* Must at least contain the fixed dcid/flags header */
	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* On success the channel is returned locked with a reference
	 * held; both are released at the unlock label below.
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal while the channel is being set up */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		/* Unparseable options: give up on the channel */
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	/* Our own outgoing configuration must also finish before the
	 * channel can become ready.
	 */
	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Kick off our own Configure Request exactly once */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4504 
/* Handle an incoming L2CAP Configure Response.  Depending on the result
 * code this either accepts the peer's settings, renegotiates unaccepted
 * options, or tears the channel down.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	/* len above may be negative here, but we bail out before using it */
	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns the channel locked with a reference held; released at
	 * the done label below.
	 */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Peer accepted: record the negotiated RFC options */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			if (!chan->hs_hcon) {
				/* BR/EDR channel: we can respond right away */
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				/* AMP channel: wait for the logical link */
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		/* Renegotiate, but only a bounded number of times */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		/* Rejection or renegotiation budget exhausted: give up */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More response fragments to come; wait for the final one */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4619 
/* Handle an incoming L2CAP Disconnection Request: acknowledge it with a
 * Disconnection Response and tear the channel down.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* chan_lock is taken first; the channel lock nests inside it */
	mutex_lock(&conn->chan_lock);

	/* The peer's dcid names our local (source) CID */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_hold(chan);
	l2cap_chan_lock(chan);

	/* Response CIDs are from our perspective, so they swap */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4666 
/* Handle an incoming L2CAP Disconnection Response: the peer has
 * acknowledged our disconnect request, so finish closing the channel.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* chan_lock is taken first; the channel lock nests inside it */
	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_hold(chan);
	l2cap_chan_lock(chan);

	/* Only act on a response we are actually waiting for */
	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_del(chan, 0);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4712 
/* Handle an incoming L2CAP Information Request.  Supported queries are
 * the feature mask and the fixed-channel map; anything else gets a
 * "not supported" result.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4-byte rsp header + 32-bit feature mask */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise optional features only when enabled locally */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 4-byte rsp header + 8-byte fixed channel bitmap */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		/* Unknown info type: reply "not supported" */
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4763 
/* Handle an incoming L2CAP Information Response for a query we sent.
 * A feature-mask answer may trigger a follow-up fixed-channel query;
 * once discovery finishes, pending channels are started.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Query failed: mark discovery done and proceed anyway */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: ask which ones */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			/* Discovery complete */
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		/* Final step of discovery: record peer's fixed channels */
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4826 
/* Handle an incoming L2CAP Create Channel Request (AMP).  For controller
 * id 0 this degenerates to a normal BR/EDR connect; otherwise the AMP
 * controller id is validated and the channel is bound to the AMP link.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	/* Only valid if we advertised A2MP support */
	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP (high-speed) link must already exist */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* No FCS on AMP links */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	/* Bad or unusable AMP controller id */
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4903 
4904 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4905 {
4906 	struct l2cap_move_chan_req req;
4907 	u8 ident;
4908 
4909 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4910 
4911 	ident = l2cap_get_ident(chan->conn);
4912 	chan->ident = ident;
4913 
4914 	req.icid = cpu_to_le16(chan->scid);
4915 	req.dest_amp_id = dest_amp_id;
4916 
4917 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4918 		       &req);
4919 
4920 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4921 }
4922 
4923 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4924 {
4925 	struct l2cap_move_chan_rsp rsp;
4926 
4927 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4928 
4929 	rsp.icid = cpu_to_le16(chan->dcid);
4930 	rsp.result = cpu_to_le16(result);
4931 
4932 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4933 		       sizeof(rsp), &rsp);
4934 }
4935 
4936 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4937 {
4938 	struct l2cap_move_chan_cfm cfm;
4939 
4940 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4941 
4942 	chan->ident = l2cap_get_ident(chan->conn);
4943 
4944 	cfm.icid = cpu_to_le16(chan->scid);
4945 	cfm.result = cpu_to_le16(result);
4946 
4947 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4948 		       sizeof(cfm), &cfm);
4949 
4950 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4951 }
4952 
4953 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4954 {
4955 	struct l2cap_move_chan_cfm cfm;
4956 
4957 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4958 
4959 	cfm.icid = cpu_to_le16(icid);
4960 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4961 
4962 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4963 		       sizeof(cfm), &cfm);
4964 }
4965 
4966 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4967 					 u16 icid)
4968 {
4969 	struct l2cap_move_chan_cfm_rsp rsp;
4970 
4971 	BT_DBG("icid 0x%4.4x", icid);
4972 
4973 	rsp.icid = cpu_to_le16(icid);
4974 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4975 }
4976 
4977 static void __release_logical_link(struct l2cap_chan *chan)
4978 {
4979 	chan->hs_hchan = NULL;
4980 	chan->hs_hcon = NULL;
4981 
4982 	/* Placeholder - release the logical link */
4983 }
4984 
/* React to a failed AMP logical link setup: disconnect a channel that
 * was being created, or unwind an in-progress move.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
5015 
/* AMP logical link came up for a channel still being created: attach
 * the link, send the pending EFS config response and, if configuration
 * is already complete, finish channel setup.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	/* Scratch buffer used by l2cap_send_efs_conf_rsp to build the
	 * response in.
	 */
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* Respond with the ident saved when the config req was deferred */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
5038 
/* AMP logical link came up during a channel move: advance the move
 * state machine according to our current role and state.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Defer until local receive path drains */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
5072 
/* Completion callback for AMP logical link setup.
 *
 * On failure the channel is failed/unwound and the link references are
 * dropped.  On success, route to the create path (channel not yet
 * connected) or the move path (channel already connected).
 *
 * Call with chan locked.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
5093 
/* Begin moving a channel between BR/EDR and an AMP controller as the
 * move initiator.
 */
void l2cap_move_start(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	if (chan->local_amp_id == AMP_ID_BREDR) {
		/* Moving BR/EDR -> AMP: only if policy prefers AMP */
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
			return;
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
	} else {
		/* Moving AMP -> BR/EDR (controller id 0) */
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		chan->move_id = 0;
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
	}
}
5112 
/* Continue channel creation after physical link setup: send the Create
 * Channel request (outgoing) or the Create Channel response plus our
 * first config request (incoming).
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* No FCS on AMP links */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		/* Response CIDs are from our perspective, so they swap */
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Channel accepted: start configuration */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}
	}
}
5164 
5165 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5166 				   u8 remote_amp_id)
5167 {
5168 	l2cap_move_setup(chan);
5169 	chan->move_id = local_amp_id;
5170 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
5171 
5172 	l2cap_send_move_chan_req(chan, remote_amp_id);
5173 }
5174 
/* As move responder, try to attach the AMP logical link and answer the
 * pending Move Channel request accordingly.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
5199 
5200 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5201 {
5202 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5203 		u8 rsp_result;
5204 		if (result == -EINVAL)
5205 			rsp_result = L2CAP_MR_BAD_ID;
5206 		else
5207 			rsp_result = L2CAP_MR_NOT_ALLOWED;
5208 
5209 		l2cap_send_move_chan_rsp(chan, rsp_result);
5210 	}
5211 
5212 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
5213 	chan->move_state = L2CAP_MOVE_STABLE;
5214 
5215 	/* Restart data transmission */
5216 	l2cap_ertm_send(chan);
5217 }
5218 
/* Completion callback for AMP physical link setup: continue channel
 * creation, or advance/cancel an in-progress move depending on role.
 *
 * Invoke with locked chan.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* Channel already going away: nothing to do */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
		return;

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
5250 
/* Handle an incoming L2CAP Move Channel Request: validate the channel
 * and destination controller, detect move collisions, then become the
 * move responder or reject the request.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	/* Only valid if we advertised A2MP support */
	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	/* Returns the channel locked with a reference held */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	/* Save ident so l2cap_send_move_chan_rsp can match the reply */
	chan->ident = cmd->ident;

	/* Only dynamic ERTM/streaming channels may move, and only if
	 * policy does not pin them to BR/EDR.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* Validate the destination AMP controller */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision.  Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		/* Moving to an AMP controller: physical link comes first */
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5348 
/* Advance the initiator's move state machine after a success or pending
 * Move Channel Response from the peer.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	/* Returns the channel locked with a reference held */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* Re-arm the move timer while the peer reports pending */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
5439 
/* Handle a failed Move Channel Response: unwind or, on collision,
 * switch to the responder role; always send an unconfirmed confirm.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	/* Match by ident (returned locked with a reference held) */
	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			/* Peer won the collision; let it drive the move */
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
5469 
5470 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5471 				  struct l2cap_cmd_hdr *cmd,
5472 				  u16 cmd_len, void *data)
5473 {
5474 	struct l2cap_move_chan_rsp *rsp = data;
5475 	u16 icid, result;
5476 
5477 	if (cmd_len != sizeof(*rsp))
5478 		return -EPROTO;
5479 
5480 	icid = le16_to_cpu(rsp->icid);
5481 	result = le16_to_cpu(rsp->result);
5482 
5483 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5484 
5485 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5486 		l2cap_move_continue(conn, icid, result);
5487 	else
5488 		l2cap_move_fail(conn, cmd->ident, icid, result);
5489 
5490 	return 0;
5491 }
5492 
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	/* Handle a Move Channel Confirmation from the peer (AMP move).
	 * A confirmation response is always sent back, found channel or not.
	 */
	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	/* Returns the channel locked and referenced on success. */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Commit to the new controller; if we moved back to
			 * BR/EDR the AMP logical link can be released.
			 */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Unconfirmed: stay on the current controller. */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5535 
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	/* Handle the final PDU of an AMP channel move: the peer's response
	 * to our Move Channel Confirmation.
	 */
	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	/* Returns the channel locked and referenced on success. */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		/* Moved back to BR/EDR: the AMP logical link is no longer
		 * needed.
		 */
		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5571 
5572 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5573 					      struct l2cap_cmd_hdr *cmd,
5574 					      u16 cmd_len, u8 *data)
5575 {
5576 	struct hci_conn *hcon = conn->hcon;
5577 	struct l2cap_conn_param_update_req *req;
5578 	struct l2cap_conn_param_update_rsp rsp;
5579 	u16 min, max, latency, to_multiplier;
5580 	int err;
5581 
5582 	if (hcon->role != HCI_ROLE_MASTER)
5583 		return -EINVAL;
5584 
5585 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5586 		return -EPROTO;
5587 
5588 	req = (struct l2cap_conn_param_update_req *) data;
5589 	min		= __le16_to_cpu(req->min);
5590 	max		= __le16_to_cpu(req->max);
5591 	latency		= __le16_to_cpu(req->latency);
5592 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5593 
5594 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5595 	       min, max, latency, to_multiplier);
5596 
5597 	memset(&rsp, 0, sizeof(rsp));
5598 
5599 	err = hci_check_conn_params(min, max, latency, to_multiplier);
5600 	if (err)
5601 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5602 	else
5603 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5604 
5605 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5606 		       sizeof(rsp), &rsp);
5607 
5608 	if (!err) {
5609 		u8 store_hint;
5610 
5611 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5612 						to_multiplier);
5613 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5614 				    store_hint, min, max, latency,
5615 				    to_multiplier);
5616 
5617 	}
5618 
5619 	return 0;
5620 }
5621 
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	/* Handle the LE Credit Based Connection Response to a request we
	 * sent earlier; the pending channel is looked up via cmd->ident.
	 */
	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* On success the peer must advertise at least the minimum LE CoC
	 * MTU/MPS (23) and a DCID inside the LE dynamic CID range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Refuse a DCID already in use by another channel. */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Raise the channel's security requirement one level above
		 * the link's current level and re-secure the link.
		 */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		/* Any other result code is treated as a refusal. */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5708 
5709 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5710 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5711 				      u8 *data)
5712 {
5713 	int err = 0;
5714 
5715 	switch (cmd->code) {
5716 	case L2CAP_COMMAND_REJ:
5717 		l2cap_command_rej(conn, cmd, cmd_len, data);
5718 		break;
5719 
5720 	case L2CAP_CONN_REQ:
5721 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5722 		break;
5723 
5724 	case L2CAP_CONN_RSP:
5725 	case L2CAP_CREATE_CHAN_RSP:
5726 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5727 		break;
5728 
5729 	case L2CAP_CONF_REQ:
5730 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5731 		break;
5732 
5733 	case L2CAP_CONF_RSP:
5734 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5735 		break;
5736 
5737 	case L2CAP_DISCONN_REQ:
5738 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5739 		break;
5740 
5741 	case L2CAP_DISCONN_RSP:
5742 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5743 		break;
5744 
5745 	case L2CAP_ECHO_REQ:
5746 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5747 		break;
5748 
5749 	case L2CAP_ECHO_RSP:
5750 		break;
5751 
5752 	case L2CAP_INFO_REQ:
5753 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5754 		break;
5755 
5756 	case L2CAP_INFO_RSP:
5757 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5758 		break;
5759 
5760 	case L2CAP_CREATE_CHAN_REQ:
5761 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5762 		break;
5763 
5764 	case L2CAP_MOVE_CHAN_REQ:
5765 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5766 		break;
5767 
5768 	case L2CAP_MOVE_CHAN_RSP:
5769 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5770 		break;
5771 
5772 	case L2CAP_MOVE_CHAN_CFM:
5773 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5774 		break;
5775 
5776 	case L2CAP_MOVE_CHAN_CFM_RSP:
5777 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5778 		break;
5779 
5780 	default:
5781 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5782 		err = -EINVAL;
5783 		break;
5784 	}
5785 
5786 	return err;
5787 }
5788 
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	/* Handle an incoming LE Credit Based Connection Request.  On any
	 * failure a response with dcid/credits of 0 and the matching result
	 * code is sent; only the deferred-setup path (L2CAP_CR_PEND below)
	 * returns without responding here.
	 */
	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 is the minimum MTU/MPS allowed for LE CoC. */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* The listener's security requirements must already be satisfied. */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	/* Our local scid becomes the peer's destination CID. */
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5914 
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	/* Handle an LE Flow Control Credit packet: the peer grants us
	 * additional transmit credits on one of its channels.
	 */
	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid	= __le16_to_cpu(pkt->cid);
	credits	= __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	/* Returns the channel locked and referenced on success. */
	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* Growing the total beyond LE_FLOWCTL_MAX_CREDITS is a protocol
	 * violation: disconnect rather than let the counter wrap.
	 */
	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		goto unlock;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5961 
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	struct {
		struct l2cap_ecred_conn_rsp rsp;
		__le16 dcid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, len = 0;
	int i, num_scid;
	bool defer = false;

	/* Handle an Enhanced Credit Based (ECRED) Connection Request, which
	 * may open up to L2CAP_ECRED_MAX_CID channels at once.  One response
	 * carrying a dcid per requested scid is built in pdu; a dcid of 0
	 * marks an individual channel that was refused.
	 */
	if (!enable_ecred)
		return -EINVAL;

	/* The PDU is a fixed header plus a whole number of 16-bit scids. */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > ARRAY_SIZE(pdu.dcid)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto response;
	}

	psm  = req->psm;

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	memset(&pdu, 0, sizeof(pdu));

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* The listener's security requirements must already be satisfied. */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		/* Default this slot to "refused"; overwritten on success. */
		pdu.dcid[i] = 0x0000;
		len += sizeof(*pdu.dcid);

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm  = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response */
		if (!pdu.rsp.credits) {
			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
			pdu.rsp.mps = cpu_to_le16(chan->mps);
			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
		}

		pdu.dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			/* Userspace decides later; suppress the response
			 * for now (see the defer check below).
			 */
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

response:
	pdu.rsp.result = cpu_to_le16(result);

	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(pdu.rsp) + len, &pdu);

	return 0;
}
6104 
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	/* Handle the response to an ECRED Connection Request we sent.  Every
	 * channel still pending on cmd->ident consumes one dcid slot from
	 * the response, in order.
	 */
	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	mutex_lock(&conn->chan_lock);

	/* cmd_len now tracks how many dcid bytes remain unconsumed. */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			/* Also tear down the pre-existing owner of the dcid
			 * (just found non-NULL above, under chan_lock).
			 */
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Raise our requirement and re-secure the link. */
			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return err;
}
6218 
6219 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6220 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6221 					 u8 *data)
6222 {
6223 	struct l2cap_ecred_reconf_req *req = (void *) data;
6224 	struct l2cap_ecred_reconf_rsp rsp;
6225 	u16 mtu, mps, result;
6226 	struct l2cap_chan *chan;
6227 	int i, num_scid;
6228 
6229 	if (!enable_ecred)
6230 		return -EINVAL;
6231 
6232 	if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) {
6233 		result = L2CAP_CR_LE_INVALID_PARAMS;
6234 		goto respond;
6235 	}
6236 
6237 	mtu = __le16_to_cpu(req->mtu);
6238 	mps = __le16_to_cpu(req->mps);
6239 
6240 	BT_DBG("mtu %u mps %u", mtu, mps);
6241 
6242 	if (mtu < L2CAP_ECRED_MIN_MTU) {
6243 		result = L2CAP_RECONF_INVALID_MTU;
6244 		goto respond;
6245 	}
6246 
6247 	if (mps < L2CAP_ECRED_MIN_MPS) {
6248 		result = L2CAP_RECONF_INVALID_MPS;
6249 		goto respond;
6250 	}
6251 
6252 	cmd_len -= sizeof(*req);
6253 	num_scid = cmd_len / sizeof(u16);
6254 	result = L2CAP_RECONF_SUCCESS;
6255 
6256 	for (i = 0; i < num_scid; i++) {
6257 		u16 scid;
6258 
6259 		scid = __le16_to_cpu(req->scid[i]);
6260 		if (!scid)
6261 			return -EPROTO;
6262 
6263 		chan = __l2cap_get_chan_by_dcid(conn, scid);
6264 		if (!chan)
6265 			continue;
6266 
6267 		/* If the MTU value is decreased for any of the included
6268 		 * channels, then the receiver shall disconnect all
6269 		 * included channels.
6270 		 */
6271 		if (chan->omtu > mtu) {
6272 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
6273 			       chan->omtu, mtu);
6274 			result = L2CAP_RECONF_INVALID_MTU;
6275 		}
6276 
6277 		chan->omtu = mtu;
6278 		chan->remote_mps = mps;
6279 	}
6280 
6281 respond:
6282 	rsp.result = cpu_to_le16(result);
6283 
6284 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
6285 		       &rsp);
6286 
6287 	return 0;
6288 }
6289 
6290 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6291 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6292 					 u8 *data)
6293 {
6294 	struct l2cap_chan *chan, *tmp;
6295 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6296 	u16 result;
6297 
6298 	if (cmd_len < sizeof(*rsp))
6299 		return -EPROTO;
6300 
6301 	result = __le16_to_cpu(rsp->result);
6302 
6303 	BT_DBG("result 0x%4.4x", rsp->result);
6304 
6305 	if (!result)
6306 		return 0;
6307 
6308 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6309 		if (chan->ident != cmd->ident)
6310 			continue;
6311 
6312 		l2cap_chan_del(chan, ECONNRESET);
6313 	}
6314 
6315 	return 0;
6316 }
6317 
6318 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6319 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6320 				       u8 *data)
6321 {
6322 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6323 	struct l2cap_chan *chan;
6324 
6325 	if (cmd_len < sizeof(*rej))
6326 		return -EPROTO;
6327 
6328 	mutex_lock(&conn->chan_lock);
6329 
6330 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6331 	if (!chan)
6332 		goto done;
6333 
6334 	l2cap_chan_lock(chan);
6335 	l2cap_chan_del(chan, ECONNREFUSED);
6336 	l2cap_chan_unlock(chan);
6337 
6338 done:
6339 	mutex_unlock(&conn->chan_lock);
6340 	return 0;
6341 }
6342 
6343 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
6344 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6345 				   u8 *data)
6346 {
6347 	int err = 0;
6348 
6349 	switch (cmd->code) {
6350 	case L2CAP_COMMAND_REJ:
6351 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
6352 		break;
6353 
6354 	case L2CAP_CONN_PARAM_UPDATE_REQ:
6355 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
6356 		break;
6357 
6358 	case L2CAP_CONN_PARAM_UPDATE_RSP:
6359 		break;
6360 
6361 	case L2CAP_LE_CONN_RSP:
6362 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
6363 		break;
6364 
6365 	case L2CAP_LE_CONN_REQ:
6366 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
6367 		break;
6368 
6369 	case L2CAP_LE_CREDITS:
6370 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
6371 		break;
6372 
6373 	case L2CAP_ECRED_CONN_REQ:
6374 		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
6375 		break;
6376 
6377 	case L2CAP_ECRED_CONN_RSP:
6378 		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
6379 		break;
6380 
6381 	case L2CAP_ECRED_RECONF_REQ:
6382 		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
6383 		break;
6384 
6385 	case L2CAP_ECRED_RECONF_RSP:
6386 		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
6387 		break;
6388 
6389 	case L2CAP_DISCONN_REQ:
6390 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
6391 		break;
6392 
6393 	case L2CAP_DISCONN_RSP:
6394 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
6395 		break;
6396 
6397 	default:
6398 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
6399 		err = -EINVAL;
6400 		break;
6401 	}
6402 
6403 	return err;
6404 }
6405 
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	/* Entry point for the LE signaling channel.  Unlike BR/EDR, an LE
	 * signaling frame carries exactly one command, hence the strict
	 * len != skb->len check below.  The skb is always consumed.
	 */
	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE: the message text is historical; err here is any
		 * handler failure, not only a link-type mismatch.
		 */
		BT_ERR("Wrong link type (%d)", err);

		/* Tell the peer we did not understand the command. */
		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
6446 
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	/* Entry point for the BR/EDR signaling channel.  A single C-frame
	 * may carry several commands back to back, hence the loop.  The
	 * skb is always consumed.
	 */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* A command may not claim more payload than remains. */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			/* NOTE: the message text is historical; err here is
			 * any handler failure, not only a link-type mismatch.
			 */
			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance to the next command in the frame. */
		skb_pull(skb, len);
	}

drop:
	kfree_skb(skb);
}
6492 
6493 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
6494 {
6495 	u16 our_fcs, rcv_fcs;
6496 	int hdr_size;
6497 
6498 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6499 		hdr_size = L2CAP_EXT_HDR_SIZE;
6500 	else
6501 		hdr_size = L2CAP_ENH_HDR_SIZE;
6502 
6503 	if (chan->fcs == L2CAP_FCS_CRC16) {
6504 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6505 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6506 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6507 
6508 		if (our_fcs != rcv_fcs)
6509 			return -EBADMSG;
6510 	}
6511 	return 0;
6512 }
6513 
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	/* Answer a poll (P-bit) from the peer: a frame carrying the
	 * final (F) bit must go out, either as RNR, as a pending i-frame,
	 * or as a fallback RR.
	 */
	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		/* We cannot accept new i-frames right now: report RNR. */
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just came out of busy with frames still unacked: restart
	 * the retransmission timer.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	/* CONN_SEND_FBIT still being set indicates no frame above carried
	 * the F-bit (presumably the send paths clear it when one does —
	 * TODO confirm against l2cap_send_sframe/l2cap_ertm_send).
	 */
	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
6547 
6548 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
6549 			    struct sk_buff **last_frag)
6550 {
6551 	/* skb->len reflects data in skb as well as all fragments
6552 	 * skb->data_len reflects only data in fragments
6553 	 */
6554 	if (!skb_has_frag_list(skb))
6555 		skb_shinfo(skb)->frag_list = new_frag;
6556 
6557 	new_frag->next = NULL;
6558 
6559 	(*last_frag)->next = new_frag;
6560 	*last_frag = new_frag;
6561 
6562 	skb->len += new_frag->len;
6563 	skb->data_len += new_frag->len;
6564 	skb->truesize += new_frag->truesize;
6565 }
6566 
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	/* Reassemble an SDU from SAR-tagged PDUs.  Ownership: once a skb is
	 * stored in chan->sdu or handed to chan->ops->recv(), the local
	 * skb pointer is set to NULL so the error path below does not
	 * double-free it.  Any break that leaves err as -EINVAL (a SAR
	 * state violation) or a later error frees both the current fragment
	 * and any partially assembled SDU.
	 */
	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A complete SDU while reassembly is in progress is a
		 * protocol error.
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* A new start while reassembly is in progress is an error. */
		if (chan->sdu)
			break;

		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		/* The first fragment carries the total SDU length. */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must be shorter than the full SDU. */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* Continuation without a start in progress is an error. */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Growing past the announced length is a protocol error. */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		/* End without a start in progress is an error. */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The completed SDU must match the announced length. */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the offending fragment and any partial SDU;
		 * kfree_skb() tolerates the NULLed pointers.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
6651 
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Stub: resegmentation is not implemented, so report success. */
	return 0;
}
6657 
6658 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6659 {
6660 	u8 event;
6661 
6662 	if (chan->mode != L2CAP_MODE_ERTM)
6663 		return;
6664 
6665 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6666 	l2cap_tx(chan, NULL, NULL, event);
6667 }
6668 
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		/* srej_q holds the out-of-order frames received while we
		 * were waiting for retransmissions; pull the next expected
		 * sequence number, if queued.
		 */
		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	/* All gaps filled: leave the SREJ state and acknowledge. */
	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
6702 
/* Handle an incoming SREJ (selective reject) S-frame: the peer asks for
 * retransmission of the single I-frame with sequence number reqseq.
 * Validates reqseq, enforces the retry limit, and tracks poll/final
 * bits so a frame is not retransmitted twice for the same SREJ.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* The peer cannot selectively reject a frame we have not sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means an unlimited number of retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* A poll (P=1) must be answered with the F-bit set */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit when this F-bit belongs to
			 * an SREJ for the same sequence number that was
			 * already acted on (CONN_SREJ_ACT set).
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6760 
/* Handle an incoming REJ S-frame: the peer rejects all I-frames from
 * reqseq onward, so everything unacked is retransmitted after reqseq
 * and the retry limit have been validated.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* The peer cannot reject a frame we have not sent yet */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means an unlimited number of retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Retransmit only once per REJ: skip when the REJ was
		 * already acted on while waiting for this F-bit.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6797 
/* Classify an incoming I-frame's sequence number relative to the
 * receive window and any outstanding SREJ state.  Returns one of the
 * L2CAP_TXSEQ_* codes (expected, duplicate, unexpected gap, invalid,
 * or their SREJ variants); the caller chooses the recovery action.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		/* The oldest outstanding SREJ is the frame we expect next */
		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* Frames between last_acked_seq and expected_tx_seq were already
	 * received once; anything in that range is a duplicate.
	 */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6883 
/* ERTM receive state machine handler for the default RECV state.
 * I-frames are reassembled, queued for SREJ recovery, or dropped
 * according to l2cap_classify_txseq(); RR/RNR/REJ/SREJ events update
 * the tx side.  Any skb not stored (skb_in_use) is freed on exit.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* When locally busy, the frame is intentionally
			 * dropped; the peer will retransmit it later.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu() now owns the skb */
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received: only the ack info is useful */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote busy cleared with frames still unacked:
			 * restart the retransmission timer.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		/* Peer signalled receiver-not-ready: stop retransmitting */
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Free any frame that was not consumed or queued above */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
7017 
/* ERTM receive state machine handler for SREJ_SENT state: SREJs are
 * outstanding, so every in-window I-frame is parked in srej_q until the
 * requested retransmissions arrive, at which point the queue is drained
 * in order by l2cap_rx_queued_iframes().  Unstored skbs are freed.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The retransmission we asked for first: remove it
			 * from the pending SREJ list and try draining the
			 * queue of buffered frames.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by re-sending the oldest SREJ */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Acknowledge the RNR with a plain RR carrying the
			 * current buffer_seq.
			 */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Free any frame that was not queued above */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
7160 
7161 static int l2cap_finish_move(struct l2cap_chan *chan)
7162 {
7163 	BT_DBG("chan %p", chan);
7164 
7165 	chan->rx_state = L2CAP_RX_STATE_RECV;
7166 
7167 	if (chan->hs_hcon)
7168 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7169 	else
7170 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7171 
7172 	return l2cap_resegment(chan);
7173 }
7174 
/* Handle events while waiting for a frame with the P-bit set (state
 * entered around a channel move; l2cap_finish_move() completes it).
 * The poll resynchronizes both sides: our tx queue is rewound to the
 * peer's reqseq before normal processing resumes.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	/* Only a poll (P=1) is acceptable in this state */
	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames must not carry the poll that ends this state */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
7212 
/* Handle events while waiting for a frame with the F-bit set (the
 * final response to our poll after a channel move).  Resynchronizes
 * the tx queue to the peer's reqseq, adopts the new link's MTU, and
 * re-processes the frame in the normal RECV state.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	/* Only a final (F=1) frame is acceptable in this state */
	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Use the AMP block MTU when on a high-speed link, else ACL MTU */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
7250 
7251 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7252 {
7253 	/* Make sure reqseq is for a packet that has been sent but not acked */
7254 	u16 unacked;
7255 
7256 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7257 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
7258 }
7259 
7260 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7261 		    struct sk_buff *skb, u8 event)
7262 {
7263 	int err = 0;
7264 
7265 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7266 	       control, skb, event, chan->rx_state);
7267 
7268 	if (__valid_reqseq(chan, control->reqseq)) {
7269 		switch (chan->rx_state) {
7270 		case L2CAP_RX_STATE_RECV:
7271 			err = l2cap_rx_state_recv(chan, control, skb, event);
7272 			break;
7273 		case L2CAP_RX_STATE_SREJ_SENT:
7274 			err = l2cap_rx_state_srej_sent(chan, control, skb,
7275 						       event);
7276 			break;
7277 		case L2CAP_RX_STATE_WAIT_P:
7278 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
7279 			break;
7280 		case L2CAP_RX_STATE_WAIT_F:
7281 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
7282 			break;
7283 		default:
7284 			/* shut it down */
7285 			break;
7286 		}
7287 	} else {
7288 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
7289 		       control->reqseq, chan->next_tx_seq,
7290 		       chan->expected_ack_seq);
7291 		l2cap_send_disconn_req(chan, ECONNRESET);
7292 	}
7293 
7294 	return err;
7295 }
7296 
/* Receive path for streaming mode: there is no retransmission, so only
 * the exactly-expected sequence number is reassembled.  On any other
 * classification the partial SDU is discarded, the frame dropped, and
 * reception resumes from the received sequence number.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		/* l2cap_reassemble_sdu() takes ownership of the skb */
		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* A frame was lost: drop any partially reassembled SDU */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize to whatever the sender transmitted */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return 0;
}
7332 
/* Validate and dispatch a single ERTM/streaming-mode PDU: check FCS,
 * payload size against MPS, F/P-bit consistency, then hand I-frames to
 * l2cap_rx()/l2cap_stream_rx() and S-frames to l2cap_rx() with the
 * matching event.  Always returns 0; invalid frames are dropped and
 * protocol violations trigger a disconnect request.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Exclude the SDU length header and FCS trailer from the
	 * payload size checked against the negotiated MPS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	/* Give the channel owner (e.g. a BPF filter) a chance to drop */
	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the S-frame super field to the rx event code */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
7425 
7426 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7427 {
7428 	struct l2cap_conn *conn = chan->conn;
7429 	struct l2cap_le_credits pkt;
7430 	u16 return_credits;
7431 
7432 	return_credits = (chan->imtu / chan->mps) + 1;
7433 
7434 	if (chan->rx_credits >= return_credits)
7435 		return;
7436 
7437 	return_credits -= chan->rx_credits;
7438 
7439 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7440 
7441 	chan->rx_credits += return_credits;
7442 
7443 	pkt.cid     = cpu_to_le16(chan->scid);
7444 	pkt.credits = cpu_to_le16(return_credits);
7445 
7446 	chan->ident = l2cap_get_ident(conn);
7447 
7448 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
7449 }
7450 
7451 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7452 {
7453 	int err;
7454 
7455 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7456 
7457 	/* Wait recv to confirm reception before updating the credits */
7458 	err = chan->ops->recv(chan, skb);
7459 
7460 	/* Update credits whenever an SDU is received */
7461 	l2cap_chan_le_send_credits(chan);
7462 
7463 	return err;
7464 }
7465 
/* Receive one LE/ECRED flow-control PDU: account a credit, then either
 * start a new SDU (first PDU carries the SDU length header) or append
 * to the in-progress one, delivering it when complete.  On internal
 * errors the skb and any partial SDU are freed here, so 0 is returned
 * to the caller in those cases; a negative return means the caller
 * still owns (and must free) the skb.
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	/* No SDU in progress: this PDU starts one */
	if (!chan->sdu) {
		u16 sdu_len;

		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* SDU fits in a single PDU: deliver immediately */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* Fragment absorbed into chan->sdu; skb must not be freed below */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
7569 
/* Dispatch an incoming data PDU to the channel identified by cid,
 * according to the channel's operating mode.  The channel returned by
 * l2cap_get_chan_by_scid() is locked and referenced; both are released
 * at 'done'.  The skb is either consumed by the mode handler or freed
 * at 'drop'.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		/* Data on the A2MP fixed channel may create it on demand */
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* A negative return means the handler did not consume
		 * the skb, so it is freed at 'drop'.
		 */
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* recv() returning non-zero means it did not take the skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() always takes ownership of the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
7645 
/* Deliver a connectionless (G-frame) PDU to the global channel bound to
 * the given PSM on BR/EDR.  The reference taken by
 * l2cap_global_chan_by_psm() is dropped on every path; the skb is
 * either consumed by recv() or freed at 'free_skb'.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	/* Connectionless data is only defined for BR/EDR ACL links */
	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* recv() returning zero means it consumed the skb */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
7682 
/* Entry point for a complete L2CAP frame: parse the basic header and
 * dispatch by CID to the signaling, connectionless, LE signaling, or
 * data channel handlers.  Frames arriving before the HCI link is fully
 * connected are parked on conn->pending_rx (drained by
 * process_pending_rx()).  Ownership of the skb passes to the handler.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload length */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
7737 
7738 static void process_pending_rx(struct work_struct *work)
7739 {
7740 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7741 					       pending_rx_work);
7742 	struct sk_buff *skb;
7743 
7744 	BT_DBG("");
7745 
7746 	while ((skb = skb_dequeue(&conn->pending_rx)))
7747 		l2cap_recv_frame(conn, skb);
7748 }
7749 
/* Create (or return the existing) L2CAP connection object for an HCI
 * connection: allocates the conn, takes a reference on the hcon,
 * creates the HCI channel, selects the base MTU from the link type,
 * computes the locally supported fixed channels, and initializes all
 * locks, lists, and work items.  Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up by a previous caller */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		/* Controllers may report le_mtu == 0; fall back to ACL MTU */
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		fallthrough;
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise A2MP only when high-speed support is enabled */
	if (hcon->type == ACL_LINK &&
	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	/* SMP over BR/EDR requires LE plus secure connections (or the
	 * debug override flag).
	 */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7816 
7817 static bool is_valid_psm(u16 psm, u8 dst_type)
7818 {
7819 	if (!psm)
7820 		return false;
7821 
7822 	if (bdaddr_type_is_le(dst_type))
7823 		return (psm <= 0x00ff);
7824 
7825 	/* PSM must be odd and lsb of upper byte must be 0 */
7826 	return ((psm & 0x0101) == 0x0001);
7827 }
7828 
/* Context passed to l2cap_chan_by_pid() when counting how many deferred
 * ECRED channels the same peer process already has in flight.
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel being connected; excluded from the count */
	struct pid *pid;		/* peer PID the counted channels must match */
	int count;			/* number of matching channels found so far */
};
7834 
7835 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7836 {
7837 	struct l2cap_chan_data *d = data;
7838 	struct pid *pid;
7839 
7840 	if (chan == d->chan)
7841 		return;
7842 
7843 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7844 		return;
7845 
7846 	pid = chan->ops->get_peer_pid(chan);
7847 
7848 	/* Only count deferred channels with the same PID/PSM */
7849 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7850 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7851 		return;
7852 
7853 	d->count++;
7854 }
7855 
/* Initiate an outgoing L2CAP channel connection.
 *
 * @chan:     channel to connect (state BT_OPEN/BT_BOUND, or already in
 *            progress/connected in which case this is short-circuited)
 * @psm:      destination PSM (little-endian), for connection-oriented use
 * @cid:      destination CID, for fixed channels
 * @dst:      destination bdaddr
 * @dst_type: L2CAP address type of @dst (converted to HCI type for LE)
 *
 * Validates PSM/CID/mode combinations, creates (or reuses) the underlying
 * HCI connection and l2cap_conn, adds the channel to the connection and
 * kicks off the configuration/security procedure when the link is already
 * up.  Returns 0 on success or a negative errno.
 *
 * Locking: takes hci_dev_lock(), then conn->chan_lock, then the channel
 * lock for the final linking step.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* A raw channel may connect without a PSM or CID; everything else
	 * needs a valid PSM or an explicit destination CID.
	 */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes that are compiled out or disabled via module
	 * parameters (enable_ecred / disable_ertm).
	 */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising, connect directly (as slave); otherwise
		 * go through the passive connect-via-scan machinery.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level,
						   HCI_LE_CONN_TIMEOUT,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* ECRED allows only a bounded number of channels per connect
	 * request; count the ones this PID already has pending.
	 */
	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* A fixed channel to the requested CID must not already exist */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, start channel setup immediately;
	 * otherwise connect_cfm will do it once the link comes up.
	 */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
8041 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
8042 
/* Send an ECRED Reconfigure Request advertising the channel's current
 * MTU/MPS for our single source CID.  The on-wire PDU is the fixed
 * request header followed by one __le16 SCID, hence the local struct.
 */
static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct {
		struct l2cap_ecred_reconf_req req;
		__le16 scid;
	} pdu;

	pdu.req.mtu = cpu_to_le16(chan->imtu);
	pdu.req.mps = cpu_to_le16(chan->mps);
	pdu.scid    = cpu_to_le16(chan->scid);

	/* Remember the ident so the response can be matched to this chan */
	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
		       sizeof(pdu), &pdu);
}
8060 
8061 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8062 {
8063 	if (chan->imtu > mtu)
8064 		return -EINVAL;
8065 
8066 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
8067 
8068 	chan->imtu = mtu;
8069 
8070 	l2cap_ecred_reconfigure(chan);
8071 
8072 	return 0;
8073 }
8074 
8075 /* ---- L2CAP interface with lower layer (HCI) ---- */
8076 
8077 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8078 {
8079 	int exact = 0, lm1 = 0, lm2 = 0;
8080 	struct l2cap_chan *c;
8081 
8082 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8083 
8084 	/* Find listening sockets and check their link_mode */
8085 	read_lock(&chan_list_lock);
8086 	list_for_each_entry(c, &chan_list, global_l) {
8087 		if (c->state != BT_LISTEN)
8088 			continue;
8089 
8090 		if (!bacmp(&c->src, &hdev->bdaddr)) {
8091 			lm1 |= HCI_LM_ACCEPT;
8092 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8093 				lm1 |= HCI_LM_MASTER;
8094 			exact++;
8095 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
8096 			lm2 |= HCI_LM_ACCEPT;
8097 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8098 				lm2 |= HCI_LM_MASTER;
8099 		}
8100 	}
8101 	read_unlock(&chan_list_lock);
8102 
8103 	return exact ? lm1 : lm2;
8104 }
8105 
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * On success a reference is taken on the returned channel (via
 * l2cap_chan_hold_unless_zero) and the list lock is dropped before
 * returning, so the caller may sleep with the channel and must release
 * it with l2cap_chan_put().  Returns NULL when no further match exists.
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Match our adapter's address or a wildcard bind */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* May return NULL if the channel is already being freed */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
8141 
/* HCI callback: an ACL or LE link finished connecting (or failed).
 *
 * On failure the whole l2cap_conn is torn down.  On success the conn is
 * created (if needed), listening fixed channels are given a chance to
 * instantiate a per-connection channel, and channel setup is kicked off
 * via l2cap_conn_ready().
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		/* new_connection() may return NULL; nothing to add then */
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping our reference on pchan */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
8202 
8203 int l2cap_disconn_ind(struct hci_conn *hcon)
8204 {
8205 	struct l2cap_conn *conn = hcon->l2cap_data;
8206 
8207 	BT_DBG("hcon %p", hcon);
8208 
8209 	if (!conn)
8210 		return HCI_ERROR_REMOTE_USER_TERM;
8211 	return conn->disc_reason;
8212 }
8213 
8214 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8215 {
8216 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8217 		return;
8218 
8219 	BT_DBG("hcon %p reason %d", hcon, reason);
8220 
8221 	l2cap_conn_del(hcon, bt_to_errno(reason));
8222 }
8223 
8224 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8225 {
8226 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8227 		return;
8228 
8229 	if (encrypt == 0x00) {
8230 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
8231 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8232 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
8233 			   chan->sec_level == BT_SECURITY_FIPS)
8234 			l2cap_chan_close(chan, ECONNREFUSED);
8235 	} else {
8236 		if (chan->sec_level == BT_SECURITY_MEDIUM)
8237 			__clear_chan_timer(chan);
8238 	}
8239 }
8240 
/* HCI callback: the security (authentication/encryption) procedure for a
 * link completed with @status; @encrypt reflects the new encryption state.
 *
 * Walks every channel on the connection under conn->chan_lock and drives
 * each one's state machine: resuming connected channels, starting pending
 * outgoing connects, or answering held incoming connect requests.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP channels are not subject to link security handling */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Skip channels whose own security request is still pending */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connect was waiting on security */
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			/* Incoming BR/EDR connect held for security: send
			 * the deferred connection response now.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On success, follow up immediately with our
			 * configuration request if not sent already.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
8332 
/* Append fragment into frame respecting the maximum len of rx_skb.
 *
 * If no reassembly buffer exists yet, one of capacity @len is allocated
 * and conn->rx_len is initialised to @len (bytes still expected).  Then
 * up to min(@len, skb->len) bytes are copied from the head of @skb into
 * conn->rx_skb, consumed from @skb, and subtracted from conn->rx_len.
 *
 * Returns the number of bytes copied, or -ENOMEM on allocation failure.
 */
static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
			   u16 len)
{
	if (!conn->rx_skb) {
		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			return -ENOMEM;
		/* Init rx_len */
		conn->rx_len = len;
	}

	/* Copy as much as the rx_skb can hold */
	len = min_t(u16, len, skb->len);
	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
	skb_pull(skb, len);
	conn->rx_len -= len;

	return len;
}
8354 
/* Complete the 2-byte L2CAP length field from a continuation fragment and
 * size the reassembly buffer accordingly.
 *
 * If the current rx_skb is too small for the full frame, it is replaced:
 * conn->rx_skb is cleared so l2cap_recv_frag() allocates a buffer of the
 * exact expected length, the old partial data is copied over, and the old
 * skb is freed.  Returns bytes consumed or a negative error.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
8389 
8390 static void l2cap_recv_reset(struct l2cap_conn *conn)
8391 {
8392 	kfree_skb(conn->rx_skb);
8393 	conn->rx_skb = NULL;
8394 	conn->rx_len = 0;
8395 }
8396 
/* Entry point for ACL data from the HCI layer: reassemble L2CAP frames
 * from ACL fragments and dispatch complete frames to l2cap_recv_frame().
 *
 * Ownership: l2cap_recv_frame() takes ownership of the skb it is given;
 * on every other path (including after the fragment's bytes have been
 * copied into conn->rx_skb) control reaches the drop label, which frees
 * the source @skb.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start frame while reassembly is in progress means the
		 * previous frame was truncated - discard it.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_frag(conn, skb, conn->mtu) < 0)
				goto drop;
			return;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				return;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
}
8504 
/* HCI layer callbacks registered by l2cap_init() */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
8511 
8512 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8513 {
8514 	struct l2cap_chan *c;
8515 
8516 	read_lock(&chan_list_lock);
8517 
8518 	list_for_each_entry(c, &chan_list, global_l) {
8519 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8520 			   &c->src, c->src_type, &c->dst, c->dst_type,
8521 			   c->state, __le16_to_cpu(c->psm),
8522 			   c->scid, c->dcid, c->imtu, c->omtu,
8523 			   c->sec_level, c->mode);
8524 	}
8525 
8526 	read_unlock(&chan_list_lock);
8527 
8528 	return 0;
8529 }
8530 
/* Generates l2cap_debugfs_fops around l2cap_debugfs_show() */
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* debugfs entry created by l2cap_init(), removed by l2cap_exit() */
static struct dentry *l2cap_debugfs;
8534 
8535 int __init l2cap_init(void)
8536 {
8537 	int err;
8538 
8539 	err = l2cap_init_sockets();
8540 	if (err < 0)
8541 		return err;
8542 
8543 	hci_register_cb(&l2cap_cb);
8544 
8545 	if (IS_ERR_OR_NULL(bt_debugfs))
8546 		return 0;
8547 
8548 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8549 					    NULL, &l2cap_debugfs_fops);
8550 
8551 	return 0;
8552 }
8553 
/* Module exit: undo l2cap_init() in reverse order - remove the debugfs
 * entry, unregister the HCI callbacks, then tear down the socket layer.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
8560 
/* Runtime-tunable feature switches (also settable at load time) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
8566