1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43 
44 #define LE_FLOWCTL_MAX_CREDITS 65535
45 
46 bool disable_ertm;
47 
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49 
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
52 
53 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
54 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
55 
56 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
57 				       u8 code, u8 ident, u16 dlen, void *data);
58 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
59 			   void *data);
60 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
61 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
62 
63 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
64 		     struct sk_buff_head *skbs, u8 event);
65 
66 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
67 {
68 	if (link_type == LE_LINK) {
69 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
70 			return BDADDR_LE_PUBLIC;
71 		else
72 			return BDADDR_LE_RANDOM;
73 	}
74 
75 	return BDADDR_BREDR;
76 }
77 
78 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
79 {
80 	return bdaddr_type(hcon->type, hcon->src_type);
81 }
82 
83 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
84 {
85 	return bdaddr_type(hcon->type, hcon->dst_type);
86 }
87 
88 /* ---- L2CAP channels ---- */
89 
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
91 						   u16 cid)
92 {
93 	struct l2cap_chan *c;
94 
95 	list_for_each_entry(c, &conn->chan_l, list) {
96 		if (c->dcid == cid)
97 			return c;
98 	}
99 	return NULL;
100 }
101 
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
103 						   u16 cid)
104 {
105 	struct l2cap_chan *c;
106 
107 	list_for_each_entry(c, &conn->chan_l, list) {
108 		if (c->scid == cid)
109 			return c;
110 	}
111 	return NULL;
112 }
113 
114 /* Find channel with given SCID.
115  * Returns locked channel. */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
117 						 u16 cid)
118 {
119 	struct l2cap_chan *c;
120 
121 	mutex_lock(&conn->chan_lock);
122 	c = __l2cap_get_chan_by_scid(conn, cid);
123 	if (c)
124 		l2cap_chan_lock(c);
125 	mutex_unlock(&conn->chan_lock);
126 
127 	return c;
128 }
129 
130 /* Find channel with given DCID.
131  * Returns locked channel.
132  */
133 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
134 						 u16 cid)
135 {
136 	struct l2cap_chan *c;
137 
138 	mutex_lock(&conn->chan_lock);
139 	c = __l2cap_get_chan_by_dcid(conn, cid);
140 	if (c)
141 		l2cap_chan_lock(c);
142 	mutex_unlock(&conn->chan_lock);
143 
144 	return c;
145 }
146 
147 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
148 						    u8 ident)
149 {
150 	struct l2cap_chan *c;
151 
152 	list_for_each_entry(c, &conn->chan_l, list) {
153 		if (c->ident == ident)
154 			return c;
155 	}
156 	return NULL;
157 }
158 
159 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
160 						  u8 ident)
161 {
162 	struct l2cap_chan *c;
163 
164 	mutex_lock(&conn->chan_lock);
165 	c = __l2cap_get_chan_by_ident(conn, ident);
166 	if (c)
167 		l2cap_chan_lock(c);
168 	mutex_unlock(&conn->chan_lock);
169 
170 	return c;
171 }
172 
173 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
174 {
175 	struct l2cap_chan *c;
176 
177 	list_for_each_entry(c, &chan_list, global_l) {
178 		if (c->sport == psm && !bacmp(&c->src, src))
179 			return c;
180 	}
181 	return NULL;
182 }
183 
184 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
185 {
186 	int err;
187 
188 	write_lock(&chan_list_lock);
189 
190 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
191 		err = -EADDRINUSE;
192 		goto done;
193 	}
194 
195 	if (psm) {
196 		chan->psm = psm;
197 		chan->sport = psm;
198 		err = 0;
199 	} else {
200 		u16 p, start, end, incr;
201 
202 		if (chan->src_type == BDADDR_BREDR) {
203 			start = L2CAP_PSM_DYN_START;
204 			end = L2CAP_PSM_AUTO_END;
205 			incr = 2;
206 		} else {
207 			start = L2CAP_PSM_LE_DYN_START;
208 			end = L2CAP_PSM_LE_DYN_END;
209 			incr = 1;
210 		}
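		/* Informational note: BR/EDR dynamic PSMs must keep the least
		 * significant bit set (valid PSMs are odd), which is why the
		 * search above steps by 2; LE dynamic PSMs have no such
		 * restriction and are probed one by one.
		 */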
211 
212 		err = -EINVAL;
213 		for (p = start; p <= end; p += incr)
214 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
215 				chan->psm   = cpu_to_le16(p);
216 				chan->sport = cpu_to_le16(p);
217 				err = 0;
218 				break;
219 			}
220 	}
221 
222 done:
223 	write_unlock(&chan_list_lock);
224 	return err;
225 }
226 EXPORT_SYMBOL_GPL(l2cap_add_psm);
227 
228 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
229 {
230 	write_lock(&chan_list_lock);
231 
232 	/* Override the defaults (which are for conn-oriented) */
233 	chan->omtu = L2CAP_DEFAULT_MTU;
234 	chan->chan_type = L2CAP_CHAN_FIXED;
235 
236 	chan->scid = scid;
237 
238 	write_unlock(&chan_list_lock);
239 
240 	return 0;
241 }
242 
243 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
244 {
245 	u16 cid, dyn_end;
246 
247 	if (conn->hcon->type == LE_LINK)
248 		dyn_end = L2CAP_CID_LE_DYN_END;
249 	else
250 		dyn_end = L2CAP_CID_DYN_END;
251 
252 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
253 		if (!__l2cap_get_chan_by_scid(conn, cid))
254 			return cid;
255 	}
256 
257 	return 0;
258 }
259 
260 static void l2cap_state_change(struct l2cap_chan *chan, int state)
261 {
262 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
263 	       state_to_string(state));
264 
265 	chan->state = state;
266 	chan->ops->state_change(chan, state, 0);
267 }
268 
269 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
270 						int state, int err)
271 {
272 	chan->state = state;
273 	chan->ops->state_change(chan, chan->state, err);
274 }
275 
276 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
277 {
278 	chan->ops->state_change(chan, chan->state, err);
279 }
280 
281 static void __set_retrans_timer(struct l2cap_chan *chan)
282 {
283 	if (!delayed_work_pending(&chan->monitor_timer) &&
284 	    chan->retrans_timeout) {
285 		l2cap_set_timer(chan, &chan->retrans_timer,
286 				msecs_to_jiffies(chan->retrans_timeout));
287 	}
288 }
289 
290 static void __set_monitor_timer(struct l2cap_chan *chan)
291 {
292 	__clear_retrans_timer(chan);
293 	if (chan->monitor_timeout) {
294 		l2cap_set_timer(chan, &chan->monitor_timer,
295 				msecs_to_jiffies(chan->monitor_timeout));
296 	}
297 }
298 
299 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
300 					       u16 seq)
301 {
302 	struct sk_buff *skb;
303 
304 	skb_queue_walk(head, skb) {
305 		if (bt_cb(skb)->l2cap.txseq == seq)
306 			return skb;
307 	}
308 
309 	return NULL;
310 }
311 
312 /* ---- L2CAP sequence number lists ---- */
313 
314 /* For ERTM, ordered lists of sequence numbers must be tracked for
315  * SREJ requests that are received and for frames that are to be
316  * retransmitted. These seq_list functions implement a singly-linked
317  * list in an array, where membership in the list can also be checked
318  * in constant time. Items can also be added to the tail of the list
319  * and removed from the head in constant time, without further memory
320  * allocs or frees.
321  */
322 
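/* A minimal usage sketch (illustrative only, not part of the core code):
 * the list is sized for the transmit window, membership is a masked array
 * lookup, and entries come back out in the order they were appended.
 *
 *	struct l2cap_seq_list list;
 *	u16 seq;
 *
 *	if (l2cap_seq_list_init(&list, chan->tx_win))
 *		return -ENOMEM;
 *	l2cap_seq_list_append(&list, 5);
 *	l2cap_seq_list_append(&list, 9);
 *	if (l2cap_seq_list_contains(&list, 5))
 *		seq = l2cap_seq_list_pop(&list);	(pops 5 first, then 9)
 *	l2cap_seq_list_free(&list);
 */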
323 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
324 {
325 	size_t alloc_size, i;
326 
327 	/* Allocated size is a power of 2 to map sequence numbers
328 	 * (which may be up to 14 bits) into a smaller array that is
329 	 * sized for the negotiated ERTM transmit windows.
330 	 */
331 	alloc_size = roundup_pow_of_two(size);
332 
333 	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
334 	if (!seq_list->list)
335 		return -ENOMEM;
336 
337 	seq_list->mask = alloc_size - 1;
338 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
339 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
340 	for (i = 0; i < alloc_size; i++)
341 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
342 
343 	return 0;
344 }
345 
346 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
347 {
348 	kfree(seq_list->list);
349 }
350 
351 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
352 					   u16 seq)
353 {
354 	/* Constant-time check for list membership */
355 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
356 }
357 
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
359 {
360 	u16 seq = seq_list->head;
361 	u16 mask = seq_list->mask;
362 
363 	seq_list->head = seq_list->list[seq & mask];
364 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
365 
366 	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
367 		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
368 		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
369 	}
370 
371 	return seq;
372 }
373 
374 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
375 {
376 	u16 i;
377 
378 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
379 		return;
380 
381 	for (i = 0; i <= seq_list->mask; i++)
382 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
383 
384 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
385 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
386 }
387 
388 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
389 {
390 	u16 mask = seq_list->mask;
391 
392 	/* All appends happen in constant time */
393 
394 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
395 		return;
396 
397 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
398 		seq_list->head = seq;
399 	else
400 		seq_list->list[seq_list->tail & mask] = seq;
401 
402 	seq_list->tail = seq;
403 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
404 }
405 
406 static void l2cap_chan_timeout(struct work_struct *work)
407 {
408 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
409 					       chan_timer.work);
410 	struct l2cap_conn *conn = chan->conn;
411 	int reason;
412 
413 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
414 
415 	mutex_lock(&conn->chan_lock);
416 	l2cap_chan_lock(chan);
417 
418 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
419 		reason = ECONNREFUSED;
420 	else if (chan->state == BT_CONNECT &&
421 		 chan->sec_level != BT_SECURITY_SDP)
422 		reason = ECONNREFUSED;
423 	else
424 		reason = ETIMEDOUT;
425 
426 	l2cap_chan_close(chan, reason);
427 
428 	l2cap_chan_unlock(chan);
429 
430 	chan->ops->close(chan);
431 	mutex_unlock(&conn->chan_lock);
432 
433 	l2cap_chan_put(chan);
434 }
435 
436 struct l2cap_chan *l2cap_chan_create(void)
437 {
438 	struct l2cap_chan *chan;
439 
440 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
441 	if (!chan)
442 		return NULL;
443 
444 	mutex_init(&chan->lock);
445 
446 	/* Set default lock nesting level */
447 	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
448 
449 	write_lock(&chan_list_lock);
450 	list_add(&chan->global_l, &chan_list);
451 	write_unlock(&chan_list_lock);
452 
453 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
454 
455 	chan->state = BT_OPEN;
456 
457 	kref_init(&chan->kref);
458 
459 	/* This flag is cleared in l2cap_chan_ready() */
460 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
461 
462 	BT_DBG("chan %p", chan);
463 
464 	return chan;
465 }
466 EXPORT_SYMBOL_GPL(l2cap_chan_create);
467 
468 static void l2cap_chan_destroy(struct kref *kref)
469 {
470 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
471 
472 	BT_DBG("chan %p", chan);
473 
474 	write_lock(&chan_list_lock);
475 	list_del(&chan->global_l);
476 	write_unlock(&chan_list_lock);
477 
478 	kfree(chan);
479 }
480 
481 void l2cap_chan_hold(struct l2cap_chan *c)
482 {
483 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
484 
485 	kref_get(&c->kref);
486 }
487 
488 void l2cap_chan_put(struct l2cap_chan *c)
489 {
490 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
491 
492 	kref_put(&c->kref, l2cap_chan_destroy);
493 }
494 EXPORT_SYMBOL_GPL(l2cap_chan_put);
495 
496 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
497 {
498 	chan->fcs  = L2CAP_FCS_CRC16;
499 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
500 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
501 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
502 	chan->remote_max_tx = chan->max_tx;
503 	chan->remote_tx_win = chan->tx_win;
504 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
505 	chan->sec_level = BT_SECURITY_LOW;
506 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
507 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
508 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
509 	chan->conf_state = 0;
510 
511 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
512 }
513 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
514 
515 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
516 {
517 	chan->sdu = NULL;
518 	chan->sdu_last_frag = NULL;
519 	chan->sdu_len = 0;
520 	chan->tx_credits = 0;
521 	chan->rx_credits = le_max_credits;
522 	chan->mps = min_t(u16, chan->imtu, le_default_mps);
523 
524 	skb_queue_head_init(&chan->tx_q);
525 }
526 
527 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
528 {
529 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
530 	       __le16_to_cpu(chan->psm), chan->dcid);
531 
532 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
533 
534 	chan->conn = conn;
535 
536 	switch (chan->chan_type) {
537 	case L2CAP_CHAN_CONN_ORIENTED:
538 		/* Alloc CID for connection-oriented socket */
539 		chan->scid = l2cap_alloc_cid(conn);
540 		if (conn->hcon->type == ACL_LINK)
541 			chan->omtu = L2CAP_DEFAULT_MTU;
542 		break;
543 
544 	case L2CAP_CHAN_CONN_LESS:
545 		/* Connectionless socket */
546 		chan->scid = L2CAP_CID_CONN_LESS;
547 		chan->dcid = L2CAP_CID_CONN_LESS;
548 		chan->omtu = L2CAP_DEFAULT_MTU;
549 		break;
550 
551 	case L2CAP_CHAN_FIXED:
552 		/* Caller will set CID and CID specific MTU values */
553 		break;
554 
555 	default:
556 		/* Raw socket can send/recv signalling messages only */
557 		chan->scid = L2CAP_CID_SIGNALING;
558 		chan->dcid = L2CAP_CID_SIGNALING;
559 		chan->omtu = L2CAP_DEFAULT_MTU;
560 	}
561 
562 	chan->local_id		= L2CAP_BESTEFFORT_ID;
563 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
564 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
565 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
566 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
567 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
568 
569 	l2cap_chan_hold(chan);
570 
571 	/* Only keep a reference for fixed channels if they requested it */
572 	if (chan->chan_type != L2CAP_CHAN_FIXED ||
573 	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
574 		hci_conn_hold(conn->hcon);
575 
576 	list_add(&chan->list, &conn->chan_l);
577 }
578 
579 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
580 {
581 	mutex_lock(&conn->chan_lock);
582 	__l2cap_chan_add(conn, chan);
583 	mutex_unlock(&conn->chan_lock);
584 }
585 
586 void l2cap_chan_del(struct l2cap_chan *chan, int err)
587 {
588 	struct l2cap_conn *conn = chan->conn;
589 
590 	__clear_chan_timer(chan);
591 
592 	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
593 	       state_to_string(chan->state));
594 
595 	chan->ops->teardown(chan, err);
596 
597 	if (conn) {
598 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
599 		/* Delete from channel list */
600 		list_del(&chan->list);
601 
602 		l2cap_chan_put(chan);
603 
604 		chan->conn = NULL;
605 
606 		/* Reference was only held for non-fixed channels or
607 		 * fixed channels that explicitly requested it using the
608 		 * FLAG_HOLD_HCI_CONN flag.
609 		 */
610 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
611 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
612 			hci_conn_drop(conn->hcon);
613 
614 		if (mgr && mgr->bredr_chan == chan)
615 			mgr->bredr_chan = NULL;
616 	}
617 
618 	if (chan->hs_hchan) {
619 		struct hci_chan *hs_hchan = chan->hs_hchan;
620 
621 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
622 		amp_disconnect_logical_link(hs_hchan);
623 	}
624 
625 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
626 		return;
627 
628 	switch (chan->mode) {
629 	case L2CAP_MODE_BASIC:
630 		break;
631 
632 	case L2CAP_MODE_LE_FLOWCTL:
633 		skb_queue_purge(&chan->tx_q);
634 		break;
635 
636 	case L2CAP_MODE_ERTM:
637 		__clear_retrans_timer(chan);
638 		__clear_monitor_timer(chan);
639 		__clear_ack_timer(chan);
640 
641 		skb_queue_purge(&chan->srej_q);
642 
643 		l2cap_seq_list_free(&chan->srej_list);
644 		l2cap_seq_list_free(&chan->retrans_list);
645 
646 		/* fall through */
647 
648 	case L2CAP_MODE_STREAMING:
649 		skb_queue_purge(&chan->tx_q);
650 		break;
651 	}
652 
653 	return;
654 }
655 EXPORT_SYMBOL_GPL(l2cap_chan_del);
656 
657 static void l2cap_conn_update_id_addr(struct work_struct *work)
658 {
659 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
660 					       id_addr_update_work);
661 	struct hci_conn *hcon = conn->hcon;
662 	struct l2cap_chan *chan;
663 
664 	mutex_lock(&conn->chan_lock);
665 
666 	list_for_each_entry(chan, &conn->chan_l, list) {
667 		l2cap_chan_lock(chan);
668 		bacpy(&chan->dst, &hcon->dst);
669 		chan->dst_type = bdaddr_dst_type(hcon);
670 		l2cap_chan_unlock(chan);
671 	}
672 
673 	mutex_unlock(&conn->chan_lock);
674 }
675 
676 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
677 {
678 	struct l2cap_conn *conn = chan->conn;
679 	struct l2cap_le_conn_rsp rsp;
680 	u16 result;
681 
682 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
683 		result = L2CAP_CR_AUTHORIZATION;
684 	else
685 		result = L2CAP_CR_BAD_PSM;
686 
687 	l2cap_state_change(chan, BT_DISCONN);
688 
689 	rsp.dcid    = cpu_to_le16(chan->scid);
690 	rsp.mtu     = cpu_to_le16(chan->imtu);
691 	rsp.mps     = cpu_to_le16(chan->mps);
692 	rsp.credits = cpu_to_le16(chan->rx_credits);
693 	rsp.result  = cpu_to_le16(result);
694 
695 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
696 		       &rsp);
697 }
698 
699 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
700 {
701 	struct l2cap_conn *conn = chan->conn;
702 	struct l2cap_conn_rsp rsp;
703 	u16 result;
704 
705 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
706 		result = L2CAP_CR_SEC_BLOCK;
707 	else
708 		result = L2CAP_CR_BAD_PSM;
709 
710 	l2cap_state_change(chan, BT_DISCONN);
711 
712 	rsp.scid   = cpu_to_le16(chan->dcid);
713 	rsp.dcid   = cpu_to_le16(chan->scid);
714 	rsp.result = cpu_to_le16(result);
715 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
716 
717 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
718 }
719 
720 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
721 {
722 	struct l2cap_conn *conn = chan->conn;
723 
724 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
725 
726 	switch (chan->state) {
727 	case BT_LISTEN:
728 		chan->ops->teardown(chan, 0);
729 		break;
730 
731 	case BT_CONNECTED:
732 	case BT_CONFIG:
733 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
734 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
735 			l2cap_send_disconn_req(chan, reason);
736 		} else
737 			l2cap_chan_del(chan, reason);
738 		break;
739 
740 	case BT_CONNECT2:
741 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
742 			if (conn->hcon->type == ACL_LINK)
743 				l2cap_chan_connect_reject(chan);
744 			else if (conn->hcon->type == LE_LINK)
745 				l2cap_chan_le_connect_reject(chan);
746 		}
747 
748 		l2cap_chan_del(chan, reason);
749 		break;
750 
751 	case BT_CONNECT:
752 	case BT_DISCONN:
753 		l2cap_chan_del(chan, reason);
754 		break;
755 
756 	default:
757 		chan->ops->teardown(chan, 0);
758 		break;
759 	}
760 }
761 EXPORT_SYMBOL(l2cap_chan_close);
762 
763 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
764 {
765 	switch (chan->chan_type) {
766 	case L2CAP_CHAN_RAW:
767 		switch (chan->sec_level) {
768 		case BT_SECURITY_HIGH:
769 		case BT_SECURITY_FIPS:
770 			return HCI_AT_DEDICATED_BONDING_MITM;
771 		case BT_SECURITY_MEDIUM:
772 			return HCI_AT_DEDICATED_BONDING;
773 		default:
774 			return HCI_AT_NO_BONDING;
775 		}
776 		break;
777 	case L2CAP_CHAN_CONN_LESS:
778 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
779 			if (chan->sec_level == BT_SECURITY_LOW)
780 				chan->sec_level = BT_SECURITY_SDP;
781 		}
782 		if (chan->sec_level == BT_SECURITY_HIGH ||
783 		    chan->sec_level == BT_SECURITY_FIPS)
784 			return HCI_AT_NO_BONDING_MITM;
785 		else
786 			return HCI_AT_NO_BONDING;
787 		break;
788 	case L2CAP_CHAN_CONN_ORIENTED:
789 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
790 			if (chan->sec_level == BT_SECURITY_LOW)
791 				chan->sec_level = BT_SECURITY_SDP;
792 
793 			if (chan->sec_level == BT_SECURITY_HIGH ||
794 			    chan->sec_level == BT_SECURITY_FIPS)
795 				return HCI_AT_NO_BONDING_MITM;
796 			else
797 				return HCI_AT_NO_BONDING;
798 		}
799 		/* fall through */
800 	default:
801 		switch (chan->sec_level) {
802 		case BT_SECURITY_HIGH:
803 		case BT_SECURITY_FIPS:
804 			return HCI_AT_GENERAL_BONDING_MITM;
805 		case BT_SECURITY_MEDIUM:
806 			return HCI_AT_GENERAL_BONDING;
807 		default:
808 			return HCI_AT_NO_BONDING;
809 		}
810 		break;
811 	}
812 }
813 
814 /* Service level security */
815 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
816 {
817 	struct l2cap_conn *conn = chan->conn;
818 	__u8 auth_type;
819 
820 	if (conn->hcon->type == LE_LINK)
821 		return smp_conn_security(conn->hcon, chan->sec_level);
822 
823 	auth_type = l2cap_get_auth_type(chan);
824 
825 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
826 				 initiator);
827 }
828 
829 static u8 l2cap_get_ident(struct l2cap_conn *conn)
830 {
831 	u8 id;
832 
833 	/* Get next available identifier.
834 	 *    1 - 128 are used by the kernel.
835 	 *  129 - 199 are reserved.
836 	 *  200 - 254 are used by utilities like l2ping, etc.
837 	 */
838 
839 	mutex_lock(&conn->ident_lock);
840 
841 	if (++conn->tx_ident > 128)
842 		conn->tx_ident = 1;
843 
844 	id = conn->tx_ident;
845 
846 	mutex_unlock(&conn->ident_lock);
847 
848 	return id;
849 }
850 
851 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
852 			   void *data)
853 {
854 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
855 	u8 flags;
856 
857 	BT_DBG("code 0x%2.2x", code);
858 
859 	if (!skb)
860 		return;
861 
862 	/* Use NO_FLUSH if supported or we have an LE link (which does
863 	 * not support auto-flushing packets) */
864 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
865 	    conn->hcon->type == LE_LINK)
866 		flags = ACL_START_NO_FLUSH;
867 	else
868 		flags = ACL_START;
869 
870 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
871 	skb->priority = HCI_PRIO_MAX;
872 
873 	hci_send_acl(conn->hchan, skb, flags);
874 }
875 
876 static bool __chan_is_moving(struct l2cap_chan *chan)
877 {
878 	return chan->move_state != L2CAP_MOVE_STABLE &&
879 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
880 }
881 
882 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
883 {
884 	struct hci_conn *hcon = chan->conn->hcon;
885 	u16 flags;
886 
887 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
888 	       skb->priority);
889 
890 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
891 		if (chan->hs_hchan)
892 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
893 		else
894 			kfree_skb(skb);
895 
896 		return;
897 	}
898 
899 	/* Use NO_FLUSH for LE links (where this is the only option) or
900 	 * if the BR/EDR link supports it and flushing has not been
901 	 * explicitly requested (through FLAG_FLUSHABLE).
902 	 */
903 	if (hcon->type == LE_LINK ||
904 	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
905 	     lmp_no_flush_capable(hcon->hdev)))
906 		flags = ACL_START_NO_FLUSH;
907 	else
908 		flags = ACL_START;
909 
910 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
911 	hci_send_acl(chan->conn->hchan, skb, flags);
912 }
913 
914 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
915 {
916 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
917 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
918 
919 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
920 		/* S-Frame */
921 		control->sframe = 1;
922 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
923 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
924 
925 		control->sar = 0;
926 		control->txseq = 0;
927 	} else {
928 		/* I-Frame */
929 		control->sframe = 0;
930 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
931 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
932 
933 		control->poll = 0;
934 		control->super = 0;
935 	}
936 }
937 
938 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
939 {
940 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
941 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
942 
943 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
944 		/* S-Frame */
945 		control->sframe = 1;
946 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
947 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
948 
949 		control->sar = 0;
950 		control->txseq = 0;
951 	} else {
952 		/* I-Frame */
953 		control->sframe = 0;
954 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
955 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
956 
957 		control->poll = 0;
958 		control->super = 0;
959 	}
960 }
961 
962 static inline void __unpack_control(struct l2cap_chan *chan,
963 				    struct sk_buff *skb)
964 {
965 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
966 		__unpack_extended_control(get_unaligned_le32(skb->data),
967 					  &bt_cb(skb)->l2cap);
968 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
969 	} else {
970 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
971 					  &bt_cb(skb)->l2cap);
972 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
973 	}
974 }
975 
976 static u32 __pack_extended_control(struct l2cap_ctrl *control)
977 {
978 	u32 packed;
979 
980 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
981 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
982 
983 	if (control->sframe) {
984 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
985 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
986 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
987 	} else {
988 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
989 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
990 	}
991 
992 	return packed;
993 }
994 
995 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
996 {
997 	u16 packed;
998 
999 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1000 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1001 
1002 	if (control->sframe) {
1003 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1004 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1005 		packed |= L2CAP_CTRL_FRAME_TYPE;
1006 	} else {
1007 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1008 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1009 	}
1010 
1011 	return packed;
1012 }
1013 
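/* Worked example (for illustration): with the shift and mask definitions
 * from l2cap.h, an RR S-frame acknowledging up to ReqSeq = 5 with the
 * Final bit set packs to
 *
 *	(5 << L2CAP_CTRL_REQSEQ_SHIFT) | (1 << L2CAP_CTRL_FINAL_SHIFT) |
 *		L2CAP_CTRL_FRAME_TYPE = 0x0581
 *
 * while an unsegmented I-frame with TxSeq = 3 and ReqSeq = 5 packs to
 *
 *	(5 << L2CAP_CTRL_REQSEQ_SHIFT) | (3 << L2CAP_CTRL_TXSEQ_SHIFT) = 0x0506
 *
 * __unpack_enhanced_control() recovers the same fields from these values.
 */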
1014 static inline void __pack_control(struct l2cap_chan *chan,
1015 				  struct l2cap_ctrl *control,
1016 				  struct sk_buff *skb)
1017 {
1018 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1019 		put_unaligned_le32(__pack_extended_control(control),
1020 				   skb->data + L2CAP_HDR_SIZE);
1021 	} else {
1022 		put_unaligned_le16(__pack_enhanced_control(control),
1023 				   skb->data + L2CAP_HDR_SIZE);
1024 	}
1025 }
1026 
1027 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1028 {
1029 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1030 		return L2CAP_EXT_HDR_SIZE;
1031 	else
1032 		return L2CAP_ENH_HDR_SIZE;
1033 }
1034 
1035 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1036 					       u32 control)
1037 {
1038 	struct sk_buff *skb;
1039 	struct l2cap_hdr *lh;
1040 	int hlen = __ertm_hdr_size(chan);
1041 
1042 	if (chan->fcs == L2CAP_FCS_CRC16)
1043 		hlen += L2CAP_FCS_SIZE;
1044 
1045 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
1046 
1047 	if (!skb)
1048 		return ERR_PTR(-ENOMEM);
1049 
1050 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1051 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1052 	lh->cid = cpu_to_le16(chan->dcid);
1053 
1054 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1055 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1056 	else
1057 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1058 
1059 	if (chan->fcs == L2CAP_FCS_CRC16) {
1060 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1061 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1062 	}
1063 
1064 	skb->priority = HCI_PRIO_MAX;
1065 	return skb;
1066 }
1067 
1068 static void l2cap_send_sframe(struct l2cap_chan *chan,
1069 			      struct l2cap_ctrl *control)
1070 {
1071 	struct sk_buff *skb;
1072 	u32 control_field;
1073 
1074 	BT_DBG("chan %p, control %p", chan, control);
1075 
1076 	if (!control->sframe)
1077 		return;
1078 
1079 	if (__chan_is_moving(chan))
1080 		return;
1081 
1082 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1083 	    !control->poll)
1084 		control->final = 1;
1085 
1086 	if (control->super == L2CAP_SUPER_RR)
1087 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
1088 	else if (control->super == L2CAP_SUPER_RNR)
1089 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1090 
1091 	if (control->super != L2CAP_SUPER_SREJ) {
1092 		chan->last_acked_seq = control->reqseq;
1093 		__clear_ack_timer(chan);
1094 	}
1095 
1096 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1097 	       control->final, control->poll, control->super);
1098 
1099 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1100 		control_field = __pack_extended_control(control);
1101 	else
1102 		control_field = __pack_enhanced_control(control);
1103 
1104 	skb = l2cap_create_sframe_pdu(chan, control_field);
1105 	if (!IS_ERR(skb))
1106 		l2cap_do_send(chan, skb);
1107 }
1108 
1109 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1110 {
1111 	struct l2cap_ctrl control;
1112 
1113 	BT_DBG("chan %p, poll %d", chan, poll);
1114 
1115 	memset(&control, 0, sizeof(control));
1116 	control.sframe = 1;
1117 	control.poll = poll;
1118 
1119 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1120 		control.super = L2CAP_SUPER_RNR;
1121 	else
1122 		control.super = L2CAP_SUPER_RR;
1123 
1124 	control.reqseq = chan->buffer_seq;
1125 	l2cap_send_sframe(chan, &control);
1126 }
1127 
1128 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1129 {
1130 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1131 		return true;
1132 
1133 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1134 }
1135 
1136 static bool __amp_capable(struct l2cap_chan *chan)
1137 {
1138 	struct l2cap_conn *conn = chan->conn;
1139 	struct hci_dev *hdev;
1140 	bool amp_available = false;
1141 
1142 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1143 		return false;
1144 
1145 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1146 		return false;
1147 
1148 	read_lock(&hci_dev_list_lock);
1149 	list_for_each_entry(hdev, &hci_dev_list, list) {
1150 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1151 		    test_bit(HCI_UP, &hdev->flags)) {
1152 			amp_available = true;
1153 			break;
1154 		}
1155 	}
1156 	read_unlock(&hci_dev_list_lock);
1157 
1158 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1159 		return amp_available;
1160 
1161 	return false;
1162 }
1163 
1164 static bool l2cap_check_efs(struct l2cap_chan *chan)
1165 {
1166 	/* Check EFS parameters */
1167 	return true;
1168 }
1169 
1170 void l2cap_send_conn_req(struct l2cap_chan *chan)
1171 {
1172 	struct l2cap_conn *conn = chan->conn;
1173 	struct l2cap_conn_req req;
1174 
1175 	req.scid = cpu_to_le16(chan->scid);
1176 	req.psm  = chan->psm;
1177 
1178 	chan->ident = l2cap_get_ident(conn);
1179 
1180 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1181 
1182 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1183 }
1184 
1185 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1186 {
1187 	struct l2cap_create_chan_req req;
1188 	req.scid = cpu_to_le16(chan->scid);
1189 	req.psm  = chan->psm;
1190 	req.amp_id = amp_id;
1191 
1192 	chan->ident = l2cap_get_ident(chan->conn);
1193 
1194 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1195 		       sizeof(req), &req);
1196 }
1197 
1198 static void l2cap_move_setup(struct l2cap_chan *chan)
1199 {
1200 	struct sk_buff *skb;
1201 
1202 	BT_DBG("chan %p", chan);
1203 
1204 	if (chan->mode != L2CAP_MODE_ERTM)
1205 		return;
1206 
1207 	__clear_retrans_timer(chan);
1208 	__clear_monitor_timer(chan);
1209 	__clear_ack_timer(chan);
1210 
1211 	chan->retry_count = 0;
1212 	skb_queue_walk(&chan->tx_q, skb) {
1213 		if (bt_cb(skb)->l2cap.retries)
1214 			bt_cb(skb)->l2cap.retries = 1;
1215 		else
1216 			break;
1217 	}
1218 
1219 	chan->expected_tx_seq = chan->buffer_seq;
1220 
1221 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1222 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1223 	l2cap_seq_list_clear(&chan->retrans_list);
1224 	l2cap_seq_list_clear(&chan->srej_list);
1225 	skb_queue_purge(&chan->srej_q);
1226 
1227 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1228 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1229 
1230 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1231 }
1232 
1233 static void l2cap_move_done(struct l2cap_chan *chan)
1234 {
1235 	u8 move_role = chan->move_role;
1236 	BT_DBG("chan %p", chan);
1237 
1238 	chan->move_state = L2CAP_MOVE_STABLE;
1239 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1240 
1241 	if (chan->mode != L2CAP_MODE_ERTM)
1242 		return;
1243 
1244 	switch (move_role) {
1245 	case L2CAP_MOVE_ROLE_INITIATOR:
1246 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1247 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1248 		break;
1249 	case L2CAP_MOVE_ROLE_RESPONDER:
1250 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1251 		break;
1252 	}
1253 }
1254 
1255 static void l2cap_chan_ready(struct l2cap_chan *chan)
1256 {
1257 	/* The channel may have already been flagged as connected in
1258 	 * case of receiving data before the L2CAP info req/rsp
1259 	 * procedure is complete.
1260 	 */
1261 	if (chan->state == BT_CONNECTED)
1262 		return;
1263 
1264 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1265 	chan->conf_state = 0;
1266 	__clear_chan_timer(chan);
1267 
1268 	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1269 		chan->ops->suspend(chan);
1270 
1271 	chan->state = BT_CONNECTED;
1272 
1273 	chan->ops->ready(chan);
1274 }
1275 
1276 static void l2cap_le_connect(struct l2cap_chan *chan)
1277 {
1278 	struct l2cap_conn *conn = chan->conn;
1279 	struct l2cap_le_conn_req req;
1280 
1281 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1282 		return;
1283 
1284 	req.psm     = chan->psm;
1285 	req.scid    = cpu_to_le16(chan->scid);
1286 	req.mtu     = cpu_to_le16(chan->imtu);
1287 	req.mps     = cpu_to_le16(chan->mps);
1288 	req.credits = cpu_to_le16(chan->rx_credits);
1289 
1290 	chan->ident = l2cap_get_ident(conn);
1291 
1292 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1293 		       sizeof(req), &req);
1294 }
1295 
1296 static void l2cap_le_start(struct l2cap_chan *chan)
1297 {
1298 	struct l2cap_conn *conn = chan->conn;
1299 
1300 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1301 		return;
1302 
1303 	if (!chan->psm) {
1304 		l2cap_chan_ready(chan);
1305 		return;
1306 	}
1307 
1308 	if (chan->state == BT_CONNECT)
1309 		l2cap_le_connect(chan);
1310 }
1311 
1312 static void l2cap_start_connection(struct l2cap_chan *chan)
1313 {
1314 	if (__amp_capable(chan)) {
1315 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1316 		a2mp_discover_amp(chan);
1317 	} else if (chan->conn->hcon->type == LE_LINK) {
1318 		l2cap_le_start(chan);
1319 	} else {
1320 		l2cap_send_conn_req(chan);
1321 	}
1322 }
1323 
1324 static void l2cap_request_info(struct l2cap_conn *conn)
1325 {
1326 	struct l2cap_info_req req;
1327 
1328 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1329 		return;
1330 
1331 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1332 
1333 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1334 	conn->info_ident = l2cap_get_ident(conn);
1335 
1336 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1337 
1338 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1339 		       sizeof(req), &req);
1340 }
1341 
1342 static void l2cap_do_start(struct l2cap_chan *chan)
1343 {
1344 	struct l2cap_conn *conn = chan->conn;
1345 
1346 	if (conn->hcon->type == LE_LINK) {
1347 		l2cap_le_start(chan);
1348 		return;
1349 	}
1350 
1351 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1352 		l2cap_request_info(conn);
1353 		return;
1354 	}
1355 
1356 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1357 		return;
1358 
1359 	if (l2cap_chan_check_security(chan, true) &&
1360 	    __l2cap_no_conn_pending(chan))
1361 		l2cap_start_connection(chan);
1362 }
1363 
1364 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1365 {
1366 	u32 local_feat_mask = l2cap_feat_mask;
1367 	if (!disable_ertm)
1368 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1369 
1370 	switch (mode) {
1371 	case L2CAP_MODE_ERTM:
1372 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1373 	case L2CAP_MODE_STREAMING:
1374 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1375 	default:
1376 		return 0x00;
1377 	}
1378 }
1379 
1380 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1381 {
1382 	struct l2cap_conn *conn = chan->conn;
1383 	struct l2cap_disconn_req req;
1384 
1385 	if (!conn)
1386 		return;
1387 
1388 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1389 		__clear_retrans_timer(chan);
1390 		__clear_monitor_timer(chan);
1391 		__clear_ack_timer(chan);
1392 	}
1393 
1394 	if (chan->scid == L2CAP_CID_A2MP) {
1395 		l2cap_state_change(chan, BT_DISCONN);
1396 		return;
1397 	}
1398 
1399 	req.dcid = cpu_to_le16(chan->dcid);
1400 	req.scid = cpu_to_le16(chan->scid);
1401 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1402 		       sizeof(req), &req);
1403 
1404 	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1405 }
1406 
1407 /* ---- L2CAP connections ---- */
1408 static void l2cap_conn_start(struct l2cap_conn *conn)
1409 {
1410 	struct l2cap_chan *chan, *tmp;
1411 
1412 	BT_DBG("conn %p", conn);
1413 
1414 	mutex_lock(&conn->chan_lock);
1415 
1416 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1417 		l2cap_chan_lock(chan);
1418 
1419 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1420 			l2cap_chan_ready(chan);
1421 			l2cap_chan_unlock(chan);
1422 			continue;
1423 		}
1424 
1425 		if (chan->state == BT_CONNECT) {
1426 			if (!l2cap_chan_check_security(chan, true) ||
1427 			    !__l2cap_no_conn_pending(chan)) {
1428 				l2cap_chan_unlock(chan);
1429 				continue;
1430 			}
1431 
1432 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1433 			    && test_bit(CONF_STATE2_DEVICE,
1434 					&chan->conf_state)) {
1435 				l2cap_chan_close(chan, ECONNRESET);
1436 				l2cap_chan_unlock(chan);
1437 				continue;
1438 			}
1439 
1440 			l2cap_start_connection(chan);
1441 
1442 		} else if (chan->state == BT_CONNECT2) {
1443 			struct l2cap_conn_rsp rsp;
1444 			char buf[128];
1445 			rsp.scid = cpu_to_le16(chan->dcid);
1446 			rsp.dcid = cpu_to_le16(chan->scid);
1447 
1448 			if (l2cap_chan_check_security(chan, false)) {
1449 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1450 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1451 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1452 					chan->ops->defer(chan);
1453 
1454 				} else {
1455 					l2cap_state_change(chan, BT_CONFIG);
1456 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1457 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1458 				}
1459 			} else {
1460 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1461 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1462 			}
1463 
1464 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1465 				       sizeof(rsp), &rsp);
1466 
1467 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1468 			    rsp.result != L2CAP_CR_SUCCESS) {
1469 				l2cap_chan_unlock(chan);
1470 				continue;
1471 			}
1472 
1473 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1474 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1475 				       l2cap_build_conf_req(chan, buf), buf);
1476 			chan->num_conf_req++;
1477 		}
1478 
1479 		l2cap_chan_unlock(chan);
1480 	}
1481 
1482 	mutex_unlock(&conn->chan_lock);
1483 }
1484 
1485 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1486 {
1487 	struct hci_conn *hcon = conn->hcon;
1488 	struct hci_dev *hdev = hcon->hdev;
1489 
1490 	BT_DBG("%s conn %p", hdev->name, conn);
1491 
1492 	/* For outgoing pairing which doesn't necessarily have an
1493 	 * associated socket (e.g. mgmt_pair_device).
1494 	 */
1495 	if (hcon->out)
1496 		smp_conn_security(hcon, hcon->pending_sec_level);
1497 
1498 	/* For LE slave connections, make sure the connection interval
1499 	 * is in the range of the minimum and maximum interval that has
1500 	 * been configured for this connection. If not, then trigger
1501 	 * the connection update procedure.
1502 	 */
1503 	if (hcon->role == HCI_ROLE_SLAVE &&
1504 	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1505 	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1506 		struct l2cap_conn_param_update_req req;
1507 
1508 		req.min = cpu_to_le16(hcon->le_conn_min_interval);
1509 		req.max = cpu_to_le16(hcon->le_conn_max_interval);
1510 		req.latency = cpu_to_le16(hcon->le_conn_latency);
1511 		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1512 
1513 		l2cap_send_cmd(conn, l2cap_get_ident(conn),
1514 			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1515 	}
1516 }
1517 
1518 static void l2cap_conn_ready(struct l2cap_conn *conn)
1519 {
1520 	struct l2cap_chan *chan;
1521 	struct hci_conn *hcon = conn->hcon;
1522 
1523 	BT_DBG("conn %p", conn);
1524 
1525 	if (hcon->type == ACL_LINK)
1526 		l2cap_request_info(conn);
1527 
1528 	mutex_lock(&conn->chan_lock);
1529 
1530 	list_for_each_entry(chan, &conn->chan_l, list) {
1531 
1532 		l2cap_chan_lock(chan);
1533 
1534 		if (chan->scid == L2CAP_CID_A2MP) {
1535 			l2cap_chan_unlock(chan);
1536 			continue;
1537 		}
1538 
1539 		if (hcon->type == LE_LINK) {
1540 			l2cap_le_start(chan);
1541 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1542 			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1543 				l2cap_chan_ready(chan);
1544 		} else if (chan->state == BT_CONNECT) {
1545 			l2cap_do_start(chan);
1546 		}
1547 
1548 		l2cap_chan_unlock(chan);
1549 	}
1550 
1551 	mutex_unlock(&conn->chan_lock);
1552 
1553 	if (hcon->type == LE_LINK)
1554 		l2cap_le_conn_ready(conn);
1555 
1556 	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1557 }
1558 
1559 /* Notify sockets that we cannot guarantee reliability anymore */
1560 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1561 {
1562 	struct l2cap_chan *chan;
1563 
1564 	BT_DBG("conn %p", conn);
1565 
1566 	mutex_lock(&conn->chan_lock);
1567 
1568 	list_for_each_entry(chan, &conn->chan_l, list) {
1569 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1570 			l2cap_chan_set_err(chan, err);
1571 	}
1572 
1573 	mutex_unlock(&conn->chan_lock);
1574 }
1575 
1576 static void l2cap_info_timeout(struct work_struct *work)
1577 {
1578 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1579 					       info_timer.work);
1580 
1581 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1582 	conn->info_ident = 0;
1583 
1584 	l2cap_conn_start(conn);
1585 }
1586 
1587 /*
1588  * l2cap_user
1589  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1590  * callback is called during registration. The ->remove callback is called
1591  * during unregistration.
1592  * An l2cap_user object is either unregistered explicitly or removed when the
1593  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1594  * l2cap->hchan, etc. are valid as long as the remove callback hasn't been called.
1595  * External modules must own a reference to the l2cap_conn object if they intend
1596  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1597  * any time if they don't.
1598  */
1599 
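/* Minimal usage sketch for an external module (hypothetical callbacks, shown
 * for illustration only).  The probe callback is invoked with the hci_dev
 * locked, and conn->hcon/conn->hchan remain valid until remove is called:
 *
 *	static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *	}
 *
 *	struct l2cap_user *user = kzalloc(sizeof(*user), GFP_KERNEL);
 *
 *	INIT_LIST_HEAD(&user->list);
 *	user->probe  = my_probe;
 *	user->remove = my_remove;
 *	err = l2cap_register_user(conn, user);
 *	...
 *	l2cap_unregister_user(conn, user);
 */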
1600 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1601 {
1602 	struct hci_dev *hdev = conn->hcon->hdev;
1603 	int ret;
1604 
1605 	/* We need to check whether l2cap_conn is registered. If it is not, we
1606 	 * must not register the l2cap_user. l2cap_conn_del() unregisters
1607 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1608 	 * relies on the parent hci_conn object to be locked. This itself relies
1609 	 * on the hci_dev object to be locked. So we must lock the hci device
1610 	 * here, too. */
1611 
1612 	hci_dev_lock(hdev);
1613 
1614 	if (!list_empty(&user->list)) {
1615 		ret = -EINVAL;
1616 		goto out_unlock;
1617 	}
1618 
1619 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1620 	if (!conn->hchan) {
1621 		ret = -ENODEV;
1622 		goto out_unlock;
1623 	}
1624 
1625 	ret = user->probe(conn, user);
1626 	if (ret)
1627 		goto out_unlock;
1628 
1629 	list_add(&user->list, &conn->users);
1630 	ret = 0;
1631 
1632 out_unlock:
1633 	hci_dev_unlock(hdev);
1634 	return ret;
1635 }
1636 EXPORT_SYMBOL(l2cap_register_user);
1637 
1638 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1639 {
1640 	struct hci_dev *hdev = conn->hcon->hdev;
1641 
1642 	hci_dev_lock(hdev);
1643 
1644 	if (list_empty(&user->list))
1645 		goto out_unlock;
1646 
1647 	list_del_init(&user->list);
1648 	user->remove(conn, user);
1649 
1650 out_unlock:
1651 	hci_dev_unlock(hdev);
1652 }
1653 EXPORT_SYMBOL(l2cap_unregister_user);
1654 
1655 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1656 {
1657 	struct l2cap_user *user;
1658 
1659 	while (!list_empty(&conn->users)) {
1660 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1661 		list_del_init(&user->list);
1662 		user->remove(conn, user);
1663 	}
1664 }
1665 
1666 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1667 {
1668 	struct l2cap_conn *conn = hcon->l2cap_data;
1669 	struct l2cap_chan *chan, *l;
1670 
1671 	if (!conn)
1672 		return;
1673 
1674 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1675 
1676 	kfree_skb(conn->rx_skb);
1677 
1678 	skb_queue_purge(&conn->pending_rx);
1679 
1680 	/* We cannot call flush_work(&conn->pending_rx_work) here since we
1681 	 * might block if we are running on a worker from the same workqueue
1682 	 * pending_rx_work is waiting on.
1683 	 */
1684 	if (work_pending(&conn->pending_rx_work))
1685 		cancel_work_sync(&conn->pending_rx_work);
1686 
1687 	if (work_pending(&conn->id_addr_update_work))
1688 		cancel_work_sync(&conn->id_addr_update_work);
1689 
1690 	l2cap_unregister_all_users(conn);
1691 
1692 	/* Force the connection to be immediately dropped */
1693 	hcon->disc_timeout = 0;
1694 
1695 	mutex_lock(&conn->chan_lock);
1696 
1697 	/* Kill channels */
1698 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1699 		l2cap_chan_hold(chan);
1700 		l2cap_chan_lock(chan);
1701 
1702 		l2cap_chan_del(chan, err);
1703 
1704 		l2cap_chan_unlock(chan);
1705 
1706 		chan->ops->close(chan);
1707 		l2cap_chan_put(chan);
1708 	}
1709 
1710 	mutex_unlock(&conn->chan_lock);
1711 
1712 	hci_chan_del(conn->hchan);
1713 
1714 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1715 		cancel_delayed_work_sync(&conn->info_timer);
1716 
1717 	hcon->l2cap_data = NULL;
1718 	conn->hchan = NULL;
1719 	l2cap_conn_put(conn);
1720 }
1721 
1722 static void l2cap_conn_free(struct kref *ref)
1723 {
1724 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1725 
1726 	hci_conn_put(conn->hcon);
1727 	kfree(conn);
1728 }
1729 
1730 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1731 {
1732 	kref_get(&conn->ref);
1733 	return conn;
1734 }
1735 EXPORT_SYMBOL(l2cap_conn_get);
1736 
1737 void l2cap_conn_put(struct l2cap_conn *conn)
1738 {
1739 	kref_put(&conn->ref, l2cap_conn_free);
1740 }
1741 EXPORT_SYMBOL(l2cap_conn_put);
1742 
1743 /* ---- Socket interface ---- */
1744 
1745 /* Find socket with psm and source / destination bdaddr.
1746  * Returns closest match.
1747  */
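/* Example (illustrative): a channel whose src and dst both match exactly is
 * returned immediately; otherwise any channel on the same PSM whose
 * non-matching address is BDADDR_ANY is remembered, and the last such
 * candidate is returned as the closest match once the list has been walked.
 */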
1748 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1749 						   bdaddr_t *src,
1750 						   bdaddr_t *dst,
1751 						   u8 link_type)
1752 {
1753 	struct l2cap_chan *c, *c1 = NULL;
1754 
1755 	read_lock(&chan_list_lock);
1756 
1757 	list_for_each_entry(c, &chan_list, global_l) {
1758 		if (state && c->state != state)
1759 			continue;
1760 
1761 		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1762 			continue;
1763 
1764 		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1765 			continue;
1766 
1767 		if (c->psm == psm) {
1768 			int src_match, dst_match;
1769 			int src_any, dst_any;
1770 
1771 			/* Exact match. */
1772 			src_match = !bacmp(&c->src, src);
1773 			dst_match = !bacmp(&c->dst, dst);
1774 			if (src_match && dst_match) {
1775 				l2cap_chan_hold(c);
1776 				read_unlock(&chan_list_lock);
1777 				return c;
1778 			}
1779 
1780 			/* Closest match */
1781 			src_any = !bacmp(&c->src, BDADDR_ANY);
1782 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
1783 			if ((src_match && dst_any) || (src_any && dst_match) ||
1784 			    (src_any && dst_any))
1785 				c1 = c;
1786 		}
1787 	}
1788 
1789 	if (c1)
1790 		l2cap_chan_hold(c1);
1791 
1792 	read_unlock(&chan_list_lock);
1793 
1794 	return c1;
1795 }
1796 
1797 static void l2cap_monitor_timeout(struct work_struct *work)
1798 {
1799 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1800 					       monitor_timer.work);
1801 
1802 	BT_DBG("chan %p", chan);
1803 
1804 	l2cap_chan_lock(chan);
1805 
1806 	if (!chan->conn) {
1807 		l2cap_chan_unlock(chan);
1808 		l2cap_chan_put(chan);
1809 		return;
1810 	}
1811 
1812 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1813 
1814 	l2cap_chan_unlock(chan);
1815 	l2cap_chan_put(chan);
1816 }
1817 
1818 static void l2cap_retrans_timeout(struct work_struct *work)
1819 {
1820 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1821 					       retrans_timer.work);
1822 
1823 	BT_DBG("chan %p", chan);
1824 
1825 	l2cap_chan_lock(chan);
1826 
1827 	if (!chan->conn) {
1828 		l2cap_chan_unlock(chan);
1829 		l2cap_chan_put(chan);
1830 		return;
1831 	}
1832 
1833 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1834 	l2cap_chan_unlock(chan);
1835 	l2cap_chan_put(chan);
1836 }
1837 
1838 static void l2cap_streaming_send(struct l2cap_chan *chan,
1839 				 struct sk_buff_head *skbs)
1840 {
1841 	struct sk_buff *skb;
1842 	struct l2cap_ctrl *control;
1843 
1844 	BT_DBG("chan %p, skbs %p", chan, skbs);
1845 
1846 	if (__chan_is_moving(chan))
1847 		return;
1848 
1849 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
1850 
1851 	while (!skb_queue_empty(&chan->tx_q)) {
1852 
1853 		skb = skb_dequeue(&chan->tx_q);
1854 
1855 		bt_cb(skb)->l2cap.retries = 1;
1856 		control = &bt_cb(skb)->l2cap;
1857 
1858 		control->reqseq = 0;
1859 		control->txseq = chan->next_tx_seq;
1860 
1861 		__pack_control(chan, control, skb);
1862 
1863 		if (chan->fcs == L2CAP_FCS_CRC16) {
1864 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1865 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1866 		}
1867 
1868 		l2cap_do_send(chan, skb);
1869 
1870 		BT_DBG("Sent txseq %u", control->txseq);
1871 
1872 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1873 		chan->frames_sent++;
1874 	}
1875 }
1876 
1877 static int l2cap_ertm_send(struct l2cap_chan *chan)
1878 {
1879 	struct sk_buff *skb, *tx_skb;
1880 	struct l2cap_ctrl *control;
1881 	int sent = 0;
1882 
1883 	BT_DBG("chan %p", chan);
1884 
1885 	if (chan->state != BT_CONNECTED)
1886 		return -ENOTCONN;
1887 
1888 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1889 		return 0;
1890 
1891 	if (__chan_is_moving(chan))
1892 		return 0;
1893 
1894 	while (chan->tx_send_head &&
1895 	       chan->unacked_frames < chan->remote_tx_win &&
1896 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
1897 
1898 		skb = chan->tx_send_head;
1899 
1900 		bt_cb(skb)->l2cap.retries = 1;
1901 		control = &bt_cb(skb)->l2cap;
1902 
1903 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1904 			control->final = 1;
1905 
1906 		control->reqseq = chan->buffer_seq;
1907 		chan->last_acked_seq = chan->buffer_seq;
1908 		control->txseq = chan->next_tx_seq;
1909 
1910 		__pack_control(chan, control, skb);
1911 
1912 		if (chan->fcs == L2CAP_FCS_CRC16) {
1913 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1914 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1915 		}
1916 
1917 		/* Clone after data has been modified. Data is assumed to be
1918 		 * read-only (for locking purposes) on cloned sk_buffs.
1919 		 */
1920 		tx_skb = skb_clone(skb, GFP_KERNEL);
1921 
1922 		if (!tx_skb)
1923 			break;
1924 
1925 		__set_retrans_timer(chan);
1926 
1927 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1928 		chan->unacked_frames++;
1929 		chan->frames_sent++;
1930 		sent++;
1931 
1932 		if (skb_queue_is_last(&chan->tx_q, skb))
1933 			chan->tx_send_head = NULL;
1934 		else
1935 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1936 
1937 		l2cap_do_send(chan, tx_skb);
1938 		BT_DBG("Sent txseq %u", control->txseq);
1939 	}
1940 
1941 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1942 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
1943 
1944 	return sent;
1945 }
1946 
1947 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1948 {
1949 	struct l2cap_ctrl control;
1950 	struct sk_buff *skb;
1951 	struct sk_buff *tx_skb;
1952 	u16 seq;
1953 
1954 	BT_DBG("chan %p", chan);
1955 
1956 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1957 		return;
1958 
1959 	if (__chan_is_moving(chan))
1960 		return;
1961 
1962 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1963 		seq = l2cap_seq_list_pop(&chan->retrans_list);
1964 
1965 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1966 		if (!skb) {
1967 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
1968 			       seq);
1969 			continue;
1970 		}
1971 
1972 		bt_cb(skb)->l2cap.retries++;
1973 		control = bt_cb(skb)->l2cap;
1974 
1975 		if (chan->max_tx != 0 &&
1976 		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
1977 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1978 			l2cap_send_disconn_req(chan, ECONNRESET);
1979 			l2cap_seq_list_clear(&chan->retrans_list);
1980 			break;
1981 		}
1982 
1983 		control.reqseq = chan->buffer_seq;
1984 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1985 			control.final = 1;
1986 		else
1987 			control.final = 0;
1988 
1989 		if (skb_cloned(skb)) {
1990 			/* Cloned sk_buffs are read-only, so we need a
1991 			 * writeable copy
1992 			 */
1993 			tx_skb = skb_copy(skb, GFP_KERNEL);
1994 		} else {
1995 			tx_skb = skb_clone(skb, GFP_KERNEL);
1996 		}
1997 
1998 		if (!tx_skb) {
1999 			l2cap_seq_list_clear(&chan->retrans_list);
2000 			break;
2001 		}
2002 
2003 		/* Update skb contents */
2004 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2005 			put_unaligned_le32(__pack_extended_control(&control),
2006 					   tx_skb->data + L2CAP_HDR_SIZE);
2007 		} else {
2008 			put_unaligned_le16(__pack_enhanced_control(&control),
2009 					   tx_skb->data + L2CAP_HDR_SIZE);
2010 		}
2011 
2012 		/* Update FCS */
2013 		if (chan->fcs == L2CAP_FCS_CRC16) {
2014 			u16 fcs = crc16(0, (u8 *) tx_skb->data,
2015 					tx_skb->len - L2CAP_FCS_SIZE);
2016 			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2017 						L2CAP_FCS_SIZE);
2018 		}
2019 
2020 		l2cap_do_send(chan, tx_skb);
2021 
2022 		BT_DBG("Resent txseq %d", control.txseq);
2023 
2024 		chan->last_acked_seq = chan->buffer_seq;
2025 	}
2026 }
2027 
2028 static void l2cap_retransmit(struct l2cap_chan *chan,
2029 			     struct l2cap_ctrl *control)
2030 {
2031 	BT_DBG("chan %p, control %p", chan, control);
2032 
2033 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2034 	l2cap_ertm_resend(chan);
2035 }
2036 
2037 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2038 				 struct l2cap_ctrl *control)
2039 {
2040 	struct sk_buff *skb;
2041 
2042 	BT_DBG("chan %p, control %p", chan, control);
2043 
2044 	if (control->poll)
2045 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2046 
2047 	l2cap_seq_list_clear(&chan->retrans_list);
2048 
2049 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2050 		return;
2051 
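	/* Queue every unacked frame from reqseq up to (but not including)
	 * tx_send_head for retransmission, then resend them in order.
	 */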
2052 	if (chan->unacked_frames) {
2053 		skb_queue_walk(&chan->tx_q, skb) {
2054 			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2055 			    skb == chan->tx_send_head)
2056 				break;
2057 		}
2058 
2059 		skb_queue_walk_from(&chan->tx_q, skb) {
2060 			if (skb == chan->tx_send_head)
2061 				break;
2062 
2063 			l2cap_seq_list_append(&chan->retrans_list,
2064 					      bt_cb(skb)->l2cap.txseq);
2065 		}
2066 
2067 		l2cap_ertm_resend(chan);
2068 	}
2069 }
2070 
2071 static void l2cap_send_ack(struct l2cap_chan *chan)
2072 {
2073 	struct l2cap_ctrl control;
2074 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2075 					 chan->last_acked_seq);
2076 	int threshold;
2077 
2078 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2079 	       chan, chan->last_acked_seq, chan->buffer_seq);
2080 
2081 	memset(&control, 0, sizeof(control));
2082 	control.sframe = 1;
2083 
2084 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2085 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2086 		__clear_ack_timer(chan);
2087 		control.super = L2CAP_SUPER_RNR;
2088 		control.reqseq = chan->buffer_seq;
2089 		l2cap_send_sframe(chan, &control);
2090 	} else {
2091 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2092 			l2cap_ertm_send(chan);
2093 			/* If any i-frames were sent, they included an ack */
2094 			if (chan->buffer_seq == chan->last_acked_seq)
2095 				frames_to_ack = 0;
2096 		}
2097 
2098 		/* Ack now if the window is three-quarters full.
2099 		 * Calculated without multiply or divide.
2100 		 */
2101 		threshold = chan->ack_win;
2102 		threshold += threshold << 1;
2103 		threshold >>= 2;
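		/* threshold = (ack_win + 2 * ack_win) >> 2, i.e. roughly
		 * three-quarters of the window (e.g. ack_win 63 -> 47).
		 */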
2104 
2105 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2106 		       threshold);
2107 
2108 		if (frames_to_ack >= threshold) {
2109 			__clear_ack_timer(chan);
2110 			control.super = L2CAP_SUPER_RR;
2111 			control.reqseq = chan->buffer_seq;
2112 			l2cap_send_sframe(chan, &control);
2113 			frames_to_ack = 0;
2114 		}
2115 
2116 		if (frames_to_ack)
2117 			__set_ack_timer(chan);
2118 	}
2119 }
2120 
2121 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2122 					 struct msghdr *msg, int len,
2123 					 int count, struct sk_buff *skb)
2124 {
2125 	struct l2cap_conn *conn = chan->conn;
2126 	struct sk_buff **frag;
2127 	int sent = 0;
2128 
2129 	if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
2130 		return -EFAULT;
2131 
2132 	sent += count;
2133 	len  -= count;
2134 
2135 	/* Continuation fragments (no L2CAP header) */
2136 	frag = &skb_shinfo(skb)->frag_list;
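	/* Remaining data is copied into HCI-MTU-sized skbs chained on the
	 * parent skb's frag_list; skb->len and skb->data_len are updated
	 * below so the parent accounts for the chained fragments.
	 */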
2137 	while (len) {
2138 		struct sk_buff *tmp;
2139 
2140 		count = min_t(unsigned int, conn->mtu, len);
2141 
2142 		tmp = chan->ops->alloc_skb(chan, 0, count,
2143 					   msg->msg_flags & MSG_DONTWAIT);
2144 		if (IS_ERR(tmp))
2145 			return PTR_ERR(tmp);
2146 
2147 		*frag = tmp;
2148 
2149 		if (copy_from_iter(skb_put(*frag, count), count,
2150 				   &msg->msg_iter) != count)
2151 			return -EFAULT;
2152 
2153 		sent += count;
2154 		len  -= count;
2155 
2156 		skb->len += (*frag)->len;
2157 		skb->data_len += (*frag)->len;
2158 
2159 		frag = &(*frag)->next;
2160 	}
2161 
2162 	return sent;
2163 }
2164 
2165 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2166 						 struct msghdr *msg, size_t len)
2167 {
2168 	struct l2cap_conn *conn = chan->conn;
2169 	struct sk_buff *skb;
2170 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2171 	struct l2cap_hdr *lh;
2172 
2173 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2174 	       __le16_to_cpu(chan->psm), len);
2175 
2176 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2177 
2178 	skb = chan->ops->alloc_skb(chan, hlen, count,
2179 				   msg->msg_flags & MSG_DONTWAIT);
2180 	if (IS_ERR(skb))
2181 		return skb;
2182 
2183 	/* Create L2CAP header */
2184 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2185 	lh->cid = cpu_to_le16(chan->dcid);
2186 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2187 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2188 
2189 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2190 	if (unlikely(err < 0)) {
2191 		kfree_skb(skb);
2192 		return ERR_PTR(err);
2193 	}
2194 	return skb;
2195 }
2196 
2197 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2198 					      struct msghdr *msg, size_t len)
2199 {
2200 	struct l2cap_conn *conn = chan->conn;
2201 	struct sk_buff *skb;
2202 	int err, count;
2203 	struct l2cap_hdr *lh;
2204 
2205 	BT_DBG("chan %p len %zu", chan, len);
2206 
2207 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2208 
2209 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2210 				   msg->msg_flags & MSG_DONTWAIT);
2211 	if (IS_ERR(skb))
2212 		return skb;
2213 
2214 	/* Create L2CAP header */
2215 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2216 	lh->cid = cpu_to_le16(chan->dcid);
2217 	lh->len = cpu_to_le16(len);
2218 
2219 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2220 	if (unlikely(err < 0)) {
2221 		kfree_skb(skb);
2222 		return ERR_PTR(err);
2223 	}
2224 	return skb;
2225 }
2226 
2227 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2228 					       struct msghdr *msg, size_t len,
2229 					       u16 sdulen)
2230 {
2231 	struct l2cap_conn *conn = chan->conn;
2232 	struct sk_buff *skb;
2233 	int err, count, hlen;
2234 	struct l2cap_hdr *lh;
2235 
2236 	BT_DBG("chan %p len %zu", chan, len);
2237 
2238 	if (!conn)
2239 		return ERR_PTR(-ENOTCONN);
2240 
2241 	hlen = __ertm_hdr_size(chan);
2242 
2243 	if (sdulen)
2244 		hlen += L2CAP_SDULEN_SIZE;
2245 
2246 	if (chan->fcs == L2CAP_FCS_CRC16)
2247 		hlen += L2CAP_FCS_SIZE;
2248 
2249 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2250 
2251 	skb = chan->ops->alloc_skb(chan, hlen, count,
2252 				   msg->msg_flags & MSG_DONTWAIT);
2253 	if (IS_ERR(skb))
2254 		return skb;
2255 
2256 	/* Create L2CAP header */
2257 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2258 	lh->cid = cpu_to_le16(chan->dcid);
2259 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2260 
2261 	/* Control header is populated later */
2262 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2263 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2264 	else
2265 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2266 
2267 	if (sdulen)
2268 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2269 
2270 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2271 	if (unlikely(err < 0)) {
2272 		kfree_skb(skb);
2273 		return ERR_PTR(err);
2274 	}
2275 
2276 	bt_cb(skb)->l2cap.fcs = chan->fcs;
2277 	bt_cb(skb)->l2cap.retries = 0;
2278 	return skb;
2279 }
2280 
2281 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2282 			     struct sk_buff_head *seg_queue,
2283 			     struct msghdr *msg, size_t len)
2284 {
2285 	struct sk_buff *skb;
2286 	u16 sdu_len;
2287 	size_t pdu_len;
2288 	u8 sar;
2289 
2290 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2291 
2292 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2293 	 * so fragmented skbs are not used.  The HCI layer's handling
2294 	 * of fragmented skbs is not compatible with ERTM's queueing.
2295 	 */
2296 
2297 	/* PDU size is derived from the HCI MTU */
2298 	pdu_len = chan->conn->mtu;
2299 
2300 	/* Constrain PDU size for BR/EDR connections */
2301 	if (!chan->hs_hcon)
2302 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2303 
2304 	/* Adjust for largest possible L2CAP overhead. */
2305 	if (chan->fcs)
2306 		pdu_len -= L2CAP_FCS_SIZE;
2307 
2308 	pdu_len -= __ertm_hdr_size(chan);
2309 
2310 	/* Remote device may have requested smaller PDUs */
2311 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
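	/* At this point pdu_len is the usable I-frame payload size: the HCI
	 * MTU (capped for BR/EDR), minus FCS and ERTM header overhead, and
	 * never larger than the remote's advertised MPS.
	 */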
2312 
2313 	if (len <= pdu_len) {
2314 		sar = L2CAP_SAR_UNSEGMENTED;
2315 		sdu_len = 0;
2316 		pdu_len = len;
2317 	} else {
2318 		sar = L2CAP_SAR_START;
2319 		sdu_len = len;
2320 	}
2321 
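	/* SAR progression: a single PDU is UNSEGMENTED; otherwise the first
	 * PDU is START (and carries the SDU length), the last is END, and
	 * any in between are CONTINUE.
	 */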
2322 	while (len > 0) {
2323 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2324 
2325 		if (IS_ERR(skb)) {
2326 			__skb_queue_purge(seg_queue);
2327 			return PTR_ERR(skb);
2328 		}
2329 
2330 		bt_cb(skb)->l2cap.sar = sar;
2331 		__skb_queue_tail(seg_queue, skb);
2332 
2333 		len -= pdu_len;
2334 		if (sdu_len)
2335 			sdu_len = 0;
2336 
2337 		if (len <= pdu_len) {
2338 			sar = L2CAP_SAR_END;
2339 			pdu_len = len;
2340 		} else {
2341 			sar = L2CAP_SAR_CONTINUE;
2342 		}
2343 	}
2344 
2345 	return 0;
2346 }
2347 
2348 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2349 						   struct msghdr *msg,
2350 						   size_t len, u16 sdulen)
2351 {
2352 	struct l2cap_conn *conn = chan->conn;
2353 	struct sk_buff *skb;
2354 	int err, count, hlen;
2355 	struct l2cap_hdr *lh;
2356 
2357 	BT_DBG("chan %p len %zu", chan, len);
2358 
2359 	if (!conn)
2360 		return ERR_PTR(-ENOTCONN);
2361 
2362 	hlen = L2CAP_HDR_SIZE;
2363 
2364 	if (sdulen)
2365 		hlen += L2CAP_SDULEN_SIZE;
2366 
2367 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2368 
2369 	skb = chan->ops->alloc_skb(chan, hlen, count,
2370 				   msg->msg_flags & MSG_DONTWAIT);
2371 	if (IS_ERR(skb))
2372 		return skb;
2373 
2374 	/* Create L2CAP header */
2375 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2376 	lh->cid = cpu_to_le16(chan->dcid);
2377 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2378 
2379 	if (sdulen)
2380 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2381 
2382 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2383 	if (unlikely(err < 0)) {
2384 		kfree_skb(skb);
2385 		return ERR_PTR(err);
2386 	}
2387 
2388 	return skb;
2389 }
2390 
2391 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2392 				struct sk_buff_head *seg_queue,
2393 				struct msghdr *msg, size_t len)
2394 {
2395 	struct sk_buff *skb;
2396 	size_t pdu_len;
2397 	u16 sdu_len;
2398 
2399 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2400 
2401 	sdu_len = len;
2402 	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2403 
2404 	while (len > 0) {
2405 		if (len <= pdu_len)
2406 			pdu_len = len;
2407 
2408 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2409 		if (IS_ERR(skb)) {
2410 			__skb_queue_purge(seg_queue);
2411 			return PTR_ERR(skb);
2412 		}
2413 
2414 		__skb_queue_tail(seg_queue, skb);
2415 
2416 		len -= pdu_len;
2417 
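		/* Only the first PDU carries the 2-byte SDU length field, so
		 * every PDU after it can carry L2CAP_SDULEN_SIZE more
		 * payload bytes.
		 */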
2418 		if (sdu_len) {
2419 			sdu_len = 0;
2420 			pdu_len += L2CAP_SDULEN_SIZE;
2421 		}
2422 	}
2423 
2424 	return 0;
2425 }
2426 
2427 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2428 {
2429 	struct sk_buff *skb;
2430 	int err;
2431 	struct sk_buff_head seg_queue;
2432 
2433 	if (!chan->conn)
2434 		return -ENOTCONN;
2435 
2436 	/* Connectionless channel */
2437 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2438 		skb = l2cap_create_connless_pdu(chan, msg, len);
2439 		if (IS_ERR(skb))
2440 			return PTR_ERR(skb);
2441 
2442 		/* The channel lock is released while requesting a new skb and
2443 		 * then reacquired, so we need to recheck the channel state.
2444 		 */
2445 		if (chan->state != BT_CONNECTED) {
2446 			kfree_skb(skb);
2447 			return -ENOTCONN;
2448 		}
2449 
2450 		l2cap_do_send(chan, skb);
2451 		return len;
2452 	}
2453 
2454 	switch (chan->mode) {
2455 	case L2CAP_MODE_LE_FLOWCTL:
2456 		/* Check outgoing MTU */
2457 		if (len > chan->omtu)
2458 			return -EMSGSIZE;
2459 
2460 		if (!chan->tx_credits)
2461 			return -EAGAIN;
2462 
2463 		__skb_queue_head_init(&seg_queue);
2464 
2465 		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2466 
2467 		if (chan->state != BT_CONNECTED) {
2468 			__skb_queue_purge(&seg_queue);
2469 			err = -ENOTCONN;
2470 		}
2471 
2472 		if (err)
2473 			return err;
2474 
2475 		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2476 
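		/* LE credit-based flow control: each PDU sent consumes one
		 * credit; leftover PDUs stay on tx_q until more credits
		 * arrive, and the channel is suspended when credits run out.
		 */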
2477 		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2478 			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2479 			chan->tx_credits--;
2480 		}
2481 
2482 		if (!chan->tx_credits)
2483 			chan->ops->suspend(chan);
2484 
2485 		err = len;
2486 
2487 		break;
2488 
2489 	case L2CAP_MODE_BASIC:
2490 		/* Check outgoing MTU */
2491 		if (len > chan->omtu)
2492 			return -EMSGSIZE;
2493 
2494 		/* Create a basic PDU */
2495 		skb = l2cap_create_basic_pdu(chan, msg, len);
2496 		if (IS_ERR(skb))
2497 			return PTR_ERR(skb);
2498 
2499 		/* The channel lock is released while requesting a new skb and
2500 		 * then reacquired, so we need to recheck the channel state.
2501 		 */
2502 		if (chan->state != BT_CONNECTED) {
2503 			kfree_skb(skb);
2504 			return -ENOTCONN;
2505 		}
2506 
2507 		l2cap_do_send(chan, skb);
2508 		err = len;
2509 		break;
2510 
2511 	case L2CAP_MODE_ERTM:
2512 	case L2CAP_MODE_STREAMING:
2513 		/* Check outgoing MTU */
2514 		if (len > chan->omtu) {
2515 			err = -EMSGSIZE;
2516 			break;
2517 		}
2518 
2519 		__skb_queue_head_init(&seg_queue);
2520 
2521 		/* Do segmentation before calling in to the state machine,
2522 		 * since it's possible to block while waiting for memory
2523 		 * allocation.
2524 		 */
2525 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2526 
2527 		/* The channel could have been closed while segmenting,
2528 		 * check that it is still connected.
2529 		 */
2530 		if (chan->state != BT_CONNECTED) {
2531 			__skb_queue_purge(&seg_queue);
2532 			err = -ENOTCONN;
2533 		}
2534 
2535 		if (err)
2536 			break;
2537 
2538 		if (chan->mode == L2CAP_MODE_ERTM)
2539 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2540 		else
2541 			l2cap_streaming_send(chan, &seg_queue);
2542 
2543 		err = len;
2544 
2545 		/* If the skbs were not queued for sending, they'll still be in
2546 		 * seg_queue and need to be purged.
2547 		 */
2548 		__skb_queue_purge(&seg_queue);
2549 		break;
2550 
2551 	default:
2552 		BT_DBG("bad mode %1.1x", chan->mode);
2553 		err = -EBADFD;
2554 	}
2555 
2556 	return err;
2557 }
2558 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2559 
2560 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2561 {
2562 	struct l2cap_ctrl control;
2563 	u16 seq;
2564 
2565 	BT_DBG("chan %p, txseq %u", chan, txseq);
2566 
2567 	memset(&control, 0, sizeof(control));
2568 	control.sframe = 1;
2569 	control.super = L2CAP_SUPER_SREJ;
2570 
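	/* Send an SREJ for every sequence number between the expected txseq
	 * and the one just received that is not already buffered in srej_q,
	 * and remember each request in srej_list.
	 */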
2571 	for (seq = chan->expected_tx_seq; seq != txseq;
2572 	     seq = __next_seq(chan, seq)) {
2573 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2574 			control.reqseq = seq;
2575 			l2cap_send_sframe(chan, &control);
2576 			l2cap_seq_list_append(&chan->srej_list, seq);
2577 		}
2578 	}
2579 
2580 	chan->expected_tx_seq = __next_seq(chan, txseq);
2581 }
2582 
2583 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2584 {
2585 	struct l2cap_ctrl control;
2586 
2587 	BT_DBG("chan %p", chan);
2588 
2589 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2590 		return;
2591 
2592 	memset(&control, 0, sizeof(control));
2593 	control.sframe = 1;
2594 	control.super = L2CAP_SUPER_SREJ;
2595 	control.reqseq = chan->srej_list.tail;
2596 	l2cap_send_sframe(chan, &control);
2597 }
2598 
2599 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2600 {
2601 	struct l2cap_ctrl control;
2602 	u16 initial_head;
2603 	u16 seq;
2604 
2605 	BT_DBG("chan %p, txseq %u", chan, txseq);
2606 
2607 	memset(&control, 0, sizeof(control));
2608 	control.sframe = 1;
2609 	control.super = L2CAP_SUPER_SREJ;
2610 
2611 	/* Capture initial list head to allow only one pass through the list. */
2612 	initial_head = chan->srej_list.head;
2613 
2614 	do {
2615 		seq = l2cap_seq_list_pop(&chan->srej_list);
2616 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2617 			break;
2618 
2619 		control.reqseq = seq;
2620 		l2cap_send_sframe(chan, &control);
2621 		l2cap_seq_list_append(&chan->srej_list, seq);
2622 	} while (chan->srej_list.head != initial_head);
2623 }
2624 
2625 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2626 {
2627 	struct sk_buff *acked_skb;
2628 	u16 ackseq;
2629 
2630 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2631 
2632 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2633 		return;
2634 
2635 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2636 	       chan->expected_ack_seq, chan->unacked_frames);
2637 
2638 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2639 	     ackseq = __next_seq(chan, ackseq)) {
2640 
2641 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2642 		if (acked_skb) {
2643 			skb_unlink(acked_skb, &chan->tx_q);
2644 			kfree_skb(acked_skb);
2645 			chan->unacked_frames--;
2646 		}
2647 	}
2648 
2649 	chan->expected_ack_seq = reqseq;
2650 
2651 	if (chan->unacked_frames == 0)
2652 		__clear_retrans_timer(chan);
2653 
2654 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2655 }
2656 
2657 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2658 {
2659 	BT_DBG("chan %p", chan);
2660 
2661 	chan->expected_tx_seq = chan->buffer_seq;
2662 	l2cap_seq_list_clear(&chan->srej_list);
2663 	skb_queue_purge(&chan->srej_q);
2664 	chan->rx_state = L2CAP_RX_STATE_RECV;
2665 }
2666 
2667 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2668 				struct l2cap_ctrl *control,
2669 				struct sk_buff_head *skbs, u8 event)
2670 {
2671 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2672 	       event);
2673 
2674 	switch (event) {
2675 	case L2CAP_EV_DATA_REQUEST:
2676 		if (chan->tx_send_head == NULL)
2677 			chan->tx_send_head = skb_peek(skbs);
2678 
2679 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2680 		l2cap_ertm_send(chan);
2681 		break;
2682 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2683 		BT_DBG("Enter LOCAL_BUSY");
2684 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2685 
2686 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2687 			/* The SREJ_SENT state must be aborted if we are to
2688 			 * enter the LOCAL_BUSY state.
2689 			 */
2690 			l2cap_abort_rx_srej_sent(chan);
2691 		}
2692 
2693 		l2cap_send_ack(chan);
2694 
2695 		break;
2696 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2697 		BT_DBG("Exit LOCAL_BUSY");
2698 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2699 
2700 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2701 			struct l2cap_ctrl local_control;
2702 
2703 			memset(&local_control, 0, sizeof(local_control));
2704 			local_control.sframe = 1;
2705 			local_control.super = L2CAP_SUPER_RR;
2706 			local_control.poll = 1;
2707 			local_control.reqseq = chan->buffer_seq;
2708 			l2cap_send_sframe(chan, &local_control);
2709 
2710 			chan->retry_count = 1;
2711 			__set_monitor_timer(chan);
2712 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2713 		}
2714 		break;
2715 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2716 		l2cap_process_reqseq(chan, control->reqseq);
2717 		break;
2718 	case L2CAP_EV_EXPLICIT_POLL:
2719 		l2cap_send_rr_or_rnr(chan, 1);
2720 		chan->retry_count = 1;
2721 		__set_monitor_timer(chan);
2722 		__clear_ack_timer(chan);
2723 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2724 		break;
2725 	case L2CAP_EV_RETRANS_TO:
2726 		l2cap_send_rr_or_rnr(chan, 1);
2727 		chan->retry_count = 1;
2728 		__set_monitor_timer(chan);
2729 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2730 		break;
2731 	case L2CAP_EV_RECV_FBIT:
2732 		/* Nothing to process */
2733 		break;
2734 	default:
2735 		break;
2736 	}
2737 }
2738 
2739 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2740 				  struct l2cap_ctrl *control,
2741 				  struct sk_buff_head *skbs, u8 event)
2742 {
2743 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2744 	       event);
2745 
2746 	switch (event) {
2747 	case L2CAP_EV_DATA_REQUEST:
2748 		if (chan->tx_send_head == NULL)
2749 			chan->tx_send_head = skb_peek(skbs);
2750 		/* Queue data, but don't send. */
2751 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2752 		break;
2753 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2754 		BT_DBG("Enter LOCAL_BUSY");
2755 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2756 
2757 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2758 			/* The SREJ_SENT state must be aborted if we are to
2759 			 * enter the LOCAL_BUSY state.
2760 			 */
2761 			l2cap_abort_rx_srej_sent(chan);
2762 		}
2763 
2764 		l2cap_send_ack(chan);
2765 
2766 		break;
2767 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2768 		BT_DBG("Exit LOCAL_BUSY");
2769 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2770 
2771 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2772 			struct l2cap_ctrl local_control;
2773 			memset(&local_control, 0, sizeof(local_control));
2774 			local_control.sframe = 1;
2775 			local_control.super = L2CAP_SUPER_RR;
2776 			local_control.poll = 1;
2777 			local_control.reqseq = chan->buffer_seq;
2778 			l2cap_send_sframe(chan, &local_control);
2779 
2780 			chan->retry_count = 1;
2781 			__set_monitor_timer(chan);
2782 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2783 		}
2784 		break;
2785 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2786 		l2cap_process_reqseq(chan, control->reqseq);
2787 
2788 		/* Fall through */
2789 
2790 	case L2CAP_EV_RECV_FBIT:
2791 		if (control && control->final) {
2792 			__clear_monitor_timer(chan);
2793 			if (chan->unacked_frames > 0)
2794 				__set_retrans_timer(chan);
2795 			chan->retry_count = 0;
2796 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2797 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2798 		}
2799 		break;
2800 	case L2CAP_EV_EXPLICIT_POLL:
2801 		/* Ignore */
2802 		break;
2803 	case L2CAP_EV_MONITOR_TO:
2804 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2805 			l2cap_send_rr_or_rnr(chan, 1);
2806 			__set_monitor_timer(chan);
2807 			chan->retry_count++;
2808 		} else {
2809 			l2cap_send_disconn_req(chan, ECONNABORTED);
2810 		}
2811 		break;
2812 	default:
2813 		break;
2814 	}
2815 }
2816 
2817 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2818 		     struct sk_buff_head *skbs, u8 event)
2819 {
2820 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2821 	       chan, control, skbs, event, chan->tx_state);
2822 
2823 	switch (chan->tx_state) {
2824 	case L2CAP_TX_STATE_XMIT:
2825 		l2cap_tx_state_xmit(chan, control, skbs, event);
2826 		break;
2827 	case L2CAP_TX_STATE_WAIT_F:
2828 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2829 		break;
2830 	default:
2831 		/* Ignore event */
2832 		break;
2833 	}
2834 }
2835 
2836 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2837 			     struct l2cap_ctrl *control)
2838 {
2839 	BT_DBG("chan %p, control %p", chan, control);
2840 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2841 }
2842 
2843 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2844 				  struct l2cap_ctrl *control)
2845 {
2846 	BT_DBG("chan %p, control %p", chan, control);
2847 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2848 }
2849 
2850 /* Copy frame to all raw sockets on that connection */
2851 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2852 {
2853 	struct sk_buff *nskb;
2854 	struct l2cap_chan *chan;
2855 
2856 	BT_DBG("conn %p", conn);
2857 
2858 	mutex_lock(&conn->chan_lock);
2859 
2860 	list_for_each_entry(chan, &conn->chan_l, list) {
2861 		if (chan->chan_type != L2CAP_CHAN_RAW)
2862 			continue;
2863 
2864 		/* Don't send frame to the channel it came from */
2865 		if (bt_cb(skb)->l2cap.chan == chan)
2866 			continue;
2867 
2868 		nskb = skb_clone(skb, GFP_KERNEL);
2869 		if (!nskb)
2870 			continue;
2871 		if (chan->ops->recv(chan, nskb))
2872 			kfree_skb(nskb);
2873 	}
2874 
2875 	mutex_unlock(&conn->chan_lock);
2876 }
2877 
2878 /* ---- L2CAP signalling commands ---- */
2879 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2880 				       u8 ident, u16 dlen, void *data)
2881 {
2882 	struct sk_buff *skb, **frag;
2883 	struct l2cap_cmd_hdr *cmd;
2884 	struct l2cap_hdr *lh;
2885 	int len, count;
2886 
2887 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2888 	       conn, code, ident, dlen);
2889 
2890 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2891 		return NULL;
2892 
2893 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2894 	count = min_t(unsigned int, conn->mtu, len);
2895 
2896 	skb = bt_skb_alloc(count, GFP_KERNEL);
2897 	if (!skb)
2898 		return NULL;
2899 
2900 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2901 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2902 
2903 	if (conn->hcon->type == LE_LINK)
2904 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2905 	else
2906 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2907 
2908 	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2909 	cmd->code  = code;
2910 	cmd->ident = ident;
2911 	cmd->len   = cpu_to_le16(dlen);
2912 
2913 	if (dlen) {
2914 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2915 		memcpy(skb_put(skb, count), data, count);
2916 		data += count;
2917 	}
2918 
2919 	len -= skb->len;
2920 
2921 	/* Continuation fragments (no L2CAP header) */
2922 	frag = &skb_shinfo(skb)->frag_list;
2923 	while (len) {
2924 		count = min_t(unsigned int, conn->mtu, len);
2925 
2926 		*frag = bt_skb_alloc(count, GFP_KERNEL);
2927 		if (!*frag)
2928 			goto fail;
2929 
2930 		memcpy(skb_put(*frag, count), data, count);
2931 
2932 		len  -= count;
2933 		data += count;
2934 
2935 		frag = &(*frag)->next;
2936 	}
2937 
2938 	return skb;
2939 
2940 fail:
2941 	kfree_skb(skb);
2942 	return NULL;
2943 }
2944 
2945 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2946 				     unsigned long *val)
2947 {
2948 	struct l2cap_conf_opt *opt = *ptr;
2949 	int len;
2950 
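	/* Each option is a TLV: a 1-byte type, a 1-byte length, then 'len'
	 * bytes of value; L2CAP_CONF_OPT_SIZE covers the type and length
	 * bytes.
	 */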
2951 	len = L2CAP_CONF_OPT_SIZE + opt->len;
2952 	*ptr += len;
2953 
2954 	*type = opt->type;
2955 	*olen = opt->len;
2956 
2957 	switch (opt->len) {
2958 	case 1:
2959 		*val = *((u8 *) opt->val);
2960 		break;
2961 
2962 	case 2:
2963 		*val = get_unaligned_le16(opt->val);
2964 		break;
2965 
2966 	case 4:
2967 		*val = get_unaligned_le32(opt->val);
2968 		break;
2969 
2970 	default:
2971 		*val = (unsigned long) opt->val;
2972 		break;
2973 	}
2974 
2975 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2976 	return len;
2977 }
2978 
2979 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2980 {
2981 	struct l2cap_conf_opt *opt = *ptr;
2982 
2983 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2984 
2985 	opt->type = type;
2986 	opt->len  = len;
2987 
2988 	switch (len) {
2989 	case 1:
2990 		*((u8 *) opt->val)  = val;
2991 		break;
2992 
2993 	case 2:
2994 		put_unaligned_le16(val, opt->val);
2995 		break;
2996 
2997 	case 4:
2998 		put_unaligned_le32(val, opt->val);
2999 		break;
3000 
3001 	default:
3002 		memcpy(opt->val, (void *) val, len);
3003 		break;
3004 	}
3005 
3006 	*ptr += L2CAP_CONF_OPT_SIZE + len;
3007 }
3008 
3009 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3010 {
3011 	struct l2cap_conf_efs efs;
3012 
3013 	switch (chan->mode) {
3014 	case L2CAP_MODE_ERTM:
3015 		efs.id		= chan->local_id;
3016 		efs.stype	= chan->local_stype;
3017 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3018 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3019 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3020 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3021 		break;
3022 
3023 	case L2CAP_MODE_STREAMING:
3024 		efs.id		= 1;
3025 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3026 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3027 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3028 		efs.acc_lat	= 0;
3029 		efs.flush_to	= 0;
3030 		break;
3031 
3032 	default:
3033 		return;
3034 	}
3035 
3036 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3037 			   (unsigned long) &efs);
3038 }
3039 
3040 static void l2cap_ack_timeout(struct work_struct *work)
3041 {
3042 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3043 					       ack_timer.work);
3044 	u16 frames_to_ack;
3045 
3046 	BT_DBG("chan %p", chan);
3047 
3048 	l2cap_chan_lock(chan);
3049 
3050 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3051 				     chan->last_acked_seq);
3052 
3053 	if (frames_to_ack)
3054 		l2cap_send_rr_or_rnr(chan, 0);
3055 
3056 	l2cap_chan_unlock(chan);
3057 	l2cap_chan_put(chan);
3058 }
3059 
3060 int l2cap_ertm_init(struct l2cap_chan *chan)
3061 {
3062 	int err;
3063 
3064 	chan->next_tx_seq = 0;
3065 	chan->expected_tx_seq = 0;
3066 	chan->expected_ack_seq = 0;
3067 	chan->unacked_frames = 0;
3068 	chan->buffer_seq = 0;
3069 	chan->frames_sent = 0;
3070 	chan->last_acked_seq = 0;
3071 	chan->sdu = NULL;
3072 	chan->sdu_last_frag = NULL;
3073 	chan->sdu_len = 0;
3074 
3075 	skb_queue_head_init(&chan->tx_q);
3076 
3077 	chan->local_amp_id = AMP_ID_BREDR;
3078 	chan->move_id = AMP_ID_BREDR;
3079 	chan->move_state = L2CAP_MOVE_STABLE;
3080 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3081 
3082 	if (chan->mode != L2CAP_MODE_ERTM)
3083 		return 0;
3084 
3085 	chan->rx_state = L2CAP_RX_STATE_RECV;
3086 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3087 
3088 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3089 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3090 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3091 
3092 	skb_queue_head_init(&chan->srej_q);
3093 
3094 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3095 	if (err < 0)
3096 		return err;
3097 
3098 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3099 	if (err < 0)
3100 		l2cap_seq_list_free(&chan->srej_list);
3101 
3102 	return err;
3103 }
3104 
3105 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3106 {
3107 	switch (mode) {
3108 	case L2CAP_MODE_STREAMING:
3109 	case L2CAP_MODE_ERTM:
3110 		if (l2cap_mode_supported(mode, remote_feat_mask))
3111 			return mode;
3112 		/* fall through */
3113 	default:
3114 		return L2CAP_MODE_BASIC;
3115 	}
3116 }
3117 
3118 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3119 {
3120 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3121 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3122 }
3123 
3124 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3125 {
3126 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3127 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3128 }
3129 
3130 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3131 				      struct l2cap_conf_rfc *rfc)
3132 {
3133 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3134 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3135 
3136 		/* Class 1 devices must have ERTM timeouts
3137 		 * exceeding the Link Supervision Timeout.  The
3138 		 * default Link Supervision Timeout for AMP
3139 		 * controllers is 10 seconds.
3140 		 *
3141 		 * Class 1 devices use 0xffffffff for their
3142 		 * best-effort flush timeout, so the clamping logic
3143 		 * will result in a timeout that meets the above
3144 		 * requirement.  ERTM timeouts are 16-bit values, so
3145 		 * the maximum timeout is 65.535 seconds.
3146 		 */
3147 
3148 		/* Convert timeout to milliseconds and round */
3149 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3150 
3151 		/* This is the recommended formula for class 2 devices
3152 		 * that start ERTM timers when packets are sent to the
3153 		 * controller.
3154 		 */
3155 		ertm_to = 3 * ertm_to + 500;
3156 
3157 		if (ertm_to > 0xffff)
3158 			ertm_to = 0xffff;
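		/* For example, the 0xffffffff best-effort flush timeout used
		 * by class 1 devices converts to well over 0xffff ms and is
		 * therefore clamped above.
		 */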
3159 
3160 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3161 		rfc->monitor_timeout = rfc->retrans_timeout;
3162 	} else {
3163 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3164 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3165 	}
3166 }
3167 
3168 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3169 {
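	/* If the requested tx_win exceeds the enhanced-mode default and the
	 * connection supports the extended window size option, switch to
	 * the extended control field; otherwise clamp tx_win to the
	 * default window.
	 */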
3170 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3171 	    __l2cap_ews_supported(chan->conn)) {
3172 		/* use extended control field */
3173 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3174 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3175 	} else {
3176 		chan->tx_win = min_t(u16, chan->tx_win,
3177 				     L2CAP_DEFAULT_TX_WINDOW);
3178 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3179 	}
3180 	chan->ack_win = chan->tx_win;
3181 }
3182 
3183 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3184 {
3185 	struct l2cap_conf_req *req = data;
3186 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3187 	void *ptr = req->data;
3188 	u16 size;
3189 
3190 	BT_DBG("chan %p", chan);
3191 
3192 	if (chan->num_conf_req || chan->num_conf_rsp)
3193 		goto done;
3194 
3195 	switch (chan->mode) {
3196 	case L2CAP_MODE_STREAMING:
3197 	case L2CAP_MODE_ERTM:
3198 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3199 			break;
3200 
3201 		if (__l2cap_efs_supported(chan->conn))
3202 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3203 
3204 		/* fall through */
3205 	default:
3206 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3207 		break;
3208 	}
3209 
3210 done:
3211 	if (chan->imtu != L2CAP_DEFAULT_MTU)
3212 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3213 
3214 	switch (chan->mode) {
3215 	case L2CAP_MODE_BASIC:
3216 		if (disable_ertm)
3217 			break;
3218 
3219 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3220 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3221 			break;
3222 
3223 		rfc.mode            = L2CAP_MODE_BASIC;
3224 		rfc.txwin_size      = 0;
3225 		rfc.max_transmit    = 0;
3226 		rfc.retrans_timeout = 0;
3227 		rfc.monitor_timeout = 0;
3228 		rfc.max_pdu_size    = 0;
3229 
3230 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3231 				   (unsigned long) &rfc);
3232 		break;
3233 
3234 	case L2CAP_MODE_ERTM:
3235 		rfc.mode            = L2CAP_MODE_ERTM;
3236 		rfc.max_transmit    = chan->max_tx;
3237 
3238 		__l2cap_set_ertm_timeouts(chan, &rfc);
3239 
3240 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3241 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3242 			     L2CAP_FCS_SIZE);
3243 		rfc.max_pdu_size = cpu_to_le16(size);
3244 
3245 		l2cap_txwin_setup(chan);
3246 
3247 		rfc.txwin_size = min_t(u16, chan->tx_win,
3248 				       L2CAP_DEFAULT_TX_WINDOW);
3249 
3250 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3251 				   (unsigned long) &rfc);
3252 
3253 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3254 			l2cap_add_opt_efs(&ptr, chan);
3255 
3256 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3257 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3258 					   chan->tx_win);
3259 
3260 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3261 			if (chan->fcs == L2CAP_FCS_NONE ||
3262 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3263 				chan->fcs = L2CAP_FCS_NONE;
3264 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3265 						   chan->fcs);
3266 			}
3267 		break;
3268 
3269 	case L2CAP_MODE_STREAMING:
3270 		l2cap_txwin_setup(chan);
3271 		rfc.mode            = L2CAP_MODE_STREAMING;
3272 		rfc.txwin_size      = 0;
3273 		rfc.max_transmit    = 0;
3274 		rfc.retrans_timeout = 0;
3275 		rfc.monitor_timeout = 0;
3276 
3277 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3278 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3279 			     L2CAP_FCS_SIZE);
3280 		rfc.max_pdu_size = cpu_to_le16(size);
3281 
3282 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3283 				   (unsigned long) &rfc);
3284 
3285 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3286 			l2cap_add_opt_efs(&ptr, chan);
3287 
3288 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3289 			if (chan->fcs == L2CAP_FCS_NONE ||
3290 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3291 				chan->fcs = L2CAP_FCS_NONE;
3292 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3293 						   chan->fcs);
3294 			}
3295 		break;
3296 	}
3297 
3298 	req->dcid  = cpu_to_le16(chan->dcid);
3299 	req->flags = cpu_to_le16(0);
3300 
3301 	return ptr - data;
3302 }
3303 
3304 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3305 {
3306 	struct l2cap_conf_rsp *rsp = data;
3307 	void *ptr = rsp->data;
3308 	void *req = chan->conf_req;
3309 	int len = chan->conf_len;
3310 	int type, hint, olen;
3311 	unsigned long val;
3312 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3313 	struct l2cap_conf_efs efs;
3314 	u8 remote_efs = 0;
3315 	u16 mtu = L2CAP_DEFAULT_MTU;
3316 	u16 result = L2CAP_CONF_SUCCESS;
3317 	u16 size;
3318 
3319 	BT_DBG("chan %p", chan);
3320 
3321 	while (len >= L2CAP_CONF_OPT_SIZE) {
3322 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3323 
3324 		hint  = type & L2CAP_CONF_HINT;
3325 		type &= L2CAP_CONF_MASK;
3326 
3327 		switch (type) {
3328 		case L2CAP_CONF_MTU:
3329 			mtu = val;
3330 			break;
3331 
3332 		case L2CAP_CONF_FLUSH_TO:
3333 			chan->flush_to = val;
3334 			break;
3335 
3336 		case L2CAP_CONF_QOS:
3337 			break;
3338 
3339 		case L2CAP_CONF_RFC:
3340 			if (olen == sizeof(rfc))
3341 				memcpy(&rfc, (void *) val, olen);
3342 			break;
3343 
3344 		case L2CAP_CONF_FCS:
3345 			if (val == L2CAP_FCS_NONE)
3346 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3347 			break;
3348 
3349 		case L2CAP_CONF_EFS:
3350 			remote_efs = 1;
3351 			if (olen == sizeof(efs))
3352 				memcpy(&efs, (void *) val, olen);
3353 			break;
3354 
3355 		case L2CAP_CONF_EWS:
3356 			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3357 				return -ECONNREFUSED;
3358 
3359 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3360 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3361 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3362 			chan->remote_tx_win = val;
3363 			break;
3364 
3365 		default:
3366 			if (hint)
3367 				break;
3368 
3369 			result = L2CAP_CONF_UNKNOWN;
3370 			*((u8 *) ptr++) = type;
3371 			break;
3372 		}
3373 	}
3374 
3375 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3376 		goto done;
3377 
3378 	switch (chan->mode) {
3379 	case L2CAP_MODE_STREAMING:
3380 	case L2CAP_MODE_ERTM:
3381 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3382 			chan->mode = l2cap_select_mode(rfc.mode,
3383 						       chan->conn->feat_mask);
3384 			break;
3385 		}
3386 
3387 		if (remote_efs) {
3388 			if (__l2cap_efs_supported(chan->conn))
3389 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3390 			else
3391 				return -ECONNREFUSED;
3392 		}
3393 
3394 		if (chan->mode != rfc.mode)
3395 			return -ECONNREFUSED;
3396 
3397 		break;
3398 	}
3399 
3400 done:
3401 	if (chan->mode != rfc.mode) {
3402 		result = L2CAP_CONF_UNACCEPT;
3403 		rfc.mode = chan->mode;
3404 
3405 		if (chan->num_conf_rsp == 1)
3406 			return -ECONNREFUSED;
3407 
3408 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3409 				   (unsigned long) &rfc);
3410 	}
3411 
3412 	if (result == L2CAP_CONF_SUCCESS) {
3413 		/* Configure output options and let the other side know
3414 		 * which ones we don't like. */
3415 
3416 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3417 			result = L2CAP_CONF_UNACCEPT;
3418 		else {
3419 			chan->omtu = mtu;
3420 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3421 		}
3422 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3423 
3424 		if (remote_efs) {
3425 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3426 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3427 			    efs.stype != chan->local_stype) {
3428 
3429 				result = L2CAP_CONF_UNACCEPT;
3430 
3431 				if (chan->num_conf_req >= 1)
3432 					return -ECONNREFUSED;
3433 
3434 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3435 						   sizeof(efs),
3436 						   (unsigned long) &efs);
3437 			} else {
3438 				/* Send PENDING Conf Rsp */
3439 				result = L2CAP_CONF_PENDING;
3440 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3441 			}
3442 		}
3443 
3444 		switch (rfc.mode) {
3445 		case L2CAP_MODE_BASIC:
3446 			chan->fcs = L2CAP_FCS_NONE;
3447 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3448 			break;
3449 
3450 		case L2CAP_MODE_ERTM:
3451 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3452 				chan->remote_tx_win = rfc.txwin_size;
3453 			else
3454 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3455 
3456 			chan->remote_max_tx = rfc.max_transmit;
3457 
3458 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3459 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3460 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3461 			rfc.max_pdu_size = cpu_to_le16(size);
3462 			chan->remote_mps = size;
3463 
3464 			__l2cap_set_ertm_timeouts(chan, &rfc);
3465 
3466 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3467 
3468 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3469 					   sizeof(rfc), (unsigned long) &rfc);
3470 
3471 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3472 				chan->remote_id = efs.id;
3473 				chan->remote_stype = efs.stype;
3474 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3475 				chan->remote_flush_to =
3476 					le32_to_cpu(efs.flush_to);
3477 				chan->remote_acc_lat =
3478 					le32_to_cpu(efs.acc_lat);
3479 				chan->remote_sdu_itime =
3480 					le32_to_cpu(efs.sdu_itime);
3481 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3482 						   sizeof(efs),
3483 						   (unsigned long) &efs);
3484 			}
3485 			break;
3486 
3487 		case L2CAP_MODE_STREAMING:
3488 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3489 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3490 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3491 			rfc.max_pdu_size = cpu_to_le16(size);
3492 			chan->remote_mps = size;
3493 
3494 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3495 
3496 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3497 					   (unsigned long) &rfc);
3498 
3499 			break;
3500 
3501 		default:
3502 			result = L2CAP_CONF_UNACCEPT;
3503 
3504 			memset(&rfc, 0, sizeof(rfc));
3505 			rfc.mode = chan->mode;
3506 		}
3507 
3508 		if (result == L2CAP_CONF_SUCCESS)
3509 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3510 	}
3511 	rsp->scid   = cpu_to_le16(chan->dcid);
3512 	rsp->result = cpu_to_le16(result);
3513 	rsp->flags  = cpu_to_le16(0);
3514 
3515 	return ptr - data;
3516 }
3517 
3518 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3519 				void *data, u16 *result)
3520 {
3521 	struct l2cap_conf_req *req = data;
3522 	void *ptr = req->data;
3523 	int type, olen;
3524 	unsigned long val;
3525 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3526 	struct l2cap_conf_efs efs;
3527 
3528 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3529 
3530 	while (len >= L2CAP_CONF_OPT_SIZE) {
3531 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3532 
3533 		switch (type) {
3534 		case L2CAP_CONF_MTU:
3535 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3536 				*result = L2CAP_CONF_UNACCEPT;
3537 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3538 			} else
3539 				chan->imtu = val;
3540 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3541 			break;
3542 
3543 		case L2CAP_CONF_FLUSH_TO:
3544 			chan->flush_to = val;
3545 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3546 					   2, chan->flush_to);
3547 			break;
3548 
3549 		case L2CAP_CONF_RFC:
3550 			if (olen == sizeof(rfc))
3551 				memcpy(&rfc, (void *)val, olen);
3552 
3553 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3554 			    rfc.mode != chan->mode)
3555 				return -ECONNREFUSED;
3556 
3557 			chan->fcs = 0;
3558 
3559 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3560 					   sizeof(rfc), (unsigned long) &rfc);
3561 			break;
3562 
3563 		case L2CAP_CONF_EWS:
3564 			chan->ack_win = min_t(u16, val, chan->ack_win);
3565 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3566 					   chan->tx_win);
3567 			break;
3568 
3569 		case L2CAP_CONF_EFS:
3570 			if (olen == sizeof(efs))
3571 				memcpy(&efs, (void *)val, olen);
3572 
3573 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3574 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3575 			    efs.stype != chan->local_stype)
3576 				return -ECONNREFUSED;
3577 
3578 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3579 					   (unsigned long) &efs);
3580 			break;
3581 
3582 		case L2CAP_CONF_FCS:
3583 			if (*result == L2CAP_CONF_PENDING)
3584 				if (val == L2CAP_FCS_NONE)
3585 					set_bit(CONF_RECV_NO_FCS,
3586 						&chan->conf_state);
3587 			break;
3588 		}
3589 	}
3590 
3591 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3592 		return -ECONNREFUSED;
3593 
3594 	chan->mode = rfc.mode;
3595 
3596 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3597 		switch (rfc.mode) {
3598 		case L2CAP_MODE_ERTM:
3599 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3600 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3601 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3602 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3603 				chan->ack_win = min_t(u16, chan->ack_win,
3604 						      rfc.txwin_size);
3605 
3606 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3607 				chan->local_msdu = le16_to_cpu(efs.msdu);
3608 				chan->local_sdu_itime =
3609 					le32_to_cpu(efs.sdu_itime);
3610 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3611 				chan->local_flush_to =
3612 					le32_to_cpu(efs.flush_to);
3613 			}
3614 			break;
3615 
3616 		case L2CAP_MODE_STREAMING:
3617 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3618 		}
3619 	}
3620 
3621 	req->dcid   = cpu_to_le16(chan->dcid);
3622 	req->flags  = cpu_to_le16(0);
3623 
3624 	return ptr - data;
3625 }
3626 
3627 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3628 				u16 result, u16 flags)
3629 {
3630 	struct l2cap_conf_rsp *rsp = data;
3631 	void *ptr = rsp->data;
3632 
3633 	BT_DBG("chan %p", chan);
3634 
3635 	rsp->scid   = cpu_to_le16(chan->dcid);
3636 	rsp->result = cpu_to_le16(result);
3637 	rsp->flags  = cpu_to_le16(flags);
3638 
3639 	return ptr - data;
3640 }
3641 
3642 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3643 {
3644 	struct l2cap_le_conn_rsp rsp;
3645 	struct l2cap_conn *conn = chan->conn;
3646 
3647 	BT_DBG("chan %p", chan);
3648 
3649 	rsp.dcid    = cpu_to_le16(chan->scid);
3650 	rsp.mtu     = cpu_to_le16(chan->imtu);
3651 	rsp.mps     = cpu_to_le16(chan->mps);
3652 	rsp.credits = cpu_to_le16(chan->rx_credits);
3653 	rsp.result  = cpu_to_le16(L2CAP_CR_SUCCESS);
3654 
3655 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3656 		       &rsp);
3657 }
3658 
3659 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3660 {
3661 	struct l2cap_conn_rsp rsp;
3662 	struct l2cap_conn *conn = chan->conn;
3663 	u8 buf[128];
3664 	u8 rsp_code;
3665 
3666 	rsp.scid   = cpu_to_le16(chan->dcid);
3667 	rsp.dcid   = cpu_to_le16(chan->scid);
3668 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3669 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3670 
3671 	if (chan->hs_hcon)
3672 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3673 	else
3674 		rsp_code = L2CAP_CONN_RSP;
3675 
3676 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3677 
3678 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3679 
3680 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3681 		return;
3682 
3683 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3684 		       l2cap_build_conf_req(chan, buf), buf);
3685 	chan->num_conf_req++;
3686 }
3687 
3688 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3689 {
3690 	int type, olen;
3691 	unsigned long val;
3692 	/* Use sane default values in case a misbehaving remote device
3693 	 * did not send an RFC or extended window size option.
3694 	 */
3695 	u16 txwin_ext = chan->ack_win;
3696 	struct l2cap_conf_rfc rfc = {
3697 		.mode = chan->mode,
3698 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3699 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3700 		.max_pdu_size = cpu_to_le16(chan->imtu),
3701 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3702 	};
3703 
3704 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3705 
3706 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3707 		return;
3708 
3709 	while (len >= L2CAP_CONF_OPT_SIZE) {
3710 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3711 
3712 		switch (type) {
3713 		case L2CAP_CONF_RFC:
3714 			if (olen == sizeof(rfc))
3715 				memcpy(&rfc, (void *)val, olen);
3716 			break;
3717 		case L2CAP_CONF_EWS:
3718 			txwin_ext = val;
3719 			break;
3720 		}
3721 	}
3722 
3723 	switch (rfc.mode) {
3724 	case L2CAP_MODE_ERTM:
3725 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3726 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3727 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
3728 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3729 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3730 		else
3731 			chan->ack_win = min_t(u16, chan->ack_win,
3732 					      rfc.txwin_size);
3733 		break;
3734 	case L2CAP_MODE_STREAMING:
3735 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3736 	}
3737 }
3738 
3739 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3740 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3741 				    u8 *data)
3742 {
3743 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3744 
3745 	if (cmd_len < sizeof(*rej))
3746 		return -EPROTO;
3747 
3748 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3749 		return 0;
3750 
3751 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3752 	    cmd->ident == conn->info_ident) {
3753 		cancel_delayed_work(&conn->info_timer);
3754 
3755 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3756 		conn->info_ident = 0;
3757 
3758 		l2cap_conn_start(conn);
3759 	}
3760 
3761 	return 0;
3762 }
3763 
3764 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3765 					struct l2cap_cmd_hdr *cmd,
3766 					u8 *data, u8 rsp_code, u8 amp_id)
3767 {
3768 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3769 	struct l2cap_conn_rsp rsp;
3770 	struct l2cap_chan *chan = NULL, *pchan;
3771 	int result, status = L2CAP_CS_NO_INFO;
3772 
3773 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3774 	__le16 psm = req->psm;
3775 
3776 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3777 
3778 	/* Check if we have socket listening on psm */
3779 	/* Check if we have a socket listening on this psm */
3780 					 &conn->hcon->dst, ACL_LINK);
3781 	if (!pchan) {
3782 		result = L2CAP_CR_BAD_PSM;
3783 		goto sendresp;
3784 	}
3785 
3786 	mutex_lock(&conn->chan_lock);
3787 	l2cap_chan_lock(pchan);
3788 
3789 	/* Check if the ACL is secure enough (if not SDP) */
3790 	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3791 	    !hci_conn_check_link_mode(conn->hcon)) {
3792 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3793 		result = L2CAP_CR_SEC_BLOCK;
3794 		goto response;
3795 	}
3796 
3797 	result = L2CAP_CR_NO_MEM;
3798 
3799 	/* Check if we already have channel with that dcid */
3800 	/* Check if we already have a channel with that dcid */
3801 		goto response;
3802 
3803 	chan = pchan->ops->new_connection(pchan);
3804 	if (!chan)
3805 		goto response;
3806 
3807 	/* For certain devices (e.g. a HID mouse), support for authentication,
3808 	 * pairing and bonding is optional. For such devices, in order to avoid
3809 	 * keeping the ACL alive for too long after L2CAP disconnection, reset
3810 	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3811 	 */
3812 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3813 
3814 	bacpy(&chan->src, &conn->hcon->src);
3815 	bacpy(&chan->dst, &conn->hcon->dst);
3816 	chan->src_type = bdaddr_src_type(conn->hcon);
3817 	chan->dst_type = bdaddr_dst_type(conn->hcon);
3818 	chan->psm  = psm;
3819 	chan->dcid = scid;
3820 	chan->local_amp_id = amp_id;
3821 
3822 	__l2cap_chan_add(conn, chan);
3823 
3824 	dcid = chan->scid;
3825 
3826 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3827 
3828 	chan->ident = cmd->ident;
3829 
3830 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3831 		if (l2cap_chan_check_security(chan, false)) {
3832 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3833 				l2cap_state_change(chan, BT_CONNECT2);
3834 				result = L2CAP_CR_PEND;
3835 				status = L2CAP_CS_AUTHOR_PEND;
3836 				chan->ops->defer(chan);
3837 			} else {
3838 				/* Force pending result for AMP controllers.
3839 				 * The connection will succeed after the
3840 				 * physical link is up.
3841 				 */
3842 				if (amp_id == AMP_ID_BREDR) {
3843 					l2cap_state_change(chan, BT_CONFIG);
3844 					result = L2CAP_CR_SUCCESS;
3845 				} else {
3846 					l2cap_state_change(chan, BT_CONNECT2);
3847 					result = L2CAP_CR_PEND;
3848 				}
3849 				status = L2CAP_CS_NO_INFO;
3850 			}
3851 		} else {
3852 			l2cap_state_change(chan, BT_CONNECT2);
3853 			result = L2CAP_CR_PEND;
3854 			status = L2CAP_CS_AUTHEN_PEND;
3855 		}
3856 	} else {
3857 		l2cap_state_change(chan, BT_CONNECT2);
3858 		result = L2CAP_CR_PEND;
3859 		status = L2CAP_CS_NO_INFO;
3860 	}
3861 
3862 response:
3863 	l2cap_chan_unlock(pchan);
3864 	mutex_unlock(&conn->chan_lock);
3865 	l2cap_chan_put(pchan);
3866 
3867 sendresp:
3868 	rsp.scid   = cpu_to_le16(scid);
3869 	rsp.dcid   = cpu_to_le16(dcid);
3870 	rsp.result = cpu_to_le16(result);
3871 	rsp.status = cpu_to_le16(status);
3872 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3873 
3874 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3875 		struct l2cap_info_req info;
3876 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3877 
3878 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3879 		conn->info_ident = l2cap_get_ident(conn);
3880 
3881 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3882 
3883 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3884 			       sizeof(info), &info);
3885 	}
3886 
3887 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3888 	    result == L2CAP_CR_SUCCESS) {
3889 		u8 buf[128];
3890 		set_bit(CONF_REQ_SENT, &chan->conf_state);
3891 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3892 			       l2cap_build_conf_req(chan, buf), buf);
3893 		chan->num_conf_req++;
3894 	}
3895 
3896 	return chan;
3897 }
3898 
3899 static int l2cap_connect_req(struct l2cap_conn *conn,
3900 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3901 {
3902 	struct hci_dev *hdev = conn->hcon->hdev;
3903 	struct hci_conn *hcon = conn->hcon;
3904 
3905 	if (cmd_len < sizeof(struct l2cap_conn_req))
3906 		return -EPROTO;
3907 
3908 	hci_dev_lock(hdev);
3909 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3910 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3911 		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3912 	hci_dev_unlock(hdev);
3913 
3914 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3915 	return 0;
3916 }
3917 
3918 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3919 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3920 				    u8 *data)
3921 {
3922 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3923 	u16 scid, dcid, result, status;
3924 	struct l2cap_chan *chan;
3925 	u8 req[128];
3926 	int err;
3927 
3928 	if (cmd_len < sizeof(*rsp))
3929 		return -EPROTO;
3930 
3931 	scid   = __le16_to_cpu(rsp->scid);
3932 	dcid   = __le16_to_cpu(rsp->dcid);
3933 	result = __le16_to_cpu(rsp->result);
3934 	status = __le16_to_cpu(rsp->status);
3935 
3936 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3937 	       dcid, scid, result, status);
3938 
3939 	mutex_lock(&conn->chan_lock);
3940 
3941 	if (scid) {
3942 		chan = __l2cap_get_chan_by_scid(conn, scid);
3943 		if (!chan) {
3944 			err = -EBADSLT;
3945 			goto unlock;
3946 		}
3947 	} else {
3948 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3949 		if (!chan) {
3950 			err = -EBADSLT;
3951 			goto unlock;
3952 		}
3953 	}
3954 
3955 	err = 0;
3956 
3957 	l2cap_chan_lock(chan);
3958 
3959 	switch (result) {
3960 	case L2CAP_CR_SUCCESS:
3961 		l2cap_state_change(chan, BT_CONFIG);
3962 		chan->ident = 0;
3963 		chan->dcid = dcid;
3964 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3965 
3966 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3967 			break;
3968 
3969 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3970 			       l2cap_build_conf_req(chan, req), req);
3971 		chan->num_conf_req++;
3972 		break;
3973 
3974 	case L2CAP_CR_PEND:
3975 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3976 		break;
3977 
3978 	default:
3979 		l2cap_chan_del(chan, ECONNREFUSED);
3980 		break;
3981 	}
3982 
3983 	l2cap_chan_unlock(chan);
3984 
3985 unlock:
3986 	mutex_unlock(&conn->chan_lock);
3987 
3988 	return err;
3989 }
3990 
3991 static inline void set_default_fcs(struct l2cap_chan *chan)
3992 {
3993 	/* FCS is enabled only in ERTM or streaming mode, if one or both
3994 	 * sides request it.
3995 	 */
3996 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3997 		chan->fcs = L2CAP_FCS_NONE;
3998 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3999 		chan->fcs = L2CAP_FCS_CRC16;
4000 }
4001 
4002 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4003 				    u8 ident, u16 flags)
4004 {
4005 	struct l2cap_conn *conn = chan->conn;
4006 
4007 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4008 	       flags);
4009 
4010 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4011 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4012 
4013 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4014 		       l2cap_build_conf_rsp(chan, data,
4015 					    L2CAP_CONF_SUCCESS, flags), data);
4016 }
4017 
4018 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4019 				   u16 scid, u16 dcid)
4020 {
4021 	struct l2cap_cmd_rej_cid rej;
4022 
4023 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4024 	rej.scid = cpu_to_le16(scid);
4025 	rej.dcid = cpu_to_le16(dcid);
4026 
4027 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4028 }
4029 
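/* Handle a Configure Request: accumulate continuation fragments, parse
 * the options and answer with a Configure Response (plus our own request
 * if we have not sent one yet).
 */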
4030 static inline int l2cap_config_req(struct l2cap_conn *conn,
4031 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4032 				   u8 *data)
4033 {
4034 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4035 	u16 dcid, flags;
4036 	u8 rsp[64];
4037 	struct l2cap_chan *chan;
4038 	int len, err = 0;
4039 
4040 	if (cmd_len < sizeof(*req))
4041 		return -EPROTO;
4042 
4043 	dcid  = __le16_to_cpu(req->dcid);
4044 	flags = __le16_to_cpu(req->flags);
4045 
4046 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4047 
4048 	chan = l2cap_get_chan_by_scid(conn, dcid);
4049 	if (!chan) {
4050 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4051 		return 0;
4052 	}
4053 
4054 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4055 		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4056 				       chan->dcid);
4057 		goto unlock;
4058 	}
4059 
4060 	/* Reject if config buffer is too small. */
4061 	len = cmd_len - sizeof(*req);
4062 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
4063 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4064 			       l2cap_build_conf_rsp(chan, rsp,
4065 			       L2CAP_CONF_REJECT, flags), rsp);
4066 		goto unlock;
4067 	}
4068 
4069 	/* Store config. */
4070 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
4071 	chan->conf_len += len;
4072 
4073 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4074 		/* Incomplete config. Send empty response. */
4075 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4076 			       l2cap_build_conf_rsp(chan, rsp,
4077 			       L2CAP_CONF_SUCCESS, flags), rsp);
4078 		goto unlock;
4079 	}
4080 
4081 	/* Complete config. */
4082 	len = l2cap_parse_conf_req(chan, rsp);
4083 	if (len < 0) {
4084 		l2cap_send_disconn_req(chan, ECONNRESET);
4085 		goto unlock;
4086 	}
4087 
4088 	chan->ident = cmd->ident;
4089 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4090 	chan->num_conf_rsp++;
4091 
4092 	/* Reset config buffer. */
4093 	chan->conf_len = 0;
4094 
4095 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4096 		goto unlock;
4097 
4098 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4099 		set_default_fcs(chan);
4100 
4101 		if (chan->mode == L2CAP_MODE_ERTM ||
4102 		    chan->mode == L2CAP_MODE_STREAMING)
4103 			err = l2cap_ertm_init(chan);
4104 
4105 		if (err < 0)
4106 			l2cap_send_disconn_req(chan, -err);
4107 		else
4108 			l2cap_chan_ready(chan);
4109 
4110 		goto unlock;
4111 	}
4112 
4113 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4114 		u8 buf[64];
4115 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4116 			       l2cap_build_conf_req(chan, buf), buf);
4117 		chan->num_conf_req++;
4118 	}
4119 
4120 	/* Got a Conf Rsp PENDING from the remote side and assume we sent
4121 	 * our own Conf Rsp PENDING in the code above */
4122 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4123 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4124 
4125 		/* check compatibility */
4126 
4127 		/* Send rsp for BR/EDR channel */
4128 		if (!chan->hs_hcon)
4129 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4130 		else
4131 			chan->ident = cmd->ident;
4132 	}
4133 
4134 unlock:
4135 	l2cap_chan_unlock(chan);
4136 	return err;
4137 }
4138 
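/* Handle a Configure Response and finish channel setup once both
 * directions have completed configuration.
 */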
4139 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4140 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4141 				   u8 *data)
4142 {
4143 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4144 	u16 scid, flags, result;
4145 	struct l2cap_chan *chan;
4146 	int len = cmd_len - sizeof(*rsp);
4147 	int err = 0;
4148 
4149 	if (cmd_len < sizeof(*rsp))
4150 		return -EPROTO;
4151 
4152 	scid   = __le16_to_cpu(rsp->scid);
4153 	flags  = __le16_to_cpu(rsp->flags);
4154 	result = __le16_to_cpu(rsp->result);
4155 
4156 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4157 	       result, len);
4158 
4159 	chan = l2cap_get_chan_by_scid(conn, scid);
4160 	if (!chan)
4161 		return 0;
4162 
4163 	switch (result) {
4164 	case L2CAP_CONF_SUCCESS:
4165 		l2cap_conf_rfc_get(chan, rsp->data, len);
4166 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4167 		break;
4168 
4169 	case L2CAP_CONF_PENDING:
4170 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4171 
4172 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4173 			char buf[64];
4174 
4175 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4176 						   buf, &result);
4177 			if (len < 0) {
4178 				l2cap_send_disconn_req(chan, ECONNRESET);
4179 				goto done;
4180 			}
4181 
4182 			if (!chan->hs_hcon) {
4183 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4184 							0);
4185 			} else {
4186 				if (l2cap_check_efs(chan)) {
4187 					amp_create_logical_link(chan);
4188 					chan->ident = cmd->ident;
4189 				}
4190 			}
4191 		}
4192 		goto done;
4193 
4194 	case L2CAP_CONF_UNACCEPT:
4195 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4196 			char req[64];
4197 
4198 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4199 				l2cap_send_disconn_req(chan, ECONNRESET);
4200 				goto done;
4201 			}
4202 
4203 			/* throw out any old stored conf requests */
4204 			result = L2CAP_CONF_SUCCESS;
4205 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4206 						   req, &result);
4207 			if (len < 0) {
4208 				l2cap_send_disconn_req(chan, ECONNRESET);
4209 				goto done;
4210 			}
4211 
4212 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4213 				       L2CAP_CONF_REQ, len, req);
4214 			chan->num_conf_req++;
4215 			if (result != L2CAP_CONF_SUCCESS)
4216 				goto done;
4217 			break;
4218 		}
4219 
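		/* fall through */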
4220 	default:
4221 		l2cap_chan_set_err(chan, ECONNRESET);
4222 
4223 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4224 		l2cap_send_disconn_req(chan, ECONNRESET);
4225 		goto done;
4226 	}
4227 
4228 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4229 		goto done;
4230 
4231 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4232 
4233 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4234 		set_default_fcs(chan);
4235 
4236 		if (chan->mode == L2CAP_MODE_ERTM ||
4237 		    chan->mode == L2CAP_MODE_STREAMING)
4238 			err = l2cap_ertm_init(chan);
4239 
4240 		if (err < 0)
4241 			l2cap_send_disconn_req(chan, -err);
4242 		else
4243 			l2cap_chan_ready(chan);
4244 	}
4245 
4246 done:
4247 	l2cap_chan_unlock(chan);
4248 	return err;
4249 }
4250 
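/* Handle a Disconnection Request: acknowledge it and tear the channel down */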
4251 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4252 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4253 				       u8 *data)
4254 {
4255 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4256 	struct l2cap_disconn_rsp rsp;
4257 	u16 dcid, scid;
4258 	struct l2cap_chan *chan;
4259 
4260 	if (cmd_len != sizeof(*req))
4261 		return -EPROTO;
4262 
4263 	scid = __le16_to_cpu(req->scid);
4264 	dcid = __le16_to_cpu(req->dcid);
4265 
4266 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4267 
4268 	mutex_lock(&conn->chan_lock);
4269 
4270 	chan = __l2cap_get_chan_by_scid(conn, dcid);
4271 	if (!chan) {
4272 		mutex_unlock(&conn->chan_lock);
4273 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4274 		return 0;
4275 	}
4276 
4277 	l2cap_chan_lock(chan);
4278 
4279 	rsp.dcid = cpu_to_le16(chan->scid);
4280 	rsp.scid = cpu_to_le16(chan->dcid);
4281 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4282 
4283 	chan->ops->set_shutdown(chan);
4284 
4285 	l2cap_chan_hold(chan);
4286 	l2cap_chan_del(chan, ECONNRESET);
4287 
4288 	l2cap_chan_unlock(chan);
4289 
4290 	chan->ops->close(chan);
4291 	l2cap_chan_put(chan);
4292 
4293 	mutex_unlock(&conn->chan_lock);
4294 
4295 	return 0;
4296 }
4297 
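/* Handle a Disconnection Response for a locally initiated disconnect */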
4298 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4299 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4300 				       u8 *data)
4301 {
4302 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4303 	u16 dcid, scid;
4304 	struct l2cap_chan *chan;
4305 
4306 	if (cmd_len != sizeof(*rsp))
4307 		return -EPROTO;
4308 
4309 	scid = __le16_to_cpu(rsp->scid);
4310 	dcid = __le16_to_cpu(rsp->dcid);
4311 
4312 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4313 
4314 	mutex_lock(&conn->chan_lock);
4315 
4316 	chan = __l2cap_get_chan_by_scid(conn, scid);
4317 	if (!chan) {
4318 		mutex_unlock(&conn->chan_lock);
4319 		return 0;
4320 	}
4321 
4322 	l2cap_chan_lock(chan);
4323 
4324 	l2cap_chan_hold(chan);
4325 	l2cap_chan_del(chan, 0);
4326 
4327 	l2cap_chan_unlock(chan);
4328 
4329 	chan->ops->close(chan);
4330 	l2cap_chan_put(chan);
4331 
4332 	mutex_unlock(&conn->chan_lock);
4333 
4334 	return 0;
4335 }
4336 
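/* Answer an Information Request with the feature mask or the fixed
 * channel map.
 */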
4337 static inline int l2cap_information_req(struct l2cap_conn *conn,
4338 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4339 					u8 *data)
4340 {
4341 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4342 	u16 type;
4343 
4344 	if (cmd_len != sizeof(*req))
4345 		return -EPROTO;
4346 
4347 	type = __le16_to_cpu(req->type);
4348 
4349 	BT_DBG("type 0x%4.4x", type);
4350 
4351 	if (type == L2CAP_IT_FEAT_MASK) {
4352 		u8 buf[8];
4353 		u32 feat_mask = l2cap_feat_mask;
4354 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4355 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4356 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4357 		if (!disable_ertm)
4358 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4359 				| L2CAP_FEAT_FCS;
4360 		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4361 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4362 				| L2CAP_FEAT_EXT_WINDOW;
4363 
4364 		put_unaligned_le32(feat_mask, rsp->data);
4365 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4366 			       buf);
4367 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4368 		u8 buf[12];
4369 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4370 
4371 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4372 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4373 		rsp->data[0] = conn->local_fixed_chan;
4374 		memset(rsp->data + 1, 0, 7);
4375 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4376 			       buf);
4377 	} else {
4378 		struct l2cap_info_rsp rsp;
4379 		rsp.type   = cpu_to_le16(type);
4380 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4381 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4382 			       &rsp);
4383 	}
4384 
4385 	return 0;
4386 }
4387 
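/* Process an Information Response and resume any pending connection setup */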
4388 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4389 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4390 					u8 *data)
4391 {
4392 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4393 	u16 type, result;
4394 
4395 	if (cmd_len < sizeof(*rsp))
4396 		return -EPROTO;
4397 
4398 	type   = __le16_to_cpu(rsp->type);
4399 	result = __le16_to_cpu(rsp->result);
4400 
4401 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4402 
4403 	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
4404 	if (cmd->ident != conn->info_ident ||
4405 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4406 		return 0;
4407 
4408 	cancel_delayed_work(&conn->info_timer);
4409 
4410 	if (result != L2CAP_IR_SUCCESS) {
4411 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4412 		conn->info_ident = 0;
4413 
4414 		l2cap_conn_start(conn);
4415 
4416 		return 0;
4417 	}
4418 
4419 	switch (type) {
4420 	case L2CAP_IT_FEAT_MASK:
4421 		conn->feat_mask = get_unaligned_le32(rsp->data);
4422 
4423 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4424 			struct l2cap_info_req req;
4425 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4426 
4427 			conn->info_ident = l2cap_get_ident(conn);
4428 
4429 			l2cap_send_cmd(conn, conn->info_ident,
4430 				       L2CAP_INFO_REQ, sizeof(req), &req);
4431 		} else {
4432 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4433 			conn->info_ident = 0;
4434 
4435 			l2cap_conn_start(conn);
4436 		}
4437 		break;
4438 
4439 	case L2CAP_IT_FIXED_CHAN:
4440 		conn->remote_fixed_chan = rsp->data[0];
4441 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4442 		conn->info_ident = 0;
4443 
4444 		l2cap_conn_start(conn);
4445 		break;
4446 	}
4447 
4448 	return 0;
4449 }
4450 
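/* Handle an AMP Create Channel Request; controller id 0 falls back to a
 * regular BR/EDR connection.
 */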
4451 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4452 				    struct l2cap_cmd_hdr *cmd,
4453 				    u16 cmd_len, void *data)
4454 {
4455 	struct l2cap_create_chan_req *req = data;
4456 	struct l2cap_create_chan_rsp rsp;
4457 	struct l2cap_chan *chan;
4458 	struct hci_dev *hdev;
4459 	u16 psm, scid;
4460 
4461 	if (cmd_len != sizeof(*req))
4462 		return -EPROTO;
4463 
4464 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4465 		return -EINVAL;
4466 
4467 	psm = le16_to_cpu(req->psm);
4468 	scid = le16_to_cpu(req->scid);
4469 
4470 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4471 
4472 	/* For AMP controller id 0 (AMP_ID_BREDR) make a BR/EDR connection */
4473 	if (req->amp_id == AMP_ID_BREDR) {
4474 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4475 			      req->amp_id);
4476 		return 0;
4477 	}
4478 
4479 	/* Validate AMP controller id */
4480 	hdev = hci_dev_get(req->amp_id);
4481 	if (!hdev)
4482 		goto error;
4483 
4484 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4485 		hci_dev_put(hdev);
4486 		goto error;
4487 	}
4488 
4489 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4490 			     req->amp_id);
4491 	if (chan) {
4492 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4493 		struct hci_conn *hs_hcon;
4494 
4495 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4496 						  &conn->hcon->dst);
4497 		if (!hs_hcon) {
4498 			hci_dev_put(hdev);
4499 			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4500 					       chan->dcid);
4501 			return 0;
4502 		}
4503 
4504 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4505 
4506 		mgr->bredr_chan = chan;
4507 		chan->hs_hcon = hs_hcon;
4508 		chan->fcs = L2CAP_FCS_NONE;
4509 		conn->mtu = hdev->block_mtu;
4510 	}
4511 
4512 	hci_dev_put(hdev);
4513 
4514 	return 0;
4515 
4516 error:
4517 	rsp.dcid = 0;
4518 	rsp.scid = cpu_to_le16(scid);
4519 	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4520 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4521 
4522 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4523 		       sizeof(rsp), &rsp);
4524 
4525 	return 0;
4526 }
4527 
4528 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4529 {
4530 	struct l2cap_move_chan_req req;
4531 	u8 ident;
4532 
4533 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4534 
4535 	ident = l2cap_get_ident(chan->conn);
4536 	chan->ident = ident;
4537 
4538 	req.icid = cpu_to_le16(chan->scid);
4539 	req.dest_amp_id = dest_amp_id;
4540 
4541 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4542 		       &req);
4543 
4544 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4545 }
4546 
4547 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4548 {
4549 	struct l2cap_move_chan_rsp rsp;
4550 
4551 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4552 
4553 	rsp.icid = cpu_to_le16(chan->dcid);
4554 	rsp.result = cpu_to_le16(result);
4555 
4556 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4557 		       sizeof(rsp), &rsp);
4558 }
4559 
4560 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4561 {
4562 	struct l2cap_move_chan_cfm cfm;
4563 
4564 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4565 
4566 	chan->ident = l2cap_get_ident(chan->conn);
4567 
4568 	cfm.icid = cpu_to_le16(chan->scid);
4569 	cfm.result = cpu_to_le16(result);
4570 
4571 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4572 		       sizeof(cfm), &cfm);
4573 
4574 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4575 }
4576 
4577 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4578 {
4579 	struct l2cap_move_chan_cfm cfm;
4580 
4581 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4582 
4583 	cfm.icid = cpu_to_le16(icid);
4584 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4585 
4586 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4587 		       sizeof(cfm), &cfm);
4588 }
4589 
4590 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4591 					 u16 icid)
4592 {
4593 	struct l2cap_move_chan_cfm_rsp rsp;
4594 
4595 	BT_DBG("icid 0x%4.4x", icid);
4596 
4597 	rsp.icid = cpu_to_le16(icid);
4598 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4599 }
4600 
4601 static void __release_logical_link(struct l2cap_chan *chan)
4602 {
4603 	chan->hs_hchan = NULL;
4604 	chan->hs_hcon = NULL;
4605 
4606 	/* Placeholder - release the logical link */
4607 }
4608 
4609 static void l2cap_logical_fail(struct l2cap_chan *chan)
4610 {
4611 	/* Logical link setup failed */
4612 	if (chan->state != BT_CONNECTED) {
4613 		/* Create channel failure, disconnect */
4614 		l2cap_send_disconn_req(chan, ECONNRESET);
4615 		return;
4616 	}
4617 
4618 	switch (chan->move_role) {
4619 	case L2CAP_MOVE_ROLE_RESPONDER:
4620 		l2cap_move_done(chan);
4621 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4622 		break;
4623 	case L2CAP_MOVE_ROLE_INITIATOR:
4624 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4625 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4626 			/* Remote has only sent pending or
4627 			 * success responses, clean up
4628 			 */
4629 			l2cap_move_done(chan);
4630 		}
4631 
4632 		/* Other amp move states imply that the move
4633 		 * has already aborted
4634 		 */
4635 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4636 		break;
4637 	}
4638 }
4639 
4640 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4641 					struct hci_chan *hchan)
4642 {
4643 	struct l2cap_conf_rsp rsp;
4644 
4645 	chan->hs_hchan = hchan;
4646 	chan->hs_hcon->l2cap_data = chan->conn;
4647 
4648 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4649 
4650 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4651 		int err;
4652 
4653 		set_default_fcs(chan);
4654 
4655 		err = l2cap_ertm_init(chan);
4656 		if (err < 0)
4657 			l2cap_send_disconn_req(chan, -err);
4658 		else
4659 			l2cap_chan_ready(chan);
4660 	}
4661 }
4662 
4663 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4664 				      struct hci_chan *hchan)
4665 {
4666 	chan->hs_hcon = hchan->conn;
4667 	chan->hs_hcon->l2cap_data = chan->conn;
4668 
4669 	BT_DBG("move_state %d", chan->move_state);
4670 
4671 	switch (chan->move_state) {
4672 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4673 		/* Move confirm will be sent after a success
4674 		 * response is received
4675 		 */
4676 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4677 		break;
4678 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4679 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4680 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4681 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4682 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4683 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4684 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4685 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4686 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4687 		}
4688 		break;
4689 	default:
4690 		/* Move was not in expected state, free the channel */
4691 		__release_logical_link(chan);
4692 
4693 		chan->move_state = L2CAP_MOVE_STABLE;
4694 	}
4695 }
4696 
4697 /* Call with chan locked */
4698 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4699 		       u8 status)
4700 {
4701 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4702 
4703 	if (status) {
4704 		l2cap_logical_fail(chan);
4705 		__release_logical_link(chan);
4706 		return;
4707 	}
4708 
4709 	if (chan->state != BT_CONNECTED) {
4710 		/* Ignore logical link if channel is on BR/EDR */
4711 		if (chan->local_amp_id != AMP_ID_BREDR)
4712 			l2cap_logical_finish_create(chan, hchan);
4713 	} else {
4714 		l2cap_logical_finish_move(chan, hchan);
4715 	}
4716 }
4717 
4718 void l2cap_move_start(struct l2cap_chan *chan)
4719 {
4720 	BT_DBG("chan %p", chan);
4721 
4722 	if (chan->local_amp_id == AMP_ID_BREDR) {
4723 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4724 			return;
4725 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4726 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4727 		/* Placeholder - start physical link setup */
4728 	} else {
4729 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4730 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4731 		chan->move_id = 0;
4732 		l2cap_move_setup(chan);
4733 		l2cap_send_move_chan_req(chan, 0);
4734 	}
4735 }
4736 
4737 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4738 			    u8 local_amp_id, u8 remote_amp_id)
4739 {
4740 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4741 	       local_amp_id, remote_amp_id);
4742 
4743 	chan->fcs = L2CAP_FCS_NONE;
4744 
4745 	/* Outgoing channel on AMP */
4746 	if (chan->state == BT_CONNECT) {
4747 		if (result == L2CAP_CR_SUCCESS) {
4748 			chan->local_amp_id = local_amp_id;
4749 			l2cap_send_create_chan_req(chan, remote_amp_id);
4750 		} else {
4751 			/* Revert to BR/EDR connect */
4752 			l2cap_send_conn_req(chan);
4753 		}
4754 
4755 		return;
4756 	}
4757 
4758 	/* Incoming channel on AMP */
4759 	if (__l2cap_no_conn_pending(chan)) {
4760 		struct l2cap_conn_rsp rsp;
4761 		char buf[128];
4762 		rsp.scid = cpu_to_le16(chan->dcid);
4763 		rsp.dcid = cpu_to_le16(chan->scid);
4764 
4765 		if (result == L2CAP_CR_SUCCESS) {
4766 			/* Send successful response */
4767 			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4768 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4769 		} else {
4770 			/* Send negative response */
4771 			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4772 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4773 		}
4774 
4775 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4776 			       sizeof(rsp), &rsp);
4777 
4778 		if (result == L2CAP_CR_SUCCESS) {
4779 			l2cap_state_change(chan, BT_CONFIG);
4780 			set_bit(CONF_REQ_SENT, &chan->conf_state);
4781 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4782 				       L2CAP_CONF_REQ,
4783 				       l2cap_build_conf_req(chan, buf), buf);
4784 			chan->num_conf_req++;
4785 		}
4786 	}
4787 }
4788 
4789 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4790 				   u8 remote_amp_id)
4791 {
4792 	l2cap_move_setup(chan);
4793 	chan->move_id = local_amp_id;
4794 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
4795 
4796 	l2cap_send_move_chan_req(chan, remote_amp_id);
4797 }
4798 
4799 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4800 {
4801 	struct hci_chan *hchan = NULL;
4802 
4803 	/* Placeholder - get hci_chan for logical link */
4804 
4805 	if (hchan) {
4806 		if (hchan->state == BT_CONNECTED) {
4807 			/* Logical link is ready to go */
4808 			chan->hs_hcon = hchan->conn;
4809 			chan->hs_hcon->l2cap_data = chan->conn;
4810 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4811 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4812 
4813 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4814 		} else {
4815 			/* Wait for logical link to be ready */
4816 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4817 		}
4818 	} else {
4819 		/* Logical link not available */
4820 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4821 	}
4822 }
4823 
4824 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4825 {
4826 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4827 		u8 rsp_result;
4828 		if (result == -EINVAL)
4829 			rsp_result = L2CAP_MR_BAD_ID;
4830 		else
4831 			rsp_result = L2CAP_MR_NOT_ALLOWED;
4832 
4833 		l2cap_send_move_chan_rsp(chan, rsp_result);
4834 	}
4835 
4836 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
4837 	chan->move_state = L2CAP_MOVE_STABLE;
4838 
4839 	/* Restart data transmission */
4840 	l2cap_ertm_send(chan);
4841 }
4842 
4843 /* Invoke with locked chan */
4844 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4845 {
4846 	u8 local_amp_id = chan->local_amp_id;
4847 	u8 remote_amp_id = chan->remote_amp_id;
4848 
4849 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4850 	       chan, result, local_amp_id, remote_amp_id);
4851 
4852 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4853 		l2cap_chan_unlock(chan);
4854 		return;
4855 	}
4856 
4857 	if (chan->state != BT_CONNECTED) {
4858 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4859 	} else if (result != L2CAP_MR_SUCCESS) {
4860 		l2cap_do_move_cancel(chan, result);
4861 	} else {
4862 		switch (chan->move_role) {
4863 		case L2CAP_MOVE_ROLE_INITIATOR:
4864 			l2cap_do_move_initiate(chan, local_amp_id,
4865 					       remote_amp_id);
4866 			break;
4867 		case L2CAP_MOVE_ROLE_RESPONDER:
4868 			l2cap_do_move_respond(chan, result);
4869 			break;
4870 		default:
4871 			l2cap_do_move_cancel(chan, result);
4872 			break;
4873 		}
4874 	}
4875 }
4876 
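/* Handle a Move Channel Request as the responder of an AMP channel move */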
4877 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4878 					 struct l2cap_cmd_hdr *cmd,
4879 					 u16 cmd_len, void *data)
4880 {
4881 	struct l2cap_move_chan_req *req = data;
4882 	struct l2cap_move_chan_rsp rsp;
4883 	struct l2cap_chan *chan;
4884 	u16 icid = 0;
4885 	u16 result = L2CAP_MR_NOT_ALLOWED;
4886 
4887 	if (cmd_len != sizeof(*req))
4888 		return -EPROTO;
4889 
4890 	icid = le16_to_cpu(req->icid);
4891 
4892 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4893 
4894 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4895 		return -EINVAL;
4896 
4897 	chan = l2cap_get_chan_by_dcid(conn, icid);
4898 	if (!chan) {
4899 		rsp.icid = cpu_to_le16(icid);
4900 		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4901 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4902 			       sizeof(rsp), &rsp);
4903 		return 0;
4904 	}
4905 
4906 	chan->ident = cmd->ident;
4907 
4908 	if (chan->scid < L2CAP_CID_DYN_START ||
4909 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4910 	    (chan->mode != L2CAP_MODE_ERTM &&
4911 	     chan->mode != L2CAP_MODE_STREAMING)) {
4912 		result = L2CAP_MR_NOT_ALLOWED;
4913 		goto send_move_response;
4914 	}
4915 
4916 	if (chan->local_amp_id == req->dest_amp_id) {
4917 		result = L2CAP_MR_SAME_ID;
4918 		goto send_move_response;
4919 	}
4920 
4921 	if (req->dest_amp_id != AMP_ID_BREDR) {
4922 		struct hci_dev *hdev;
4923 		hdev = hci_dev_get(req->dest_amp_id);
4924 		if (!hdev || hdev->dev_type != HCI_AMP ||
4925 		    !test_bit(HCI_UP, &hdev->flags)) {
4926 			if (hdev)
4927 				hci_dev_put(hdev);
4928 
4929 			result = L2CAP_MR_BAD_ID;
4930 			goto send_move_response;
4931 		}
4932 		hci_dev_put(hdev);
4933 	}
4934 
4935 	/* Detect a move collision.  Only send a collision response
4936 	 * if this side has "lost", otherwise proceed with the move.
4937 	 * The winner has the larger bd_addr.
4938 	 */
4939 	if ((__chan_is_moving(chan) ||
4940 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4941 	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4942 		result = L2CAP_MR_COLLISION;
4943 		goto send_move_response;
4944 	}
4945 
4946 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4947 	l2cap_move_setup(chan);
4948 	chan->move_id = req->dest_amp_id;
4949 	icid = chan->dcid;
4950 
4951 	if (req->dest_amp_id == AMP_ID_BREDR) {
4952 		/* Moving to BR/EDR */
4953 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4954 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4955 			result = L2CAP_MR_PEND;
4956 		} else {
4957 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4958 			result = L2CAP_MR_SUCCESS;
4959 		}
4960 	} else {
4961 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4962 		/* Placeholder - uncomment when amp functions are available */
4963 		/*amp_accept_physical(chan, req->dest_amp_id);*/
4964 		result = L2CAP_MR_PEND;
4965 	}
4966 
4967 send_move_response:
4968 	l2cap_send_move_chan_rsp(chan, result);
4969 
4970 	l2cap_chan_unlock(chan);
4971 
4972 	return 0;
4973 }
4974 
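/* Continue an AMP channel move after a successful or pending
 * Move Channel Response.
 */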
4975 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4976 {
4977 	struct l2cap_chan *chan;
4978 	struct hci_chan *hchan = NULL;
4979 
4980 	chan = l2cap_get_chan_by_scid(conn, icid);
4981 	if (!chan) {
4982 		l2cap_send_move_chan_cfm_icid(conn, icid);
4983 		return;
4984 	}
4985 
4986 	__clear_chan_timer(chan);
4987 	if (result == L2CAP_MR_PEND)
4988 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4989 
4990 	switch (chan->move_state) {
4991 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4992 		/* Move confirm will be sent when logical link
4993 		 * is complete.
4994 		 */
4995 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4996 		break;
4997 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4998 		if (result == L2CAP_MR_PEND) {
4999 			break;
5000 		} else if (test_bit(CONN_LOCAL_BUSY,
5001 				    &chan->conn_state)) {
5002 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5003 		} else {
5004 			/* Logical link is up or moving to BR/EDR,
5005 			 * proceed with move
5006 			 */
5007 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5008 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5009 		}
5010 		break;
5011 	case L2CAP_MOVE_WAIT_RSP:
5012 		/* Moving to AMP */
5013 		if (result == L2CAP_MR_SUCCESS) {
5014 			/* Remote is ready, send confirm immediately
5015 			 * after logical link is ready
5016 			 */
5017 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5018 		} else {
5019 			/* Both logical link and move success
5020 			 * are required to confirm
5021 			 */
5022 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5023 		}
5024 
5025 		/* Placeholder - get hci_chan for logical link */
5026 		if (!hchan) {
5027 			/* Logical link not available */
5028 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5029 			break;
5030 		}
5031 
5032 		/* If the logical link is not yet connected, do not
5033 		 * send confirmation.
5034 		 */
5035 		if (hchan->state != BT_CONNECTED)
5036 			break;
5037 
5038 		/* Logical link is already ready to go */
5039 
5040 		chan->hs_hcon = hchan->conn;
5041 		chan->hs_hcon->l2cap_data = chan->conn;
5042 
5043 		if (result == L2CAP_MR_SUCCESS) {
5044 			/* Can confirm now */
5045 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5046 		} else {
5047 			/* Now only need move success
5048 			 * to confirm
5049 			 */
5050 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5051 		}
5052 
5053 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5054 		break;
5055 	default:
5056 		/* Any other amp move state means the move failed. */
5057 		chan->move_id = chan->local_amp_id;
5058 		l2cap_move_done(chan);
5059 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5060 	}
5061 
5062 	l2cap_chan_unlock(chan);
5063 }
5064 
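/* Handle a failed Move Channel Response: resolve a collision or cancel
 * the move.
 */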
5065 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5066 			    u16 result)
5067 {
5068 	struct l2cap_chan *chan;
5069 
5070 	chan = l2cap_get_chan_by_ident(conn, ident);
5071 	if (!chan) {
5072 		/* Could not locate channel, icid is best guess */
5073 		l2cap_send_move_chan_cfm_icid(conn, icid);
5074 		return;
5075 	}
5076 
5077 	__clear_chan_timer(chan);
5078 
5079 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5080 		if (result == L2CAP_MR_COLLISION) {
5081 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5082 		} else {
5083 			/* Cleanup - cancel move */
5084 			chan->move_id = chan->local_amp_id;
5085 			l2cap_move_done(chan);
5086 		}
5087 	}
5088 
5089 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5090 
5091 	l2cap_chan_unlock(chan);
5092 }
5093 
5094 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5095 				  struct l2cap_cmd_hdr *cmd,
5096 				  u16 cmd_len, void *data)
5097 {
5098 	struct l2cap_move_chan_rsp *rsp = data;
5099 	u16 icid, result;
5100 
5101 	if (cmd_len != sizeof(*rsp))
5102 		return -EPROTO;
5103 
5104 	icid = le16_to_cpu(rsp->icid);
5105 	result = le16_to_cpu(rsp->result);
5106 
5107 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5108 
5109 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5110 		l2cap_move_continue(conn, icid, result);
5111 	else
5112 		l2cap_move_fail(conn, cmd->ident, icid, result);
5113 
5114 	return 0;
5115 }
5116 
5117 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5118 				      struct l2cap_cmd_hdr *cmd,
5119 				      u16 cmd_len, void *data)
5120 {
5121 	struct l2cap_move_chan_cfm *cfm = data;
5122 	struct l2cap_chan *chan;
5123 	u16 icid, result;
5124 
5125 	if (cmd_len != sizeof(*cfm))
5126 		return -EPROTO;
5127 
5128 	icid = le16_to_cpu(cfm->icid);
5129 	result = le16_to_cpu(cfm->result);
5130 
5131 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5132 
5133 	chan = l2cap_get_chan_by_dcid(conn, icid);
5134 	if (!chan) {
5135 		/* Spec requires a response even if the icid was not found */
5136 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5137 		return 0;
5138 	}
5139 
5140 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5141 		if (result == L2CAP_MC_CONFIRMED) {
5142 			chan->local_amp_id = chan->move_id;
5143 			if (chan->local_amp_id == AMP_ID_BREDR)
5144 				__release_logical_link(chan);
5145 		} else {
5146 			chan->move_id = chan->local_amp_id;
5147 		}
5148 
5149 		l2cap_move_done(chan);
5150 	}
5151 
5152 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5153 
5154 	l2cap_chan_unlock(chan);
5155 
5156 	return 0;
5157 }
5158 
5159 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5160 						 struct l2cap_cmd_hdr *cmd,
5161 						 u16 cmd_len, void *data)
5162 {
5163 	struct l2cap_move_chan_cfm_rsp *rsp = data;
5164 	struct l2cap_chan *chan;
5165 	u16 icid;
5166 
5167 	if (cmd_len != sizeof(*rsp))
5168 		return -EPROTO;
5169 
5170 	icid = le16_to_cpu(rsp->icid);
5171 
5172 	BT_DBG("icid 0x%4.4x", icid);
5173 
5174 	chan = l2cap_get_chan_by_scid(conn, icid);
5175 	if (!chan)
5176 		return 0;
5177 
5178 	__clear_chan_timer(chan);
5179 
5180 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5181 		chan->local_amp_id = chan->move_id;
5182 
5183 		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5184 			__release_logical_link(chan);
5185 
5186 		l2cap_move_done(chan);
5187 	}
5188 
5189 	l2cap_chan_unlock(chan);
5190 
5191 	return 0;
5192 }
5193 
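/* Handle an LE Connection Parameter Update Request (master role only) */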
5194 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5195 					      struct l2cap_cmd_hdr *cmd,
5196 					      u16 cmd_len, u8 *data)
5197 {
5198 	struct hci_conn *hcon = conn->hcon;
5199 	struct l2cap_conn_param_update_req *req;
5200 	struct l2cap_conn_param_update_rsp rsp;
5201 	u16 min, max, latency, to_multiplier;
5202 	int err;
5203 
5204 	if (hcon->role != HCI_ROLE_MASTER)
5205 		return -EINVAL;
5206 
5207 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5208 		return -EPROTO;
5209 
5210 	req = (struct l2cap_conn_param_update_req *) data;
5211 	min		= __le16_to_cpu(req->min);
5212 	max		= __le16_to_cpu(req->max);
5213 	latency		= __le16_to_cpu(req->latency);
5214 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5215 
5216 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5217 	       min, max, latency, to_multiplier);
5218 
5219 	memset(&rsp, 0, sizeof(rsp));
5220 
5221 	err = hci_check_conn_params(min, max, latency, to_multiplier);
5222 	if (err)
5223 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5224 	else
5225 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5226 
5227 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5228 		       sizeof(rsp), &rsp);
5229 
5230 	if (!err) {
5231 		u8 store_hint;
5232 
5233 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5234 						to_multiplier);
5235 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5236 				    store_hint, min, max, latency,
5237 				    to_multiplier);
5238 
5239 	}
5240 
5241 	return 0;
5242 }
5243 
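/* Handle the response to an LE credit based connection request */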
5244 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5245 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5246 				u8 *data)
5247 {
5248 	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5249 	struct hci_conn *hcon = conn->hcon;
5250 	u16 dcid, mtu, mps, credits, result;
5251 	struct l2cap_chan *chan;
5252 	int err, sec_level;
5253 
5254 	if (cmd_len < sizeof(*rsp))
5255 		return -EPROTO;
5256 
5257 	dcid    = __le16_to_cpu(rsp->dcid);
5258 	mtu     = __le16_to_cpu(rsp->mtu);
5259 	mps     = __le16_to_cpu(rsp->mps);
5260 	credits = __le16_to_cpu(rsp->credits);
5261 	result  = __le16_to_cpu(rsp->result);
5262 
5263 	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23 ||
5264 					   dcid < L2CAP_CID_DYN_START ||
5265 					   dcid > L2CAP_CID_LE_DYN_END))
5266 		return -EPROTO;
5267 
5268 	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5269 	       dcid, mtu, mps, credits, result);
5270 
5271 	mutex_lock(&conn->chan_lock);
5272 
5273 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5274 	if (!chan) {
5275 		err = -EBADSLT;
5276 		goto unlock;
5277 	}
5278 
5279 	err = 0;
5280 
5281 	l2cap_chan_lock(chan);
5282 
5283 	switch (result) {
5284 	case L2CAP_CR_SUCCESS:
5285 		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5286 			err = -EBADSLT;
5287 			break;
5288 		}
5289 
5290 		chan->ident = 0;
5291 		chan->dcid = dcid;
5292 		chan->omtu = mtu;
5293 		chan->remote_mps = mps;
5294 		chan->tx_credits = credits;
5295 		l2cap_chan_ready(chan);
5296 		break;
5297 
5298 	case L2CAP_CR_AUTHENTICATION:
5299 	case L2CAP_CR_ENCRYPTION:
5300 		/* If we already have MITM protection we can't do
5301 		 * anything.
5302 		 */
5303 		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5304 			l2cap_chan_del(chan, ECONNREFUSED);
5305 			break;
5306 		}
5307 
5308 		sec_level = hcon->sec_level + 1;
5309 		if (chan->sec_level < sec_level)
5310 			chan->sec_level = sec_level;
5311 
5312 		/* We'll need to send a new Connect Request */
5313 		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5314 
5315 		smp_conn_security(hcon, chan->sec_level);
5316 		break;
5317 
5318 	default:
5319 		l2cap_chan_del(chan, ECONNREFUSED);
5320 		break;
5321 	}
5322 
5323 	l2cap_chan_unlock(chan);
5324 
5325 unlock:
5326 	mutex_unlock(&conn->chan_lock);
5327 
5328 	return err;
5329 }
5330 
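/* Dispatch a single BR/EDR signaling command to its handler */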
5331 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5332 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5333 				      u8 *data)
5334 {
5335 	int err = 0;
5336 
5337 	switch (cmd->code) {
5338 	case L2CAP_COMMAND_REJ:
5339 		l2cap_command_rej(conn, cmd, cmd_len, data);
5340 		break;
5341 
5342 	case L2CAP_CONN_REQ:
5343 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5344 		break;
5345 
5346 	case L2CAP_CONN_RSP:
5347 	case L2CAP_CREATE_CHAN_RSP:
5348 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5349 		break;
5350 
5351 	case L2CAP_CONF_REQ:
5352 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5353 		break;
5354 
5355 	case L2CAP_CONF_RSP:
5356 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5357 		break;
5358 
5359 	case L2CAP_DISCONN_REQ:
5360 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5361 		break;
5362 
5363 	case L2CAP_DISCONN_RSP:
5364 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5365 		break;
5366 
5367 	case L2CAP_ECHO_REQ:
5368 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5369 		break;
5370 
5371 	case L2CAP_ECHO_RSP:
5372 		break;
5373 
5374 	case L2CAP_INFO_REQ:
5375 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5376 		break;
5377 
5378 	case L2CAP_INFO_RSP:
5379 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5380 		break;
5381 
5382 	case L2CAP_CREATE_CHAN_REQ:
5383 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5384 		break;
5385 
5386 	case L2CAP_MOVE_CHAN_REQ:
5387 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5388 		break;
5389 
5390 	case L2CAP_MOVE_CHAN_RSP:
5391 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5392 		break;
5393 
5394 	case L2CAP_MOVE_CHAN_CFM:
5395 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5396 		break;
5397 
5398 	case L2CAP_MOVE_CHAN_CFM_RSP:
5399 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5400 		break;
5401 
5402 	default:
5403 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5404 		err = -EINVAL;
5405 		break;
5406 	}
5407 
5408 	return err;
5409 }
5410 
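/* Handle an incoming LE credit based connection request */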
5411 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5412 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5413 				u8 *data)
5414 {
5415 	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5416 	struct l2cap_le_conn_rsp rsp;
5417 	struct l2cap_chan *chan, *pchan;
5418 	u16 dcid, scid, credits, mtu, mps;
5419 	__le16 psm;
5420 	u8 result;
5421 
5422 	if (cmd_len != sizeof(*req))
5423 		return -EPROTO;
5424 
5425 	scid = __le16_to_cpu(req->scid);
5426 	mtu  = __le16_to_cpu(req->mtu);
5427 	mps  = __le16_to_cpu(req->mps);
5428 	psm  = req->psm;
5429 	dcid = 0;
5430 	credits = 0;
5431 
5432 	if (mtu < 23 || mps < 23)
5433 		return -EPROTO;
5434 
5435 	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5436 	       scid, mtu, mps);
5437 
5438 	/* Check if we have a socket listening on this PSM */
5439 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5440 					 &conn->hcon->dst, LE_LINK);
5441 	if (!pchan) {
5442 		result = L2CAP_CR_BAD_PSM;
5443 		chan = NULL;
5444 		goto response;
5445 	}
5446 
5447 	mutex_lock(&conn->chan_lock);
5448 	l2cap_chan_lock(pchan);
5449 
5450 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5451 				     SMP_ALLOW_STK)) {
5452 		result = L2CAP_CR_AUTHENTICATION;
5453 		chan = NULL;
5454 		goto response_unlock;
5455 	}
5456 
5457 	/* Check for valid dynamic CID range */
5458 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5459 		result = L2CAP_CR_INVALID_SCID;
5460 		chan = NULL;
5461 		goto response_unlock;
5462 	}
5463 
5464 	/* Check if we already have a channel with that dcid */
5465 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
5466 		result = L2CAP_CR_SCID_IN_USE;
5467 		chan = NULL;
5468 		goto response_unlock;
5469 	}
5470 
5471 	chan = pchan->ops->new_connection(pchan);
5472 	if (!chan) {
5473 		result = L2CAP_CR_NO_MEM;
5474 		goto response_unlock;
5475 	}
5476 
5477 	l2cap_le_flowctl_init(chan);
5478 
5479 	bacpy(&chan->src, &conn->hcon->src);
5480 	bacpy(&chan->dst, &conn->hcon->dst);
5481 	chan->src_type = bdaddr_src_type(conn->hcon);
5482 	chan->dst_type = bdaddr_dst_type(conn->hcon);
5483 	chan->psm  = psm;
5484 	chan->dcid = scid;
5485 	chan->omtu = mtu;
5486 	chan->remote_mps = mps;
5487 	chan->tx_credits = __le16_to_cpu(req->credits);
5488 
5489 	__l2cap_chan_add(conn, chan);
5490 	dcid = chan->scid;
5491 	credits = chan->rx_credits;
5492 
5493 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5494 
5495 	chan->ident = cmd->ident;
5496 
5497 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5498 		l2cap_state_change(chan, BT_CONNECT2);
5499 		/* The following result value is actually not defined
5500 		 * for LE CoC but we use it to let the function know
5501 		 * that it should bail out after doing its cleanup
5502 		 * instead of sending a response.
5503 		 */
5504 		result = L2CAP_CR_PEND;
5505 		chan->ops->defer(chan);
5506 	} else {
5507 		l2cap_chan_ready(chan);
5508 		result = L2CAP_CR_SUCCESS;
5509 	}
5510 
5511 response_unlock:
5512 	l2cap_chan_unlock(pchan);
5513 	mutex_unlock(&conn->chan_lock);
5514 	l2cap_chan_put(pchan);
5515 
5516 	if (result == L2CAP_CR_PEND)
5517 		return 0;
5518 
5519 response:
5520 	if (chan) {
5521 		rsp.mtu = cpu_to_le16(chan->imtu);
5522 		rsp.mps = cpu_to_le16(chan->mps);
5523 	} else {
5524 		rsp.mtu = 0;
5525 		rsp.mps = 0;
5526 	}
5527 
5528 	rsp.dcid    = cpu_to_le16(dcid);
5529 	rsp.credits = cpu_to_le16(credits);
5530 	rsp.result  = cpu_to_le16(result);
5531 
5532 	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5533 
5534 	return 0;
5535 }
5536 
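/* Handle an LE Flow Control Credit packet and resume transmission */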
5537 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5538 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5539 				   u8 *data)
5540 {
5541 	struct l2cap_le_credits *pkt;
5542 	struct l2cap_chan *chan;
5543 	u16 cid, credits, max_credits;
5544 
5545 	if (cmd_len != sizeof(*pkt))
5546 		return -EPROTO;
5547 
5548 	pkt = (struct l2cap_le_credits *) data;
5549 	cid	= __le16_to_cpu(pkt->cid);
5550 	credits	= __le16_to_cpu(pkt->credits);
5551 
5552 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5553 
5554 	chan = l2cap_get_chan_by_dcid(conn, cid);
5555 	if (!chan)
5556 		return -EBADSLT;
5557 
5558 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5559 	if (credits > max_credits) {
5560 		BT_ERR("LE credits overflow");
5561 		l2cap_send_disconn_req(chan, ECONNRESET);
5562 		l2cap_chan_unlock(chan);
5563 
5564 		/* Return 0 so that we don't trigger an unnecessary
5565 		 * command reject packet.
5566 		 */
5567 		return 0;
5568 	}
5569 
5570 	chan->tx_credits += credits;
5571 
5572 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5573 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5574 		chan->tx_credits--;
5575 	}
5576 
5577 	if (chan->tx_credits)
5578 		chan->ops->resume(chan);
5579 
5580 	l2cap_chan_unlock(chan);
5581 
5582 	return 0;
5583 }
5584 
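/* Handle a Command Reject by failing the pending LE connection request
 * it refers to.
 */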
5585 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5586 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5587 				       u8 *data)
5588 {
5589 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5590 	struct l2cap_chan *chan;
5591 
5592 	if (cmd_len < sizeof(*rej))
5593 		return -EPROTO;
5594 
5595 	mutex_lock(&conn->chan_lock);
5596 
5597 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5598 	if (!chan)
5599 		goto done;
5600 
5601 	l2cap_chan_lock(chan);
5602 	l2cap_chan_del(chan, ECONNREFUSED);
5603 	l2cap_chan_unlock(chan);
5604 
5605 done:
5606 	mutex_unlock(&conn->chan_lock);
5607 	return 0;
5608 }
5609 
5610 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5611 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5612 				   u8 *data)
5613 {
5614 	int err = 0;
5615 
5616 	switch (cmd->code) {
5617 	case L2CAP_COMMAND_REJ:
5618 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
5619 		break;
5620 
5621 	case L2CAP_CONN_PARAM_UPDATE_REQ:
5622 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5623 		break;
5624 
5625 	case L2CAP_CONN_PARAM_UPDATE_RSP:
5626 		break;
5627 
5628 	case L2CAP_LE_CONN_RSP:
5629 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5630 		break;
5631 
5632 	case L2CAP_LE_CONN_REQ:
5633 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5634 		break;
5635 
5636 	case L2CAP_LE_CREDITS:
5637 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
5638 		break;
5639 
5640 	case L2CAP_DISCONN_REQ:
5641 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5642 		break;
5643 
5644 	case L2CAP_DISCONN_RSP:
5645 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5646 		break;
5647 
5648 	default:
5649 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5650 		err = -EINVAL;
5651 		break;
5652 	}
5653 
5654 	return err;
5655 }
5656 
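/* Process an LE signaling channel PDU, which carries a single command */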
5657 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5658 					struct sk_buff *skb)
5659 {
5660 	struct hci_conn *hcon = conn->hcon;
5661 	struct l2cap_cmd_hdr *cmd;
5662 	u16 len;
5663 	int err;
5664 
5665 	if (hcon->type != LE_LINK)
5666 		goto drop;
5667 
5668 	if (skb->len < L2CAP_CMD_HDR_SIZE)
5669 		goto drop;
5670 
5671 	cmd = (void *) skb->data;
5672 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5673 
5674 	len = le16_to_cpu(cmd->len);
5675 
5676 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5677 
5678 	if (len != skb->len || !cmd->ident) {
5679 		BT_DBG("corrupted command");
5680 		goto drop;
5681 	}
5682 
5683 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5684 	if (err) {
5685 		struct l2cap_cmd_rej_unk rej;
5686 
5687 		BT_ERR("Failed to handle LE signaling command (%d)", err);
5688 
5689 		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5690 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5691 			       sizeof(rej), &rej);
5692 	}
5693 
5694 drop:
5695 	kfree_skb(skb);
5696 }
5697 
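/* Process a BR/EDR signaling channel PDU, which may carry several commands */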
5698 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5699 				     struct sk_buff *skb)
5700 {
5701 	struct hci_conn *hcon = conn->hcon;
5702 	u8 *data = skb->data;
5703 	int len = skb->len;
5704 	struct l2cap_cmd_hdr cmd;
5705 	int err;
5706 
5707 	l2cap_raw_recv(conn, skb);
5708 
5709 	if (hcon->type != ACL_LINK)
5710 		goto drop;
5711 
5712 	while (len >= L2CAP_CMD_HDR_SIZE) {
5713 		u16 cmd_len;
5714 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5715 		data += L2CAP_CMD_HDR_SIZE;
5716 		len  -= L2CAP_CMD_HDR_SIZE;
5717 
5718 		cmd_len = le16_to_cpu(cmd.len);
5719 
5720 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5721 		       cmd.ident);
5722 
5723 		if (cmd_len > len || !cmd.ident) {
5724 			BT_DBG("corrupted command");
5725 			break;
5726 		}
5727 
5728 		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5729 		if (err) {
5730 			struct l2cap_cmd_rej_unk rej;
5731 
5732 			BT_ERR("Failed to handle signaling command (%d)", err);
5733 
5734 			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5735 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5736 				       sizeof(rej), &rej);
5737 		}
5738 
5739 		data += cmd_len;
5740 		len  -= cmd_len;
5741 	}
5742 
5743 drop:
5744 	kfree_skb(skb);
5745 }
5746 
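/* Verify (and trim) the CRC16 FCS of a received frame when FCS is in use */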
5747 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5748 {
5749 	u16 our_fcs, rcv_fcs;
5750 	int hdr_size;
5751 
5752 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5753 		hdr_size = L2CAP_EXT_HDR_SIZE;
5754 	else
5755 		hdr_size = L2CAP_ENH_HDR_SIZE;
5756 
5757 	if (chan->fcs == L2CAP_FCS_CRC16) {
5758 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5759 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5760 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5761 
5762 		if (our_fcs != rcv_fcs)
5763 			return -EBADMSG;
5764 	}
5765 	return 0;
5766 }
5767 
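/* Send a frame with the F-bit set: an RNR when locally busy, otherwise
 * pending I-frames followed by an RR if the F-bit is still owed.
 */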
5768 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5769 {
5770 	struct l2cap_ctrl control;
5771 
5772 	BT_DBG("chan %p", chan);
5773 
5774 	memset(&control, 0, sizeof(control));
5775 	control.sframe = 1;
5776 	control.final = 1;
5777 	control.reqseq = chan->buffer_seq;
5778 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
5779 
5780 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5781 		control.super = L2CAP_SUPER_RNR;
5782 		l2cap_send_sframe(chan, &control);
5783 	}
5784 
5785 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5786 	    chan->unacked_frames > 0)
5787 		__set_retrans_timer(chan);
5788 
5789 	/* Send pending iframes */
5790 	l2cap_ertm_send(chan);
5791 
5792 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5793 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5794 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
5795 		 * send it now.
5796 		 */
5797 		control.super = L2CAP_SUPER_RR;
5798 		l2cap_send_sframe(chan, &control);
5799 	}
5800 }
5801 
5802 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5803 			    struct sk_buff **last_frag)
5804 {
5805 	/* skb->len reflects data in skb as well as all fragments
5806 	 * skb->data_len reflects only data in fragments
5807 	 */
5808 	if (!skb_has_frag_list(skb))
5809 		skb_shinfo(skb)->frag_list = new_frag;
5810 
5811 	new_frag->next = NULL;
5812 
5813 	(*last_frag)->next = new_frag;
5814 	*last_frag = new_frag;
5815 
5816 	skb->len += new_frag->len;
5817 	skb->data_len += new_frag->len;
5818 	skb->truesize += new_frag->truesize;
5819 }
5820 
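/* Reassemble a segmented SDU from I-frames according to the SAR field */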
5821 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5822 				struct l2cap_ctrl *control)
5823 {
5824 	int err = -EINVAL;
5825 
5826 	switch (control->sar) {
5827 	case L2CAP_SAR_UNSEGMENTED:
5828 		if (chan->sdu)
5829 			break;
5830 
5831 		err = chan->ops->recv(chan, skb);
5832 		break;
5833 
5834 	case L2CAP_SAR_START:
5835 		if (chan->sdu)
5836 			break;
5837 
5838 		chan->sdu_len = get_unaligned_le16(skb->data);
5839 		skb_pull(skb, L2CAP_SDULEN_SIZE);
5840 
5841 		if (chan->sdu_len > chan->imtu) {
5842 			err = -EMSGSIZE;
5843 			break;
5844 		}
5845 
5846 		if (skb->len >= chan->sdu_len)
5847 			break;
5848 
5849 		chan->sdu = skb;
5850 		chan->sdu_last_frag = skb;
5851 
5852 		skb = NULL;
5853 		err = 0;
5854 		break;
5855 
5856 	case L2CAP_SAR_CONTINUE:
5857 		if (!chan->sdu)
5858 			break;
5859 
5860 		append_skb_frag(chan->sdu, skb,
5861 				&chan->sdu_last_frag);
5862 		skb = NULL;
5863 
5864 		if (chan->sdu->len >= chan->sdu_len)
5865 			break;
5866 
5867 		err = 0;
5868 		break;
5869 
5870 	case L2CAP_SAR_END:
5871 		if (!chan->sdu)
5872 			break;
5873 
5874 		append_skb_frag(chan->sdu, skb,
5875 				&chan->sdu_last_frag);
5876 		skb = NULL;
5877 
5878 		if (chan->sdu->len != chan->sdu_len)
5879 			break;
5880 
5881 		err = chan->ops->recv(chan, chan->sdu);
5882 
5883 		if (!err) {
5884 			/* Reassembly complete */
5885 			chan->sdu = NULL;
5886 			chan->sdu_last_frag = NULL;
5887 			chan->sdu_len = 0;
5888 		}
5889 		break;
5890 	}
5891 
5892 	if (err) {
5893 		kfree_skb(skb);
5894 		kfree_skb(chan->sdu);
5895 		chan->sdu = NULL;
5896 		chan->sdu_last_frag = NULL;
5897 		chan->sdu_len = 0;
5898 	}
5899 
5900 	return err;
5901 }
5902 
5903 static int l2cap_resegment(struct l2cap_chan *chan)
5904 {
5905 	/* Placeholder */
5906 	return 0;
5907 }
5908 
5909 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5910 {
5911 	u8 event;
5912 
5913 	if (chan->mode != L2CAP_MODE_ERTM)
5914 		return;
5915 
5916 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5917 	l2cap_tx(chan, NULL, NULL, event);
5918 }
5919 
5920 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5921 {
5922 	int err = 0;
5923 	/* Pass sequential frames to l2cap_reassemble_sdu()
5924 	 * until a gap is encountered.
5925 	 */
5926 
5927 	BT_DBG("chan %p", chan);
5928 
5929 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5930 		struct sk_buff *skb;
5931 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
5932 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
5933 
5934 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5935 
5936 		if (!skb)
5937 			break;
5938 
5939 		skb_unlink(skb, &chan->srej_q);
5940 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5941 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
5942 		if (err)
5943 			break;
5944 	}
5945 
5946 	if (skb_queue_empty(&chan->srej_q)) {
5947 		chan->rx_state = L2CAP_RX_STATE_RECV;
5948 		l2cap_send_ack(chan);
5949 	}
5950 
5951 	return err;
5952 }
5953 
5954 static void l2cap_handle_srej(struct l2cap_chan *chan,
5955 			      struct l2cap_ctrl *control)
5956 {
5957 	struct sk_buff *skb;
5958 
5959 	BT_DBG("chan %p, control %p", chan, control);
5960 
5961 	if (control->reqseq == chan->next_tx_seq) {
5962 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5963 		l2cap_send_disconn_req(chan, ECONNRESET);
5964 		return;
5965 	}
5966 
5967 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5968 
5969 	if (skb == NULL) {
5970 		BT_DBG("Seq %d not available for retransmission",
5971 		       control->reqseq);
5972 		return;
5973 	}
5974 
5975 	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
5976 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5977 		l2cap_send_disconn_req(chan, ECONNRESET);
5978 		return;
5979 	}
5980 
5981 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5982 
5983 	if (control->poll) {
5984 		l2cap_pass_to_tx(chan, control);
5985 
5986 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
5987 		l2cap_retransmit(chan, control);
5988 		l2cap_ertm_send(chan);
5989 
5990 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5991 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
5992 			chan->srej_save_reqseq = control->reqseq;
5993 		}
5994 	} else {
5995 		l2cap_pass_to_tx_fbit(chan, control);
5996 
5997 		if (control->final) {
5998 			if (chan->srej_save_reqseq != control->reqseq ||
5999 			    !test_and_clear_bit(CONN_SREJ_ACT,
6000 						&chan->conn_state))
6001 				l2cap_retransmit(chan, control);
6002 		} else {
6003 			l2cap_retransmit(chan, control);
6004 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6005 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
6006 				chan->srej_save_reqseq = control->reqseq;
6007 			}
6008 		}
6009 	}
6010 }
6011 
6012 static void l2cap_handle_rej(struct l2cap_chan *chan,
6013 			     struct l2cap_ctrl *control)
6014 {
6015 	struct sk_buff *skb;
6016 
6017 	BT_DBG("chan %p, control %p", chan, control);
6018 
6019 	if (control->reqseq == chan->next_tx_seq) {
6020 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6021 		l2cap_send_disconn_req(chan, ECONNRESET);
6022 		return;
6023 	}
6024 
6025 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6026 
6027 	if (chan->max_tx && skb &&
6028 	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6029 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6030 		l2cap_send_disconn_req(chan, ECONNRESET);
6031 		return;
6032 	}
6033 
6034 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6035 
6036 	l2cap_pass_to_tx(chan, control);
6037 
6038 	if (control->final) {
6039 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6040 			l2cap_retransmit_all(chan, control);
6041 	} else {
6042 		l2cap_retransmit_all(chan, control);
6043 		l2cap_ertm_send(chan);
6044 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6045 			set_bit(CONN_REJ_ACT, &chan->conn_state);
6046 	}
6047 }
6048 
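/* Classify a received I-frame sequence number relative to the current
 * receive state: EXPECTED if it matches expected_tx_seq and lies inside
 * the tx window, DUPLICATE if it has already been received, UNEXPECTED
 * if it indicates missing frames, and INVALID/INVALID_IGNORE if it
 * falls outside the tx window.  Additional *_SREJ classifications apply
 * while SREJs are outstanding (SREJ_SENT state).
 */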
6049 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6050 {
6051 	BT_DBG("chan %p, txseq %d", chan, txseq);
6052 
6053 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6054 	       chan->expected_tx_seq);
6055 
6056 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6057 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6058 		    chan->tx_win) {
6059 			/* See notes below regarding "double poll" and
6060 			 * invalid packets.
6061 			 */
6062 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6063 				BT_DBG("Invalid/Ignore - after SREJ");
6064 				return L2CAP_TXSEQ_INVALID_IGNORE;
6065 			} else {
6066 				BT_DBG("Invalid - in window after SREJ sent");
6067 				return L2CAP_TXSEQ_INVALID;
6068 			}
6069 		}
6070 
6071 		if (chan->srej_list.head == txseq) {
6072 			BT_DBG("Expected SREJ");
6073 			return L2CAP_TXSEQ_EXPECTED_SREJ;
6074 		}
6075 
6076 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6077 			BT_DBG("Duplicate SREJ - txseq already stored");
6078 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
6079 		}
6080 
6081 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6082 			BT_DBG("Unexpected SREJ - not requested");
6083 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6084 		}
6085 	}
6086 
6087 	if (chan->expected_tx_seq == txseq) {
6088 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6089 		    chan->tx_win) {
6090 			BT_DBG("Invalid - txseq outside tx window");
6091 			return L2CAP_TXSEQ_INVALID;
6092 		} else {
6093 			BT_DBG("Expected");
6094 			return L2CAP_TXSEQ_EXPECTED;
6095 		}
6096 	}
6097 
6098 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6099 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6100 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
6101 		return L2CAP_TXSEQ_DUPLICATE;
6102 	}
6103 
6104 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6105 		/* A source of invalid packets is a "double poll" condition,
6106 		 * where delays cause us to send multiple poll packets.  If
6107 		 * the remote stack receives and processes both polls,
6108 		 * sequence numbers can wrap around in such a way that a
6109 		 * resent frame has a sequence number that looks like new data
6110 		 * with a sequence gap.  This would trigger an erroneous SREJ
6111 		 * request.
6112 		 *
6113 		 * Fortunately, this cannot happen when the tx window is no
6114 		 * larger than half of the sequence number space, so such
6115 		 * frames can be safely ignored.
6116 		 *
6117 		 * With a tx window larger than half of the sequence number
6118 		 * space, the frame is invalid and cannot be ignored.  This
6119 		 * causes a disconnect.
6120 		 */
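		/* Illustrative example: with a 6-bit sequence space (64
		 * values) and tx_win = 10, after last_acked_seq = 60 the
		 * valid window covers txseq 60..5.  A stale retransmission
		 * of frame 50 has an offset of 54 >= tx_win; since
		 * tx_win <= 32 it is classified as INVALID_IGNORE instead
		 * of triggering a spurious SREJ.
		 */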
6121 
6122 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6123 			BT_DBG("Invalid/Ignore - txseq outside tx window");
6124 			return L2CAP_TXSEQ_INVALID_IGNORE;
6125 		} else {
6126 			BT_DBG("Invalid - txseq outside tx window");
6127 			return L2CAP_TXSEQ_INVALID;
6128 		}
6129 	} else {
6130 		BT_DBG("Unexpected - txseq indicates missing frames");
6131 		return L2CAP_TXSEQ_UNEXPECTED;
6132 	}
6133 }
6134 
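/* Main ERTM receive handler for the RECV state.  Expected I-frames are
 * reassembled and acknowledged; an unexpected sequence number queues
 * the frame on srej_q, sends an SREJ and moves to the SREJ_SENT state;
 * RR/RNR/REJ/SREJ events drive the transmit side accordingly.
 */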
6135 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6136 			       struct l2cap_ctrl *control,
6137 			       struct sk_buff *skb, u8 event)
6138 {
6139 	int err = 0;
6140 	bool skb_in_use = false;
6141 
6142 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6143 	       event);
6144 
6145 	switch (event) {
6146 	case L2CAP_EV_RECV_IFRAME:
6147 		switch (l2cap_classify_txseq(chan, control->txseq)) {
6148 		case L2CAP_TXSEQ_EXPECTED:
6149 			l2cap_pass_to_tx(chan, control);
6150 
6151 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6152 				BT_DBG("Busy, discarding expected seq %d",
6153 				       control->txseq);
6154 				break;
6155 			}
6156 
6157 			chan->expected_tx_seq = __next_seq(chan,
6158 							   control->txseq);
6159 
6160 			chan->buffer_seq = chan->expected_tx_seq;
6161 			skb_in_use = true;
6162 
6163 			err = l2cap_reassemble_sdu(chan, skb, control);
6164 			if (err)
6165 				break;
6166 
6167 			if (control->final) {
6168 				if (!test_and_clear_bit(CONN_REJ_ACT,
6169 							&chan->conn_state)) {
6170 					control->final = 0;
6171 					l2cap_retransmit_all(chan, control);
6172 					l2cap_ertm_send(chan);
6173 				}
6174 			}
6175 
6176 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6177 				l2cap_send_ack(chan);
6178 			break;
6179 		case L2CAP_TXSEQ_UNEXPECTED:
6180 			l2cap_pass_to_tx(chan, control);
6181 
6182 			/* Can't issue SREJ frames in the local busy state.
6183 			 * Drop this frame; it will be seen as missing
6184 			 * when local busy is exited.
6185 			 */
6186 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6187 				BT_DBG("Busy, discarding unexpected seq %d",
6188 				       control->txseq);
6189 				break;
6190 			}
6191 
6192 			/* There was a gap in the sequence, so an SREJ
6193 			 * must be sent for each missing frame.  The
6194 			 * current frame is stored for later use.
6195 			 */
6196 			skb_queue_tail(&chan->srej_q, skb);
6197 			skb_in_use = true;
6198 			BT_DBG("Queued %p (queue len %d)", skb,
6199 			       skb_queue_len(&chan->srej_q));
6200 
6201 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6202 			l2cap_seq_list_clear(&chan->srej_list);
6203 			l2cap_send_srej(chan, control->txseq);
6204 
6205 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6206 			break;
6207 		case L2CAP_TXSEQ_DUPLICATE:
6208 			l2cap_pass_to_tx(chan, control);
6209 			break;
6210 		case L2CAP_TXSEQ_INVALID_IGNORE:
6211 			break;
6212 		case L2CAP_TXSEQ_INVALID:
6213 		default:
6214 			l2cap_send_disconn_req(chan, ECONNRESET);
6215 			break;
6216 		}
6217 		break;
6218 	case L2CAP_EV_RECV_RR:
6219 		l2cap_pass_to_tx(chan, control);
6220 		if (control->final) {
6221 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6222 
6223 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6224 			    !__chan_is_moving(chan)) {
6225 				control->final = 0;
6226 				l2cap_retransmit_all(chan, control);
6227 			}
6228 
6229 			l2cap_ertm_send(chan);
6230 		} else if (control->poll) {
6231 			l2cap_send_i_or_rr_or_rnr(chan);
6232 		} else {
6233 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6234 					       &chan->conn_state) &&
6235 			    chan->unacked_frames)
6236 				__set_retrans_timer(chan);
6237 
6238 			l2cap_ertm_send(chan);
6239 		}
6240 		break;
6241 	case L2CAP_EV_RECV_RNR:
6242 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6243 		l2cap_pass_to_tx(chan, control);
6244 		if (control && control->poll) {
6245 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6246 			l2cap_send_rr_or_rnr(chan, 0);
6247 		}
6248 		__clear_retrans_timer(chan);
6249 		l2cap_seq_list_clear(&chan->retrans_list);
6250 		break;
6251 	case L2CAP_EV_RECV_REJ:
6252 		l2cap_handle_rej(chan, control);
6253 		break;
6254 	case L2CAP_EV_RECV_SREJ:
6255 		l2cap_handle_srej(chan, control);
6256 		break;
6257 	default:
6258 		break;
6259 	}
6260 
6261 	if (skb && !skb_in_use) {
6262 		BT_DBG("Freeing %p", skb);
6263 		kfree_skb(skb);
6264 	}
6265 
6266 	return err;
6267 }
6268 
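/* Receive handler for the SREJ_SENT state: missing frames have been
 * selectively rejected, so in-sequence and SREJ'd frames are buffered
 * on srej_q until the gaps are filled (see l2cap_rx_queued_iframes()),
 * while further gaps generate additional SREJs.
 */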
6269 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6270 				    struct l2cap_ctrl *control,
6271 				    struct sk_buff *skb, u8 event)
6272 {
6273 	int err = 0;
6274 	u16 txseq = control->txseq;
6275 	bool skb_in_use = false;
6276 
6277 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6278 	       event);
6279 
6280 	switch (event) {
6281 	case L2CAP_EV_RECV_IFRAME:
6282 		switch (l2cap_classify_txseq(chan, txseq)) {
6283 		case L2CAP_TXSEQ_EXPECTED:
6284 			/* Keep frame for reassembly later */
6285 			l2cap_pass_to_tx(chan, control);
6286 			skb_queue_tail(&chan->srej_q, skb);
6287 			skb_in_use = true;
6288 			BT_DBG("Queued %p (queue len %d)", skb,
6289 			       skb_queue_len(&chan->srej_q));
6290 
6291 			chan->expected_tx_seq = __next_seq(chan, txseq);
6292 			break;
6293 		case L2CAP_TXSEQ_EXPECTED_SREJ:
6294 			l2cap_seq_list_pop(&chan->srej_list);
6295 
6296 			l2cap_pass_to_tx(chan, control);
6297 			skb_queue_tail(&chan->srej_q, skb);
6298 			skb_in_use = true;
6299 			BT_DBG("Queued %p (queue len %d)", skb,
6300 			       skb_queue_len(&chan->srej_q));
6301 
6302 			err = l2cap_rx_queued_iframes(chan);
6303 			if (err)
6304 				break;
6305 
6306 			break;
6307 		case L2CAP_TXSEQ_UNEXPECTED:
6308 			/* Got a frame that can't be reassembled yet.
6309 			 * Save it for later, and send SREJs to cover
6310 			 * the missing frames.
6311 			 */
6312 			skb_queue_tail(&chan->srej_q, skb);
6313 			skb_in_use = true;
6314 			BT_DBG("Queued %p (queue len %d)", skb,
6315 			       skb_queue_len(&chan->srej_q));
6316 
6317 			l2cap_pass_to_tx(chan, control);
6318 			l2cap_send_srej(chan, control->txseq);
6319 			break;
6320 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6321 			/* This frame was requested with an SREJ, but
6322 			 * some expected retransmitted frames are
6323 			 * missing.  Request retransmission of missing
6324 			 * SREJ'd frames.
6325 			 */
6326 			skb_queue_tail(&chan->srej_q, skb);
6327 			skb_in_use = true;
6328 			BT_DBG("Queued %p (queue len %d)", skb,
6329 			       skb_queue_len(&chan->srej_q));
6330 
6331 			l2cap_pass_to_tx(chan, control);
6332 			l2cap_send_srej_list(chan, control->txseq);
6333 			break;
6334 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
6335 			/* We've already queued this frame.  Drop this copy. */
6336 			l2cap_pass_to_tx(chan, control);
6337 			break;
6338 		case L2CAP_TXSEQ_DUPLICATE:
6339 			/* Expecting a later sequence number, so this frame
6340 			 * was already received.  Ignore it completely.
6341 			 */
6342 			break;
6343 		case L2CAP_TXSEQ_INVALID_IGNORE:
6344 			break;
6345 		case L2CAP_TXSEQ_INVALID:
6346 		default:
6347 			l2cap_send_disconn_req(chan, ECONNRESET);
6348 			break;
6349 		}
6350 		break;
6351 	case L2CAP_EV_RECV_RR:
6352 		l2cap_pass_to_tx(chan, control);
6353 		if (control->final) {
6354 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6355 
6356 			if (!test_and_clear_bit(CONN_REJ_ACT,
6357 						&chan->conn_state)) {
6358 				control->final = 0;
6359 				l2cap_retransmit_all(chan, control);
6360 			}
6361 
6362 			l2cap_ertm_send(chan);
6363 		} else if (control->poll) {
6364 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6365 					       &chan->conn_state) &&
6366 			    chan->unacked_frames) {
6367 				__set_retrans_timer(chan);
6368 			}
6369 
6370 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6371 			l2cap_send_srej_tail(chan);
6372 		} else {
6373 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6374 					       &chan->conn_state) &&
6375 			    chan->unacked_frames)
6376 				__set_retrans_timer(chan);
6377 
6378 			l2cap_send_ack(chan);
6379 		}
6380 		break;
6381 	case L2CAP_EV_RECV_RNR:
6382 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6383 		l2cap_pass_to_tx(chan, control);
6384 		if (control->poll) {
6385 			l2cap_send_srej_tail(chan);
6386 		} else {
6387 			struct l2cap_ctrl rr_control;
6388 			memset(&rr_control, 0, sizeof(rr_control));
6389 			rr_control.sframe = 1;
6390 			rr_control.super = L2CAP_SUPER_RR;
6391 			rr_control.reqseq = chan->buffer_seq;
6392 			l2cap_send_sframe(chan, &rr_control);
6393 		}
6394 
6395 		break;
6396 	case L2CAP_EV_RECV_REJ:
6397 		l2cap_handle_rej(chan, control);
6398 		break;
6399 	case L2CAP_EV_RECV_SREJ:
6400 		l2cap_handle_srej(chan, control);
6401 		break;
6402 	}
6403 
6404 	if (skb && !skb_in_use) {
6405 		BT_DBG("Freeing %p", skb);
6406 		kfree_skb(skb);
6407 	}
6408 
6409 	return err;
6410 }
6411 
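/* Complete a channel move on the receive side: return to the RECV
 * state, adopt the MTU of the new link (block MTU for a high-speed
 * AMP link, ACL MTU otherwise) and resegment any pending data
 * (l2cap_resegment() is currently a placeholder).
 */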
6412 static int l2cap_finish_move(struct l2cap_chan *chan)
6413 {
6414 	BT_DBG("chan %p", chan);
6415 
6416 	chan->rx_state = L2CAP_RX_STATE_RECV;
6417 
6418 	if (chan->hs_hcon)
6419 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6420 	else
6421 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6422 
6423 	return l2cap_resegment(chan);
6424 }
6425 
6426 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6427 				 struct l2cap_ctrl *control,
6428 				 struct sk_buff *skb, u8 event)
6429 {
6430 	int err;
6431 
6432 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6433 	       event);
6434 
6435 	if (!control->poll)
6436 		return -EPROTO;
6437 
6438 	l2cap_process_reqseq(chan, control->reqseq);
6439 
6440 	if (!skb_queue_empty(&chan->tx_q))
6441 		chan->tx_send_head = skb_peek(&chan->tx_q);
6442 	else
6443 		chan->tx_send_head = NULL;
6444 
6445 	/* Rewind next_tx_seq to the point expected
6446 	 * by the receiver.
6447 	 */
6448 	chan->next_tx_seq = control->reqseq;
6449 	chan->unacked_frames = 0;
6450 
6451 	err = l2cap_finish_move(chan);
6452 	if (err)
6453 		return err;
6454 
6455 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
6456 	l2cap_send_i_or_rr_or_rnr(chan);
6457 
6458 	if (event == L2CAP_EV_RECV_IFRAME)
6459 		return -EPROTO;
6460 
6461 	return l2cap_rx_state_recv(chan, control, NULL, event);
6462 }
6463 
6464 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6465 				 struct l2cap_ctrl *control,
6466 				 struct sk_buff *skb, u8 event)
6467 {
6468 	int err;
6469 
6470 	if (!control->final)
6471 		return -EPROTO;
6472 
6473 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6474 
6475 	chan->rx_state = L2CAP_RX_STATE_RECV;
6476 	l2cap_process_reqseq(chan, control->reqseq);
6477 
6478 	if (!skb_queue_empty(&chan->tx_q))
6479 		chan->tx_send_head = skb_peek(&chan->tx_q);
6480 	else
6481 		chan->tx_send_head = NULL;
6482 
6483 	/* Rewind next_tx_seq to the point expected
6484 	 * by the receiver.
6485 	 */
6486 	chan->next_tx_seq = control->reqseq;
6487 	chan->unacked_frames = 0;
6488 
6489 	if (chan->hs_hcon)
6490 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6491 	else
6492 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6493 
6494 	err = l2cap_resegment(chan);
6495 
6496 	if (!err)
6497 		err = l2cap_rx_state_recv(chan, control, skb, event);
6498 
6499 	return err;
6500 }
6501 
6502 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6503 {
6504 	/* Make sure reqseq is for a packet that has been sent but not acked */
6505 	u16 unacked;
6506 
6507 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6508 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6509 }
6510 
6511 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6512 		    struct sk_buff *skb, u8 event)
6513 {
6514 	int err = 0;
6515 
6516 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6517 	       control, skb, event, chan->rx_state);
6518 
6519 	if (__valid_reqseq(chan, control->reqseq)) {
6520 		switch (chan->rx_state) {
6521 		case L2CAP_RX_STATE_RECV:
6522 			err = l2cap_rx_state_recv(chan, control, skb, event);
6523 			break;
6524 		case L2CAP_RX_STATE_SREJ_SENT:
6525 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6526 						       event);
6527 			break;
6528 		case L2CAP_RX_STATE_WAIT_P:
6529 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6530 			break;
6531 		case L2CAP_RX_STATE_WAIT_F:
6532 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6533 			break;
6534 		default:
6535 			/* shut it down */
6536 			break;
6537 		}
6538 	} else {
6539 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
6540 		       control->reqseq, chan->next_tx_seq,
6541 		       chan->expected_ack_seq);
6542 		l2cap_send_disconn_req(chan, ECONNRESET);
6543 	}
6544 
6545 	return err;
6546 }
6547 
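/* Streaming mode receive path: frames are never retransmitted, so an
 * expected sequence number is reassembled as usual while anything else
 * simply discards the frame and any partially reassembled SDU.
 */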
6548 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6549 			   struct sk_buff *skb)
6550 {
6551 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6552 	       chan->rx_state);
6553 
6554 	if (l2cap_classify_txseq(chan, control->txseq) ==
6555 	    L2CAP_TXSEQ_EXPECTED) {
6556 		l2cap_pass_to_tx(chan, control);
6557 
6558 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6559 		       __next_seq(chan, chan->buffer_seq));
6560 
6561 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6562 
6563 		l2cap_reassemble_sdu(chan, skb, control);
6564 	} else {
6565 		if (chan->sdu) {
6566 			kfree_skb(chan->sdu);
6567 			chan->sdu = NULL;
6568 		}
6569 		chan->sdu_last_frag = NULL;
6570 		chan->sdu_len = 0;
6571 
6572 		if (skb) {
6573 			BT_DBG("Freeing %p", skb);
6574 			kfree_skb(skb);
6575 		}
6576 	}
6577 
6578 	chan->last_acked_seq = control->txseq;
6579 	chan->expected_tx_seq = __next_seq(chan, control->txseq);
6580 
6581 	return 0;
6582 }
6583 
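/* Entry point for ERTM and streaming mode PDUs: unpack the control
 * field, verify FCS and length against the MPS, then dispatch I-frames
 * to the receive state machine (or the streaming handler) and S-frames
 * to the corresponding RR/REJ/RNR/SREJ receive events.
 */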
6584 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6585 {
6586 	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
6587 	u16 len;
6588 	u8 event;
6589 
6590 	__unpack_control(chan, skb);
6591 
6592 	len = skb->len;
6593 
6594 	/*
6595 	 * We can just drop the corrupted I-frame here.
6596 	 * The receive state machine will treat it as missing and start
6597 	 * the usual recovery procedure, asking the sender to retransmit.
6598 	 */
6599 	if (l2cap_check_fcs(chan, skb))
6600 		goto drop;
6601 
6602 	if (!control->sframe && control->sar == L2CAP_SAR_START)
6603 		len -= L2CAP_SDULEN_SIZE;
6604 
6605 	if (chan->fcs == L2CAP_FCS_CRC16)
6606 		len -= L2CAP_FCS_SIZE;
6607 
6608 	if (len > chan->mps) {
6609 		l2cap_send_disconn_req(chan, ECONNRESET);
6610 		goto drop;
6611 	}
6612 
6613 	if (!control->sframe) {
6614 		int err;
6615 
6616 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6617 		       control->sar, control->reqseq, control->final,
6618 		       control->txseq);
6619 
6620 		/* Validate F-bit - F=0 always valid, F=1 only
6621 		 * valid in TX WAIT_F
6622 		 */
6623 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6624 			goto drop;
6625 
6626 		if (chan->mode != L2CAP_MODE_STREAMING) {
6627 			event = L2CAP_EV_RECV_IFRAME;
6628 			err = l2cap_rx(chan, control, skb, event);
6629 		} else {
6630 			err = l2cap_stream_rx(chan, control, skb);
6631 		}
6632 
6633 		if (err)
6634 			l2cap_send_disconn_req(chan, ECONNRESET);
6635 	} else {
6636 		const u8 rx_func_to_event[4] = {
6637 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6638 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6639 		};
6640 
6641 		/* Only I-frames are expected in streaming mode */
6642 		if (chan->mode == L2CAP_MODE_STREAMING)
6643 			goto drop;
6644 
6645 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6646 		       control->reqseq, control->final, control->poll,
6647 		       control->super);
6648 
6649 		if (len != 0) {
6650 			BT_ERR("Trailing bytes: %d in sframe", len);
6651 			l2cap_send_disconn_req(chan, ECONNRESET);
6652 			goto drop;
6653 		}
6654 
6655 		/* Validate F and P bits */
6656 		if (control->final && (control->poll ||
6657 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6658 			goto drop;
6659 
6660 		event = rx_func_to_event[control->super];
6661 		if (l2cap_rx(chan, control, skb, event))
6662 			l2cap_send_disconn_req(chan, ECONNRESET);
6663 	}
6664 
6665 	return 0;
6666 
6667 drop:
6668 	kfree_skb(skb);
6669 	return 0;
6670 }
6671 
6672 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6673 {
6674 	struct l2cap_conn *conn = chan->conn;
6675 	struct l2cap_le_credits pkt;
6676 	u16 return_credits;
6677 
6678 	/* We return more credits to the sender only after the amount of
6679 	 * credits falls below half of the initial amount.
6680 	 */
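	/* Illustrative numbers: with an initial allowance of 10 credits,
	 * nothing is sent while rx_credits is still >= 5; once it drops
	 * to 4, a 6-credit LE Flow Control Credit packet tops the sender
	 * back up to 10.
	 */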
6681 	if (chan->rx_credits >= (le_max_credits + 1) / 2)
6682 		return;
6683 
6684 	return_credits = le_max_credits - chan->rx_credits;
6685 
6686 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6687 
6688 	chan->rx_credits += return_credits;
6689 
6690 	pkt.cid     = cpu_to_le16(chan->scid);
6691 	pkt.credits = cpu_to_le16(return_credits);
6692 
6693 	chan->ident = l2cap_get_ident(conn);
6694 
6695 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6696 }
6697 
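/* LE credit-based flow control receive path: every incoming PDU
 * consumes one rx credit (running out is a protocol error), credits
 * are replenished via l2cap_chan_le_send_credits(), and PDUs are
 * reassembled into SDUs using the 2-byte SDU length carried in the
 * first PDU of each SDU.
 */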
6698 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6699 {
6700 	int err;
6701 
6702 	if (!chan->rx_credits) {
6703 		BT_ERR("No credits to receive LE L2CAP data");
6704 		l2cap_send_disconn_req(chan, ECONNRESET);
6705 		return -ENOBUFS;
6706 	}
6707 
6708 	if (chan->imtu < skb->len) {
6709 		BT_ERR("Too big LE L2CAP PDU");
6710 		return -ENOBUFS;
6711 	}
6712 
6713 	chan->rx_credits--;
6714 	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6715 
6716 	l2cap_chan_le_send_credits(chan);
6717 
6718 	err = 0;
6719 
6720 	if (!chan->sdu) {
6721 		u16 sdu_len;
6722 
6723 		sdu_len = get_unaligned_le16(skb->data);
6724 		skb_pull(skb, L2CAP_SDULEN_SIZE);
6725 
6726 		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6727 		       sdu_len, skb->len, chan->imtu);
6728 
6729 		if (sdu_len > chan->imtu) {
6730 			BT_ERR("Too big LE L2CAP SDU length received");
6731 			err = -EMSGSIZE;
6732 			goto failed;
6733 		}
6734 
6735 		if (skb->len > sdu_len) {
6736 			BT_ERR("Too much LE L2CAP data received");
6737 			err = -EINVAL;
6738 			goto failed;
6739 		}
6740 
6741 		if (skb->len == sdu_len)
6742 			return chan->ops->recv(chan, skb);
6743 
6744 		chan->sdu = skb;
6745 		chan->sdu_len = sdu_len;
6746 		chan->sdu_last_frag = skb;
6747 
6748 		return 0;
6749 	}
6750 
6751 	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6752 	       chan->sdu->len, skb->len, chan->sdu_len);
6753 
6754 	if (chan->sdu->len + skb->len > chan->sdu_len) {
6755 		BT_ERR("Too much LE L2CAP data received");
6756 		err = -EINVAL;
6757 		goto failed;
6758 	}
6759 
6760 	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6761 	skb = NULL;
6762 
6763 	if (chan->sdu->len == chan->sdu_len) {
6764 		err = chan->ops->recv(chan, chan->sdu);
6765 		if (!err) {
6766 			chan->sdu = NULL;
6767 			chan->sdu_last_frag = NULL;
6768 			chan->sdu_len = 0;
6769 		}
6770 	}
6771 
6772 failed:
6773 	if (err) {
6774 		kfree_skb(skb);
6775 		kfree_skb(chan->sdu);
6776 		chan->sdu = NULL;
6777 		chan->sdu_last_frag = NULL;
6778 		chan->sdu_len = 0;
6779 	}
6780 
6781 	/* We can't return an error here since we took care of the skb
6782 	 * freeing internally. An error return would cause the caller to
6783 	 * do a double-free of the skb.
6784 	 */
6785 	return 0;
6786 }
6787 
6788 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6789 			       struct sk_buff *skb)
6790 {
6791 	struct l2cap_chan *chan;
6792 
6793 	chan = l2cap_get_chan_by_scid(conn, cid);
6794 	if (!chan) {
6795 		if (cid == L2CAP_CID_A2MP) {
6796 			chan = a2mp_channel_create(conn, skb);
6797 			if (!chan) {
6798 				kfree_skb(skb);
6799 				return;
6800 			}
6801 
6802 			l2cap_chan_lock(chan);
6803 		} else {
6804 			BT_DBG("unknown cid 0x%4.4x", cid);
6805 			/* Drop packet and return */
6806 			kfree_skb(skb);
6807 			return;
6808 		}
6809 	}
6810 
6811 	BT_DBG("chan %p, len %d", chan, skb->len);
6812 
6813 	/* If we receive data on a fixed channel before the info req/rsp
6814 	 * procedure is done, simply assume that the channel is supported
6815 	 * and mark it as ready.
6816 	 */
6817 	if (chan->chan_type == L2CAP_CHAN_FIXED)
6818 		l2cap_chan_ready(chan);
6819 
6820 	if (chan->state != BT_CONNECTED)
6821 		goto drop;
6822 
6823 	switch (chan->mode) {
6824 	case L2CAP_MODE_LE_FLOWCTL:
6825 		if (l2cap_le_data_rcv(chan, skb) < 0)
6826 			goto drop;
6827 
6828 		goto done;
6829 
6830 	case L2CAP_MODE_BASIC:
6831 		/* If the socket receive buffer overflows we drop data here,
6832 		 * which is *bad* because L2CAP has to be reliable.
6833 		 * But we don't have any other choice: basic mode L2CAP
6834 		 * doesn't provide a flow control mechanism. */
6835 
6836 		if (chan->imtu < skb->len) {
6837 			BT_ERR("Dropping L2CAP data: receive buffer overflow");
6838 			goto drop;
6839 		}
6840 
6841 		if (!chan->ops->recv(chan, skb))
6842 			goto done;
6843 		break;
6844 
6845 	case L2CAP_MODE_ERTM:
6846 	case L2CAP_MODE_STREAMING:
6847 		l2cap_data_rcv(chan, skb);
6848 		goto done;
6849 
6850 	default:
6851 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6852 		break;
6853 	}
6854 
6855 drop:
6856 	kfree_skb(skb);
6857 
6858 done:
6859 	l2cap_chan_unlock(chan);
6860 }
6861 
6862 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6863 				  struct sk_buff *skb)
6864 {
6865 	struct hci_conn *hcon = conn->hcon;
6866 	struct l2cap_chan *chan;
6867 
6868 	if (hcon->type != ACL_LINK)
6869 		goto free_skb;
6870 
6871 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6872 					ACL_LINK);
6873 	if (!chan)
6874 		goto free_skb;
6875 
6876 	BT_DBG("chan %p, len %d", chan, skb->len);
6877 
6878 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6879 		goto drop;
6880 
6881 	if (chan->imtu < skb->len)
6882 		goto drop;
6883 
6884 	/* Store remote BD_ADDR and PSM for msg_name */
6885 	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
6886 	bt_cb(skb)->l2cap.psm = psm;
6887 
6888 	if (!chan->ops->recv(chan, skb)) {
6889 		l2cap_chan_put(chan);
6890 		return;
6891 	}
6892 
6893 drop:
6894 	l2cap_chan_put(chan);
6895 free_skb:
6896 	kfree_skb(skb);
6897 }
6898 
6899 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6900 {
6901 	struct l2cap_hdr *lh = (void *) skb->data;
6902 	struct hci_conn *hcon = conn->hcon;
6903 	u16 cid, len;
6904 	__le16 psm;
6905 
6906 	if (hcon->state != BT_CONNECTED) {
6907 		BT_DBG("queueing pending rx skb");
6908 		skb_queue_tail(&conn->pending_rx, skb);
6909 		return;
6910 	}
6911 
6912 	skb_pull(skb, L2CAP_HDR_SIZE);
6913 	cid = __le16_to_cpu(lh->cid);
6914 	len = __le16_to_cpu(lh->len);
6915 
6916 	if (len != skb->len) {
6917 		kfree_skb(skb);
6918 		return;
6919 	}
6920 
6921 	/* Since we can't actively block incoming LE connections we must
6922 	 * at least ensure that we ignore incoming data from them.
6923 	 */
6924 	if (hcon->type == LE_LINK &&
6925 	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
6926 				   bdaddr_dst_type(hcon))) {
6927 		kfree_skb(skb);
6928 		return;
6929 	}
6930 
6931 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
6932 
6933 	switch (cid) {
6934 	case L2CAP_CID_SIGNALING:
6935 		l2cap_sig_channel(conn, skb);
6936 		break;
6937 
6938 	case L2CAP_CID_CONN_LESS:
6939 		psm = get_unaligned((__le16 *) skb->data);
6940 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
6941 		l2cap_conless_channel(conn, psm, skb);
6942 		break;
6943 
6944 	case L2CAP_CID_LE_SIGNALING:
6945 		l2cap_le_sig_channel(conn, skb);
6946 		break;
6947 
6948 	default:
6949 		l2cap_data_channel(conn, cid, skb);
6950 		break;
6951 	}
6952 }
6953 
6954 static void process_pending_rx(struct work_struct *work)
6955 {
6956 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6957 					       pending_rx_work);
6958 	struct sk_buff *skb;
6959 
6960 	BT_DBG("");
6961 
6962 	while ((skb = skb_dequeue(&conn->pending_rx)))
6963 		l2cap_recv_frame(conn, skb);
6964 }
6965 
6966 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6967 {
6968 	struct l2cap_conn *conn = hcon->l2cap_data;
6969 	struct hci_chan *hchan;
6970 
6971 	if (conn)
6972 		return conn;
6973 
6974 	hchan = hci_chan_create(hcon);
6975 	if (!hchan)
6976 		return NULL;
6977 
6978 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
6979 	if (!conn) {
6980 		hci_chan_del(hchan);
6981 		return NULL;
6982 	}
6983 
6984 	kref_init(&conn->ref);
6985 	hcon->l2cap_data = conn;
6986 	conn->hcon = hci_conn_get(hcon);
6987 	conn->hchan = hchan;
6988 
6989 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
6990 
6991 	switch (hcon->type) {
6992 	case LE_LINK:
6993 		if (hcon->hdev->le_mtu) {
6994 			conn->mtu = hcon->hdev->le_mtu;
6995 			break;
6996 		}
6997 		/* fall through */
6998 	default:
6999 		conn->mtu = hcon->hdev->acl_mtu;
7000 		break;
7001 	}
7002 
7003 	conn->feat_mask = 0;
7004 
7005 	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7006 
7007 	if (hcon->type == ACL_LINK &&
7008 	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7009 		conn->local_fixed_chan |= L2CAP_FC_A2MP;
7010 
7011 	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7012 	    (bredr_sc_enabled(hcon->hdev) ||
7013 	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7014 		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7015 
7016 	mutex_init(&conn->ident_lock);
7017 	mutex_init(&conn->chan_lock);
7018 
7019 	INIT_LIST_HEAD(&conn->chan_l);
7020 	INIT_LIST_HEAD(&conn->users);
7021 
7022 	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7023 
7024 	skb_queue_head_init(&conn->pending_rx);
7025 	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7026 	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7027 
7028 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7029 
7030 	return conn;
7031 }
7032 
7033 static bool is_valid_psm(u16 psm, u8 dst_type) {
7034 	if (!psm)
7035 		return false;
7036 
7037 	if (bdaddr_type_is_le(dst_type))
7038 		return (psm <= 0x00ff);
7039 
7040 	/* PSM must be odd and lsb of upper byte must be 0 */
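	/* e.g. 0x0001 (SDP) and 0x0003 (RFCOMM) pass this check,
	 * while 0x0002 and 0x0101 do not.
	 */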
7041 	return ((psm & 0x0101) == 0x0001);
7042 }
7043 
7044 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7045 		       bdaddr_t *dst, u8 dst_type)
7046 {
7047 	struct l2cap_conn *conn;
7048 	struct hci_conn *hcon;
7049 	struct hci_dev *hdev;
7050 	int err;
7051 
7052 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7053 	       dst_type, __le16_to_cpu(psm));
7054 
7055 	hdev = hci_get_route(dst, &chan->src);
7056 	if (!hdev)
7057 		return -EHOSTUNREACH;
7058 
7059 	hci_dev_lock(hdev);
7060 
7061 	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7062 	    chan->chan_type != L2CAP_CHAN_RAW) {
7063 		err = -EINVAL;
7064 		goto done;
7065 	}
7066 
7067 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7068 		err = -EINVAL;
7069 		goto done;
7070 	}
7071 
7072 	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7073 		err = -EINVAL;
7074 		goto done;
7075 	}
7076 
7077 	switch (chan->mode) {
7078 	case L2CAP_MODE_BASIC:
7079 		break;
7080 	case L2CAP_MODE_LE_FLOWCTL:
7081 		l2cap_le_flowctl_init(chan);
7082 		break;
7083 	case L2CAP_MODE_ERTM:
7084 	case L2CAP_MODE_STREAMING:
7085 		if (!disable_ertm)
7086 			break;
7087 		/* fall through */
7088 	default:
7089 		err = -EOPNOTSUPP;
7090 		goto done;
7091 	}
7092 
7093 	switch (chan->state) {
7094 	case BT_CONNECT:
7095 	case BT_CONNECT2:
7096 	case BT_CONFIG:
7097 		/* Already connecting */
7098 		err = 0;
7099 		goto done;
7100 
7101 	case BT_CONNECTED:
7102 		/* Already connected */
7103 		err = -EISCONN;
7104 		goto done;
7105 
7106 	case BT_OPEN:
7107 	case BT_BOUND:
7108 		/* Can connect */
7109 		break;
7110 
7111 	default:
7112 		err = -EBADFD;
7113 		goto done;
7114 	}
7115 
7116 	/* Set destination address and psm */
7117 	bacpy(&chan->dst, dst);
7118 	chan->dst_type = dst_type;
7119 
7120 	chan->psm = psm;
7121 	chan->dcid = cid;
7122 
7123 	if (bdaddr_type_is_le(dst_type)) {
7124 		/* Convert from L2CAP channel address type to HCI address type
7125 		 */
7126 		if (dst_type == BDADDR_LE_PUBLIC)
7127 			dst_type = ADDR_LE_DEV_PUBLIC;
7128 		else
7129 			dst_type = ADDR_LE_DEV_RANDOM;
7130 
7131 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7132 			hcon = hci_connect_le(hdev, dst, dst_type,
7133 					      chan->sec_level,
7134 					      HCI_LE_CONN_TIMEOUT,
7135 					      HCI_ROLE_SLAVE);
7136 		else
7137 			hcon = hci_connect_le_scan(hdev, dst, dst_type,
7138 						   chan->sec_level,
7139 						   HCI_LE_CONN_TIMEOUT);
7140 
7141 	} else {
7142 		u8 auth_type = l2cap_get_auth_type(chan);
7143 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7144 	}
7145 
7146 	if (IS_ERR(hcon)) {
7147 		err = PTR_ERR(hcon);
7148 		goto done;
7149 	}
7150 
7151 	conn = l2cap_conn_add(hcon);
7152 	if (!conn) {
7153 		hci_conn_drop(hcon);
7154 		err = -ENOMEM;
7155 		goto done;
7156 	}
7157 
7158 	mutex_lock(&conn->chan_lock);
7159 	l2cap_chan_lock(chan);
7160 
7161 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7162 		hci_conn_drop(hcon);
7163 		err = -EBUSY;
7164 		goto chan_unlock;
7165 	}
7166 
7167 	/* Update source addr of the socket */
7168 	bacpy(&chan->src, &hcon->src);
7169 	chan->src_type = bdaddr_src_type(hcon);
7170 
7171 	__l2cap_chan_add(conn, chan);
7172 
7173 	/* l2cap_chan_add takes its own ref so we can drop this one */
7174 	hci_conn_drop(hcon);
7175 
7176 	l2cap_state_change(chan, BT_CONNECT);
7177 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7178 
7179 	/* Release chan->sport so that it can be reused by other
7180 	 * sockets (as it's only used for listening sockets).
7181 	 */
7182 	write_lock(&chan_list_lock);
7183 	chan->sport = 0;
7184 	write_unlock(&chan_list_lock);
7185 
7186 	if (hcon->state == BT_CONNECTED) {
7187 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7188 			__clear_chan_timer(chan);
7189 			if (l2cap_chan_check_security(chan, true))
7190 				l2cap_state_change(chan, BT_CONNECTED);
7191 		} else
7192 			l2cap_do_start(chan);
7193 	}
7194 
7195 	err = 0;
7196 
7197 chan_unlock:
7198 	l2cap_chan_unlock(chan);
7199 	mutex_unlock(&conn->chan_lock);
7200 done:
7201 	hci_dev_unlock(hdev);
7202 	hci_dev_put(hdev);
7203 	return err;
7204 }
7205 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7206 
7207 /* ---- L2CAP interface with lower layer (HCI) ---- */
7208 
7209 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7210 {
7211 	int exact = 0, lm1 = 0, lm2 = 0;
7212 	struct l2cap_chan *c;
7213 
7214 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7215 
7216 	/* Find listening sockets and check their link_mode */
7217 	read_lock(&chan_list_lock);
7218 	list_for_each_entry(c, &chan_list, global_l) {
7219 		if (c->state != BT_LISTEN)
7220 			continue;
7221 
7222 		if (!bacmp(&c->src, &hdev->bdaddr)) {
7223 			lm1 |= HCI_LM_ACCEPT;
7224 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7225 				lm1 |= HCI_LM_MASTER;
7226 			exact++;
7227 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
7228 			lm2 |= HCI_LM_ACCEPT;
7229 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7230 				lm2 |= HCI_LM_MASTER;
7231 		}
7232 	}
7233 	read_unlock(&chan_list_lock);
7234 
7235 	return exact ? lm1 : lm2;
7236 }
7237 
7238 /* Find the next fixed channel in BT_LISTEN state, continuing iteration
7239  * from an existing channel in the list or from the beginning of the
7240  * global list (by passing NULL as first parameter).
7241  */
7242 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7243 						  struct hci_conn *hcon)
7244 {
7245 	u8 src_type = bdaddr_src_type(hcon);
7246 
7247 	read_lock(&chan_list_lock);
7248 
7249 	if (c)
7250 		c = list_next_entry(c, global_l);
7251 	else
7252 		c = list_entry(chan_list.next, typeof(*c), global_l);
7253 
7254 	list_for_each_entry_from(c, &chan_list, global_l) {
7255 		if (c->chan_type != L2CAP_CHAN_FIXED)
7256 			continue;
7257 		if (c->state != BT_LISTEN)
7258 			continue;
7259 		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7260 			continue;
7261 		if (src_type != c->src_type)
7262 			continue;
7263 
7264 		l2cap_chan_hold(c);
7265 		read_unlock(&chan_list_lock);
7266 		return c;
7267 	}
7268 
7269 	read_unlock(&chan_list_lock);
7270 
7271 	return NULL;
7272 }
7273 
7274 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7275 {
7276 	struct hci_dev *hdev = hcon->hdev;
7277 	struct l2cap_conn *conn;
7278 	struct l2cap_chan *pchan;
7279 	u8 dst_type;
7280 
7281 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7282 		return;
7283 
7284 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7285 
7286 	if (status) {
7287 		l2cap_conn_del(hcon, bt_to_errno(status));
7288 		return;
7289 	}
7290 
7291 	conn = l2cap_conn_add(hcon);
7292 	if (!conn)
7293 		return;
7294 
7295 	dst_type = bdaddr_dst_type(hcon);
7296 
7297 	/* If device is blocked, do not create channels for it */
7298 	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7299 		return;
7300 
7301 	/* Find fixed channels and notify them of the new connection. We
7302 	 * use multiple individual lookups, continuing each time where
7303 	 * we left off, because the list lock would prevent calling the
7304 	 * potentially sleeping l2cap_chan_lock() function.
7305 	 */
7306 	pchan = l2cap_global_fixed_chan(NULL, hcon);
7307 	while (pchan) {
7308 		struct l2cap_chan *chan, *next;
7309 
7310 		/* Client fixed channels should override server ones */
7311 		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7312 			goto next;
7313 
7314 		l2cap_chan_lock(pchan);
7315 		chan = pchan->ops->new_connection(pchan);
7316 		if (chan) {
7317 			bacpy(&chan->src, &hcon->src);
7318 			bacpy(&chan->dst, &hcon->dst);
7319 			chan->src_type = bdaddr_src_type(hcon);
7320 			chan->dst_type = dst_type;
7321 
7322 			__l2cap_chan_add(conn, chan);
7323 		}
7324 
7325 		l2cap_chan_unlock(pchan);
7326 next:
7327 		next = l2cap_global_fixed_chan(pchan, hcon);
7328 		l2cap_chan_put(pchan);
7329 		pchan = next;
7330 	}
7331 
7332 	l2cap_conn_ready(conn);
7333 }
7334 
7335 int l2cap_disconn_ind(struct hci_conn *hcon)
7336 {
7337 	struct l2cap_conn *conn = hcon->l2cap_data;
7338 
7339 	BT_DBG("hcon %p", hcon);
7340 
7341 	if (!conn)
7342 		return HCI_ERROR_REMOTE_USER_TERM;
7343 	return conn->disc_reason;
7344 }
7345 
7346 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7347 {
7348 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7349 		return;
7350 
7351 	BT_DBG("hcon %p reason %d", hcon, reason);
7352 
7353 	l2cap_conn_del(hcon, bt_to_errno(reason));
7354 }
7355 
7356 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7357 {
7358 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7359 		return;
7360 
7361 	if (encrypt == 0x00) {
7362 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
7363 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7364 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
7365 			   chan->sec_level == BT_SECURITY_FIPS)
7366 			l2cap_chan_close(chan, ECONNREFUSED);
7367 	} else {
7368 		if (chan->sec_level == BT_SECURITY_MEDIUM)
7369 			__clear_chan_timer(chan);
7370 	}
7371 }
7372 
7373 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7374 {
7375 	struct l2cap_conn *conn = hcon->l2cap_data;
7376 	struct l2cap_chan *chan;
7377 
7378 	if (!conn)
7379 		return;
7380 
7381 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7382 
7383 	mutex_lock(&conn->chan_lock);
7384 
7385 	list_for_each_entry(chan, &conn->chan_l, list) {
7386 		l2cap_chan_lock(chan);
7387 
7388 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7389 		       state_to_string(chan->state));
7390 
7391 		if (chan->scid == L2CAP_CID_A2MP) {
7392 			l2cap_chan_unlock(chan);
7393 			continue;
7394 		}
7395 
7396 		if (!status && encrypt)
7397 			chan->sec_level = hcon->sec_level;
7398 
7399 		if (!__l2cap_no_conn_pending(chan)) {
7400 			l2cap_chan_unlock(chan);
7401 			continue;
7402 		}
7403 
7404 		if (!status && (chan->state == BT_CONNECTED ||
7405 				chan->state == BT_CONFIG)) {
7406 			chan->ops->resume(chan);
7407 			l2cap_check_encryption(chan, encrypt);
7408 			l2cap_chan_unlock(chan);
7409 			continue;
7410 		}
7411 
7412 		if (chan->state == BT_CONNECT) {
7413 			if (!status)
7414 				l2cap_start_connection(chan);
7415 			else
7416 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7417 		} else if (chan->state == BT_CONNECT2 &&
7418 			   chan->mode != L2CAP_MODE_LE_FLOWCTL) {
7419 			struct l2cap_conn_rsp rsp;
7420 			__u16 res, stat;
7421 
7422 			if (!status) {
7423 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7424 					res = L2CAP_CR_PEND;
7425 					stat = L2CAP_CS_AUTHOR_PEND;
7426 					chan->ops->defer(chan);
7427 				} else {
7428 					l2cap_state_change(chan, BT_CONFIG);
7429 					res = L2CAP_CR_SUCCESS;
7430 					stat = L2CAP_CS_NO_INFO;
7431 				}
7432 			} else {
7433 				l2cap_state_change(chan, BT_DISCONN);
7434 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7435 				res = L2CAP_CR_SEC_BLOCK;
7436 				stat = L2CAP_CS_NO_INFO;
7437 			}
7438 
7439 			rsp.scid   = cpu_to_le16(chan->dcid);
7440 			rsp.dcid   = cpu_to_le16(chan->scid);
7441 			rsp.result = cpu_to_le16(res);
7442 			rsp.status = cpu_to_le16(stat);
7443 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
7444 				       sizeof(rsp), &rsp);
7445 
7446 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7447 			    res == L2CAP_CR_SUCCESS) {
7448 				char buf[128];
7449 				set_bit(CONF_REQ_SENT, &chan->conf_state);
7450 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
7451 					       L2CAP_CONF_REQ,
7452 					       l2cap_build_conf_req(chan, buf),
7453 					       buf);
7454 				chan->num_conf_req++;
7455 			}
7456 		}
7457 
7458 		l2cap_chan_unlock(chan);
7459 	}
7460 
7461 	mutex_unlock(&conn->chan_lock);
7462 }
7463 
7464 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7465 {
7466 	struct l2cap_conn *conn = hcon->l2cap_data;
7467 	struct l2cap_hdr *hdr;
7468 	int len;
7469 
7470 	/* For an AMP controller, do not create an l2cap conn */
7471 	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
7472 		goto drop;
7473 
7474 	if (!conn)
7475 		conn = l2cap_conn_add(hcon);
7476 
7477 	if (!conn)
7478 		goto drop;
7479 
7480 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7481 
7482 	switch (flags) {
7483 	case ACL_START:
7484 	case ACL_START_NO_FLUSH:
7485 	case ACL_COMPLETE:
7486 		if (conn->rx_len) {
7487 			BT_ERR("Unexpected start frame (len %d)", skb->len);
7488 			kfree_skb(conn->rx_skb);
7489 			conn->rx_skb = NULL;
7490 			conn->rx_len = 0;
7491 			l2cap_conn_unreliable(conn, ECOMM);
7492 		}
7493 
7494 		/* A start fragment always begins with the Basic L2CAP header */
7495 		if (skb->len < L2CAP_HDR_SIZE) {
7496 			BT_ERR("Frame is too short (len %d)", skb->len);
7497 			l2cap_conn_unreliable(conn, ECOMM);
7498 			goto drop;
7499 		}
7500 
7501 		hdr = (struct l2cap_hdr *) skb->data;
7502 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7503 
7504 		if (len == skb->len) {
7505 			/* Complete frame received */
7506 			l2cap_recv_frame(conn, skb);
7507 			return;
7508 		}
7509 
7510 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7511 
7512 		if (skb->len > len) {
7513 			BT_ERR("Frame is too long (len %d, expected len %d)",
7514 			       skb->len, len);
7515 			l2cap_conn_unreliable(conn, ECOMM);
7516 			goto drop;
7517 		}
7518 
7519 		/* Allocate skb for the complete frame (with header) */
7520 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7521 		if (!conn->rx_skb)
7522 			goto drop;
7523 
7524 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7525 					  skb->len);
7526 		conn->rx_len = len - skb->len;
7527 		break;
7528 
7529 	case ACL_CONT:
7530 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7531 
7532 		if (!conn->rx_len) {
7533 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7534 			l2cap_conn_unreliable(conn, ECOMM);
7535 			goto drop;
7536 		}
7537 
7538 		if (skb->len > conn->rx_len) {
7539 			BT_ERR("Fragment is too long (len %d, expected %d)",
7540 			       skb->len, conn->rx_len);
7541 			kfree_skb(conn->rx_skb);
7542 			conn->rx_skb = NULL;
7543 			conn->rx_len = 0;
7544 			l2cap_conn_unreliable(conn, ECOMM);
7545 			goto drop;
7546 		}
7547 
7548 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7549 					  skb->len);
7550 		conn->rx_len -= skb->len;
7551 
7552 		if (!conn->rx_len) {
7553 			/* Complete frame received. l2cap_recv_frame
7554 			 * takes ownership of the skb, so clear the
7555 			 * connection's rx_skb pointer first.
7556 			 */
7557 			struct sk_buff *rx_skb = conn->rx_skb;
7558 			conn->rx_skb = NULL;
7559 			l2cap_recv_frame(conn, rx_skb);
7560 		}
7561 		break;
7562 	}
7563 
7564 drop:
7565 	kfree_skb(skb);
7566 }
7567 
7568 static struct hci_cb l2cap_cb = {
7569 	.name		= "L2CAP",
7570 	.connect_cfm	= l2cap_connect_cfm,
7571 	.disconn_cfm	= l2cap_disconn_cfm,
7572 	.security_cfm	= l2cap_security_cfm,
7573 };
7574 
7575 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7576 {
7577 	struct l2cap_chan *c;
7578 
7579 	read_lock(&chan_list_lock);
7580 
7581 	list_for_each_entry(c, &chan_list, global_l) {
7582 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7583 			   &c->src, c->src_type, &c->dst, c->dst_type,
7584 			   c->state, __le16_to_cpu(c->psm),
7585 			   c->scid, c->dcid, c->imtu, c->omtu,
7586 			   c->sec_level, c->mode);
7587 	}
7588 
7589 	read_unlock(&chan_list_lock);
7590 
7591 	return 0;
7592 }
7593 
7594 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7595 {
7596 	return single_open(file, l2cap_debugfs_show, inode->i_private);
7597 }
7598 
7599 static const struct file_operations l2cap_debugfs_fops = {
7600 	.open		= l2cap_debugfs_open,
7601 	.read		= seq_read,
7602 	.llseek		= seq_lseek,
7603 	.release	= single_release,
7604 };
7605 
7606 static struct dentry *l2cap_debugfs;
7607 
7608 int __init l2cap_init(void)
7609 {
7610 	int err;
7611 
7612 	err = l2cap_init_sockets();
7613 	if (err < 0)
7614 		return err;
7615 
7616 	hci_register_cb(&l2cap_cb);
7617 
7618 	if (IS_ERR_OR_NULL(bt_debugfs))
7619 		return 0;
7620 
7621 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7622 					    NULL, &l2cap_debugfs_fops);
7623 
7624 	debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7625 			   &le_max_credits);
7626 	debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7627 			   &le_default_mps);
7628 
7629 	return 0;
7630 }
7631 
7632 void l2cap_exit(void)
7633 {
7634 	debugfs_remove(l2cap_debugfs);
7635 	hci_unregister_cb(&l2cap_cb);
7636 	l2cap_cleanup_sockets();
7637 }
7638 
7639 module_param(disable_ertm, bool, 0644);
7640 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
7641