xref: /openbmc/linux/net/bluetooth/l2cap_core.c (revision c819e2cf)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43 
44 #define LE_FLOWCTL_MAX_CREDITS 65535
45 
46 bool disable_ertm;
47 
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49 
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
52 
53 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
54 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
55 
56 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
57 				       u8 code, u8 ident, u16 dlen, void *data);
58 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
59 			   void *data);
60 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
61 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
62 
63 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
64 		     struct sk_buff_head *skbs, u8 event);
65 
66 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
67 {
68 	if (hcon->type == LE_LINK) {
69 		if (type == ADDR_LE_DEV_PUBLIC)
70 			return BDADDR_LE_PUBLIC;
71 		else
72 			return BDADDR_LE_RANDOM;
73 	}
74 
75 	return BDADDR_BREDR;
76 }
77 
78 /* ---- L2CAP channels ---- */
79 
80 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
81 						   u16 cid)
82 {
83 	struct l2cap_chan *c;
84 
85 	list_for_each_entry(c, &conn->chan_l, list) {
86 		if (c->dcid == cid)
87 			return c;
88 	}
89 	return NULL;
90 }
91 
92 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
93 						   u16 cid)
94 {
95 	struct l2cap_chan *c;
96 
97 	list_for_each_entry(c, &conn->chan_l, list) {
98 		if (c->scid == cid)
99 			return c;
100 	}
101 	return NULL;
102 }
103 
/* Find channel with given SCID.
 * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	/* conn->chan_lock keeps the channel on the list while its lock
	 * is taken.  NOTE(review): no reference is taken on the channel
	 * before chan_lock is dropped -- later kernels add
	 * l2cap_chan_hold() here to avoid a potential use-after-free;
	 * verify whether callers can race with channel destruction.
	 */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
119 
/* Find channel with given DCID.
 * Returns locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	/* conn->chan_lock keeps the channel on the list while its lock
	 * is taken.  NOTE(review): as with the SCID variant, no channel
	 * reference is taken here -- later kernels add
	 * l2cap_chan_hold(); verify lifetime expectations of callers.
	 */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
136 
137 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
138 						    u8 ident)
139 {
140 	struct l2cap_chan *c;
141 
142 	list_for_each_entry(c, &conn->chan_l, list) {
143 		if (c->ident == ident)
144 			return c;
145 	}
146 	return NULL;
147 }
148 
/* Find the channel with the given outstanding signalling identifier.
 * Returns locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	/* NOTE(review): like the SCID/DCID lookups, no channel
	 * reference is taken before chan_lock is dropped; later
	 * kernels add l2cap_chan_hold() here -- verify.
	 */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
162 
163 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
164 {
165 	struct l2cap_chan *c;
166 
167 	list_for_each_entry(c, &chan_list, global_l) {
168 		if (c->sport == psm && !bacmp(&c->src, src))
169 			return c;
170 	}
171 	return NULL;
172 }
173 
174 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
175 {
176 	int err;
177 
178 	write_lock(&chan_list_lock);
179 
180 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
181 		err = -EADDRINUSE;
182 		goto done;
183 	}
184 
185 	if (psm) {
186 		chan->psm = psm;
187 		chan->sport = psm;
188 		err = 0;
189 	} else {
190 		u16 p;
191 
192 		err = -EINVAL;
193 		for (p = 0x1001; p < 0x1100; p += 2)
194 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
195 				chan->psm   = cpu_to_le16(p);
196 				chan->sport = cpu_to_le16(p);
197 				err = 0;
198 				break;
199 			}
200 	}
201 
202 done:
203 	write_unlock(&chan_list_lock);
204 	return err;
205 }
206 EXPORT_SYMBOL_GPL(l2cap_add_psm);
207 
208 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
209 {
210 	write_lock(&chan_list_lock);
211 
212 	/* Override the defaults (which are for conn-oriented) */
213 	chan->omtu = L2CAP_DEFAULT_MTU;
214 	chan->chan_type = L2CAP_CHAN_FIXED;
215 
216 	chan->scid = scid;
217 
218 	write_unlock(&chan_list_lock);
219 
220 	return 0;
221 }
222 
223 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
224 {
225 	u16 cid, dyn_end;
226 
227 	if (conn->hcon->type == LE_LINK)
228 		dyn_end = L2CAP_CID_LE_DYN_END;
229 	else
230 		dyn_end = L2CAP_CID_DYN_END;
231 
232 	for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
233 		if (!__l2cap_get_chan_by_scid(conn, cid))
234 			return cid;
235 	}
236 
237 	return 0;
238 }
239 
240 static void l2cap_state_change(struct l2cap_chan *chan, int state)
241 {
242 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
243 	       state_to_string(state));
244 
245 	chan->state = state;
246 	chan->ops->state_change(chan, state, 0);
247 }
248 
249 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
250 						int state, int err)
251 {
252 	chan->state = state;
253 	chan->ops->state_change(chan, chan->state, err);
254 }
255 
256 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
257 {
258 	chan->ops->state_change(chan, chan->state, err);
259 }
260 
/* (Re)arm the ERTM retransmission timer.
 *
 * The timer is only started when retrans_timeout is non-zero and no
 * monitor timer is pending -- presumably because the monitor timer
 * supervises the link once a poll is outstanding; see
 * __set_monitor_timer(), which stops the retransmission timer.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
269 
270 static void __set_monitor_timer(struct l2cap_chan *chan)
271 {
272 	__clear_retrans_timer(chan);
273 	if (chan->monitor_timeout) {
274 		l2cap_set_timer(chan, &chan->monitor_timer,
275 				msecs_to_jiffies(chan->monitor_timeout));
276 	}
277 }
278 
279 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
280 					       u16 seq)
281 {
282 	struct sk_buff *skb;
283 
284 	skb_queue_walk(head, skb) {
285 		if (bt_cb(skb)->control.txseq == seq)
286 			return skb;
287 	}
288 
289 	return NULL;
290 }
291 
292 /* ---- L2CAP sequence number lists ---- */
293 
294 /* For ERTM, ordered lists of sequence numbers must be tracked for
295  * SREJ requests that are received and for frames that are to be
296  * retransmitted. These seq_list functions implement a singly-linked
297  * list in an array, where membership in the list can also be checked
298  * in constant time. Items can also be added to the tail of the list
299  * and removed from the head in constant time, without further memory
300  * allocs or frees.
301  */
302 
303 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
304 {
305 	size_t alloc_size, i;
306 
307 	/* Allocated size is a power of 2 to map sequence numbers
308 	 * (which may be up to 14 bits) in to a smaller array that is
309 	 * sized for the negotiated ERTM transmit windows.
310 	 */
311 	alloc_size = roundup_pow_of_two(size);
312 
313 	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
314 	if (!seq_list->list)
315 		return -ENOMEM;
316 
317 	seq_list->mask = alloc_size - 1;
318 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
319 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
320 	for (i = 0; i < alloc_size; i++)
321 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
322 
323 	return 0;
324 }
325 
326 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
327 {
328 	kfree(seq_list->list);
329 }
330 
331 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
332 					   u16 seq)
333 {
334 	/* Constant-time check for list membership */
335 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
336 }
337 
/* Remove and return the sequence number at the head of the list.
 *
 * The popped slot is reset and the head advances to the entry it was
 * linked to; when that link is the L2CAP_SEQ_LIST_TAIL sentinel, the
 * list becomes empty and both markers are cleared.
 *
 * NOTE(review): there is no empty-list guard -- popping while head is
 * L2CAP_SEQ_LIST_CLEAR would operate on a bogus (masked) slot, so
 * callers presumably only pop after checking non-emptiness; verify.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry and clear this slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
353 
354 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
355 {
356 	u16 i;
357 
358 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
359 		return;
360 
361 	for (i = 0; i <= seq_list->mask; i++)
362 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
363 
364 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
365 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
366 }
367 
368 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
369 {
370 	u16 mask = seq_list->mask;
371 
372 	/* All appends happen in constant time */
373 
374 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
375 		return;
376 
377 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
378 		seq_list->head = seq;
379 	else
380 		seq_list->list[seq_list->tail & mask] = seq;
381 
382 	seq_list->tail = seq;
383 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
384 }
385 
/* Delayed-work handler for the channel timer.
 *
 * Closes the channel with an error reason derived from its state:
 * connection-setup and configuration states map to ECONNREFUSED,
 * everything else to ETIMEDOUT.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock first, then the channel lock. */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* ops->close() runs without the channel lock held. */
	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Drop the reference pinning the channel across the delayed
	 * work -- presumably taken when the timer was armed (see
	 * l2cap_set_timer); verify against the timer helpers.
	 */
	l2cap_chan_put(chan);
}
415 
/* Allocate and initialise a new channel object.
 *
 * The channel starts in state BT_OPEN with a single reference held,
 * is linked onto the global channel list, and has its timeout worker
 * set up.  Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	/* NOTE(review): GFP_ATOMIC looks stricter than necessary for a
	 * channel constructor -- confirm whether any caller really runs
	 * in atomic context (GFP_KERNEL would be preferable otherwise).
	 */
	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
447 
448 static void l2cap_chan_destroy(struct kref *kref)
449 {
450 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
451 
452 	BT_DBG("chan %p", chan);
453 
454 	write_lock(&chan_list_lock);
455 	list_del(&chan->global_l);
456 	write_unlock(&chan_list_lock);
457 
458 	kfree(chan);
459 }
460 
/* Take a reference on the channel.
 *
 * The raw kref read in the debug message is racy and for logging only.
 */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
467 
/* Drop a reference on the channel; the last put triggers
 * l2cap_chan_destroy(), which unlinks and frees the channel.
 *
 * The raw kref read in the debug message is racy and for logging only.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
475 
476 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
477 {
478 	chan->fcs  = L2CAP_FCS_CRC16;
479 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
480 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
481 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
482 	chan->remote_max_tx = chan->max_tx;
483 	chan->remote_tx_win = chan->tx_win;
484 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
485 	chan->sec_level = BT_SECURITY_LOW;
486 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
487 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
488 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
489 	chan->conf_state = 0;
490 
491 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
492 }
493 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
494 
495 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
496 {
497 	chan->sdu = NULL;
498 	chan->sdu_last_frag = NULL;
499 	chan->sdu_len = 0;
500 	chan->tx_credits = 0;
501 	chan->rx_credits = le_max_credits;
502 	chan->mps = min_t(u16, chan->imtu, le_default_mps);
503 
504 	skb_queue_head_init(&chan->tx_q);
505 }
506 
/* Attach @chan to @conn; callers hold conn->chan_lock (see the locked
 * wrapper l2cap_chan_add()).
 *
 * Assigns CIDs and default MTUs according to the channel type, sets
 * the best-effort local flow-spec defaults, takes a channel reference
 * and links the channel onto the connection's channel list.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* Default disconnect reason until something more specific occurs */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Best-effort defaults for the local flow specification */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
558 
559 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
560 {
561 	mutex_lock(&conn->chan_lock);
562 	__l2cap_chan_add(conn, chan);
563 	mutex_unlock(&conn->chan_lock);
564 }
565 
566 void l2cap_chan_del(struct l2cap_chan *chan, int err)
567 {
568 	struct l2cap_conn *conn = chan->conn;
569 
570 	__clear_chan_timer(chan);
571 
572 	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
573 	       state_to_string(chan->state));
574 
575 	chan->ops->teardown(chan, err);
576 
577 	if (conn) {
578 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
579 		/* Delete from channel list */
580 		list_del(&chan->list);
581 
582 		l2cap_chan_put(chan);
583 
584 		chan->conn = NULL;
585 
586 		/* Reference was only held for non-fixed channels or
587 		 * fixed channels that explicitly requested it using the
588 		 * FLAG_HOLD_HCI_CONN flag.
589 		 */
590 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
591 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
592 			hci_conn_drop(conn->hcon);
593 
594 		if (mgr && mgr->bredr_chan == chan)
595 			mgr->bredr_chan = NULL;
596 	}
597 
598 	if (chan->hs_hchan) {
599 		struct hci_chan *hs_hchan = chan->hs_hchan;
600 
601 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
602 		amp_disconnect_logical_link(hs_hchan);
603 	}
604 
605 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
606 		return;
607 
608 	switch(chan->mode) {
609 	case L2CAP_MODE_BASIC:
610 		break;
611 
612 	case L2CAP_MODE_LE_FLOWCTL:
613 		skb_queue_purge(&chan->tx_q);
614 		break;
615 
616 	case L2CAP_MODE_ERTM:
617 		__clear_retrans_timer(chan);
618 		__clear_monitor_timer(chan);
619 		__clear_ack_timer(chan);
620 
621 		skb_queue_purge(&chan->srej_q);
622 
623 		l2cap_seq_list_free(&chan->srej_list);
624 		l2cap_seq_list_free(&chan->retrans_list);
625 
626 		/* fall through */
627 
628 	case L2CAP_MODE_STREAMING:
629 		skb_queue_purge(&chan->tx_q);
630 		break;
631 	}
632 
633 	return;
634 }
635 EXPORT_SYMBOL_GPL(l2cap_chan_del);
636 
637 static void l2cap_conn_update_id_addr(struct work_struct *work)
638 {
639 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
640 					       id_addr_update_work);
641 	struct hci_conn *hcon = conn->hcon;
642 	struct l2cap_chan *chan;
643 
644 	mutex_lock(&conn->chan_lock);
645 
646 	list_for_each_entry(chan, &conn->chan_l, list) {
647 		l2cap_chan_lock(chan);
648 		bacpy(&chan->dst, &hcon->dst);
649 		chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
650 		l2cap_chan_unlock(chan);
651 	}
652 
653 	mutex_unlock(&conn->chan_lock);
654 }
655 
656 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
657 {
658 	struct l2cap_conn *conn = chan->conn;
659 	struct l2cap_le_conn_rsp rsp;
660 	u16 result;
661 
662 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
663 		result = L2CAP_CR_AUTHORIZATION;
664 	else
665 		result = L2CAP_CR_BAD_PSM;
666 
667 	l2cap_state_change(chan, BT_DISCONN);
668 
669 	rsp.dcid    = cpu_to_le16(chan->scid);
670 	rsp.mtu     = cpu_to_le16(chan->imtu);
671 	rsp.mps     = cpu_to_le16(chan->mps);
672 	rsp.credits = cpu_to_le16(chan->rx_credits);
673 	rsp.result  = cpu_to_le16(result);
674 
675 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
676 		       &rsp);
677 }
678 
679 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
680 {
681 	struct l2cap_conn *conn = chan->conn;
682 	struct l2cap_conn_rsp rsp;
683 	u16 result;
684 
685 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
686 		result = L2CAP_CR_SEC_BLOCK;
687 	else
688 		result = L2CAP_CR_BAD_PSM;
689 
690 	l2cap_state_change(chan, BT_DISCONN);
691 
692 	rsp.scid   = cpu_to_le16(chan->dcid);
693 	rsp.dcid   = cpu_to_le16(chan->scid);
694 	rsp.result = cpu_to_le16(result);
695 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
696 
697 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
698 }
699 
/* Close a channel, sending whatever protocol message its current
 * state requires (disconnect request, connection reject) before
 * tearing it down.  Callers hold the channel lock (see
 * l2cap_chan_timeout()).
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Ask the peer to disconnect; the channel timer
			 * bounds how long we wait for its response.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming connection still pending: reject it first. */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
742 
743 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
744 {
745 	switch (chan->chan_type) {
746 	case L2CAP_CHAN_RAW:
747 		switch (chan->sec_level) {
748 		case BT_SECURITY_HIGH:
749 		case BT_SECURITY_FIPS:
750 			return HCI_AT_DEDICATED_BONDING_MITM;
751 		case BT_SECURITY_MEDIUM:
752 			return HCI_AT_DEDICATED_BONDING;
753 		default:
754 			return HCI_AT_NO_BONDING;
755 		}
756 		break;
757 	case L2CAP_CHAN_CONN_LESS:
758 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
759 			if (chan->sec_level == BT_SECURITY_LOW)
760 				chan->sec_level = BT_SECURITY_SDP;
761 		}
762 		if (chan->sec_level == BT_SECURITY_HIGH ||
763 		    chan->sec_level == BT_SECURITY_FIPS)
764 			return HCI_AT_NO_BONDING_MITM;
765 		else
766 			return HCI_AT_NO_BONDING;
767 		break;
768 	case L2CAP_CHAN_CONN_ORIENTED:
769 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
770 			if (chan->sec_level == BT_SECURITY_LOW)
771 				chan->sec_level = BT_SECURITY_SDP;
772 
773 			if (chan->sec_level == BT_SECURITY_HIGH ||
774 			    chan->sec_level == BT_SECURITY_FIPS)
775 				return HCI_AT_NO_BONDING_MITM;
776 			else
777 				return HCI_AT_NO_BONDING;
778 		}
779 		/* fall through */
780 	default:
781 		switch (chan->sec_level) {
782 		case BT_SECURITY_HIGH:
783 		case BT_SECURITY_FIPS:
784 			return HCI_AT_GENERAL_BONDING_MITM;
785 		case BT_SECURITY_MEDIUM:
786 			return HCI_AT_GENERAL_BONDING;
787 		default:
788 			return HCI_AT_NO_BONDING;
789 		}
790 		break;
791 	}
792 }
793 
794 /* Service level security */
795 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
796 {
797 	struct l2cap_conn *conn = chan->conn;
798 	__u8 auth_type;
799 
800 	if (conn->hcon->type == LE_LINK)
801 		return smp_conn_security(conn->hcon, chan->sec_level);
802 
803 	auth_type = l2cap_get_auth_type(chan);
804 
805 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
806 				 initiator);
807 }
808 
809 static u8 l2cap_get_ident(struct l2cap_conn *conn)
810 {
811 	u8 id;
812 
813 	/* Get next available identificator.
814 	 *    1 - 128 are used by kernel.
815 	 *  129 - 199 are reserved.
816 	 *  200 - 254 are used by utilities like l2ping, etc.
817 	 */
818 
819 	mutex_lock(&conn->ident_lock);
820 
821 	if (++conn->tx_ident > 128)
822 		conn->tx_ident = 1;
823 
824 	id = conn->tx_ident;
825 
826 	mutex_unlock(&conn->ident_lock);
827 
828 	return id;
829 }
830 
/* Build and transmit a single signalling command on @conn.
 *
 * If l2cap_build_cmd() fails to allocate, the command is silently
 * dropped.  Signalling packets go out at maximum priority and, where
 * the controller supports it (or on LE), marked non-flushable.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
855 
856 static bool __chan_is_moving(struct l2cap_chan *chan)
857 {
858 	return chan->move_state != L2CAP_MOVE_STABLE &&
859 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
860 }
861 
/* Hand one outgoing frame to the HCI layer.
 *
 * If the channel sits on an AMP controller (and is not mid-move), the
 * frame goes out on the high-speed HCI channel -- or is dropped when
 * that channel is gone.  Otherwise it is sent on the regular ACL link
 * with flush flags derived from the link type and FLAG_FLUSHABLE.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
893 
894 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
895 {
896 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
897 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
898 
899 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
900 		/* S-Frame */
901 		control->sframe = 1;
902 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
903 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
904 
905 		control->sar = 0;
906 		control->txseq = 0;
907 	} else {
908 		/* I-Frame */
909 		control->sframe = 0;
910 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
911 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
912 
913 		control->poll = 0;
914 		control->super = 0;
915 	}
916 }
917 
918 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
919 {
920 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
921 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
922 
923 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
924 		/* S-Frame */
925 		control->sframe = 1;
926 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
927 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
928 
929 		control->sar = 0;
930 		control->txseq = 0;
931 	} else {
932 		/* I-Frame */
933 		control->sframe = 0;
934 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
935 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
936 
937 		control->poll = 0;
938 		control->super = 0;
939 	}
940 }
941 
942 static inline void __unpack_control(struct l2cap_chan *chan,
943 				    struct sk_buff *skb)
944 {
945 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
946 		__unpack_extended_control(get_unaligned_le32(skb->data),
947 					  &bt_cb(skb)->control);
948 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
949 	} else {
950 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
951 					  &bt_cb(skb)->control);
952 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
953 	}
954 }
955 
956 static u32 __pack_extended_control(struct l2cap_ctrl *control)
957 {
958 	u32 packed;
959 
960 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
961 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
962 
963 	if (control->sframe) {
964 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
965 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
966 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
967 	} else {
968 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
969 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
970 	}
971 
972 	return packed;
973 }
974 
975 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
976 {
977 	u16 packed;
978 
979 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
980 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
981 
982 	if (control->sframe) {
983 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
984 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
985 		packed |= L2CAP_CTRL_FRAME_TYPE;
986 	} else {
987 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
988 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
989 	}
990 
991 	return packed;
992 }
993 
994 static inline void __pack_control(struct l2cap_chan *chan,
995 				  struct l2cap_ctrl *control,
996 				  struct sk_buff *skb)
997 {
998 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
999 		put_unaligned_le32(__pack_extended_control(control),
1000 				   skb->data + L2CAP_HDR_SIZE);
1001 	} else {
1002 		put_unaligned_le16(__pack_enhanced_control(control),
1003 				   skb->data + L2CAP_HDR_SIZE);
1004 	}
1005 }
1006 
1007 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1008 {
1009 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1010 		return L2CAP_EXT_HDR_SIZE;
1011 	else
1012 		return L2CAP_ENH_HDR_SIZE;
1013 }
1014 
/* Allocate and fill an S-frame PDU carrying the already-packed
 * @control field: basic L2CAP header, then the (enhanced or extended)
 * control field and, when CRC16 FCS is in use, a trailing checksum
 * over the PDU built so far.
 *
 * Returns the skb or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	/* Payload length excludes the basic header itself */
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything written so far */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1047 
/* Pack and transmit the supervisory frame described by @control.
 *
 * Also updates related channel state: piggy-backs a pending F-bit
 * (unless this is a poll), tracks whether an RNR is outstanding, and
 * records the acknowledged sequence number -- clearing the ack timer
 * -- for anything but SREJ.  Nothing is sent while an AMP channel
 * move is in progress or if @control is not an S-frame.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A queued F-bit rides along unless this frame is a poll */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	/* Allocation failure in pdu creation silently drops the frame */
	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1088 
1089 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1090 {
1091 	struct l2cap_ctrl control;
1092 
1093 	BT_DBG("chan %p, poll %d", chan, poll);
1094 
1095 	memset(&control, 0, sizeof(control));
1096 	control.sframe = 1;
1097 	control.poll = poll;
1098 
1099 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1100 		control.super = L2CAP_SUPER_RNR;
1101 	else
1102 		control.super = L2CAP_SUPER_RR;
1103 
1104 	control.reqseq = chan->buffer_seq;
1105 	l2cap_send_sframe(chan, &control);
1106 }
1107 
1108 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1109 {
1110 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1111 		return true;
1112 
1113 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1114 }
1115 
1116 static bool __amp_capable(struct l2cap_chan *chan)
1117 {
1118 	struct l2cap_conn *conn = chan->conn;
1119 	struct hci_dev *hdev;
1120 	bool amp_available = false;
1121 
1122 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1123 		return false;
1124 
1125 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1126 		return false;
1127 
1128 	read_lock(&hci_dev_list_lock);
1129 	list_for_each_entry(hdev, &hci_dev_list, list) {
1130 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1131 		    test_bit(HCI_UP, &hdev->flags)) {
1132 			amp_available = true;
1133 			break;
1134 		}
1135 	}
1136 	read_unlock(&hci_dev_list_lock);
1137 
1138 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1139 		return amp_available;
1140 
1141 	return false;
1142 }
1143 
/* Validate the Extended Flow Specification parameters of @chan.
 * Currently a stub that accepts any parameters; always returns true.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1149 
/* Send an L2CAP Connection Request for @chan.
 *
 * Allocates a fresh command identifier, stores it in chan->ident so
 * the response can be matched, and marks the connect as pending.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1164 
1165 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1166 {
1167 	struct l2cap_create_chan_req req;
1168 	req.scid = cpu_to_le16(chan->scid);
1169 	req.psm  = chan->psm;
1170 	req.amp_id = amp_id;
1171 
1172 	chan->ident = l2cap_get_ident(chan->conn);
1173 
1174 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1175 		       sizeof(req), &req);
1176 }
1177 
/* Prepare an ERTM channel for a move between controllers.
 *
 * Stops all ERTM timers, resets retry accounting on already-sent
 * frames and puts the transmit/receive state machines into the move
 * states.  Non-ERTM channels need no preparation.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Reset the retry count on every frame that has been sent at
	 * least once; the first never-sent frame (retries == 0) ends
	 * the walk.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmission until the move has completed */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1212 
1213 static void l2cap_move_done(struct l2cap_chan *chan)
1214 {
1215 	u8 move_role = chan->move_role;
1216 	BT_DBG("chan %p", chan);
1217 
1218 	chan->move_state = L2CAP_MOVE_STABLE;
1219 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1220 
1221 	if (chan->mode != L2CAP_MODE_ERTM)
1222 		return;
1223 
1224 	switch (move_role) {
1225 	case L2CAP_MOVE_ROLE_INITIATOR:
1226 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1227 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1228 		break;
1229 	case L2CAP_MOVE_ROLE_RESPONDER:
1230 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1231 		break;
1232 	}
1233 }
1234 
/* Transition @chan to BT_CONNECTED and notify its owner via the
 * ready callback.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	/* With no transmit credits an LE flow control channel cannot
	 * send yet, so start it out suspended.
	 */
	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1248 
/* Send an LE credit based connection request for @chan.
 *
 * FLAG_LE_CONN_REQ_SENT guards against sending the request twice; the
 * allocated command identifier is kept in chan->ident so the response
 * can be matched.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1268 
1269 static void l2cap_le_start(struct l2cap_chan *chan)
1270 {
1271 	struct l2cap_conn *conn = chan->conn;
1272 
1273 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1274 		return;
1275 
1276 	if (!chan->psm) {
1277 		l2cap_chan_ready(chan);
1278 		return;
1279 	}
1280 
1281 	if (chan->state == BT_CONNECT)
1282 		l2cap_le_connect(chan);
1283 }
1284 
1285 static void l2cap_start_connection(struct l2cap_chan *chan)
1286 {
1287 	if (__amp_capable(chan)) {
1288 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1289 		a2mp_discover_amp(chan);
1290 	} else if (chan->conn->hcon->type == LE_LINK) {
1291 		l2cap_le_start(chan);
1292 	} else {
1293 		l2cap_send_conn_req(chan);
1294 	}
1295 }
1296 
/* Request the remote's feature mask, at most once per connection.
 *
 * Arms the info timer so the exchange cannot stall connection setup
 * forever if the peer never answers.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	/* Only one feature mask request per connection */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1314 
/* Drive connection establishment of @chan as far as currently
 * possible.
 *
 * LE links take their own path.  On BR/EDR the feature mask exchange
 * must be started and completed first; afterwards the connect request
 * is sent once security is satisfied and no connect is already
 * pending.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* Start the feature mask exchange if it has not been sent yet */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	/* Wait until the feature mask exchange has completed */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	/* l2cap_chan_check_security() may kick off authentication as a
	 * side effect; the connect proceeds once security is in place.
	 */
	if (l2cap_chan_check_security(chan, true) &&
	    __l2cap_no_conn_pending(chan))
		l2cap_start_connection(chan);
}
1336 
1337 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1338 {
1339 	u32 local_feat_mask = l2cap_feat_mask;
1340 	if (!disable_ertm)
1341 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1342 
1343 	switch (mode) {
1344 	case L2CAP_MODE_ERTM:
1345 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1346 	case L2CAP_MODE_STREAMING:
1347 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1348 	default:
1349 		return 0x00;
1350 	}
1351 }
1352 
/* Tear down @chan by sending a Disconnection Request (or, for the
 * A2MP channel which is never disconnected over the air, by just
 * changing state).  @err is reported to the channel owner through the
 * state change.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* A connected ERTM channel has timers running; stop them */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1379 
1380 /* ---- L2CAP connections ---- */
/* Advance every channel on @conn, typically after the feature mask
 * exchange finished.
 *
 * Outgoing channels (BT_CONNECT) get their connect request sent,
 * incoming ones (BT_CONNECT2) are answered, and connectionless
 * channels are marked ready.  Runs under conn->chan_lock; each
 * channel is additionally locked while it is handled.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close channels that insist on a mode this pair
			 * of devices cannot support.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let the channel owner accept or
					 * reject the connection.
					 */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only a successful response is followed by the
			 * first configure request, and only once.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1457 
/* Post-connect handling specific to LE links: elevate security for
 * outgoing pairing and fix up out-of-range connection parameters.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE slave connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1490 
/* The underlying link came up: start or complete setup of all
 * channels already attached to @conn.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* Kick off the feature mask exchange on BR/EDR links */
	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* The A2MP channel is managed separately */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless channels only require the
			 * feature exchange to have finished.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Kick the worker that drains the pending_rx queue */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1531 
/* Notify sockets that we cannot guarantee reliability anymore */
1533 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1534 {
1535 	struct l2cap_chan *chan;
1536 
1537 	BT_DBG("conn %p", conn);
1538 
1539 	mutex_lock(&conn->chan_lock);
1540 
1541 	list_for_each_entry(chan, &conn->chan_l, list) {
1542 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1543 			l2cap_chan_set_err(chan, err);
1544 	}
1545 
1546 	mutex_unlock(&conn->chan_lock);
1547 }
1548 
/* Info request timed out: treat the feature mask exchange as done so
 * that channel setup can proceed without the remote's feature mask.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1559 
1560 /*
1561  * l2cap_user
1562  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1563  * callback is called during registration. The ->remove callback is called
1564  * during unregistration.
1565  * An l2cap_user object can either be explicitly unregistered or when the
1566  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1567  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1568  * External modules must own a reference to the l2cap_conn object if they intend
1569  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1570  * any time if they don't.
1571  */
1572 
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* A non-NULL next/prev means this user is already registered */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
1609 EXPORT_SYMBOL(l2cap_register_user);
1610 
1611 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1612 {
1613 	struct hci_dev *hdev = conn->hcon->hdev;
1614 
1615 	hci_dev_lock(hdev);
1616 
1617 	if (!user->list.next || !user->list.prev)
1618 		goto out_unlock;
1619 
1620 	list_del(&user->list);
1621 	user->list.next = NULL;
1622 	user->list.prev = NULL;
1623 	user->remove(conn, user);
1624 
1625 out_unlock:
1626 	hci_dev_unlock(hdev);
1627 }
1628 EXPORT_SYMBOL(l2cap_unregister_user);
1629 
/* Unregister and notify every remaining l2cap_user on @conn.
 *
 * The list head is re-read on every iteration, so the loop stays
 * correct even if user->remove() mutates the users list.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del(&user->list);
		/* Mark the entry unlinked; l2cap_register_user() and
		 * l2cap_unregister_user() test next/prev for NULL.
		 */
		user->list.next = NULL;
		user->list.prev = NULL;
		user->remove(conn, user);
	}
}
1642 
/* Tear down the L2CAP layer of @hcon, reporting @err to every channel.
 *
 * Cancels pending work, kills all channels under conn->chan_lock and
 * finally drops the connection reference.  Safe to call when no
 * l2cap_data is attached.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so it survives until after ops->close() */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1698 
/* Refcount release callback: drop the hci_conn reference and free the
 * l2cap_conn itself.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1706 
/* Take a reference on @conn and return it, for call chaining */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
1712 EXPORT_SYMBOL(l2cap_conn_get);
1713 
/* Drop a reference on @conn; frees it via l2cap_conn_free() on the
 * last put.
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
1718 EXPORT_SYMBOL(l2cap_conn_put);
1719 
1720 /* ---- Socket interface ---- */
1721 
1722 /* Find socket with psm and source / destination bdaddr.
1723  * Returns closest match.
1724  */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* The channel's source address type must match the link */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Return with a reference held; the caller
				 * must drop it with l2cap_chan_put().
				 */
				l2cap_chan_hold(c);
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		l2cap_chan_hold(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1773 
1774 static void l2cap_monitor_timeout(struct work_struct *work)
1775 {
1776 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1777 					       monitor_timer.work);
1778 
1779 	BT_DBG("chan %p", chan);
1780 
1781 	l2cap_chan_lock(chan);
1782 
1783 	if (!chan->conn) {
1784 		l2cap_chan_unlock(chan);
1785 		l2cap_chan_put(chan);
1786 		return;
1787 	}
1788 
1789 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1790 
1791 	l2cap_chan_unlock(chan);
1792 	l2cap_chan_put(chan);
1793 }
1794 
1795 static void l2cap_retrans_timeout(struct work_struct *work)
1796 {
1797 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1798 					       retrans_timer.work);
1799 
1800 	BT_DBG("chan %p", chan);
1801 
1802 	l2cap_chan_lock(chan);
1803 
1804 	if (!chan->conn) {
1805 		l2cap_chan_unlock(chan);
1806 		l2cap_chan_put(chan);
1807 		return;
1808 	}
1809 
1810 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1811 	l2cap_chan_unlock(chan);
1812 	l2cap_chan_put(chan);
1813 }
1814 
/* Transmit queued frames in Streaming Mode.
 *
 * Appends @skbs to the channel's tx queue and sends everything queued
 * immediately: streaming mode has no retransmission, so frames are
 * numbered, checksummed (when FCS is enabled) and handed straight to
 * HCI.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* No traffic while the channel is moving between controllers */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming mode frames never acknowledge anything */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1853 
/* Transmit new I-frames in ERTM mode while the channel is connected,
 * the remote is not busy and the transmit window allows it.
 *
 * Each transmitted frame is cloned so the original stays queued for
 * possible retransmission.  Returns the number of frames sent, or
 * -ENOTCONN when the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* A pending F-bit is carried on the next I-frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame acknowledges everything received so far */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1923 
/* Retransmit every frame queued on chan->retrans_list.
 *
 * Frames past the configured max_tx retry limit cause the channel to
 * be disconnected.  Cloned frames are copied before their headers are
 * rewritten, since clones share read-only data.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		/* Work on a local copy of the control block; the updated
		 * fields are written into the outgoing frame below.
		 */
		control = bt_cb(skb)->control;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2004 
/* Queue the single frame @control->reqseq for retransmission and
 * send it.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2013 
/* Retransmit all unacked frames starting at @control->reqseq.
 *
 * Rebuilds the retransmission list from the tx queue, beginning at
 * the requested sequence number and stopping at tx_send_head (frames
 * from there on were never sent).
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll demands the F-bit be set on the response */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip frames the remote has already acknowledged */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2047 
/* Acknowledge received I-frames: either by piggybacking on outgoing
 * traffic, by an explicit RR/RNR S-frame once enough frames are
 * outstanding, or by (re)arming the ack timer otherwise.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Local side cannot receive: tell the remote via RNR */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Otherwise ack later, when the ack timer fires */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2097 
/* Copy @len bytes of user data from @msg into @skb.
 *
 * The first @count bytes fill the skb's linear area; anything left is
 * copied into continuation skbs (no L2CAP header, at most conn->mtu
 * bytes each) chained on skb's frag_list.  Returns the number of
 * bytes copied, -EFAULT on a failed copy, or the allocator's error.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (copy_from_iter(skb_put(*frag, count), count,
				   &msg->msg_iter) != count)
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Keep the parent skb's length accounting in sync */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2141 
/* Build a connectionless PDU: L2CAP header, 2-byte PSM, then the
 * payload copied from @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Only the first fragment must leave room for the header */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2173 
2174 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2175 					      struct msghdr *msg, size_t len)
2176 {
2177 	struct l2cap_conn *conn = chan->conn;
2178 	struct sk_buff *skb;
2179 	int err, count;
2180 	struct l2cap_hdr *lh;
2181 
2182 	BT_DBG("chan %p len %zu", chan, len);
2183 
2184 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2185 
2186 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2187 				   msg->msg_flags & MSG_DONTWAIT);
2188 	if (IS_ERR(skb))
2189 		return skb;
2190 
2191 	/* Create L2CAP header */
2192 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2193 	lh->cid = cpu_to_le16(chan->dcid);
2194 	lh->len = cpu_to_le16(len);
2195 
2196 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2197 	if (unlikely(err < 0)) {
2198 		kfree_skb(skb);
2199 		return ERR_PTR(err);
2200 	}
2201 	return skb;
2202 }
2203 
/* Build an ERTM/streaming I-frame PDU.
 *
 * Reserves room for the enhanced or extended control field (filled in
 * later by the caller), an optional SDU length (@sdulen != 0 for the
 * first segment of a segmented SDU) and, when enabled, the FCS.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2257 
/* Segment an outgoing SDU into ERTM/streaming I-frame PDUs and queue
 * them on @seg_queue, tagging each with the proper SAR value.
 * Returns 0 on success or a negative error; on error @seg_queue has
 * been purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SDU-length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First fragment carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the START fragment carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			/* Next iteration emits the final fragment */
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2324 
/* Build one LE credit-based flow-control PDU carrying @len bytes of
 * @msg.  A non-zero @sdulen marks the first PDU of an SDU and is
 * written after the basic header.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	/* First PDU of an SDU also carries the SDU length */
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	/* Payload bytes that fit in the linear part of the skb */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* Length field includes the SDU-length field when present */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2367 
/* Segment an outgoing SDU into LE flow-control PDUs and queue them on
 * @seg_queue.  The first PDU carries the SDU length; later PDUs gain
 * those bytes back as payload space.  Returns 0 or a negative error;
 * on error @seg_queue has been purged.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	/* Reserve room for the SDU-length field in the first PDU */
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Only the first PDU carries the SDU length;
			 * subsequent PDUs may use the freed bytes.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2403 
/* Send an SDU on @chan according to its mode (connectionless, LE
 * flow control, basic, ERTM or streaming).
 *
 * Returns the number of bytes accepted or a negative errno.  The skb
 * allocation callbacks may release and reacquire the channel lock, so
 * the channel state is rechecked after each allocation (see the
 * inline comments below).
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Segmentation may have dropped the lock; recheck state.
		 * (If segmentation failed, seg_queue is already empty and
		 * the purge below is a no-op.)
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* One credit is consumed per PDU sent; stop when credits
		 * run out and leave the remainder queued on tx_q.
		 */
		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		/* NOTE(review): message says "state" but prints chan->mode */
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2536 
/* Send SREJ S-frames for every sequence number from expected_tx_seq up
 * to (but not including) @txseq that is not already buffered in the
 * SREJ queue, then advance expected_tx_seq past @txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Frames already held in srej_q need no retransmission */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			/* Track the outstanding SREJ so the reply can be
			 * matched up later.
			 */
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2559 
2560 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2561 {
2562 	struct l2cap_ctrl control;
2563 
2564 	BT_DBG("chan %p", chan);
2565 
2566 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2567 		return;
2568 
2569 	memset(&control, 0, sizeof(control));
2570 	control.sframe = 1;
2571 	control.super = L2CAP_SUPER_SREJ;
2572 	control.reqseq = chan->srej_list.tail;
2573 	l2cap_send_sframe(chan, &control);
2574 }
2575 
/* Walk the SREJ list once, re-sending an SREJ for each outstanding
 * sequence number and re-appending it, stopping early if @txseq is
 * reached or the list empties.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		/* Pop-and-reappend rotates the list; the loop ends when the
		 * original head comes back around.
		 */
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2601 
/* Process an acknowledgement up to @reqseq: free every frame on the
 * transmit queue whose sequence number is now acknowledged and stop
 * the retransmission timer once nothing remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack was already processed */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		/* The frame may have already been removed (e.g. freed on
		 * an earlier ack path), so look it up first.
		 */
		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	/* All frames acknowledged: no retransmission pending */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2633 
/* Abandon SREJ-based recovery: rewind expected_tx_seq to the last
 * in-order point, drop all SREJ bookkeeping and any out-of-order
 * frames held back, and return the receiver to the RECV state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	/* Frames past buffer_seq will have to be received again */
	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2643 
/* TX state machine handler for the XMIT state: queue and send new
 * data, handle local-busy transitions, process acks and polls.
 * Events that require a poll response move the channel to WAIT_F.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Remember where unsent data begins before splicing */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* An RNR was sent; poll the peer with RR(P=1) and
			 * wait for the final response.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer and await F-bit */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2715 
2716 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2717 				  struct l2cap_ctrl *control,
2718 				  struct sk_buff_head *skbs, u8 event)
2719 {
2720 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2721 	       event);
2722 
2723 	switch (event) {
2724 	case L2CAP_EV_DATA_REQUEST:
2725 		if (chan->tx_send_head == NULL)
2726 			chan->tx_send_head = skb_peek(skbs);
2727 		/* Queue data, but don't send. */
2728 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2729 		break;
2730 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2731 		BT_DBG("Enter LOCAL_BUSY");
2732 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2733 
2734 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2735 			/* The SREJ_SENT state must be aborted if we are to
2736 			 * enter the LOCAL_BUSY state.
2737 			 */
2738 			l2cap_abort_rx_srej_sent(chan);
2739 		}
2740 
2741 		l2cap_send_ack(chan);
2742 
2743 		break;
2744 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2745 		BT_DBG("Exit LOCAL_BUSY");
2746 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2747 
2748 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2749 			struct l2cap_ctrl local_control;
2750 			memset(&local_control, 0, sizeof(local_control));
2751 			local_control.sframe = 1;
2752 			local_control.super = L2CAP_SUPER_RR;
2753 			local_control.poll = 1;
2754 			local_control.reqseq = chan->buffer_seq;
2755 			l2cap_send_sframe(chan, &local_control);
2756 
2757 			chan->retry_count = 1;
2758 			__set_monitor_timer(chan);
2759 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2760 		}
2761 		break;
2762 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2763 		l2cap_process_reqseq(chan, control->reqseq);
2764 
2765 		/* Fall through */
2766 
2767 	case L2CAP_EV_RECV_FBIT:
2768 		if (control && control->final) {
2769 			__clear_monitor_timer(chan);
2770 			if (chan->unacked_frames > 0)
2771 				__set_retrans_timer(chan);
2772 			chan->retry_count = 0;
2773 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2774 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2775 		}
2776 		break;
2777 	case L2CAP_EV_EXPLICIT_POLL:
2778 		/* Ignore */
2779 		break;
2780 	case L2CAP_EV_MONITOR_TO:
2781 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2782 			l2cap_send_rr_or_rnr(chan, 1);
2783 			__set_monitor_timer(chan);
2784 			chan->retry_count++;
2785 		} else {
2786 			l2cap_send_disconn_req(chan, ECONNABORTED);
2787 		}
2788 		break;
2789 	default:
2790 		break;
2791 	}
2792 }
2793 
2794 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2795 		     struct sk_buff_head *skbs, u8 event)
2796 {
2797 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2798 	       chan, control, skbs, event, chan->tx_state);
2799 
2800 	switch (chan->tx_state) {
2801 	case L2CAP_TX_STATE_XMIT:
2802 		l2cap_tx_state_xmit(chan, control, skbs, event);
2803 		break;
2804 	case L2CAP_TX_STATE_WAIT_F:
2805 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2806 		break;
2807 	default:
2808 		/* Ignore event */
2809 		break;
2810 	}
2811 }
2812 
/* Feed the reqseq and F-bit of a received frame into the TX state
 * machine so acknowledged frames can be released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2819 
/* Feed only the F-bit of a received frame into the TX state machine
 * (no reqseq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2826 
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* chan_lock protects the connection's channel list during the walk */
	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->chan == chan)
			continue;

		/* Each raw channel gets its own clone; a failed clone just
		 * skips that channel (best effort).
		 */
		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		/* recv takes ownership on success; free the clone otherwise */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
2854 
/* ---- L2CAP signalling commands ---- */

/* Build a signalling command skb: basic L2CAP header (signalling CID),
 * command header, and @dlen bytes of @data.  Payload beyond the
 * connection MTU is carried in continuation fragments chained on
 * frag_list.  Returns NULL on allocation failure or undersized MTU.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* The first fragment must at least hold both headers */
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE and BR/EDR use different fixed signalling channels */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the first fragment with payload */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb along with any fragments already chained */
	kfree_skb(skb);
	return NULL;
}
2921 
/* Pull the next configuration option from *ptr, advancing *ptr past it.
 * Option type and declared length are returned in *type/*olen.  Small
 * values (1/2/4 bytes) are returned by value in *val; anything else is
 * returned as a pointer to the in-buffer payload cast into *val.
 * Returns the total size consumed.
 *
 * NOTE(review): opt->len comes from the peer and is not checked here
 * against the remaining buffer; the callers bound the walk only by the
 * overall request length.  Verify that a crafted option length cannot
 * walk *ptr past the buffer.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Larger options: hand back a pointer into the buffer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2955 
/* Append a configuration option at *ptr and advance *ptr past it.
 * 1/2/4-byte options are stored little-endian from @val; for any other
 * length, @val is interpreted as a pointer to @len bytes to copy.
 *
 * NOTE(review): the caller must have reserved at least
 * L2CAP_CONF_OPT_SIZE + len bytes at *ptr; no bounds check is done
 * here.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2985 
/* Append an Extended Flow Specification option for the channel's mode.
 * ERTM uses the channel's negotiated service parameters; streaming mode
 * uses best-effort defaults.  Other modes emit nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
		break;

	default:
		/* No EFS option for basic/LE modes */
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}
3016 
3017 static void l2cap_ack_timeout(struct work_struct *work)
3018 {
3019 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3020 					       ack_timer.work);
3021 	u16 frames_to_ack;
3022 
3023 	BT_DBG("chan %p", chan);
3024 
3025 	l2cap_chan_lock(chan);
3026 
3027 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3028 				     chan->last_acked_seq);
3029 
3030 	if (frames_to_ack)
3031 		l2cap_send_rr_or_rnr(chan, 0);
3032 
3033 	l2cap_chan_unlock(chan);
3034 	l2cap_chan_put(chan);
3035 }
3036 
/* Reset the channel's sequence/SDU state and, for ERTM mode, set up
 * the timers, SREJ queue and sequence lists.  Returns 0 on success or
 * a negative error from sequence-list allocation.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	/* Common reset for all modes */
	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP move state starts out stable on the BR/EDR controller */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* The remainder is ERTM-only machinery */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	/* Free the first list if the second allocation fails */
	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3081 
3082 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3083 {
3084 	switch (mode) {
3085 	case L2CAP_MODE_STREAMING:
3086 	case L2CAP_MODE_ERTM:
3087 		if (l2cap_mode_supported(mode, remote_feat_mask))
3088 			return mode;
3089 		/* fall through */
3090 	default:
3091 		return L2CAP_MODE_BASIC;
3092 	}
3093 }
3094 
3095 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3096 {
3097 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3098 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3099 }
3100 
3101 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3102 {
3103 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3104 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3105 }
3106 
/* Fill in the RFC option's retransmission and monitor timeouts.  On an
 * AMP link they are derived from the controller's best-effort flush
 * timeout; otherwise the spec defaults are used.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range of the RFC option fields */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3144 
3145 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3146 {
3147 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3148 	    __l2cap_ews_supported(chan->conn)) {
3149 		/* use extended control field */
3150 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3151 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3152 	} else {
3153 		chan->tx_win = min_t(u16, chan->tx_win,
3154 				     L2CAP_DEFAULT_TX_WINDOW);
3155 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3156 	}
3157 	chan->ack_win = chan->tx_win;
3158 }
3159 
/* Build an outgoing Configure Request for @chan into @data.  On the
 * first request the channel mode may be downgraded based on the
 * remote's feature mask; options (MTU, RFC, EFS, EWS, FCS) are then
 * emitted for the selected mode.  Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only on the very first config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* State-2 devices keep the mode they were configured with */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise a non-default receive MTU */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Explicitly request basic mode only when the remote could
		 * otherwise assume ERTM/streaming support.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Cap the PDU size so a full PDU (with extended header,
		 * SDU-length field and FCS) fits in the connection MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* Extended windows carry the full window in an EWS option */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3280 
/* Parse the options of a complete incoming Configure Request stored in
 * chan->conf_req and build the matching Configure Response in @data.
 * Returns the length of the response, or a negative error when the
 * requested configuration must be refused.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		/* Options whose length does not match the spec are
		 * ignored; acting on a truncated option would read
		 * garbage or leave local state out of sync.
		 */
		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			/* Only mark EFS as seen for a valid payload,
			 * otherwise the uninitialized efs struct would
			 * be read further down.
			 */
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's PDU size to what fits in our
			 * ACL MTU after L2CAP headers and FCS.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3494 
3495 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3496 				void *data, u16 *result)
3497 {
3498 	struct l2cap_conf_req *req = data;
3499 	void *ptr = req->data;
3500 	int type, olen;
3501 	unsigned long val;
3502 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3503 	struct l2cap_conf_efs efs;
3504 
3505 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3506 
3507 	while (len >= L2CAP_CONF_OPT_SIZE) {
3508 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3509 
3510 		switch (type) {
3511 		case L2CAP_CONF_MTU:
3512 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3513 				*result = L2CAP_CONF_UNACCEPT;
3514 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3515 			} else
3516 				chan->imtu = val;
3517 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3518 			break;
3519 
3520 		case L2CAP_CONF_FLUSH_TO:
3521 			chan->flush_to = val;
3522 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3523 					   2, chan->flush_to);
3524 			break;
3525 
3526 		case L2CAP_CONF_RFC:
3527 			if (olen == sizeof(rfc))
3528 				memcpy(&rfc, (void *)val, olen);
3529 
3530 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3531 			    rfc.mode != chan->mode)
3532 				return -ECONNREFUSED;
3533 
3534 			chan->fcs = 0;
3535 
3536 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3537 					   sizeof(rfc), (unsigned long) &rfc);
3538 			break;
3539 
3540 		case L2CAP_CONF_EWS:
3541 			chan->ack_win = min_t(u16, val, chan->ack_win);
3542 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3543 					   chan->tx_win);
3544 			break;
3545 
3546 		case L2CAP_CONF_EFS:
3547 			if (olen == sizeof(efs))
3548 				memcpy(&efs, (void *)val, olen);
3549 
3550 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3551 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3552 			    efs.stype != chan->local_stype)
3553 				return -ECONNREFUSED;
3554 
3555 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3556 					   (unsigned long) &efs);
3557 			break;
3558 
3559 		case L2CAP_CONF_FCS:
3560 			if (*result == L2CAP_CONF_PENDING)
3561 				if (val == L2CAP_FCS_NONE)
3562 					set_bit(CONF_RECV_NO_FCS,
3563 						&chan->conf_state);
3564 			break;
3565 		}
3566 	}
3567 
3568 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3569 		return -ECONNREFUSED;
3570 
3571 	chan->mode = rfc.mode;
3572 
3573 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3574 		switch (rfc.mode) {
3575 		case L2CAP_MODE_ERTM:
3576 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3577 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3578 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3579 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3580 				chan->ack_win = min_t(u16, chan->ack_win,
3581 						      rfc.txwin_size);
3582 
3583 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3584 				chan->local_msdu = le16_to_cpu(efs.msdu);
3585 				chan->local_sdu_itime =
3586 					le32_to_cpu(efs.sdu_itime);
3587 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3588 				chan->local_flush_to =
3589 					le32_to_cpu(efs.flush_to);
3590 			}
3591 			break;
3592 
3593 		case L2CAP_MODE_STREAMING:
3594 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3595 		}
3596 	}
3597 
3598 	req->dcid   = cpu_to_le16(chan->dcid);
3599 	req->flags  = cpu_to_le16(0);
3600 
3601 	return ptr - data;
3602 }
3603 
3604 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3605 				u16 result, u16 flags)
3606 {
3607 	struct l2cap_conf_rsp *rsp = data;
3608 	void *ptr = rsp->data;
3609 
3610 	BT_DBG("chan %p", chan);
3611 
3612 	rsp->scid   = cpu_to_le16(chan->dcid);
3613 	rsp->result = cpu_to_le16(result);
3614 	rsp->flags  = cpu_to_le16(flags);
3615 
3616 	return ptr - data;
3617 }
3618 
3619 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3620 {
3621 	struct l2cap_le_conn_rsp rsp;
3622 	struct l2cap_conn *conn = chan->conn;
3623 
3624 	BT_DBG("chan %p", chan);
3625 
3626 	rsp.dcid    = cpu_to_le16(chan->scid);
3627 	rsp.mtu     = cpu_to_le16(chan->imtu);
3628 	rsp.mps     = cpu_to_le16(chan->mps);
3629 	rsp.credits = cpu_to_le16(chan->rx_credits);
3630 	rsp.result  = cpu_to_le16(L2CAP_CR_SUCCESS);
3631 
3632 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3633 		       &rsp);
3634 }
3635 
3636 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3637 {
3638 	struct l2cap_conn_rsp rsp;
3639 	struct l2cap_conn *conn = chan->conn;
3640 	u8 buf[128];
3641 	u8 rsp_code;
3642 
3643 	rsp.scid   = cpu_to_le16(chan->dcid);
3644 	rsp.dcid   = cpu_to_le16(chan->scid);
3645 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3646 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3647 
3648 	if (chan->hs_hcon)
3649 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3650 	else
3651 		rsp_code = L2CAP_CONN_RSP;
3652 
3653 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3654 
3655 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3656 
3657 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3658 		return;
3659 
3660 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3661 		       l2cap_build_conf_req(chan, buf), buf);
3662 	chan->num_conf_req++;
3663 }
3664 
3665 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3666 {
3667 	int type, olen;
3668 	unsigned long val;
3669 	/* Use sane default values in case a misbehaving remote device
3670 	 * did not send an RFC or extended window size option.
3671 	 */
3672 	u16 txwin_ext = chan->ack_win;
3673 	struct l2cap_conf_rfc rfc = {
3674 		.mode = chan->mode,
3675 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3676 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3677 		.max_pdu_size = cpu_to_le16(chan->imtu),
3678 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3679 	};
3680 
3681 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3682 
3683 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3684 		return;
3685 
3686 	while (len >= L2CAP_CONF_OPT_SIZE) {
3687 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3688 
3689 		switch (type) {
3690 		case L2CAP_CONF_RFC:
3691 			if (olen == sizeof(rfc))
3692 				memcpy(&rfc, (void *)val, olen);
3693 			break;
3694 		case L2CAP_CONF_EWS:
3695 			txwin_ext = val;
3696 			break;
3697 		}
3698 	}
3699 
3700 	switch (rfc.mode) {
3701 	case L2CAP_MODE_ERTM:
3702 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3703 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3704 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
3705 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3706 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3707 		else
3708 			chan->ack_win = min_t(u16, chan->ack_win,
3709 					      rfc.txwin_size);
3710 		break;
3711 	case L2CAP_MODE_STREAMING:
3712 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3713 	}
3714 }
3715 
3716 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3717 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3718 				    u8 *data)
3719 {
3720 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3721 
3722 	if (cmd_len < sizeof(*rej))
3723 		return -EPROTO;
3724 
3725 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3726 		return 0;
3727 
3728 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3729 	    cmd->ident == conn->info_ident) {
3730 		cancel_delayed_work(&conn->info_timer);
3731 
3732 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3733 		conn->info_ident = 0;
3734 
3735 		l2cap_conn_start(conn);
3736 	}
3737 
3738 	return 0;
3739 }
3740 
/* Handle an incoming Connect Request (also reused for AMP Create
 * Channel Request via @rsp_code/@amp_id).  Looks up a channel
 * listening on the requested PSM, runs security checks, creates the
 * child channel and sends the response with @rsp_code.  Returns the
 * new channel, or NULL when the request was rejected.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Remember the ident so a deferred response can be matched later */
	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Let the channel owner authorise first */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still outstanding */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange not finished yet: answer pending */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Start the feature mask exchange that the pending
		 * result is waiting for.
		 */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* Immediately start configuration for successful connections */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3875 
3876 static int l2cap_connect_req(struct l2cap_conn *conn,
3877 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3878 {
3879 	struct hci_dev *hdev = conn->hcon->hdev;
3880 	struct hci_conn *hcon = conn->hcon;
3881 
3882 	if (cmd_len < sizeof(struct l2cap_conn_req))
3883 		return -EPROTO;
3884 
3885 	hci_dev_lock(hdev);
3886 	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3887 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3888 		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3889 	hci_dev_unlock(hdev);
3890 
3891 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3892 	return 0;
3893 }
3894 
/* Handle a Connection Response (or AMP Create Channel Response) for a
 * channel we initiated.  On success the channel moves to BT_CONFIG and
 * a Configure Request is sent; on failure it is deleted.  Returns 0 or
 * a negative error when no matching channel exists.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		/* No source CID in the response: match by the ident of
		 * our original request instead.
		 */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Send the Configure Request only once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result code means the peer refused */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3967 
3968 static inline void set_default_fcs(struct l2cap_chan *chan)
3969 {
3970 	/* FCS is enabled only in ERTM or streaming mode, if one or both
3971 	 * sides request it.
3972 	 */
3973 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3974 		chan->fcs = L2CAP_FCS_NONE;
3975 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3976 		chan->fcs = L2CAP_FCS_CRC16;
3977 }
3978 
3979 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3980 				    u8 ident, u16 flags)
3981 {
3982 	struct l2cap_conn *conn = chan->conn;
3983 
3984 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3985 	       flags);
3986 
3987 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3988 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3989 
3990 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3991 		       l2cap_build_conf_rsp(chan, data,
3992 					    L2CAP_CONF_SUCCESS, flags), data);
3993 }
3994 
3995 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3996 				   u16 scid, u16 dcid)
3997 {
3998 	struct l2cap_cmd_rej_cid rej;
3999 
4000 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4001 	rej.scid = __cpu_to_le16(scid);
4002 	rej.dcid = __cpu_to_le16(dcid);
4003 
4004 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4005 }
4006 
/* Handle an incoming Configure Request for one of our channels.
 * Options may be split over several requests (continuation flag set);
 * they are accumulated in chan->conf_req until complete, then parsed
 * and answered.  Returns 0 or a negative protocol error.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only valid while connecting/configuring */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		/* Unacceptable configuration: tear the channel down */
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: the channel can go live */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Send our own Configure Request if we have not done so yet */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4115 
/* Handle a Configure Response from the peer.  Depending on the result
 * the channel is brought up, reconfigured with adjusted options, or
 * torn down.  Returns 0 or a negative error.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* BR/EDR channels respond right away; AMP
			 * channels wait for the logical link first.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			/* Make sure the adjusted request fits our buffer */
			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through - too many rejected responses, give up */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: the channel can go live */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4227 
/* Handle a Disconnect Request: acknowledge it with a Disconnect
 * Response and tear the channel down.  Returns 0 or a negative
 * protocol error.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Answer before deleting so the peer sees a reply for its CIDs */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Hold a reference so the channel stays valid for the close()
	 * callback after it has been removed from the connection.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4274 
/* Handle a Disconnect Response for a disconnect we initiated: the
 * peer has acknowledged, so delete the channel.  Returns 0.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold a reference so the channel stays valid for the close()
	 * callback after it has been removed from the connection.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4313 
/* Answer an Information Request.  Feature mask and fixed channel
 * queries get a full response; any other type is answered with
 * "not supported".  Returns 0 or a negative protocol error.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* Response header plus a 32-bit feature mask */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		/* Extended flow spec / window only with A2MP support */
		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* Response header plus an 8-octet fixed channel bitmap */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		/* Clear the remaining octets of the bitmap */
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4364 
/* Handle an Information Response to our outstanding request.  After a
 * feature mask response a fixed channel query may follow; once the
 * exchange is done, pending connections are started.  Returns 0 or a
 * negative protocol error.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer cannot answer: finish the exchange with what we
		 * have and start pending connections anyway.
		 */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed channel query */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4427 
/* Handle an L2CAP Create Channel Request (AMP extension).
 *
 * amp_id 0 (AMP_ID_BREDR) requests a plain BR/EDR channel and is
 * delegated directly to l2cap_connect().  Any other id must name a
 * powered-up local AMP controller; the new channel is then associated
 * with the existing high-speed hci_conn so a logical link can be
 * brought up.
 *
 * Returns 0 for any handled request (including error responses sent
 * back to the peer), -EPROTO on a malformed command and -EINVAL if
 * A2MP is not enabled locally.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP physical link to this peer must already exist;
		 * without it the requested CID pair is not usable.
		 */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* FCS is not used on AMP links */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	/* Unknown or unusable AMP controller: reject with BAD_AMP */
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4504 
4505 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4506 {
4507 	struct l2cap_move_chan_req req;
4508 	u8 ident;
4509 
4510 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4511 
4512 	ident = l2cap_get_ident(chan->conn);
4513 	chan->ident = ident;
4514 
4515 	req.icid = cpu_to_le16(chan->scid);
4516 	req.dest_amp_id = dest_amp_id;
4517 
4518 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4519 		       &req);
4520 
4521 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4522 }
4523 
4524 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4525 {
4526 	struct l2cap_move_chan_rsp rsp;
4527 
4528 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4529 
4530 	rsp.icid = cpu_to_le16(chan->dcid);
4531 	rsp.result = cpu_to_le16(result);
4532 
4533 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4534 		       sizeof(rsp), &rsp);
4535 }
4536 
4537 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4538 {
4539 	struct l2cap_move_chan_cfm cfm;
4540 
4541 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4542 
4543 	chan->ident = l2cap_get_ident(chan->conn);
4544 
4545 	cfm.icid = cpu_to_le16(chan->scid);
4546 	cfm.result = cpu_to_le16(result);
4547 
4548 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4549 		       sizeof(cfm), &cfm);
4550 
4551 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4552 }
4553 
4554 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4555 {
4556 	struct l2cap_move_chan_cfm cfm;
4557 
4558 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4559 
4560 	cfm.icid = cpu_to_le16(icid);
4561 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4562 
4563 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4564 		       sizeof(cfm), &cfm);
4565 }
4566 
4567 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4568 					 u16 icid)
4569 {
4570 	struct l2cap_move_chan_cfm_rsp rsp;
4571 
4572 	BT_DBG("icid 0x%4.4x", icid);
4573 
4574 	rsp.icid = cpu_to_le16(icid);
4575 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4576 }
4577 
/* Drop the channel's references to the high-speed (AMP) logical link.
 * The actual controller-side teardown is not implemented yet.
 */
static void __release_logical_link(struct l2cap_chan *chan)
{
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
}
4585 
/* Handle failure to bring up an AMP logical link.
 *
 * If the channel never reached BT_CONNECTED the failure aborts channel
 * creation, so the channel is simply disconnected.  Otherwise the link
 * belonged to a channel move and the move is unwound according to our
 * role in it.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		/* Tell the initiator the move cannot be completed */
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4616 
/* Complete AMP channel creation once the logical link is up: attach
 * the hci_chan to the channel, send the pending EFS configure
 * response, and - if the peer's configuration side is already done -
 * finish ERTM initialization and mark the channel ready.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* The configure response was deferred until the link came up */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4639 
/* Advance the channel-move state machine after the AMP logical link
 * for a move has come up.  The next step depends on whether we are
 * still waiting for the peer's response and on our role in the move.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		/* Defer the confirm/response while local receive is busy */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4673 
/* Call with chan locked */
/* Logical link setup completion callback from the AMP code.
 *
 * A non-zero status aborts the pending create or move and drops the
 * logical link references.  On success, a channel that has not yet
 * reached BT_CONNECTED finishes AMP channel creation (unless it sits
 * on BR/EDR, where no logical link is used), while a connected channel
 * continues its move sequence.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4694 
4695 void l2cap_move_start(struct l2cap_chan *chan)
4696 {
4697 	BT_DBG("chan %p", chan);
4698 
4699 	if (chan->local_amp_id == AMP_ID_BREDR) {
4700 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4701 			return;
4702 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4703 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4704 		/* Placeholder - start physical link setup */
4705 	} else {
4706 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4707 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4708 		chan->move_id = 0;
4709 		l2cap_move_setup(chan);
4710 		l2cap_send_move_chan_req(chan, 0);
4711 	}
4712 }
4713 
/* Continue channel creation once the AMP physical link attempt has a
 * result.
 *
 * For an outgoing channel (BT_CONNECT) a successful physical link
 * leads to a Create Channel Request over the AMP; on failure we fall
 * back to a plain BR/EDR Connect Request.  For an incoming channel the
 * deferred Create Channel Response is finally sent, and on success the
 * configuration phase is started.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* FCS is not used on AMP links */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Kick off configuration of the new channel */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4765 
/* Initiator side: the physical link towards the target controller is
 * available, so quiesce the channel (l2cap_move_setup) and ask the
 * peer to move it.
 */
static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
				   u8 remote_amp_id)
{
	l2cap_move_setup(chan);
	chan->move_id = local_amp_id;
	chan->move_state = L2CAP_MOVE_WAIT_RSP;

	l2cap_send_move_chan_req(chan, remote_amp_id);
}
4775 
/* Responder side: the physical link is ready; answer the pending Move
 * Channel Request based on the state of the logical link.
 *
 * NOTE(review): hchan is currently always NULL (lookup is still a
 * placeholder), so only the "not allowed" branch can execute until
 * logical link retrieval is implemented.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4800 
4801 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4802 {
4803 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4804 		u8 rsp_result;
4805 		if (result == -EINVAL)
4806 			rsp_result = L2CAP_MR_BAD_ID;
4807 		else
4808 			rsp_result = L2CAP_MR_NOT_ALLOWED;
4809 
4810 		l2cap_send_move_chan_rsp(chan, rsp_result);
4811 	}
4812 
4813 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
4814 	chan->move_state = L2CAP_MOVE_STABLE;
4815 
4816 	/* Restart data transmission */
4817 	l2cap_ertm_send(chan);
4818 }
4819 
/* Invoke with locked chan */
/* Physical link setup completion for an AMP create or move.
 *
 * Dispatches to channel creation (channel not yet connected), to move
 * initiation or response (connected channel, by move role), or to move
 * cancellation when the physical link failed.
 *
 * NOTE(review): the BT_DISCONN/BT_CLOSED path releases the channel
 * lock before returning while the other paths leave it held - confirm
 * callers expect this asymmetry.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4853 
/* Handle an incoming Move Channel Request.
 *
 * Verifies the channel is movable (dynamic CID, ERTM or streaming
 * mode, policy permits AMP, destination controller exists and is up),
 * detects move collisions, and either rejects the request or assumes
 * the responder role and starts preparing the destination link.  A
 * Move Channel Response is sent on every path that found a channel.
 *
 * l2cap_get_chan_by_dcid() returns the channel locked; it is unlocked
 * before returning.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* No channel: reply directly, nothing to lock/unlock */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	/* Remember the request ident so the response can reuse it */
	chan->ident = cmd->ident;

	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision.  Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4951 
/* Continue a channel move after a successful or pending Move Channel
 * Response, advancing the initiator state machine: wait for the
 * logical link, wait out local busy, or send the final confirmation.
 *
 * l2cap_get_chan_by_scid() returns the channel locked; it is unlocked
 * before returning.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		/* Unknown icid: best effort unconfirmed confirm */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending result restarts the (extended) move timeout */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5041 
/* Handle a failed Move Channel Response.  On a collision the local
 * side yields and becomes responder; otherwise the move is rolled
 * back.  An unconfirmed Move Channel Confirm is always sent so the
 * peer can finish its side of the sequence.
 *
 * l2cap_get_chan_by_ident() returns the channel locked; it is unlocked
 * before returning.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5070 
5071 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5072 				  struct l2cap_cmd_hdr *cmd,
5073 				  u16 cmd_len, void *data)
5074 {
5075 	struct l2cap_move_chan_rsp *rsp = data;
5076 	u16 icid, result;
5077 
5078 	if (cmd_len != sizeof(*rsp))
5079 		return -EPROTO;
5080 
5081 	icid = le16_to_cpu(rsp->icid);
5082 	result = le16_to_cpu(rsp->result);
5083 
5084 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5085 
5086 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5087 		l2cap_move_continue(conn, icid, result);
5088 	else
5089 		l2cap_move_fail(conn, cmd->ident, icid, result);
5090 
5091 	return 0;
5092 }
5093 
/* Handle an incoming Move Channel Confirm.  If the channel was waiting
 * for it, commit the controller switch (or roll it back on an
 * unconfirmed result) and complete the move.  A confirm response is
 * sent even when the icid is unknown, as required by the spec.
 *
 * l2cap_get_chan_by_dcid() returns the channel locked; it is unlocked
 * before returning.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			chan->local_amp_id = chan->move_id;
			/* Back on BR/EDR: AMP logical link no longer needed */
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5135 
/* Handle a Move Channel Confirm Response, the final message of a move:
 * commit the controller switch, drop an unneeded AMP logical link when
 * back on BR/EDR, and finish the move.
 *
 * l2cap_get_chan_by_scid() returns the channel locked; it is unlocked
 * before returning.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5170 
/* Handle an LE Connection Parameter Update Request (only valid while
 * the local host is master of the connection).  The requested
 * parameters are validated, an accept/reject response is sent, and on
 * accept the controller connection update is issued and the new
 * parameters are reported to the management interface.
 *
 * Returns 0, -EINVAL if we are not master, or -EPROTO for a malformed
 * command.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	/* Range-check the parameters before acting on them */
	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
5220 
/* Handle an LE Credit Based Connection Response.
 *
 * On success the channel is completed with the peer's dcid, MTU, MPS
 * and initial credits.  If the peer demands more security, the local
 * security level is raised and SMP is started so the Connect Request
 * can be retried afterwards; any other result closes the channel.
 *
 * Returns 0, -EPROTO for a malformed command, or -EBADSLT if no
 * channel matches the response ident.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* 23 is the minimum MTU/MPS allowed for LE CoC */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_AUTHENTICATION:
	case L2CAP_CR_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Raise security one step above the current link level */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5300 
/* Dispatch one BR/EDR L2CAP signaling command to its handler.
 * A negative return (from a handler or for an unknown opcode) makes
 * the caller send a Command Reject back to the peer.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the request payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5380 
/* Handle an LE Credit Based Connection Request.
 *
 * Looks up a listening channel for the requested PSM, checks security
 * and duplicate CIDs, creates the new channel and replies with an LE
 * Connect Response carrying our dcid, MTU, MPS and initial credits.
 * With FLAG_DEFER_SETUP the response is deferred to the socket owner
 * and nothing is sent here.
 *
 * Returns 0, or -EPROTO for a malformed command or out-of-range
 * MTU/MPS.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 is the minimum MTU/MPS allowed for LE CoC */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	l2cap_le_flowctl_init(chan);

	/* Populate the new channel from the connection and request */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	/* Deferred setup: the response is sent later by the owner */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5499 
/* Handle an LE Flow Control Credit packet: add the peer-granted
 * credits to the channel and flush as much queued outbound data as the
 * new credit count allows.  A peer granting credits beyond the allowed
 * maximum gets the channel disconnected.
 *
 * l2cap_get_chan_by_dcid() returns the channel locked; it is unlocked
 * before returning.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid	= __le16_to_cpu(pkt->cid);
	credits	= __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* Reject credit grants that would overflow the u16 counter */
	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		return 0;
	}

	chan->tx_credits += credits;

	/* Drain the tx queue while credits are available */
	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
		chan->tx_credits--;
	}

	/* Credits left over: let the owner resume sending */
	if (chan->tx_credits)
		chan->ops->resume(chan);

	l2cap_chan_unlock(chan);

	return 0;
}
5547 
5548 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5549 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5550 				       u8 *data)
5551 {
5552 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5553 	struct l2cap_chan *chan;
5554 
5555 	if (cmd_len < sizeof(*rej))
5556 		return -EPROTO;
5557 
5558 	mutex_lock(&conn->chan_lock);
5559 
5560 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5561 	if (!chan)
5562 		goto done;
5563 
5564 	l2cap_chan_lock(chan);
5565 	l2cap_chan_del(chan, ECONNREFUSED);
5566 	l2cap_chan_unlock(chan);
5567 
5568 done:
5569 	mutex_unlock(&conn->chan_lock);
5570 	return 0;
5571 }
5572 
5573 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5574 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5575 				   u8 *data)
5576 {
5577 	int err = 0;
5578 
5579 	switch (cmd->code) {
5580 	case L2CAP_COMMAND_REJ:
5581 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
5582 		break;
5583 
5584 	case L2CAP_CONN_PARAM_UPDATE_REQ:
5585 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5586 		break;
5587 
5588 	case L2CAP_CONN_PARAM_UPDATE_RSP:
5589 		break;
5590 
5591 	case L2CAP_LE_CONN_RSP:
5592 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5593 		break;
5594 
5595 	case L2CAP_LE_CONN_REQ:
5596 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5597 		break;
5598 
5599 	case L2CAP_LE_CREDITS:
5600 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
5601 		break;
5602 
5603 	case L2CAP_DISCONN_REQ:
5604 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5605 		break;
5606 
5607 	case L2CAP_DISCONN_RSP:
5608 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5609 		break;
5610 
5611 	default:
5612 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5613 		err = -EINVAL;
5614 		break;
5615 	}
5616 
5617 	return err;
5618 }
5619 
5620 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5621 					struct sk_buff *skb)
5622 {
5623 	struct hci_conn *hcon = conn->hcon;
5624 	struct l2cap_cmd_hdr *cmd;
5625 	u16 len;
5626 	int err;
5627 
5628 	if (hcon->type != LE_LINK)
5629 		goto drop;
5630 
5631 	if (skb->len < L2CAP_CMD_HDR_SIZE)
5632 		goto drop;
5633 
5634 	cmd = (void *) skb->data;
5635 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5636 
5637 	len = le16_to_cpu(cmd->len);
5638 
5639 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5640 
5641 	if (len != skb->len || !cmd->ident) {
5642 		BT_DBG("corrupted command");
5643 		goto drop;
5644 	}
5645 
5646 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5647 	if (err) {
5648 		struct l2cap_cmd_rej_unk rej;
5649 
5650 		BT_ERR("Wrong link type (%d)", err);
5651 
5652 		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5653 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5654 			       sizeof(rej), &rej);
5655 	}
5656 
5657 drop:
5658 	kfree_skb(skb);
5659 }
5660 
5661 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5662 				     struct sk_buff *skb)
5663 {
5664 	struct hci_conn *hcon = conn->hcon;
5665 	u8 *data = skb->data;
5666 	int len = skb->len;
5667 	struct l2cap_cmd_hdr cmd;
5668 	int err;
5669 
5670 	l2cap_raw_recv(conn, skb);
5671 
5672 	if (hcon->type != ACL_LINK)
5673 		goto drop;
5674 
5675 	while (len >= L2CAP_CMD_HDR_SIZE) {
5676 		u16 cmd_len;
5677 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5678 		data += L2CAP_CMD_HDR_SIZE;
5679 		len  -= L2CAP_CMD_HDR_SIZE;
5680 
5681 		cmd_len = le16_to_cpu(cmd.len);
5682 
5683 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5684 		       cmd.ident);
5685 
5686 		if (cmd_len > len || !cmd.ident) {
5687 			BT_DBG("corrupted command");
5688 			break;
5689 		}
5690 
5691 		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5692 		if (err) {
5693 			struct l2cap_cmd_rej_unk rej;
5694 
5695 			BT_ERR("Wrong link type (%d)", err);
5696 
5697 			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5698 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5699 				       sizeof(rej), &rej);
5700 		}
5701 
5702 		data += cmd_len;
5703 		len  -= cmd_len;
5704 	}
5705 
5706 drop:
5707 	kfree_skb(skb);
5708 }
5709 
5710 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
5711 {
5712 	u16 our_fcs, rcv_fcs;
5713 	int hdr_size;
5714 
5715 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5716 		hdr_size = L2CAP_EXT_HDR_SIZE;
5717 	else
5718 		hdr_size = L2CAP_ENH_HDR_SIZE;
5719 
5720 	if (chan->fcs == L2CAP_FCS_CRC16) {
5721 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5722 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5723 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5724 
5725 		if (our_fcs != rcv_fcs)
5726 			return -EBADMSG;
5727 	}
5728 	return 0;
5729 }
5730 
/* Answer a poll (P=1) from the peer with a frame carrying F=1: an RNR
 * if we are locally busy, otherwise a pending I-frame, or a plain RR
 * as a last resort.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Peer just left its busy state: restart the retransmission
	 * timer if frames are still awaiting acknowledgment.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	/* CONN_SEND_FBIT is cleared by the send paths above when the
	 * F-bit goes out with a frame; if it is still set here, nothing
	 * carried it yet.
	 */
	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5764 
/* Append new_frag to the frag_list of the head skb, keeping *last_frag
 * pointing at the list tail so repeated appends stay O(1), and update
 * the aggregate length accounting on the head.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	/* On the first append *last_frag is the head skb itself (see
	 * l2cap_reassemble_sdu, which seeds sdu_last_frag with the SDU
	 * head); afterwards it is the previous fragment.
	 */
	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5783 
5784 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5785 				struct l2cap_ctrl *control)
5786 {
5787 	int err = -EINVAL;
5788 
5789 	switch (control->sar) {
5790 	case L2CAP_SAR_UNSEGMENTED:
5791 		if (chan->sdu)
5792 			break;
5793 
5794 		err = chan->ops->recv(chan, skb);
5795 		break;
5796 
5797 	case L2CAP_SAR_START:
5798 		if (chan->sdu)
5799 			break;
5800 
5801 		chan->sdu_len = get_unaligned_le16(skb->data);
5802 		skb_pull(skb, L2CAP_SDULEN_SIZE);
5803 
5804 		if (chan->sdu_len > chan->imtu) {
5805 			err = -EMSGSIZE;
5806 			break;
5807 		}
5808 
5809 		if (skb->len >= chan->sdu_len)
5810 			break;
5811 
5812 		chan->sdu = skb;
5813 		chan->sdu_last_frag = skb;
5814 
5815 		skb = NULL;
5816 		err = 0;
5817 		break;
5818 
5819 	case L2CAP_SAR_CONTINUE:
5820 		if (!chan->sdu)
5821 			break;
5822 
5823 		append_skb_frag(chan->sdu, skb,
5824 				&chan->sdu_last_frag);
5825 		skb = NULL;
5826 
5827 		if (chan->sdu->len >= chan->sdu_len)
5828 			break;
5829 
5830 		err = 0;
5831 		break;
5832 
5833 	case L2CAP_SAR_END:
5834 		if (!chan->sdu)
5835 			break;
5836 
5837 		append_skb_frag(chan->sdu, skb,
5838 				&chan->sdu_last_frag);
5839 		skb = NULL;
5840 
5841 		if (chan->sdu->len != chan->sdu_len)
5842 			break;
5843 
5844 		err = chan->ops->recv(chan, chan->sdu);
5845 
5846 		if (!err) {
5847 			/* Reassembly complete */
5848 			chan->sdu = NULL;
5849 			chan->sdu_last_frag = NULL;
5850 			chan->sdu_len = 0;
5851 		}
5852 		break;
5853 	}
5854 
5855 	if (err) {
5856 		kfree_skb(skb);
5857 		kfree_skb(chan->sdu);
5858 		chan->sdu = NULL;
5859 		chan->sdu_last_frag = NULL;
5860 		chan->sdu_len = 0;
5861 	}
5862 
5863 	return err;
5864 }
5865 
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder: resegmenting pending frames after the connection
	 * MTU changes (see the callers that update chan->conn->mtu) is
	 * not implemented; report success so processing can continue.
	 */
	return 0;
}
5871 
5872 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5873 {
5874 	u8 event;
5875 
5876 	if (chan->mode != L2CAP_MODE_ERTM)
5877 		return;
5878 
5879 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5880 	l2cap_tx(chan, NULL, NULL, event);
5881 }
5882 
/* Drain the SREJ queue after retransmissions arrive: deliver frames to
 * reassembly in sequence order until a gap remains, and leave SREJ
 * recovery once the queue is fully drained.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		/* Frames in srej_q are not necessarily in order */
		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	/* No gaps left: resume normal reception and ack what we took */
	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5916 
/* Process a received SREJ S-frame: the peer requests selective
 * retransmission of the single frame with sequence number reqseq.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* An SREJ for a frame we have not sent yet is invalid */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		/* Remember this SREJ so the matching F-bit response is
		 * not retransmitted a second time.
		 */
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit when this final SREJ is
			 * the answer to one we already acted on
			 * (CONN_SREJ_ACT with a matching saved reqseq).
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5974 
/* Process a received REJ S-frame: the peer rejects all frames from
 * reqseq onward, requesting a go-back-N retransmission.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A REJ for a frame we have not sent yet is invalid */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this F-bit does not answer a REJ
		 * we already acted on (CONN_REJ_ACT).
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6011 
/* Classify the TxSeq of a received I-frame relative to the expected
 * sequence number, the TX window, and any outstanding SREJ state.  The
 * result tells the RX state machines whether the frame is in order, a
 * duplicate, the start of a gap (needing SREJ), or invalid.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	/* During SREJ recovery, compare against the list of requested
	 * retransmissions rather than just the expected sequence.
	 */
	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq earlier than what we expect (modulo the window) means
	 * we already received that frame.
	 */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6097 
/* Handle an ERTM RX event in the default RECV state: in-sequence
 * I-frames are reassembled and acknowledged, a sequence gap switches
 * the channel into SREJ recovery, and S-frames drive acknowledgment
 * and retransmission.  The skb is freed here unless a branch takes
 * ownership of it (skb_in_use).
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				/* Only retransmit if this F-bit does not
				 * answer a REJ we already acted on.
				 */
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only its ack info is new */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Peer left its busy state: restart the retrans
			 * timer if frames are still unacknowledged.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		/* NOTE(review): control was already dereferenced above,
		 * so the "control &&" test here looks redundant - confirm
		 * before removing it.
		 */
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6231 
/* Handle an ERTM RX event while SREJ recovery is in progress: received
 * I-frames are parked in srej_q until the requested retransmissions
 * fill the gap, at which point l2cap_rx_queued_iframes() drains the
 * queue in order.  The skb is freed here unless a branch queues it
 * (skb_in_use).
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The head of our SREJ list arrived; try to
			 * drain everything that is now in sequence.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Only retransmit if this F-bit does not answer
			 * a REJ we already acted on.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Answer an unsolicited RNR with a plain RR so
			 * the peer learns our current buffer_seq.
			 */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6374 
6375 static int l2cap_finish_move(struct l2cap_chan *chan)
6376 {
6377 	BT_DBG("chan %p", chan);
6378 
6379 	chan->rx_state = L2CAP_RX_STATE_RECV;
6380 
6381 	if (chan->hs_hcon)
6382 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6383 	else
6384 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6385 
6386 	return l2cap_resegment(chan);
6387 }
6388 
/* Handle an RX event in WAIT_P state: only a frame carrying P=1 is
 * acceptable here (anything else is a protocol error).  The poll
 * completes the move handshake, rewinds the TX side to the peer's
 * reqseq, and is then answered with an F=1 frame.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Adopt the post-move MTU and resegment if necessary */
	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* An I-frame is not a valid carrier for the poll we expected */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6426 
/* Handle an RX event in WAIT_F state: only a frame carrying F=1 can
 * complete the handshake.  It rewinds the TX side to the peer's
 * reqseq, refreshes the connection MTU, and then processes the frame
 * through the normal RECV path.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* hs_hcon set: the channel now rides the high-speed controller,
	 * so use its block MTU instead of the ACL MTU.
	 */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6464 
6465 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6466 {
6467 	/* Make sure reqseq is for a packet that has been sent but not acked */
6468 	u16 unacked;
6469 
6470 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6471 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6472 }
6473 
6474 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6475 		    struct sk_buff *skb, u8 event)
6476 {
6477 	int err = 0;
6478 
6479 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6480 	       control, skb, event, chan->rx_state);
6481 
6482 	if (__valid_reqseq(chan, control->reqseq)) {
6483 		switch (chan->rx_state) {
6484 		case L2CAP_RX_STATE_RECV:
6485 			err = l2cap_rx_state_recv(chan, control, skb, event);
6486 			break;
6487 		case L2CAP_RX_STATE_SREJ_SENT:
6488 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6489 						       event);
6490 			break;
6491 		case L2CAP_RX_STATE_WAIT_P:
6492 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6493 			break;
6494 		case L2CAP_RX_STATE_WAIT_F:
6495 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6496 			break;
6497 		default:
6498 			/* shut it down */
6499 			break;
6500 		}
6501 	} else {
6502 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6503 		       control->reqseq, chan->next_tx_seq,
6504 		       chan->expected_ack_seq);
6505 		l2cap_send_disconn_req(chan, ECONNRESET);
6506 	}
6507 
6508 	return err;
6509 }
6510 
6511 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6512 			   struct sk_buff *skb)
6513 {
6514 	int err = 0;
6515 
6516 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6517 	       chan->rx_state);
6518 
6519 	if (l2cap_classify_txseq(chan, control->txseq) ==
6520 	    L2CAP_TXSEQ_EXPECTED) {
6521 		l2cap_pass_to_tx(chan, control);
6522 
6523 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6524 		       __next_seq(chan, chan->buffer_seq));
6525 
6526 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6527 
6528 		l2cap_reassemble_sdu(chan, skb, control);
6529 	} else {
6530 		if (chan->sdu) {
6531 			kfree_skb(chan->sdu);
6532 			chan->sdu = NULL;
6533 		}
6534 		chan->sdu_last_frag = NULL;
6535 		chan->sdu_len = 0;
6536 
6537 		if (skb) {
6538 			BT_DBG("Freeing %p", skb);
6539 			kfree_skb(skb);
6540 		}
6541 	}
6542 
6543 	chan->last_acked_seq = control->txseq;
6544 	chan->expected_tx_seq = __next_seq(chan, control->txseq);
6545 
6546 	return err;
6547 }
6548 
/* Validate and dispatch a single ERTM/streaming-mode PDU: verify the
 * FCS, enforce the negotiated MPS, then route I-frames into the
 * reassembly/state machinery and S-frames into the ERTM event
 * handlers.  Consumes the skb; always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Exclude the SDU-length and FCS fields from the payload size
	 * that is checked against the MPS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the S-frame "super" field onto RX events */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* An S-frame carries no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6636 
6637 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6638 {
6639 	struct l2cap_conn *conn = chan->conn;
6640 	struct l2cap_le_credits pkt;
6641 	u16 return_credits;
6642 
6643 	/* We return more credits to the sender only after the amount of
6644 	 * credits falls below half of the initial amount.
6645 	 */
6646 	if (chan->rx_credits >= (le_max_credits + 1) / 2)
6647 		return;
6648 
6649 	return_credits = le_max_credits - chan->rx_credits;
6650 
6651 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6652 
6653 	chan->rx_credits += return_credits;
6654 
6655 	pkt.cid     = cpu_to_le16(chan->scid);
6656 	pkt.credits = cpu_to_le16(return_credits);
6657 
6658 	chan->ident = l2cap_get_ident(conn);
6659 
6660 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6661 }
6662 
/* Receive one LE flow-control (CoC) PDU and reassemble SDUs.
 *
 * Consumes one rx credit per PDU and replenishes the remote side via
 * l2cap_chan_le_send_credits().  The first PDU of an SDU carries a
 * 2-byte SDU length header; follow-up PDUs are appended to chan->sdu
 * until the full SDU has arrived, at which point it is handed to
 * chan->ops->recv().
 *
 * Returns a negative error only on paths where @skb has NOT been
 * consumed and the caller must free it: no credits left, PDU larger
 * than the MTU, or ops->recv() rejecting a single-PDU SDU.  All other
 * paths return 0 with the skb consumed or freed here (see the comment
 * above the final return).
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* Peer must not send without credits; kill the channel if it does */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Return credits to the sender if we dropped below the threshold */
	l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU: starts with the total SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Complete SDU in a single PDU: deliver it directly */
		if (skb->len == sdu_len)
			return chan->ops->recv(chan, skb);

		/* Partial SDU: stash it and wait for continuation PDUs */
		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* Chain the fragment onto the pending SDU; the fragment is now
	 * owned by chan->sdu, so clear the local pointer.
	 */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		/* SDU complete: deliver it; on error the cleanup below
		 * frees the partially delivered SDU.
		 */
		err = chan->ops->recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6752 
/* Route an incoming PDU on a dynamic CID to its channel.
 *
 * Takes ownership of @skb on all paths.  NOTE(review):
 * l2cap_get_chan_by_scid() appears to return the channel locked -- the
 * A2MP path locks explicitly and every exit unlocks via done: -- but
 * confirm against the helper's definition, which is outside this view.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		/* A2MP channels are created on demand on first data */
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* l2cap_le_data_rcv() consumes the skb unless it returns
		 * an error, in which case we drop it here.
		 */
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* recv() returning 0 means it took ownership of the skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* ERTM/streaming receive path always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6819 
6820 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6821 				  struct sk_buff *skb)
6822 {
6823 	struct hci_conn *hcon = conn->hcon;
6824 	struct l2cap_chan *chan;
6825 
6826 	if (hcon->type != ACL_LINK)
6827 		goto free_skb;
6828 
6829 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6830 					ACL_LINK);
6831 	if (!chan)
6832 		goto free_skb;
6833 
6834 	BT_DBG("chan %p, len %d", chan, skb->len);
6835 
6836 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6837 		goto drop;
6838 
6839 	if (chan->imtu < skb->len)
6840 		goto drop;
6841 
6842 	/* Store remote BD_ADDR and PSM for msg_name */
6843 	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
6844 	bt_cb(skb)->psm = psm;
6845 
6846 	if (!chan->ops->recv(chan, skb)) {
6847 		l2cap_chan_put(chan);
6848 		return;
6849 	}
6850 
6851 drop:
6852 	l2cap_chan_put(chan);
6853 free_skb:
6854 	kfree_skb(skb);
6855 }
6856 
6857 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6858 {
6859 	struct l2cap_hdr *lh = (void *) skb->data;
6860 	struct hci_conn *hcon = conn->hcon;
6861 	u16 cid, len;
6862 	__le16 psm;
6863 
6864 	if (hcon->state != BT_CONNECTED) {
6865 		BT_DBG("queueing pending rx skb");
6866 		skb_queue_tail(&conn->pending_rx, skb);
6867 		return;
6868 	}
6869 
6870 	skb_pull(skb, L2CAP_HDR_SIZE);
6871 	cid = __le16_to_cpu(lh->cid);
6872 	len = __le16_to_cpu(lh->len);
6873 
6874 	if (len != skb->len) {
6875 		kfree_skb(skb);
6876 		return;
6877 	}
6878 
6879 	/* Since we can't actively block incoming LE connections we must
6880 	 * at least ensure that we ignore incoming data from them.
6881 	 */
6882 	if (hcon->type == LE_LINK &&
6883 	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
6884 				   bdaddr_type(hcon, hcon->dst_type))) {
6885 		kfree_skb(skb);
6886 		return;
6887 	}
6888 
6889 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
6890 
6891 	switch (cid) {
6892 	case L2CAP_CID_SIGNALING:
6893 		l2cap_sig_channel(conn, skb);
6894 		break;
6895 
6896 	case L2CAP_CID_CONN_LESS:
6897 		psm = get_unaligned((__le16 *) skb->data);
6898 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
6899 		l2cap_conless_channel(conn, psm, skb);
6900 		break;
6901 
6902 	case L2CAP_CID_LE_SIGNALING:
6903 		l2cap_le_sig_channel(conn, skb);
6904 		break;
6905 
6906 	default:
6907 		l2cap_data_channel(conn, cid, skb);
6908 		break;
6909 	}
6910 }
6911 
6912 static void process_pending_rx(struct work_struct *work)
6913 {
6914 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6915 					       pending_rx_work);
6916 	struct sk_buff *skb;
6917 
6918 	BT_DBG("");
6919 
6920 	while ((skb = skb_dequeue(&conn->pending_rx)))
6921 		l2cap_recv_frame(conn, skb);
6922 }
6923 
/* Look up or create the l2cap_conn attached to @hcon.
 *
 * Returns the existing conn when one is already attached; otherwise
 * allocates a new one with its hci_chan, initializes locks, lists,
 * queues and work items, and picks the MTU based on the link type.
 * Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	/* The conn holds a reference on its hci_conn for its lifetime */
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE links use the controller's LE MTU when one is advertised;
	 * everything else (including LE with le_mtu == 0) falls back to
	 * the ACL MTU.
	 */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise the A2MP fixed channel only when high-speed is on */
	if (hcon->type == ACL_LINK &&
	    test_bit(HCI_HS_ENABLED, &hcon->hdev->dev_flags))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	/* BR/EDR SMP fixed channel needs LE enabled plus secure
	 * connections support (or the debug override flag).
	 */
	if (test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     test_bit(HCI_FORCE_LESC, &hcon->hdev->dbg_flags)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
6990 
6991 static bool is_valid_psm(u16 psm, u8 dst_type) {
6992 	if (!psm)
6993 		return false;
6994 
6995 	if (bdaddr_type_is_le(dst_type))
6996 		return (psm <= 0x00ff);
6997 
6998 	/* PSM must be odd and lsb of upper byte must be 0 */
6999 	return ((psm & 0x0101) == 0x0001);
7000 }
7001 
/* Initiate an outgoing L2CAP connection on @chan towards @dst.
 *
 * Creates (or reuses) the underlying HCI link -- LE or BR/EDR
 * depending on @dst_type -- attaches the channel to the resulting
 * l2cap_conn and starts the channel state machine.  @psm selects
 * connection-oriented channels, @cid fixed channels.
 *
 * Returns 0 on success or when a connection attempt is already in
 * progress, a negative errno otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Only raw channels may connect without a valid PSM or CID */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need a PSM ... */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	/* ... and fixed channels need a CID */
	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* ERTM/streaming are only allowed unless disabled by module
	 * parameter; LE flow control needs its state initialized.
	 */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		l2cap_le_flowctl_init(chan);
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		u8 role;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising, connect as slave; otherwise master */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			role = HCI_ROLE_SLAVE;
		else
			role = HCI_ROLE_MASTER;

		hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
				      HCI_LE_CONN_TIMEOUT, role);
	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Refuse a fixed CID already in use on this connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7163 
7164 /* ---- L2CAP interface with lower layer (HCI) ---- */
7165 
7166 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7167 {
7168 	int exact = 0, lm1 = 0, lm2 = 0;
7169 	struct l2cap_chan *c;
7170 
7171 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7172 
7173 	/* Find listening sockets and check their link_mode */
7174 	read_lock(&chan_list_lock);
7175 	list_for_each_entry(c, &chan_list, global_l) {
7176 		if (c->state != BT_LISTEN)
7177 			continue;
7178 
7179 		if (!bacmp(&c->src, &hdev->bdaddr)) {
7180 			lm1 |= HCI_LM_ACCEPT;
7181 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7182 				lm1 |= HCI_LM_MASTER;
7183 			exact++;
7184 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
7185 			lm2 |= HCI_LM_ACCEPT;
7186 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7187 				lm2 |= HCI_LM_MASTER;
7188 		}
7189 	}
7190 	read_unlock(&chan_list_lock);
7191 
7192 	return exact ? lm1 : lm2;
7193 }
7194 
7195 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7196  * from an existing channel in the list or from the beginning of the
7197  * global list (by passing NULL as first parameter).
7198  */
7199 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7200 						  bdaddr_t *src, u8 link_type)
7201 {
7202 	read_lock(&chan_list_lock);
7203 
7204 	if (c)
7205 		c = list_next_entry(c, global_l);
7206 	else
7207 		c = list_entry(chan_list.next, typeof(*c), global_l);
7208 
7209 	list_for_each_entry_from(c, &chan_list, global_l) {
7210 		if (c->chan_type != L2CAP_CHAN_FIXED)
7211 			continue;
7212 		if (c->state != BT_LISTEN)
7213 			continue;
7214 		if (bacmp(&c->src, src) && bacmp(&c->src, BDADDR_ANY))
7215 			continue;
7216 		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
7217 			continue;
7218 		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
7219 			continue;
7220 
7221 		l2cap_chan_hold(c);
7222 		read_unlock(&chan_list_lock);
7223 		return c;
7224 	}
7225 
7226 	read_unlock(&chan_list_lock);
7227 
7228 	return NULL;
7229 }
7230 
/* HCI callback: the ACL/LE link @hcon has completed (or failed) setup.
 *
 * On failure the whole l2cap_conn is torn down.  On success the conn is
 * created (if needed) and every listening fixed channel matching the
 * link gets the chance to instantiate a channel on it, unless a client
 * channel already claimed the same DCID.
 */
void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_type(hcon, hcon->dst_type);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, &hdev->bdaddr, hcon->type);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_type(hcon, hcon->src_type);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping the hold on the current pchan */
		next = l2cap_global_fixed_chan(pchan, &hdev->bdaddr,
					       hcon->type);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7289 
7290 int l2cap_disconn_ind(struct hci_conn *hcon)
7291 {
7292 	struct l2cap_conn *conn = hcon->l2cap_data;
7293 
7294 	BT_DBG("hcon %p", hcon);
7295 
7296 	if (!conn)
7297 		return HCI_ERROR_REMOTE_USER_TERM;
7298 	return conn->disc_reason;
7299 }
7300 
/* HCI callback: the link was disconnected; tear down the l2cap_conn */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
7307 
7308 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7309 {
7310 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7311 		return;
7312 
7313 	if (encrypt == 0x00) {
7314 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
7315 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7316 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
7317 			   chan->sec_level == BT_SECURITY_FIPS)
7318 			l2cap_chan_close(chan, ECONNREFUSED);
7319 	} else {
7320 		if (chan->sec_level == BT_SECURITY_MEDIUM)
7321 			__clear_chan_timer(chan);
7322 	}
7323 }
7324 
/* HCI callback: authentication/encryption status changed on @hcon.
 *
 * Walks every channel on the connection and advances its state
 * machine: resuming channels in BT_CONNECTED/BT_CONFIG, (re)starting
 * connection attempts that were waiting on security, and answering
 * deferred incoming connection requests (BT_CONNECT2).
 *
 * @status: 0 on success, HCI error otherwise.
 * @encrypt: nonzero when the link is now encrypted.  Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP channels are not affected by link-level security */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Security was upgraded: adopt the link's level */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Skip channels with an outstanding connect request */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security completed (or failed) while connecting */
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   chan->mode != L2CAP_MODE_LE_FLOWCTL) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			/* Answer the deferred incoming connection request */
			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On success, kick off configuration right away */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
7417 
/* HCI entry point for incoming ACL data.
 *
 * Reassembles HCI-level fragments into complete L2CAP frames using
 * conn->rx_skb/rx_len and hands complete frames to l2cap_recv_frame().
 * Takes ownership of @skb on every path; always returns 0.
 *
 * @flags: ACL packet boundary flags (start vs. continuation fragment).
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start frame while reassembly is in progress means the
		 * previous frame was truncated: discard the partial frame.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		/* rx_len tracks how many bytes are still outstanding */
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
7522 
7523 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7524 {
7525 	struct l2cap_chan *c;
7526 
7527 	read_lock(&chan_list_lock);
7528 
7529 	list_for_each_entry(c, &chan_list, global_l) {
7530 		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7531 			   &c->src, &c->dst,
7532 			   c->state, __le16_to_cpu(c->psm),
7533 			   c->scid, c->dcid, c->imtu, c->omtu,
7534 			   c->sec_level, c->mode);
7535 	}
7536 
7537 	read_unlock(&chan_list_lock);
7538 
7539 	return 0;
7540 }
7541 
/* debugfs open handler: wire up the single-record seq_file show */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}

/* File operations for the "l2cap" debugfs entry */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry for the debugfs file; removed again in l2cap_exit() */
static struct dentry *l2cap_debugfs;
7555 
7556 int __init l2cap_init(void)
7557 {
7558 	int err;
7559 
7560 	err = l2cap_init_sockets();
7561 	if (err < 0)
7562 		return err;
7563 
7564 	if (IS_ERR_OR_NULL(bt_debugfs))
7565 		return 0;
7566 
7567 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7568 					    NULL, &l2cap_debugfs_fops);
7569 
7570 	debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7571 			   &le_max_credits);
7572 	debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7573 			   &le_default_mps);
7574 
7575 	return 0;
7576 }
7577 
/* Subsystem cleanup: remove debugfs entry and unregister sockets */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
7583 
/* Allow disabling ERTM/streaming modes at load time or via sysfs */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
7586