xref: /openbmc/linux/net/bluetooth/l2cap_core.c (revision de2bdb3d)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
50 
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
53 
54 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
56 
57 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 				       u8 code, u8 ident, u16 dlen, void *data);
59 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
60 			   void *data);
61 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
62 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
63 
64 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 		     struct sk_buff_head *skbs, u8 event);
66 
67 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
68 {
69 	if (link_type == LE_LINK) {
70 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
71 			return BDADDR_LE_PUBLIC;
72 		else
73 			return BDADDR_LE_RANDOM;
74 	}
75 
76 	return BDADDR_BREDR;
77 }
78 
79 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
80 {
81 	return bdaddr_type(hcon->type, hcon->src_type);
82 }
83 
84 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
85 {
86 	return bdaddr_type(hcon->type, hcon->dst_type);
87 }
88 
89 /* ---- L2CAP channels ---- */
90 
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
92 						   u16 cid)
93 {
94 	struct l2cap_chan *c;
95 
96 	list_for_each_entry(c, &conn->chan_l, list) {
97 		if (c->dcid == cid)
98 			return c;
99 	}
100 	return NULL;
101 }
102 
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
104 						   u16 cid)
105 {
106 	struct l2cap_chan *c;
107 
108 	list_for_each_entry(c, &conn->chan_l, list) {
109 		if (c->scid == cid)
110 			return c;
111 	}
112 	return NULL;
113 }
114 
115 /* Find channel with given SCID.
116  * Returns locked channel. */
117 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
118 						 u16 cid)
119 {
120 	struct l2cap_chan *c;
121 
122 	mutex_lock(&conn->chan_lock);
123 	c = __l2cap_get_chan_by_scid(conn, cid);
124 	if (c)
125 		l2cap_chan_lock(c);
126 	mutex_unlock(&conn->chan_lock);
127 
128 	return c;
129 }
130 
131 /* Find channel with given DCID.
132  * Returns locked channel.
133  */
134 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
135 						 u16 cid)
136 {
137 	struct l2cap_chan *c;
138 
139 	mutex_lock(&conn->chan_lock);
140 	c = __l2cap_get_chan_by_dcid(conn, cid);
141 	if (c)
142 		l2cap_chan_lock(c);
143 	mutex_unlock(&conn->chan_lock);
144 
145 	return c;
146 }
147 
148 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
149 						    u8 ident)
150 {
151 	struct l2cap_chan *c;
152 
153 	list_for_each_entry(c, &conn->chan_l, list) {
154 		if (c->ident == ident)
155 			return c;
156 	}
157 	return NULL;
158 }
159 
160 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
161 						  u8 ident)
162 {
163 	struct l2cap_chan *c;
164 
165 	mutex_lock(&conn->chan_lock);
166 	c = __l2cap_get_chan_by_ident(conn, ident);
167 	if (c)
168 		l2cap_chan_lock(c);
169 	mutex_unlock(&conn->chan_lock);
170 
171 	return c;
172 }
173 
174 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
175 {
176 	struct l2cap_chan *c;
177 
178 	list_for_each_entry(c, &chan_list, global_l) {
179 		if (c->sport == psm && !bacmp(&c->src, src))
180 			return c;
181 	}
182 	return NULL;
183 }
184 
185 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
186 {
187 	int err;
188 
189 	write_lock(&chan_list_lock);
190 
191 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
192 		err = -EADDRINUSE;
193 		goto done;
194 	}
195 
196 	if (psm) {
197 		chan->psm = psm;
198 		chan->sport = psm;
199 		err = 0;
200 	} else {
201 		u16 p, start, end, incr;
202 
203 		if (chan->src_type == BDADDR_BREDR) {
204 			start = L2CAP_PSM_DYN_START;
205 			end = L2CAP_PSM_AUTO_END;
206 			incr = 2;
207 		} else {
208 			start = L2CAP_PSM_LE_DYN_START;
209 			end = L2CAP_PSM_LE_DYN_END;
210 			incr = 1;
211 		}
212 
213 		err = -EINVAL;
214 		for (p = start; p <= end; p += incr)
215 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
216 				chan->psm   = cpu_to_le16(p);
217 				chan->sport = cpu_to_le16(p);
218 				err = 0;
219 				break;
220 			}
221 	}
222 
223 done:
224 	write_unlock(&chan_list_lock);
225 	return err;
226 }
227 EXPORT_SYMBOL_GPL(l2cap_add_psm);
228 
229 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
230 {
231 	write_lock(&chan_list_lock);
232 
233 	/* Override the defaults (which are for conn-oriented) */
234 	chan->omtu = L2CAP_DEFAULT_MTU;
235 	chan->chan_type = L2CAP_CHAN_FIXED;
236 
237 	chan->scid = scid;
238 
239 	write_unlock(&chan_list_lock);
240 
241 	return 0;
242 }
243 
244 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
245 {
246 	u16 cid, dyn_end;
247 
248 	if (conn->hcon->type == LE_LINK)
249 		dyn_end = L2CAP_CID_LE_DYN_END;
250 	else
251 		dyn_end = L2CAP_CID_DYN_END;
252 
253 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
254 		if (!__l2cap_get_chan_by_scid(conn, cid))
255 			return cid;
256 	}
257 
258 	return 0;
259 }
260 
261 static void l2cap_state_change(struct l2cap_chan *chan, int state)
262 {
263 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
264 	       state_to_string(state));
265 
266 	chan->state = state;
267 	chan->ops->state_change(chan, state, 0);
268 }
269 
270 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
271 						int state, int err)
272 {
273 	chan->state = state;
274 	chan->ops->state_change(chan, chan->state, err);
275 }
276 
277 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
278 {
279 	chan->ops->state_change(chan, chan->state, err);
280 }
281 
282 static void __set_retrans_timer(struct l2cap_chan *chan)
283 {
284 	if (!delayed_work_pending(&chan->monitor_timer) &&
285 	    chan->retrans_timeout) {
286 		l2cap_set_timer(chan, &chan->retrans_timer,
287 				msecs_to_jiffies(chan->retrans_timeout));
288 	}
289 }
290 
291 static void __set_monitor_timer(struct l2cap_chan *chan)
292 {
293 	__clear_retrans_timer(chan);
294 	if (chan->monitor_timeout) {
295 		l2cap_set_timer(chan, &chan->monitor_timer,
296 				msecs_to_jiffies(chan->monitor_timeout));
297 	}
298 }
299 
300 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
301 					       u16 seq)
302 {
303 	struct sk_buff *skb;
304 
305 	skb_queue_walk(head, skb) {
306 		if (bt_cb(skb)->l2cap.txseq == seq)
307 			return skb;
308 	}
309 
310 	return NULL;
311 }
312 
313 /* ---- L2CAP sequence number lists ---- */
314 
315 /* For ERTM, ordered lists of sequence numbers must be tracked for
316  * SREJ requests that are received and for frames that are to be
317  * retransmitted. These seq_list functions implement a singly-linked
318  * list in an array, where membership in the list can also be checked
319  * in constant time. Items can also be added to the tail of the list
320  * and removed from the head in constant time, without further memory
321  * allocs or frees.
322  */
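/* Illustrative sketch of the semantics above, using hypothetical values:
 * after l2cap_seq_list_init(&chan->retrans_list, 63) the backing array has
 * 64 entries and a mask of 0x3f.  Appending seq 5 and then seq 9 leaves
 * head = 5, tail = 9, list[5] = 9 and list[9] = L2CAP_SEQ_LIST_TAIL; a
 * subsequent l2cap_seq_list_pop() returns 5 and advances head to 9.
 * Membership of seq n is simply list[n & mask] != L2CAP_SEQ_LIST_CLEAR.
 */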
323 
324 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
325 {
326 	size_t alloc_size, i;
327 
328 	/* Allocated size is a power of 2 to map sequence numbers
329 	 * (which may be up to 14 bits) into a smaller array that is
330 	 * sized for the negotiated ERTM transmit windows.
331 	 */
332 	alloc_size = roundup_pow_of_two(size);
333 
334 	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
335 	if (!seq_list->list)
336 		return -ENOMEM;
337 
338 	seq_list->mask = alloc_size - 1;
339 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 	for (i = 0; i < alloc_size; i++)
342 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
343 
344 	return 0;
345 }
346 
347 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
348 {
349 	kfree(seq_list->list);
350 }
351 
352 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
353 					   u16 seq)
354 {
355 	/* Constant-time check for list membership */
356 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
357 }
358 
359 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
360 {
361 	u16 seq = seq_list->head;
362 	u16 mask = seq_list->mask;
363 
364 	seq_list->head = seq_list->list[seq & mask];
365 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
366 
367 	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
368 		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
369 		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
370 	}
371 
372 	return seq;
373 }
374 
375 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
376 {
377 	u16 i;
378 
379 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
380 		return;
381 
382 	for (i = 0; i <= seq_list->mask; i++)
383 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
384 
385 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
386 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
387 }
388 
389 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
390 {
391 	u16 mask = seq_list->mask;
392 
393 	/* All appends happen in constant time */
394 
395 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
396 		return;
397 
398 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
399 		seq_list->head = seq;
400 	else
401 		seq_list->list[seq_list->tail & mask] = seq;
402 
403 	seq_list->tail = seq;
404 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
405 }
406 
407 static void l2cap_chan_timeout(struct work_struct *work)
408 {
409 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
410 					       chan_timer.work);
411 	struct l2cap_conn *conn = chan->conn;
412 	int reason;
413 
414 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
415 
416 	mutex_lock(&conn->chan_lock);
417 	l2cap_chan_lock(chan);
418 
419 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
420 		reason = ECONNREFUSED;
421 	else if (chan->state == BT_CONNECT &&
422 		 chan->sec_level != BT_SECURITY_SDP)
423 		reason = ECONNREFUSED;
424 	else
425 		reason = ETIMEDOUT;
426 
427 	l2cap_chan_close(chan, reason);
428 
429 	l2cap_chan_unlock(chan);
430 
431 	chan->ops->close(chan);
432 	mutex_unlock(&conn->chan_lock);
433 
434 	l2cap_chan_put(chan);
435 }
436 
437 struct l2cap_chan *l2cap_chan_create(void)
438 {
439 	struct l2cap_chan *chan;
440 
441 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
442 	if (!chan)
443 		return NULL;
444 
445 	mutex_init(&chan->lock);
446 
447 	/* Set default lock nesting level */
448 	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
449 
450 	write_lock(&chan_list_lock);
451 	list_add(&chan->global_l, &chan_list);
452 	write_unlock(&chan_list_lock);
453 
454 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
455 
456 	chan->state = BT_OPEN;
457 
458 	kref_init(&chan->kref);
459 
460 	/* This flag is cleared in l2cap_chan_ready() */
461 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
462 
463 	BT_DBG("chan %p", chan);
464 
465 	return chan;
466 }
467 EXPORT_SYMBOL_GPL(l2cap_chan_create);
468 
469 static void l2cap_chan_destroy(struct kref *kref)
470 {
471 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
472 
473 	BT_DBG("chan %p", chan);
474 
475 	write_lock(&chan_list_lock);
476 	list_del(&chan->global_l);
477 	write_unlock(&chan_list_lock);
478 
479 	kfree(chan);
480 }
481 
482 void l2cap_chan_hold(struct l2cap_chan *c)
483 {
484 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
485 
486 	kref_get(&c->kref);
487 }
488 
489 void l2cap_chan_put(struct l2cap_chan *c)
490 {
491 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
492 
493 	kref_put(&c->kref, l2cap_chan_destroy);
494 }
495 EXPORT_SYMBOL_GPL(l2cap_chan_put);
496 
497 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
498 {
499 	chan->fcs  = L2CAP_FCS_CRC16;
500 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
501 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
502 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
503 	chan->remote_max_tx = chan->max_tx;
504 	chan->remote_tx_win = chan->tx_win;
505 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
506 	chan->sec_level = BT_SECURITY_LOW;
507 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
508 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
509 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
510 	chan->conf_state = 0;
511 
512 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
513 }
514 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
515 
516 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
517 {
518 	chan->sdu = NULL;
519 	chan->sdu_last_frag = NULL;
520 	chan->sdu_len = 0;
521 	chan->tx_credits = 0;
522 	chan->rx_credits = le_max_credits;
523 	chan->mps = min_t(u16, chan->imtu, le_default_mps);
524 
525 	skb_queue_head_init(&chan->tx_q);
526 }
527 
528 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
529 {
530 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
531 	       __le16_to_cpu(chan->psm), chan->dcid);
532 
533 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
534 
535 	chan->conn = conn;
536 
537 	switch (chan->chan_type) {
538 	case L2CAP_CHAN_CONN_ORIENTED:
539 		/* Alloc CID for connection-oriented socket */
540 		chan->scid = l2cap_alloc_cid(conn);
541 		if (conn->hcon->type == ACL_LINK)
542 			chan->omtu = L2CAP_DEFAULT_MTU;
543 		break;
544 
545 	case L2CAP_CHAN_CONN_LESS:
546 		/* Connectionless socket */
547 		chan->scid = L2CAP_CID_CONN_LESS;
548 		chan->dcid = L2CAP_CID_CONN_LESS;
549 		chan->omtu = L2CAP_DEFAULT_MTU;
550 		break;
551 
552 	case L2CAP_CHAN_FIXED:
553 		/* Caller will set CID and CID specific MTU values */
554 		break;
555 
556 	default:
557 		/* Raw socket can send/recv signalling messages only */
558 		chan->scid = L2CAP_CID_SIGNALING;
559 		chan->dcid = L2CAP_CID_SIGNALING;
560 		chan->omtu = L2CAP_DEFAULT_MTU;
561 	}
562 
563 	chan->local_id		= L2CAP_BESTEFFORT_ID;
564 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
565 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
566 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
567 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
568 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
569 
570 	l2cap_chan_hold(chan);
571 
572 	/* Only keep a reference for fixed channels if they requested it */
573 	if (chan->chan_type != L2CAP_CHAN_FIXED ||
574 	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
575 		hci_conn_hold(conn->hcon);
576 
577 	list_add(&chan->list, &conn->chan_l);
578 }
579 
580 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
581 {
582 	mutex_lock(&conn->chan_lock);
583 	__l2cap_chan_add(conn, chan);
584 	mutex_unlock(&conn->chan_lock);
585 }
586 
587 void l2cap_chan_del(struct l2cap_chan *chan, int err)
588 {
589 	struct l2cap_conn *conn = chan->conn;
590 
591 	__clear_chan_timer(chan);
592 
593 	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
594 	       state_to_string(chan->state));
595 
596 	chan->ops->teardown(chan, err);
597 
598 	if (conn) {
599 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
600 		/* Delete from channel list */
601 		list_del(&chan->list);
602 
603 		l2cap_chan_put(chan);
604 
605 		chan->conn = NULL;
606 
607 		/* Reference was only held for non-fixed channels or
608 		 * fixed channels that explicitly requested it using the
609 		 * FLAG_HOLD_HCI_CONN flag.
610 		 */
611 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
612 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
613 			hci_conn_drop(conn->hcon);
614 
615 		if (mgr && mgr->bredr_chan == chan)
616 			mgr->bredr_chan = NULL;
617 	}
618 
619 	if (chan->hs_hchan) {
620 		struct hci_chan *hs_hchan = chan->hs_hchan;
621 
622 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
623 		amp_disconnect_logical_link(hs_hchan);
624 	}
625 
626 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
627 		return;
628 
629 	switch (chan->mode) {
630 	case L2CAP_MODE_BASIC:
631 		break;
632 
633 	case L2CAP_MODE_LE_FLOWCTL:
634 		skb_queue_purge(&chan->tx_q);
635 		break;
636 
637 	case L2CAP_MODE_ERTM:
638 		__clear_retrans_timer(chan);
639 		__clear_monitor_timer(chan);
640 		__clear_ack_timer(chan);
641 
642 		skb_queue_purge(&chan->srej_q);
643 
644 		l2cap_seq_list_free(&chan->srej_list);
645 		l2cap_seq_list_free(&chan->retrans_list);
646 
647 		/* fall through */
648 
649 	case L2CAP_MODE_STREAMING:
650 		skb_queue_purge(&chan->tx_q);
651 		break;
652 	}
653 
654 	return;
655 }
656 EXPORT_SYMBOL_GPL(l2cap_chan_del);
657 
658 static void l2cap_conn_update_id_addr(struct work_struct *work)
659 {
660 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
661 					       id_addr_update_work);
662 	struct hci_conn *hcon = conn->hcon;
663 	struct l2cap_chan *chan;
664 
665 	mutex_lock(&conn->chan_lock);
666 
667 	list_for_each_entry(chan, &conn->chan_l, list) {
668 		l2cap_chan_lock(chan);
669 		bacpy(&chan->dst, &hcon->dst);
670 		chan->dst_type = bdaddr_dst_type(hcon);
671 		l2cap_chan_unlock(chan);
672 	}
673 
674 	mutex_unlock(&conn->chan_lock);
675 }
676 
677 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
678 {
679 	struct l2cap_conn *conn = chan->conn;
680 	struct l2cap_le_conn_rsp rsp;
681 	u16 result;
682 
683 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
684 		result = L2CAP_CR_AUTHORIZATION;
685 	else
686 		result = L2CAP_CR_BAD_PSM;
687 
688 	l2cap_state_change(chan, BT_DISCONN);
689 
690 	rsp.dcid    = cpu_to_le16(chan->scid);
691 	rsp.mtu     = cpu_to_le16(chan->imtu);
692 	rsp.mps     = cpu_to_le16(chan->mps);
693 	rsp.credits = cpu_to_le16(chan->rx_credits);
694 	rsp.result  = cpu_to_le16(result);
695 
696 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
697 		       &rsp);
698 }
699 
700 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
701 {
702 	struct l2cap_conn *conn = chan->conn;
703 	struct l2cap_conn_rsp rsp;
704 	u16 result;
705 
706 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
707 		result = L2CAP_CR_SEC_BLOCK;
708 	else
709 		result = L2CAP_CR_BAD_PSM;
710 
711 	l2cap_state_change(chan, BT_DISCONN);
712 
713 	rsp.scid   = cpu_to_le16(chan->dcid);
714 	rsp.dcid   = cpu_to_le16(chan->scid);
715 	rsp.result = cpu_to_le16(result);
716 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
717 
718 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
719 }
720 
721 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
722 {
723 	struct l2cap_conn *conn = chan->conn;
724 
725 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
726 
727 	switch (chan->state) {
728 	case BT_LISTEN:
729 		chan->ops->teardown(chan, 0);
730 		break;
731 
732 	case BT_CONNECTED:
733 	case BT_CONFIG:
734 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
735 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
736 			l2cap_send_disconn_req(chan, reason);
737 		} else
738 			l2cap_chan_del(chan, reason);
739 		break;
740 
741 	case BT_CONNECT2:
742 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
743 			if (conn->hcon->type == ACL_LINK)
744 				l2cap_chan_connect_reject(chan);
745 			else if (conn->hcon->type == LE_LINK)
746 				l2cap_chan_le_connect_reject(chan);
747 		}
748 
749 		l2cap_chan_del(chan, reason);
750 		break;
751 
752 	case BT_CONNECT:
753 	case BT_DISCONN:
754 		l2cap_chan_del(chan, reason);
755 		break;
756 
757 	default:
758 		chan->ops->teardown(chan, 0);
759 		break;
760 	}
761 }
762 EXPORT_SYMBOL(l2cap_chan_close);
763 
764 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
765 {
766 	switch (chan->chan_type) {
767 	case L2CAP_CHAN_RAW:
768 		switch (chan->sec_level) {
769 		case BT_SECURITY_HIGH:
770 		case BT_SECURITY_FIPS:
771 			return HCI_AT_DEDICATED_BONDING_MITM;
772 		case BT_SECURITY_MEDIUM:
773 			return HCI_AT_DEDICATED_BONDING;
774 		default:
775 			return HCI_AT_NO_BONDING;
776 		}
777 		break;
778 	case L2CAP_CHAN_CONN_LESS:
779 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
780 			if (chan->sec_level == BT_SECURITY_LOW)
781 				chan->sec_level = BT_SECURITY_SDP;
782 		}
783 		if (chan->sec_level == BT_SECURITY_HIGH ||
784 		    chan->sec_level == BT_SECURITY_FIPS)
785 			return HCI_AT_NO_BONDING_MITM;
786 		else
787 			return HCI_AT_NO_BONDING;
788 		break;
789 	case L2CAP_CHAN_CONN_ORIENTED:
790 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
791 			if (chan->sec_level == BT_SECURITY_LOW)
792 				chan->sec_level = BT_SECURITY_SDP;
793 
794 			if (chan->sec_level == BT_SECURITY_HIGH ||
795 			    chan->sec_level == BT_SECURITY_FIPS)
796 				return HCI_AT_NO_BONDING_MITM;
797 			else
798 				return HCI_AT_NO_BONDING;
799 		}
800 		/* fall through */
801 	default:
802 		switch (chan->sec_level) {
803 		case BT_SECURITY_HIGH:
804 		case BT_SECURITY_FIPS:
805 			return HCI_AT_GENERAL_BONDING_MITM;
806 		case BT_SECURITY_MEDIUM:
807 			return HCI_AT_GENERAL_BONDING;
808 		default:
809 			return HCI_AT_NO_BONDING;
810 		}
811 		break;
812 	}
813 }
814 
815 /* Service level security */
816 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
817 {
818 	struct l2cap_conn *conn = chan->conn;
819 	__u8 auth_type;
820 
821 	if (conn->hcon->type == LE_LINK)
822 		return smp_conn_security(conn->hcon, chan->sec_level);
823 
824 	auth_type = l2cap_get_auth_type(chan);
825 
826 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
827 				 initiator);
828 }
829 
830 static u8 l2cap_get_ident(struct l2cap_conn *conn)
831 {
832 	u8 id;
833 
834 	/* Get next available identifier.
835 	 *    1 - 128 are used by the kernel.
836 	 *  129 - 199 are reserved.
837 	 *  200 - 254 are used by utilities like l2ping, etc.
838 	 */
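	/* For example, with the rollover below successive calls yield
	 * 1, 2, ..., 127, 128, 1, 2, ..., so the kernel never hands out an
	 * ident from the reserved or userspace ranges.
	 */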
839 
840 	mutex_lock(&conn->ident_lock);
841 
842 	if (++conn->tx_ident > 128)
843 		conn->tx_ident = 1;
844 
845 	id = conn->tx_ident;
846 
847 	mutex_unlock(&conn->ident_lock);
848 
849 	return id;
850 }
851 
852 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
853 			   void *data)
854 {
855 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
856 	u8 flags;
857 
858 	BT_DBG("code 0x%2.2x", code);
859 
860 	if (!skb)
861 		return;
862 
863 	/* Use NO_FLUSH if supported or we have an LE link (which does
864 	 * not support auto-flushing packets) */
865 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
866 	    conn->hcon->type == LE_LINK)
867 		flags = ACL_START_NO_FLUSH;
868 	else
869 		flags = ACL_START;
870 
871 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
872 	skb->priority = HCI_PRIO_MAX;
873 
874 	hci_send_acl(conn->hchan, skb, flags);
875 }
876 
877 static bool __chan_is_moving(struct l2cap_chan *chan)
878 {
879 	return chan->move_state != L2CAP_MOVE_STABLE &&
880 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
881 }
882 
883 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
884 {
885 	struct hci_conn *hcon = chan->conn->hcon;
886 	u16 flags;
887 
888 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
889 	       skb->priority);
890 
891 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
892 		if (chan->hs_hchan)
893 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
894 		else
895 			kfree_skb(skb);
896 
897 		return;
898 	}
899 
900 	/* Use NO_FLUSH for LE links (where this is the only option) or
901 	 * if the BR/EDR link supports it and flushing has not been
902 	 * explicitly requested (through FLAG_FLUSHABLE).
903 	 */
904 	if (hcon->type == LE_LINK ||
905 	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
906 	     lmp_no_flush_capable(hcon->hdev)))
907 		flags = ACL_START_NO_FLUSH;
908 	else
909 		flags = ACL_START;
910 
911 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
912 	hci_send_acl(chan->conn->hchan, skb, flags);
913 }
914 
915 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
916 {
917 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
918 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
919 
920 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
921 		/* S-Frame */
922 		control->sframe = 1;
923 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
924 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
925 
926 		control->sar = 0;
927 		control->txseq = 0;
928 	} else {
929 		/* I-Frame */
930 		control->sframe = 0;
931 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
932 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
933 
934 		control->poll = 0;
935 		control->super = 0;
936 	}
937 }
938 
939 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
940 {
941 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
942 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
943 
944 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
945 		/* S-Frame */
946 		control->sframe = 1;
947 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
948 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
949 
950 		control->sar = 0;
951 		control->txseq = 0;
952 	} else {
953 		/* I-Frame */
954 		control->sframe = 0;
955 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
956 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
957 
958 		control->poll = 0;
959 		control->super = 0;
960 	}
961 }
962 
963 static inline void __unpack_control(struct l2cap_chan *chan,
964 				    struct sk_buff *skb)
965 {
966 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
967 		__unpack_extended_control(get_unaligned_le32(skb->data),
968 					  &bt_cb(skb)->l2cap);
969 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
970 	} else {
971 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
972 					  &bt_cb(skb)->l2cap);
973 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
974 	}
975 }
976 
977 static u32 __pack_extended_control(struct l2cap_ctrl *control)
978 {
979 	u32 packed;
980 
981 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
982 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
983 
984 	if (control->sframe) {
985 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
986 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
987 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
988 	} else {
989 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
990 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
991 	}
992 
993 	return packed;
994 }
995 
996 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
997 {
998 	u16 packed;
999 
1000 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1001 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1002 
1003 	if (control->sframe) {
1004 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1005 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1006 		packed |= L2CAP_CTRL_FRAME_TYPE;
1007 	} else {
1008 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1009 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1010 	}
1011 
1012 	return packed;
1013 }
1014 
1015 static inline void __pack_control(struct l2cap_chan *chan,
1016 				  struct l2cap_ctrl *control,
1017 				  struct sk_buff *skb)
1018 {
1019 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1020 		put_unaligned_le32(__pack_extended_control(control),
1021 				   skb->data + L2CAP_HDR_SIZE);
1022 	} else {
1023 		put_unaligned_le16(__pack_enhanced_control(control),
1024 				   skb->data + L2CAP_HDR_SIZE);
1025 	}
1026 }
1027 
1028 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1029 {
1030 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1031 		return L2CAP_EXT_HDR_SIZE;
1032 	else
1033 		return L2CAP_ENH_HDR_SIZE;
1034 }
1035 
1036 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1037 					       u32 control)
1038 {
1039 	struct sk_buff *skb;
1040 	struct l2cap_hdr *lh;
1041 	int hlen = __ertm_hdr_size(chan);
1042 
1043 	if (chan->fcs == L2CAP_FCS_CRC16)
1044 		hlen += L2CAP_FCS_SIZE;
1045 
1046 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
1047 
1048 	if (!skb)
1049 		return ERR_PTR(-ENOMEM);
1050 
1051 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1052 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1053 	lh->cid = cpu_to_le16(chan->dcid);
1054 
1055 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1056 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1057 	else
1058 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1059 
1060 	if (chan->fcs == L2CAP_FCS_CRC16) {
1061 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1062 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1063 	}
1064 
1065 	skb->priority = HCI_PRIO_MAX;
1066 	return skb;
1067 }
1068 
1069 static void l2cap_send_sframe(struct l2cap_chan *chan,
1070 			      struct l2cap_ctrl *control)
1071 {
1072 	struct sk_buff *skb;
1073 	u32 control_field;
1074 
1075 	BT_DBG("chan %p, control %p", chan, control);
1076 
1077 	if (!control->sframe)
1078 		return;
1079 
1080 	if (__chan_is_moving(chan))
1081 		return;
1082 
1083 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1084 	    !control->poll)
1085 		control->final = 1;
1086 
1087 	if (control->super == L2CAP_SUPER_RR)
1088 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
1089 	else if (control->super == L2CAP_SUPER_RNR)
1090 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1091 
1092 	if (control->super != L2CAP_SUPER_SREJ) {
1093 		chan->last_acked_seq = control->reqseq;
1094 		__clear_ack_timer(chan);
1095 	}
1096 
1097 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1098 	       control->final, control->poll, control->super);
1099 
1100 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1101 		control_field = __pack_extended_control(control);
1102 	else
1103 		control_field = __pack_enhanced_control(control);
1104 
1105 	skb = l2cap_create_sframe_pdu(chan, control_field);
1106 	if (!IS_ERR(skb))
1107 		l2cap_do_send(chan, skb);
1108 }
1109 
1110 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1111 {
1112 	struct l2cap_ctrl control;
1113 
1114 	BT_DBG("chan %p, poll %d", chan, poll);
1115 
1116 	memset(&control, 0, sizeof(control));
1117 	control.sframe = 1;
1118 	control.poll = poll;
1119 
1120 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1121 		control.super = L2CAP_SUPER_RNR;
1122 	else
1123 		control.super = L2CAP_SUPER_RR;
1124 
1125 	control.reqseq = chan->buffer_seq;
1126 	l2cap_send_sframe(chan, &control);
1127 }
1128 
1129 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1130 {
1131 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1132 		return true;
1133 
1134 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1135 }
1136 
1137 static bool __amp_capable(struct l2cap_chan *chan)
1138 {
1139 	struct l2cap_conn *conn = chan->conn;
1140 	struct hci_dev *hdev;
1141 	bool amp_available = false;
1142 
1143 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1144 		return false;
1145 
1146 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1147 		return false;
1148 
1149 	read_lock(&hci_dev_list_lock);
1150 	list_for_each_entry(hdev, &hci_dev_list, list) {
1151 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1152 		    test_bit(HCI_UP, &hdev->flags)) {
1153 			amp_available = true;
1154 			break;
1155 		}
1156 	}
1157 	read_unlock(&hci_dev_list_lock);
1158 
1159 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1160 		return amp_available;
1161 
1162 	return false;
1163 }
1164 
1165 static bool l2cap_check_efs(struct l2cap_chan *chan)
1166 {
1167 	/* Check EFS parameters */
1168 	return true;
1169 }
1170 
1171 void l2cap_send_conn_req(struct l2cap_chan *chan)
1172 {
1173 	struct l2cap_conn *conn = chan->conn;
1174 	struct l2cap_conn_req req;
1175 
1176 	req.scid = cpu_to_le16(chan->scid);
1177 	req.psm  = chan->psm;
1178 
1179 	chan->ident = l2cap_get_ident(conn);
1180 
1181 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1182 
1183 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1184 }
1185 
1186 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1187 {
1188 	struct l2cap_create_chan_req req;
1189 	req.scid = cpu_to_le16(chan->scid);
1190 	req.psm  = chan->psm;
1191 	req.amp_id = amp_id;
1192 
1193 	chan->ident = l2cap_get_ident(chan->conn);
1194 
1195 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1196 		       sizeof(req), &req);
1197 }
1198 
1199 static void l2cap_move_setup(struct l2cap_chan *chan)
1200 {
1201 	struct sk_buff *skb;
1202 
1203 	BT_DBG("chan %p", chan);
1204 
1205 	if (chan->mode != L2CAP_MODE_ERTM)
1206 		return;
1207 
1208 	__clear_retrans_timer(chan);
1209 	__clear_monitor_timer(chan);
1210 	__clear_ack_timer(chan);
1211 
1212 	chan->retry_count = 0;
1213 	skb_queue_walk(&chan->tx_q, skb) {
1214 		if (bt_cb(skb)->l2cap.retries)
1215 			bt_cb(skb)->l2cap.retries = 1;
1216 		else
1217 			break;
1218 	}
1219 
1220 	chan->expected_tx_seq = chan->buffer_seq;
1221 
1222 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1223 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1224 	l2cap_seq_list_clear(&chan->retrans_list);
1225 	l2cap_seq_list_clear(&chan->srej_list);
1226 	skb_queue_purge(&chan->srej_q);
1227 
1228 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1229 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1230 
1231 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1232 }
1233 
1234 static void l2cap_move_done(struct l2cap_chan *chan)
1235 {
1236 	u8 move_role = chan->move_role;
1237 	BT_DBG("chan %p", chan);
1238 
1239 	chan->move_state = L2CAP_MOVE_STABLE;
1240 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1241 
1242 	if (chan->mode != L2CAP_MODE_ERTM)
1243 		return;
1244 
1245 	switch (move_role) {
1246 	case L2CAP_MOVE_ROLE_INITIATOR:
1247 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1248 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1249 		break;
1250 	case L2CAP_MOVE_ROLE_RESPONDER:
1251 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1252 		break;
1253 	}
1254 }
1255 
1256 static void l2cap_chan_ready(struct l2cap_chan *chan)
1257 {
1258 	/* The channel may have already been flagged as connected in
1259 	 * case of receiving data before the L2CAP info req/rsp
1260 	 * procedure is complete.
1261 	 */
1262 	if (chan->state == BT_CONNECTED)
1263 		return;
1264 
1265 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1266 	chan->conf_state = 0;
1267 	__clear_chan_timer(chan);
1268 
1269 	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1270 		chan->ops->suspend(chan);
1271 
1272 	chan->state = BT_CONNECTED;
1273 
1274 	chan->ops->ready(chan);
1275 }
1276 
1277 static void l2cap_le_connect(struct l2cap_chan *chan)
1278 {
1279 	struct l2cap_conn *conn = chan->conn;
1280 	struct l2cap_le_conn_req req;
1281 
1282 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1283 		return;
1284 
1285 	req.psm     = chan->psm;
1286 	req.scid    = cpu_to_le16(chan->scid);
1287 	req.mtu     = cpu_to_le16(chan->imtu);
1288 	req.mps     = cpu_to_le16(chan->mps);
1289 	req.credits = cpu_to_le16(chan->rx_credits);
1290 
1291 	chan->ident = l2cap_get_ident(conn);
1292 
1293 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1294 		       sizeof(req), &req);
1295 }
1296 
1297 static void l2cap_le_start(struct l2cap_chan *chan)
1298 {
1299 	struct l2cap_conn *conn = chan->conn;
1300 
1301 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1302 		return;
1303 
1304 	if (!chan->psm) {
1305 		l2cap_chan_ready(chan);
1306 		return;
1307 	}
1308 
1309 	if (chan->state == BT_CONNECT)
1310 		l2cap_le_connect(chan);
1311 }
1312 
1313 static void l2cap_start_connection(struct l2cap_chan *chan)
1314 {
1315 	if (__amp_capable(chan)) {
1316 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1317 		a2mp_discover_amp(chan);
1318 	} else if (chan->conn->hcon->type == LE_LINK) {
1319 		l2cap_le_start(chan);
1320 	} else {
1321 		l2cap_send_conn_req(chan);
1322 	}
1323 }
1324 
1325 static void l2cap_request_info(struct l2cap_conn *conn)
1326 {
1327 	struct l2cap_info_req req;
1328 
1329 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1330 		return;
1331 
1332 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1333 
1334 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1335 	conn->info_ident = l2cap_get_ident(conn);
1336 
1337 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1338 
1339 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1340 		       sizeof(req), &req);
1341 }
1342 
1343 static void l2cap_do_start(struct l2cap_chan *chan)
1344 {
1345 	struct l2cap_conn *conn = chan->conn;
1346 
1347 	if (conn->hcon->type == LE_LINK) {
1348 		l2cap_le_start(chan);
1349 		return;
1350 	}
1351 
1352 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1353 		l2cap_request_info(conn);
1354 		return;
1355 	}
1356 
1357 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1358 		return;
1359 
1360 	if (l2cap_chan_check_security(chan, true) &&
1361 	    __l2cap_no_conn_pending(chan))
1362 		l2cap_start_connection(chan);
1363 }
1364 
1365 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1366 {
1367 	u32 local_feat_mask = l2cap_feat_mask;
1368 	if (!disable_ertm)
1369 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1370 
1371 	switch (mode) {
1372 	case L2CAP_MODE_ERTM:
1373 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1374 	case L2CAP_MODE_STREAMING:
1375 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1376 	default:
1377 		return 0x00;
1378 	}
1379 }
1380 
1381 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1382 {
1383 	struct l2cap_conn *conn = chan->conn;
1384 	struct l2cap_disconn_req req;
1385 
1386 	if (!conn)
1387 		return;
1388 
1389 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1390 		__clear_retrans_timer(chan);
1391 		__clear_monitor_timer(chan);
1392 		__clear_ack_timer(chan);
1393 	}
1394 
1395 	if (chan->scid == L2CAP_CID_A2MP) {
1396 		l2cap_state_change(chan, BT_DISCONN);
1397 		return;
1398 	}
1399 
1400 	req.dcid = cpu_to_le16(chan->dcid);
1401 	req.scid = cpu_to_le16(chan->scid);
1402 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1403 		       sizeof(req), &req);
1404 
1405 	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1406 }
1407 
1408 /* ---- L2CAP connections ---- */
1409 static void l2cap_conn_start(struct l2cap_conn *conn)
1410 {
1411 	struct l2cap_chan *chan, *tmp;
1412 
1413 	BT_DBG("conn %p", conn);
1414 
1415 	mutex_lock(&conn->chan_lock);
1416 
1417 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1418 		l2cap_chan_lock(chan);
1419 
1420 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1421 			l2cap_chan_ready(chan);
1422 			l2cap_chan_unlock(chan);
1423 			continue;
1424 		}
1425 
1426 		if (chan->state == BT_CONNECT) {
1427 			if (!l2cap_chan_check_security(chan, true) ||
1428 			    !__l2cap_no_conn_pending(chan)) {
1429 				l2cap_chan_unlock(chan);
1430 				continue;
1431 			}
1432 
1433 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1434 			    && test_bit(CONF_STATE2_DEVICE,
1435 					&chan->conf_state)) {
1436 				l2cap_chan_close(chan, ECONNRESET);
1437 				l2cap_chan_unlock(chan);
1438 				continue;
1439 			}
1440 
1441 			l2cap_start_connection(chan);
1442 
1443 		} else if (chan->state == BT_CONNECT2) {
1444 			struct l2cap_conn_rsp rsp;
1445 			char buf[128];
1446 			rsp.scid = cpu_to_le16(chan->dcid);
1447 			rsp.dcid = cpu_to_le16(chan->scid);
1448 
1449 			if (l2cap_chan_check_security(chan, false)) {
1450 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1451 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1452 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1453 					chan->ops->defer(chan);
1454 
1455 				} else {
1456 					l2cap_state_change(chan, BT_CONFIG);
1457 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1458 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1459 				}
1460 			} else {
1461 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1462 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1463 			}
1464 
1465 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1466 				       sizeof(rsp), &rsp);
1467 
1468 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1469 			    rsp.result != L2CAP_CR_SUCCESS) {
1470 				l2cap_chan_unlock(chan);
1471 				continue;
1472 			}
1473 
1474 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1475 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1476 				       l2cap_build_conf_req(chan, buf), buf);
1477 			chan->num_conf_req++;
1478 		}
1479 
1480 		l2cap_chan_unlock(chan);
1481 	}
1482 
1483 	mutex_unlock(&conn->chan_lock);
1484 }
1485 
1486 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1487 {
1488 	struct hci_conn *hcon = conn->hcon;
1489 	struct hci_dev *hdev = hcon->hdev;
1490 
1491 	BT_DBG("%s conn %p", hdev->name, conn);
1492 
1493 	/* For outgoing pairing which doesn't necessarily have an
1494 	 * associated socket (e.g. mgmt_pair_device).
1495 	 */
1496 	if (hcon->out)
1497 		smp_conn_security(hcon, hcon->pending_sec_level);
1498 
1499 	/* For LE slave connections, make sure the connection interval
1500 	 * is in the range of the minimum and maximum interval that has
1501 	 * been configured for this connection. If not, then trigger
1502 	 * the connection update procedure.
1503 	 */
1504 	if (hcon->role == HCI_ROLE_SLAVE &&
1505 	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1506 	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1507 		struct l2cap_conn_param_update_req req;
1508 
1509 		req.min = cpu_to_le16(hcon->le_conn_min_interval);
1510 		req.max = cpu_to_le16(hcon->le_conn_max_interval);
1511 		req.latency = cpu_to_le16(hcon->le_conn_latency);
1512 		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1513 
1514 		l2cap_send_cmd(conn, l2cap_get_ident(conn),
1515 			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1516 	}
1517 }
1518 
1519 static void l2cap_conn_ready(struct l2cap_conn *conn)
1520 {
1521 	struct l2cap_chan *chan;
1522 	struct hci_conn *hcon = conn->hcon;
1523 
1524 	BT_DBG("conn %p", conn);
1525 
1526 	if (hcon->type == ACL_LINK)
1527 		l2cap_request_info(conn);
1528 
1529 	mutex_lock(&conn->chan_lock);
1530 
1531 	list_for_each_entry(chan, &conn->chan_l, list) {
1532 
1533 		l2cap_chan_lock(chan);
1534 
1535 		if (chan->scid == L2CAP_CID_A2MP) {
1536 			l2cap_chan_unlock(chan);
1537 			continue;
1538 		}
1539 
1540 		if (hcon->type == LE_LINK) {
1541 			l2cap_le_start(chan);
1542 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1543 			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1544 				l2cap_chan_ready(chan);
1545 		} else if (chan->state == BT_CONNECT) {
1546 			l2cap_do_start(chan);
1547 		}
1548 
1549 		l2cap_chan_unlock(chan);
1550 	}
1551 
1552 	mutex_unlock(&conn->chan_lock);
1553 
1554 	if (hcon->type == LE_LINK)
1555 		l2cap_le_conn_ready(conn);
1556 
1557 	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1558 }
1559 
1560 /* Notify sockets that we cannot guarantee reliability anymore */
1561 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1562 {
1563 	struct l2cap_chan *chan;
1564 
1565 	BT_DBG("conn %p", conn);
1566 
1567 	mutex_lock(&conn->chan_lock);
1568 
1569 	list_for_each_entry(chan, &conn->chan_l, list) {
1570 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1571 			l2cap_chan_set_err(chan, err);
1572 	}
1573 
1574 	mutex_unlock(&conn->chan_lock);
1575 }
1576 
1577 static void l2cap_info_timeout(struct work_struct *work)
1578 {
1579 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1580 					       info_timer.work);
1581 
1582 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1583 	conn->info_ident = 0;
1584 
1585 	l2cap_conn_start(conn);
1586 }
1587 
1588 /*
1589  * l2cap_user
1590  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1591  * callback is called during registration. The ->remove callback is called
1592  * during unregistration.
1593  * An l2cap_user object can either be explicitly unregistered or when the
1594  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1595  * l2cap->hchan, etc. are valid as long as the remove callback hasn't been called.
1596  * External modules must own a reference to the l2cap_conn object if they intend
1597  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1598  * any time if they don't.
1599  */
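/* A minimal usage sketch, assuming a hypothetical external module (names such
 * as my_probe, my_remove and my_user are placeholders, not kernel APIs).  The
 * user's list head must start out empty (LIST_HEAD_INIT/INIT_LIST_HEAD) or
 * l2cap_register_user() fails with -EINVAL, and the caller pins the
 * connection with l2cap_conn_get() for as long as it may still unregister:
 *
 *	static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		return 0;	// non-zero would abort the registration
 *	}
 *
 *	static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		// conn->hcon and conn->hchan are still valid at this point
 *	}
 *
 *	static struct l2cap_user my_user = {
 *		.list   = LIST_HEAD_INIT(my_user.list),
 *		.probe  = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	l2cap_conn_get(conn);
 *	err = l2cap_register_user(conn, &my_user);
 *	...
 *	l2cap_unregister_user(conn, &my_user);
 *	l2cap_conn_put(conn);
 */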
1600 
1601 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1602 {
1603 	struct hci_dev *hdev = conn->hcon->hdev;
1604 	int ret;
1605 
1606 	/* We need to check whether l2cap_conn is registered. If it is not, we
1607 	 * must not register the l2cap_user. l2cap_conn_del() unregisters
1608 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1609 	 * relies on the parent hci_conn object to be locked. This itself relies
1610 	 * on the hci_dev object to be locked. So we must lock the hci device
1611 	 * here, too. */
1612 
1613 	hci_dev_lock(hdev);
1614 
1615 	if (!list_empty(&user->list)) {
1616 		ret = -EINVAL;
1617 		goto out_unlock;
1618 	}
1619 
1620 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1621 	if (!conn->hchan) {
1622 		ret = -ENODEV;
1623 		goto out_unlock;
1624 	}
1625 
1626 	ret = user->probe(conn, user);
1627 	if (ret)
1628 		goto out_unlock;
1629 
1630 	list_add(&user->list, &conn->users);
1631 	ret = 0;
1632 
1633 out_unlock:
1634 	hci_dev_unlock(hdev);
1635 	return ret;
1636 }
1637 EXPORT_SYMBOL(l2cap_register_user);
1638 
1639 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1640 {
1641 	struct hci_dev *hdev = conn->hcon->hdev;
1642 
1643 	hci_dev_lock(hdev);
1644 
1645 	if (list_empty(&user->list))
1646 		goto out_unlock;
1647 
1648 	list_del_init(&user->list);
1649 	user->remove(conn, user);
1650 
1651 out_unlock:
1652 	hci_dev_unlock(hdev);
1653 }
1654 EXPORT_SYMBOL(l2cap_unregister_user);
1655 
1656 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1657 {
1658 	struct l2cap_user *user;
1659 
1660 	while (!list_empty(&conn->users)) {
1661 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1662 		list_del_init(&user->list);
1663 		user->remove(conn, user);
1664 	}
1665 }
1666 
1667 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1668 {
1669 	struct l2cap_conn *conn = hcon->l2cap_data;
1670 	struct l2cap_chan *chan, *l;
1671 
1672 	if (!conn)
1673 		return;
1674 
1675 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1676 
1677 	kfree_skb(conn->rx_skb);
1678 
1679 	skb_queue_purge(&conn->pending_rx);
1680 
1681 	/* We cannot call flush_work(&conn->pending_rx_work) here since we
1682 	 * might block if we are running on a worker from the same workqueue
1683 	 * pending_rx_work is waiting on.
1684 	 */
1685 	if (work_pending(&conn->pending_rx_work))
1686 		cancel_work_sync(&conn->pending_rx_work);
1687 
1688 	if (work_pending(&conn->id_addr_update_work))
1689 		cancel_work_sync(&conn->id_addr_update_work);
1690 
1691 	l2cap_unregister_all_users(conn);
1692 
1693 	/* Force the connection to be immediately dropped */
1694 	hcon->disc_timeout = 0;
1695 
1696 	mutex_lock(&conn->chan_lock);
1697 
1698 	/* Kill channels */
1699 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1700 		l2cap_chan_hold(chan);
1701 		l2cap_chan_lock(chan);
1702 
1703 		l2cap_chan_del(chan, err);
1704 
1705 		l2cap_chan_unlock(chan);
1706 
1707 		chan->ops->close(chan);
1708 		l2cap_chan_put(chan);
1709 	}
1710 
1711 	mutex_unlock(&conn->chan_lock);
1712 
1713 	hci_chan_del(conn->hchan);
1714 
1715 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1716 		cancel_delayed_work_sync(&conn->info_timer);
1717 
1718 	hcon->l2cap_data = NULL;
1719 	conn->hchan = NULL;
1720 	l2cap_conn_put(conn);
1721 }
1722 
1723 static void l2cap_conn_free(struct kref *ref)
1724 {
1725 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1726 
1727 	hci_conn_put(conn->hcon);
1728 	kfree(conn);
1729 }
1730 
1731 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1732 {
1733 	kref_get(&conn->ref);
1734 	return conn;
1735 }
1736 EXPORT_SYMBOL(l2cap_conn_get);
1737 
1738 void l2cap_conn_put(struct l2cap_conn *conn)
1739 {
1740 	kref_put(&conn->ref, l2cap_conn_free);
1741 }
1742 EXPORT_SYMBOL(l2cap_conn_put);
1743 
1744 /* ---- Socket interface ---- */
1745 
1746 /* Find socket with psm and source / destination bdaddr.
1747  * Returns closest match.
1748  */
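/* For example, a channel whose bound source and destination addresses both
 * equal the incoming src/dst is returned immediately as the exact match;
 * a channel listening on BDADDR_ANY only ever qualifies as a closest match
 * and is returned when no exact match exists.
 */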
1749 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1750 						   bdaddr_t *src,
1751 						   bdaddr_t *dst,
1752 						   u8 link_type)
1753 {
1754 	struct l2cap_chan *c, *c1 = NULL;
1755 
1756 	read_lock(&chan_list_lock);
1757 
1758 	list_for_each_entry(c, &chan_list, global_l) {
1759 		if (state && c->state != state)
1760 			continue;
1761 
1762 		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1763 			continue;
1764 
1765 		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1766 			continue;
1767 
1768 		if (c->psm == psm) {
1769 			int src_match, dst_match;
1770 			int src_any, dst_any;
1771 
1772 			/* Exact match. */
1773 			src_match = !bacmp(&c->src, src);
1774 			dst_match = !bacmp(&c->dst, dst);
1775 			if (src_match && dst_match) {
1776 				l2cap_chan_hold(c);
1777 				read_unlock(&chan_list_lock);
1778 				return c;
1779 			}
1780 
1781 			/* Closest match */
1782 			src_any = !bacmp(&c->src, BDADDR_ANY);
1783 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
1784 			if ((src_match && dst_any) || (src_any && dst_match) ||
1785 			    (src_any && dst_any))
1786 				c1 = c;
1787 		}
1788 	}
1789 
1790 	if (c1)
1791 		l2cap_chan_hold(c1);
1792 
1793 	read_unlock(&chan_list_lock);
1794 
1795 	return c1;
1796 }
1797 
1798 static void l2cap_monitor_timeout(struct work_struct *work)
1799 {
1800 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1801 					       monitor_timer.work);
1802 
1803 	BT_DBG("chan %p", chan);
1804 
1805 	l2cap_chan_lock(chan);
1806 
1807 	if (!chan->conn) {
1808 		l2cap_chan_unlock(chan);
1809 		l2cap_chan_put(chan);
1810 		return;
1811 	}
1812 
1813 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1814 
1815 	l2cap_chan_unlock(chan);
1816 	l2cap_chan_put(chan);
1817 }
1818 
1819 static void l2cap_retrans_timeout(struct work_struct *work)
1820 {
1821 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1822 					       retrans_timer.work);
1823 
1824 	BT_DBG("chan %p", chan);
1825 
1826 	l2cap_chan_lock(chan);
1827 
1828 	if (!chan->conn) {
1829 		l2cap_chan_unlock(chan);
1830 		l2cap_chan_put(chan);
1831 		return;
1832 	}
1833 
1834 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1835 	l2cap_chan_unlock(chan);
1836 	l2cap_chan_put(chan);
1837 }
1838 
1839 static void l2cap_streaming_send(struct l2cap_chan *chan,
1840 				 struct sk_buff_head *skbs)
1841 {
1842 	struct sk_buff *skb;
1843 	struct l2cap_ctrl *control;
1844 
1845 	BT_DBG("chan %p, skbs %p", chan, skbs);
1846 
1847 	if (__chan_is_moving(chan))
1848 		return;
1849 
1850 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
1851 
1852 	while (!skb_queue_empty(&chan->tx_q)) {
1853 
1854 		skb = skb_dequeue(&chan->tx_q);
1855 
1856 		bt_cb(skb)->l2cap.retries = 1;
1857 		control = &bt_cb(skb)->l2cap;
1858 
1859 		control->reqseq = 0;
1860 		control->txseq = chan->next_tx_seq;
1861 
1862 		__pack_control(chan, control, skb);
1863 
1864 		if (chan->fcs == L2CAP_FCS_CRC16) {
1865 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1866 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1867 		}
1868 
1869 		l2cap_do_send(chan, skb);
1870 
1871 		BT_DBG("Sent txseq %u", control->txseq);
1872 
1873 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1874 		chan->frames_sent++;
1875 	}
1876 }
1877 
1878 static int l2cap_ertm_send(struct l2cap_chan *chan)
1879 {
1880 	struct sk_buff *skb, *tx_skb;
1881 	struct l2cap_ctrl *control;
1882 	int sent = 0;
1883 
1884 	BT_DBG("chan %p", chan);
1885 
1886 	if (chan->state != BT_CONNECTED)
1887 		return -ENOTCONN;
1888 
1889 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1890 		return 0;
1891 
1892 	if (__chan_is_moving(chan))
1893 		return 0;
1894 
1895 	while (chan->tx_send_head &&
1896 	       chan->unacked_frames < chan->remote_tx_win &&
1897 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
1898 
1899 		skb = chan->tx_send_head;
1900 
1901 		bt_cb(skb)->l2cap.retries = 1;
1902 		control = &bt_cb(skb)->l2cap;
1903 
1904 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1905 			control->final = 1;
1906 
1907 		control->reqseq = chan->buffer_seq;
1908 		chan->last_acked_seq = chan->buffer_seq;
1909 		control->txseq = chan->next_tx_seq;
1910 
1911 		__pack_control(chan, control, skb);
1912 
1913 		if (chan->fcs == L2CAP_FCS_CRC16) {
1914 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1915 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1916 		}
1917 
1918 		/* Clone after data has been modified. Data is assumed to be
1919 		   read-only (for locking purposes) on cloned sk_buffs.
1920 		 */
1921 		tx_skb = skb_clone(skb, GFP_KERNEL);
1922 
1923 		if (!tx_skb)
1924 			break;
1925 
1926 		__set_retrans_timer(chan);
1927 
1928 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1929 		chan->unacked_frames++;
1930 		chan->frames_sent++;
1931 		sent++;
1932 
1933 		if (skb_queue_is_last(&chan->tx_q, skb))
1934 			chan->tx_send_head = NULL;
1935 		else
1936 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1937 
1938 		l2cap_do_send(chan, tx_skb);
1939 		BT_DBG("Sent txseq %u", control->txseq);
1940 	}
1941 
1942 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1943 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
1944 
1945 	return sent;
1946 }
1947 
1948 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1949 {
1950 	struct l2cap_ctrl control;
1951 	struct sk_buff *skb;
1952 	struct sk_buff *tx_skb;
1953 	u16 seq;
1954 
1955 	BT_DBG("chan %p", chan);
1956 
1957 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1958 		return;
1959 
1960 	if (__chan_is_moving(chan))
1961 		return;
1962 
1963 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1964 		seq = l2cap_seq_list_pop(&chan->retrans_list);
1965 
1966 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1967 		if (!skb) {
1968 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
1969 			       seq);
1970 			continue;
1971 		}
1972 
1973 		bt_cb(skb)->l2cap.retries++;
1974 		control = bt_cb(skb)->l2cap;
1975 
1976 		if (chan->max_tx != 0 &&
1977 		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
1978 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1979 			l2cap_send_disconn_req(chan, ECONNRESET);
1980 			l2cap_seq_list_clear(&chan->retrans_list);
1981 			break;
1982 		}
1983 
1984 		control.reqseq = chan->buffer_seq;
1985 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1986 			control.final = 1;
1987 		else
1988 			control.final = 0;
1989 
1990 		if (skb_cloned(skb)) {
1991 			/* Cloned sk_buffs are read-only, so we need a
1992 			 * writeable copy
1993 			 */
1994 			tx_skb = skb_copy(skb, GFP_KERNEL);
1995 		} else {
1996 			tx_skb = skb_clone(skb, GFP_KERNEL);
1997 		}
1998 
1999 		if (!tx_skb) {
2000 			l2cap_seq_list_clear(&chan->retrans_list);
2001 			break;
2002 		}
2003 
2004 		/* Update skb contents */
2005 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2006 			put_unaligned_le32(__pack_extended_control(&control),
2007 					   tx_skb->data + L2CAP_HDR_SIZE);
2008 		} else {
2009 			put_unaligned_le16(__pack_enhanced_control(&control),
2010 					   tx_skb->data + L2CAP_HDR_SIZE);
2011 		}
2012 
2013 		/* Update FCS */
2014 		if (chan->fcs == L2CAP_FCS_CRC16) {
2015 			u16 fcs = crc16(0, (u8 *) tx_skb->data,
2016 					tx_skb->len - L2CAP_FCS_SIZE);
2017 			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2018 						L2CAP_FCS_SIZE);
2019 		}
2020 
2021 		l2cap_do_send(chan, tx_skb);
2022 
2023 		BT_DBG("Resent txseq %d", control.txseq);
2024 
2025 		chan->last_acked_seq = chan->buffer_seq;
2026 	}
2027 }
2028 
2029 static void l2cap_retransmit(struct l2cap_chan *chan,
2030 			     struct l2cap_ctrl *control)
2031 {
2032 	BT_DBG("chan %p, control %p", chan, control);
2033 
2034 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2035 	l2cap_ertm_resend(chan);
2036 }
2037 
2038 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2039 				 struct l2cap_ctrl *control)
2040 {
2041 	struct sk_buff *skb;
2042 
2043 	BT_DBG("chan %p, control %p", chan, control);
2044 
2045 	if (control->poll)
2046 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2047 
2048 	l2cap_seq_list_clear(&chan->retrans_list);
2049 
2050 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2051 		return;
2052 
2053 	if (chan->unacked_frames) {
2054 		skb_queue_walk(&chan->tx_q, skb) {
2055 			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2056 			    skb == chan->tx_send_head)
2057 				break;
2058 		}
2059 
2060 		skb_queue_walk_from(&chan->tx_q, skb) {
2061 			if (skb == chan->tx_send_head)
2062 				break;
2063 
2064 			l2cap_seq_list_append(&chan->retrans_list,
2065 					      bt_cb(skb)->l2cap.txseq);
2066 		}
2067 
2068 		l2cap_ertm_resend(chan);
2069 	}
2070 }
2071 
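/* Acknowledge received I-frames.  When the local side is busy an RNR
 * S-frame is sent immediately; otherwise the ack is piggybacked on any
 * outgoing I-frames, sent as an RR S-frame once roughly 3/4 of the ack
 * window is outstanding, or deferred via the ack timer.
 */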
2072 static void l2cap_send_ack(struct l2cap_chan *chan)
2073 {
2074 	struct l2cap_ctrl control;
2075 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2076 					 chan->last_acked_seq);
2077 	int threshold;
2078 
2079 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2080 	       chan, chan->last_acked_seq, chan->buffer_seq);
2081 
2082 	memset(&control, 0, sizeof(control));
2083 	control.sframe = 1;
2084 
2085 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2086 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2087 		__clear_ack_timer(chan);
2088 		control.super = L2CAP_SUPER_RNR;
2089 		control.reqseq = chan->buffer_seq;
2090 		l2cap_send_sframe(chan, &control);
2091 	} else {
2092 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2093 			l2cap_ertm_send(chan);
2094 			/* If any i-frames were sent, they included an ack */
2095 			if (chan->buffer_seq == chan->last_acked_seq)
2096 				frames_to_ack = 0;
2097 		}
2098 
2099 		/* Ack now if the window is 3/4ths full.
2100 		 * Calculate without mul or div
2101 		 */
2102 		threshold = chan->ack_win;
2103 		threshold += threshold << 1;
2104 		threshold >>= 2;
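		/* threshold is now (ack_win + (ack_win << 1)) >> 2,
		 * i.e. (3 * ack_win) / 4 rounded down.
		 */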
2105 
2106 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2107 		       threshold);
2108 
2109 		if (frames_to_ack >= threshold) {
2110 			__clear_ack_timer(chan);
2111 			control.super = L2CAP_SUPER_RR;
2112 			control.reqseq = chan->buffer_seq;
2113 			l2cap_send_sframe(chan, &control);
2114 			frames_to_ack = 0;
2115 		}
2116 
2117 		if (frames_to_ack)
2118 			__set_ack_timer(chan);
2119 	}
2120 }
2121 
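/* Copy @count bytes of user data from @msg into the linear part of @skb,
 * then chain any remaining bytes as MTU-sized fragment skbs on
 * skb_shinfo(skb)->frag_list.  Returns the total number of bytes copied
 * or a negative error.
 */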
2122 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2123 					 struct msghdr *msg, int len,
2124 					 int count, struct sk_buff *skb)
2125 {
2126 	struct l2cap_conn *conn = chan->conn;
2127 	struct sk_buff **frag;
2128 	int sent = 0;
2129 
2130 	if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
2131 		return -EFAULT;
2132 
2133 	sent += count;
2134 	len  -= count;
2135 
2136 	/* Continuation fragments (no L2CAP header) */
2137 	frag = &skb_shinfo(skb)->frag_list;
2138 	while (len) {
2139 		struct sk_buff *tmp;
2140 
2141 		count = min_t(unsigned int, conn->mtu, len);
2142 
2143 		tmp = chan->ops->alloc_skb(chan, 0, count,
2144 					   msg->msg_flags & MSG_DONTWAIT);
2145 		if (IS_ERR(tmp))
2146 			return PTR_ERR(tmp);
2147 
2148 		*frag = tmp;
2149 
2150 		if (copy_from_iter(skb_put(*frag, count), count,
2151 				   &msg->msg_iter) != count)
2152 			return -EFAULT;
2153 
2154 		sent += count;
2155 		len  -= count;
2156 
2157 		skb->len += (*frag)->len;
2158 		skb->data_len += (*frag)->len;
2159 
2160 		frag = &(*frag)->next;
2161 	}
2162 
2163 	return sent;
2164 }
2165 
2166 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2167 						 struct msghdr *msg, size_t len)
2168 {
2169 	struct l2cap_conn *conn = chan->conn;
2170 	struct sk_buff *skb;
2171 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2172 	struct l2cap_hdr *lh;
2173 
2174 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2175 	       __le16_to_cpu(chan->psm), len);
2176 
2177 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2178 
2179 	skb = chan->ops->alloc_skb(chan, hlen, count,
2180 				   msg->msg_flags & MSG_DONTWAIT);
2181 	if (IS_ERR(skb))
2182 		return skb;
2183 
2184 	/* Create L2CAP header */
2185 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2186 	lh->cid = cpu_to_le16(chan->dcid);
2187 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2188 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2189 
2190 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2191 	if (unlikely(err < 0)) {
2192 		kfree_skb(skb);
2193 		return ERR_PTR(err);
2194 	}
2195 	return skb;
2196 }
2197 
2198 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2199 					      struct msghdr *msg, size_t len)
2200 {
2201 	struct l2cap_conn *conn = chan->conn;
2202 	struct sk_buff *skb;
2203 	int err, count;
2204 	struct l2cap_hdr *lh;
2205 
2206 	BT_DBG("chan %p len %zu", chan, len);
2207 
2208 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2209 
2210 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2211 				   msg->msg_flags & MSG_DONTWAIT);
2212 	if (IS_ERR(skb))
2213 		return skb;
2214 
2215 	/* Create L2CAP header */
2216 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2217 	lh->cid = cpu_to_le16(chan->dcid);
2218 	lh->len = cpu_to_le16(len);
2219 
2220 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2221 	if (unlikely(err < 0)) {
2222 		kfree_skb(skb);
2223 		return ERR_PTR(err);
2224 	}
2225 	return skb;
2226 }
2227 
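/* Build a single ERTM/streaming mode I-frame PDU.  Header space is
 * reserved for the basic L2CAP header, the (enhanced or extended) control
 * field, an optional SDU length field for the first segment and an
 * optional FCS; the control field itself is filled in at transmit time.
 */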
2228 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2229 					       struct msghdr *msg, size_t len,
2230 					       u16 sdulen)
2231 {
2232 	struct l2cap_conn *conn = chan->conn;
2233 	struct sk_buff *skb;
2234 	int err, count, hlen;
2235 	struct l2cap_hdr *lh;
2236 
2237 	BT_DBG("chan %p len %zu", chan, len);
2238 
2239 	if (!conn)
2240 		return ERR_PTR(-ENOTCONN);
2241 
2242 	hlen = __ertm_hdr_size(chan);
2243 
2244 	if (sdulen)
2245 		hlen += L2CAP_SDULEN_SIZE;
2246 
2247 	if (chan->fcs == L2CAP_FCS_CRC16)
2248 		hlen += L2CAP_FCS_SIZE;
2249 
2250 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2251 
2252 	skb = chan->ops->alloc_skb(chan, hlen, count,
2253 				   msg->msg_flags & MSG_DONTWAIT);
2254 	if (IS_ERR(skb))
2255 		return skb;
2256 
2257 	/* Create L2CAP header */
2258 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2259 	lh->cid = cpu_to_le16(chan->dcid);
2260 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2261 
2262 	/* Control header is populated later */
2263 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2264 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2265 	else
2266 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2267 
2268 	if (sdulen)
2269 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2270 
2271 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2272 	if (unlikely(err < 0)) {
2273 		kfree_skb(skb);
2274 		return ERR_PTR(err);
2275 	}
2276 
2277 	bt_cb(skb)->l2cap.fcs = chan->fcs;
2278 	bt_cb(skb)->l2cap.retries = 0;
2279 	return skb;
2280 }
2281 
2282 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2283 			     struct sk_buff_head *seg_queue,
2284 			     struct msghdr *msg, size_t len)
2285 {
2286 	struct sk_buff *skb;
2287 	u16 sdu_len;
2288 	size_t pdu_len;
2289 	u8 sar;
2290 
2291 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2292 
2293 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2294 	 * so fragmented skbs are not used.  The HCI layer's handling
2295 	 * of fragmented skbs is not compatible with ERTM's queueing.
2296 	 */
2297 
2298 	/* PDU size is derived from the HCI MTU */
2299 	pdu_len = chan->conn->mtu;
2300 
2301 	/* Constrain PDU size for BR/EDR connections */
2302 	if (!chan->hs_hcon)
2303 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2304 
2305 	/* Adjust for largest possible L2CAP overhead. */
2306 	if (chan->fcs)
2307 		pdu_len -= L2CAP_FCS_SIZE;
2308 
2309 	pdu_len -= __ertm_hdr_size(chan);
2310 
2311 	/* Remote device may have requested smaller PDUs */
2312 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2313 
2314 	if (len <= pdu_len) {
2315 		sar = L2CAP_SAR_UNSEGMENTED;
2316 		sdu_len = 0;
2317 		pdu_len = len;
2318 	} else {
2319 		sar = L2CAP_SAR_START;
2320 		sdu_len = len;
2321 	}
2322 
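	/* Emit one PDU per loop iteration, tagging each with the right SAR
	 * value: UNSEGMENTED for a single-PDU SDU, otherwise START,
	 * CONTINUE, ..., END.
	 */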
2323 	while (len > 0) {
2324 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2325 
2326 		if (IS_ERR(skb)) {
2327 			__skb_queue_purge(seg_queue);
2328 			return PTR_ERR(skb);
2329 		}
2330 
2331 		bt_cb(skb)->l2cap.sar = sar;
2332 		__skb_queue_tail(seg_queue, skb);
2333 
2334 		len -= pdu_len;
2335 		if (sdu_len)
2336 			sdu_len = 0;
2337 
2338 		if (len <= pdu_len) {
2339 			sar = L2CAP_SAR_END;
2340 			pdu_len = len;
2341 		} else {
2342 			sar = L2CAP_SAR_CONTINUE;
2343 		}
2344 	}
2345 
2346 	return 0;
2347 }
2348 
2349 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2350 						   struct msghdr *msg,
2351 						   size_t len, u16 sdulen)
2352 {
2353 	struct l2cap_conn *conn = chan->conn;
2354 	struct sk_buff *skb;
2355 	int err, count, hlen;
2356 	struct l2cap_hdr *lh;
2357 
2358 	BT_DBG("chan %p len %zu", chan, len);
2359 
2360 	if (!conn)
2361 		return ERR_PTR(-ENOTCONN);
2362 
2363 	hlen = L2CAP_HDR_SIZE;
2364 
2365 	if (sdulen)
2366 		hlen += L2CAP_SDULEN_SIZE;
2367 
2368 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2369 
2370 	skb = chan->ops->alloc_skb(chan, hlen, count,
2371 				   msg->msg_flags & MSG_DONTWAIT);
2372 	if (IS_ERR(skb))
2373 		return skb;
2374 
2375 	/* Create L2CAP header */
2376 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2377 	lh->cid = cpu_to_le16(chan->dcid);
2378 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2379 
2380 	if (sdulen)
2381 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2382 
2383 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2384 	if (unlikely(err < 0)) {
2385 		kfree_skb(skb);
2386 		return ERR_PTR(err);
2387 	}
2388 
2389 	return skb;
2390 }
2391 
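/* Segment an outgoing LE SDU into K-frames.  Only the first PDU carries
 * the 2-byte SDU length field, so every following PDU may carry
 * L2CAP_SDULEN_SIZE more payload bytes.
 */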
2392 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2393 				struct sk_buff_head *seg_queue,
2394 				struct msghdr *msg, size_t len)
2395 {
2396 	struct sk_buff *skb;
2397 	size_t pdu_len;
2398 	u16 sdu_len;
2399 
2400 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2401 
2402 	sdu_len = len;
2403 	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2404 
2405 	while (len > 0) {
2406 		if (len <= pdu_len)
2407 			pdu_len = len;
2408 
2409 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2410 		if (IS_ERR(skb)) {
2411 			__skb_queue_purge(seg_queue);
2412 			return PTR_ERR(skb);
2413 		}
2414 
2415 		__skb_queue_tail(seg_queue, skb);
2416 
2417 		len -= pdu_len;
2418 
2419 		if (sdu_len) {
2420 			sdu_len = 0;
2421 			pdu_len += L2CAP_SDULEN_SIZE;
2422 		}
2423 	}
2424 
2425 	return 0;
2426 }
2427 
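/* Entry point for outgoing channel data.  Connectionless channels get a
 * single PDU with a PSM header; LE flow control, basic, ERTM and
 * streaming channels are handled according to the negotiated mode.
 * Returns the number of bytes accepted or a negative error.
 */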
2428 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2429 {
2430 	struct sk_buff *skb;
2431 	int err;
2432 	struct sk_buff_head seg_queue;
2433 
2434 	if (!chan->conn)
2435 		return -ENOTCONN;
2436 
2437 	/* Connectionless channel */
2438 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2439 		skb = l2cap_create_connless_pdu(chan, msg, len);
2440 		if (IS_ERR(skb))
2441 			return PTR_ERR(skb);
2442 
2443 		/* Channel lock is released before requesting new skb and then
2444 		 * reacquired, so we need to recheck the channel state.
2445 		 */
2446 		if (chan->state != BT_CONNECTED) {
2447 			kfree_skb(skb);
2448 			return -ENOTCONN;
2449 		}
2450 
2451 		l2cap_do_send(chan, skb);
2452 		return len;
2453 	}
2454 
2455 	switch (chan->mode) {
2456 	case L2CAP_MODE_LE_FLOWCTL:
2457 		/* Check outgoing MTU */
2458 		if (len > chan->omtu)
2459 			return -EMSGSIZE;
2460 
2461 		if (!chan->tx_credits)
2462 			return -EAGAIN;
2463 
2464 		__skb_queue_head_init(&seg_queue);
2465 
2466 		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2467 
2468 		if (chan->state != BT_CONNECTED) {
2469 			__skb_queue_purge(&seg_queue);
2470 			err = -ENOTCONN;
2471 		}
2472 
2473 		if (err)
2474 			return err;
2475 
2476 		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2477 
2478 		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2479 			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2480 			chan->tx_credits--;
2481 		}
2482 
2483 		if (!chan->tx_credits)
2484 			chan->ops->suspend(chan);
2485 
2486 		err = len;
2487 
2488 		break;
2489 
2490 	case L2CAP_MODE_BASIC:
2491 		/* Check outgoing MTU */
2492 		if (len > chan->omtu)
2493 			return -EMSGSIZE;
2494 
2495 		/* Create a basic PDU */
2496 		skb = l2cap_create_basic_pdu(chan, msg, len);
2497 		if (IS_ERR(skb))
2498 			return PTR_ERR(skb);
2499 
2500 		/* Channel lock is released before requesting new skb and then
2501 		 * reacquired, so we need to recheck the channel state.
2502 		 */
2503 		if (chan->state != BT_CONNECTED) {
2504 			kfree_skb(skb);
2505 			return -ENOTCONN;
2506 		}
2507 
2508 		l2cap_do_send(chan, skb);
2509 		err = len;
2510 		break;
2511 
2512 	case L2CAP_MODE_ERTM:
2513 	case L2CAP_MODE_STREAMING:
2514 		/* Check outgoing MTU */
2515 		if (len > chan->omtu) {
2516 			err = -EMSGSIZE;
2517 			break;
2518 		}
2519 
2520 		__skb_queue_head_init(&seg_queue);
2521 
2522 		/* Do segmentation before calling in to the state machine,
2523 		 * since it's possible to block while waiting for memory
2524 		 * allocation.
2525 		 */
2526 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2527 
2528 		/* The channel could have been closed while segmenting,
2529 		 * check that it is still connected.
2530 		 */
2531 		if (chan->state != BT_CONNECTED) {
2532 			__skb_queue_purge(&seg_queue);
2533 			err = -ENOTCONN;
2534 		}
2535 
2536 		if (err)
2537 			break;
2538 
2539 		if (chan->mode == L2CAP_MODE_ERTM)
2540 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2541 		else
2542 			l2cap_streaming_send(chan, &seg_queue);
2543 
2544 		err = len;
2545 
2546 		/* If the skbs were not queued for sending, they'll still be in
2547 		 * seg_queue and need to be purged.
2548 		 */
2549 		__skb_queue_purge(&seg_queue);
2550 		break;
2551 
2552 	default:
2553 		BT_DBG("bad mode 0x%2.2x", chan->mode);
2554 		err = -EBADFD;
2555 	}
2556 
2557 	return err;
2558 }
2559 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2560 
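/* Send an SREJ S-frame for every frame missing between the expected
 * txseq and the txseq that was actually received, remembering each
 * request on chan->srej_list.
 */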
2561 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2562 {
2563 	struct l2cap_ctrl control;
2564 	u16 seq;
2565 
2566 	BT_DBG("chan %p, txseq %u", chan, txseq);
2567 
2568 	memset(&control, 0, sizeof(control));
2569 	control.sframe = 1;
2570 	control.super = L2CAP_SUPER_SREJ;
2571 
2572 	for (seq = chan->expected_tx_seq; seq != txseq;
2573 	     seq = __next_seq(chan, seq)) {
2574 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2575 			control.reqseq = seq;
2576 			l2cap_send_sframe(chan, &control);
2577 			l2cap_seq_list_append(&chan->srej_list, seq);
2578 		}
2579 	}
2580 
2581 	chan->expected_tx_seq = __next_seq(chan, txseq);
2582 }
2583 
2584 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2585 {
2586 	struct l2cap_ctrl control;
2587 
2588 	BT_DBG("chan %p", chan);
2589 
2590 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2591 		return;
2592 
2593 	memset(&control, 0, sizeof(control));
2594 	control.sframe = 1;
2595 	control.super = L2CAP_SUPER_SREJ;
2596 	control.reqseq = chan->srej_list.tail;
2597 	l2cap_send_sframe(chan, &control);
2598 }
2599 
2600 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2601 {
2602 	struct l2cap_ctrl control;
2603 	u16 initial_head;
2604 	u16 seq;
2605 
2606 	BT_DBG("chan %p, txseq %u", chan, txseq);
2607 
2608 	memset(&control, 0, sizeof(control));
2609 	control.sframe = 1;
2610 	control.super = L2CAP_SUPER_SREJ;
2611 
2612 	/* Capture initial list head to allow only one pass through the list. */
2613 	initial_head = chan->srej_list.head;
2614 
2615 	do {
2616 		seq = l2cap_seq_list_pop(&chan->srej_list);
2617 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2618 			break;
2619 
2620 		control.reqseq = seq;
2621 		l2cap_send_sframe(chan, &control);
2622 		l2cap_seq_list_append(&chan->srej_list, seq);
2623 	} while (chan->srej_list.head != initial_head);
2624 }
2625 
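/* Process the acknowledgement carried in ReqSeq: free every acked frame
 * from the transmit queue and stop the retransmission timer once no
 * unacked frames remain.
 */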
2626 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2627 {
2628 	struct sk_buff *acked_skb;
2629 	u16 ackseq;
2630 
2631 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2632 
2633 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2634 		return;
2635 
2636 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2637 	       chan->expected_ack_seq, chan->unacked_frames);
2638 
2639 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2640 	     ackseq = __next_seq(chan, ackseq)) {
2641 
2642 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2643 		if (acked_skb) {
2644 			skb_unlink(acked_skb, &chan->tx_q);
2645 			kfree_skb(acked_skb);
2646 			chan->unacked_frames--;
2647 		}
2648 	}
2649 
2650 	chan->expected_ack_seq = reqseq;
2651 
2652 	if (chan->unacked_frames == 0)
2653 		__clear_retrans_timer(chan);
2654 
2655 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2656 }
2657 
2658 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2659 {
2660 	BT_DBG("chan %p", chan);
2661 
2662 	chan->expected_tx_seq = chan->buffer_seq;
2663 	l2cap_seq_list_clear(&chan->srej_list);
2664 	skb_queue_purge(&chan->srej_q);
2665 	chan->rx_state = L2CAP_RX_STATE_RECV;
2666 }
2667 
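/* ERTM transmit state machine, XMIT state: new data is queued and sent
 * immediately, local busy conditions are signalled with RNR/RR, and an
 * explicit poll or retransmission timeout moves the channel to the
 * WAIT_F state.
 */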
2668 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2669 				struct l2cap_ctrl *control,
2670 				struct sk_buff_head *skbs, u8 event)
2671 {
2672 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2673 	       event);
2674 
2675 	switch (event) {
2676 	case L2CAP_EV_DATA_REQUEST:
2677 		if (chan->tx_send_head == NULL)
2678 			chan->tx_send_head = skb_peek(skbs);
2679 
2680 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2681 		l2cap_ertm_send(chan);
2682 		break;
2683 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2684 		BT_DBG("Enter LOCAL_BUSY");
2685 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2686 
2687 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2688 			/* The SREJ_SENT state must be aborted if we are to
2689 			 * enter the LOCAL_BUSY state.
2690 			 */
2691 			l2cap_abort_rx_srej_sent(chan);
2692 		}
2693 
2694 		l2cap_send_ack(chan);
2695 
2696 		break;
2697 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2698 		BT_DBG("Exit LOCAL_BUSY");
2699 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2700 
2701 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2702 			struct l2cap_ctrl local_control;
2703 
2704 			memset(&local_control, 0, sizeof(local_control));
2705 			local_control.sframe = 1;
2706 			local_control.super = L2CAP_SUPER_RR;
2707 			local_control.poll = 1;
2708 			local_control.reqseq = chan->buffer_seq;
2709 			l2cap_send_sframe(chan, &local_control);
2710 
2711 			chan->retry_count = 1;
2712 			__set_monitor_timer(chan);
2713 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2714 		}
2715 		break;
2716 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2717 		l2cap_process_reqseq(chan, control->reqseq);
2718 		break;
2719 	case L2CAP_EV_EXPLICIT_POLL:
2720 		l2cap_send_rr_or_rnr(chan, 1);
2721 		chan->retry_count = 1;
2722 		__set_monitor_timer(chan);
2723 		__clear_ack_timer(chan);
2724 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2725 		break;
2726 	case L2CAP_EV_RETRANS_TO:
2727 		l2cap_send_rr_or_rnr(chan, 1);
2728 		chan->retry_count = 1;
2729 		__set_monitor_timer(chan);
2730 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2731 		break;
2732 	case L2CAP_EV_RECV_FBIT:
2733 		/* Nothing to process */
2734 		break;
2735 	default:
2736 		break;
2737 	}
2738 }
2739 
2740 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2741 				  struct l2cap_ctrl *control,
2742 				  struct sk_buff_head *skbs, u8 event)
2743 {
2744 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2745 	       event);
2746 
2747 	switch (event) {
2748 	case L2CAP_EV_DATA_REQUEST:
2749 		if (chan->tx_send_head == NULL)
2750 			chan->tx_send_head = skb_peek(skbs);
2751 		/* Queue data, but don't send. */
2752 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2753 		break;
2754 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2755 		BT_DBG("Enter LOCAL_BUSY");
2756 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2757 
2758 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2759 			/* The SREJ_SENT state must be aborted if we are to
2760 			 * enter the LOCAL_BUSY state.
2761 			 */
2762 			l2cap_abort_rx_srej_sent(chan);
2763 		}
2764 
2765 		l2cap_send_ack(chan);
2766 
2767 		break;
2768 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2769 		BT_DBG("Exit LOCAL_BUSY");
2770 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2771 
2772 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2773 			struct l2cap_ctrl local_control;
2774 			memset(&local_control, 0, sizeof(local_control));
2775 			local_control.sframe = 1;
2776 			local_control.super = L2CAP_SUPER_RR;
2777 			local_control.poll = 1;
2778 			local_control.reqseq = chan->buffer_seq;
2779 			l2cap_send_sframe(chan, &local_control);
2780 
2781 			chan->retry_count = 1;
2782 			__set_monitor_timer(chan);
2783 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2784 		}
2785 		break;
2786 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2787 		l2cap_process_reqseq(chan, control->reqseq);
2788 
2789 		/* Fall through */
2790 
2791 	case L2CAP_EV_RECV_FBIT:
2792 		if (control && control->final) {
2793 			__clear_monitor_timer(chan);
2794 			if (chan->unacked_frames > 0)
2795 				__set_retrans_timer(chan);
2796 			chan->retry_count = 0;
2797 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2798 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2799 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2800 		break;
2801 	case L2CAP_EV_EXPLICIT_POLL:
2802 		/* Ignore */
2803 		break;
2804 	case L2CAP_EV_MONITOR_TO:
2805 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2806 			l2cap_send_rr_or_rnr(chan, 1);
2807 			__set_monitor_timer(chan);
2808 			chan->retry_count++;
2809 		} else {
2810 			l2cap_send_disconn_req(chan, ECONNABORTED);
2811 		}
2812 		break;
2813 	default:
2814 		break;
2815 	}
2816 }
2817 
2818 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2819 		     struct sk_buff_head *skbs, u8 event)
2820 {
2821 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2822 	       chan, control, skbs, event, chan->tx_state);
2823 
2824 	switch (chan->tx_state) {
2825 	case L2CAP_TX_STATE_XMIT:
2826 		l2cap_tx_state_xmit(chan, control, skbs, event);
2827 		break;
2828 	case L2CAP_TX_STATE_WAIT_F:
2829 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2830 		break;
2831 	default:
2832 		/* Ignore event */
2833 		break;
2834 	}
2835 }
2836 
2837 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2838 			     struct l2cap_ctrl *control)
2839 {
2840 	BT_DBG("chan %p, control %p", chan, control);
2841 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2842 }
2843 
2844 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2845 				  struct l2cap_ctrl *control)
2846 {
2847 	BT_DBG("chan %p, control %p", chan, control);
2848 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2849 }
2850 
2851 /* Copy frame to all raw sockets on that connection */
2852 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2853 {
2854 	struct sk_buff *nskb;
2855 	struct l2cap_chan *chan;
2856 
2857 	BT_DBG("conn %p", conn);
2858 
2859 	mutex_lock(&conn->chan_lock);
2860 
2861 	list_for_each_entry(chan, &conn->chan_l, list) {
2862 		if (chan->chan_type != L2CAP_CHAN_RAW)
2863 			continue;
2864 
2865 		/* Don't send frame to the channel it came from */
2866 		if (bt_cb(skb)->l2cap.chan == chan)
2867 			continue;
2868 
2869 		nskb = skb_clone(skb, GFP_KERNEL);
2870 		if (!nskb)
2871 			continue;
2872 		if (chan->ops->recv(chan, nskb))
2873 			kfree_skb(nskb);
2874 	}
2875 
2876 	mutex_unlock(&conn->chan_lock);
2877 }
2878 
2879 /* ---- L2CAP signalling commands ---- */
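/* Allocate and fill a signalling PDU (command header plus payload),
 * splitting the payload into MTU-sized fragments on frag_list when it
 * does not fit into a single skb.
 */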
2880 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2881 				       u8 ident, u16 dlen, void *data)
2882 {
2883 	struct sk_buff *skb, **frag;
2884 	struct l2cap_cmd_hdr *cmd;
2885 	struct l2cap_hdr *lh;
2886 	int len, count;
2887 
2888 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2889 	       conn, code, ident, dlen);
2890 
2891 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2892 		return NULL;
2893 
2894 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2895 	count = min_t(unsigned int, conn->mtu, len);
2896 
2897 	skb = bt_skb_alloc(count, GFP_KERNEL);
2898 	if (!skb)
2899 		return NULL;
2900 
2901 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2902 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2903 
2904 	if (conn->hcon->type == LE_LINK)
2905 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2906 	else
2907 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2908 
2909 	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2910 	cmd->code  = code;
2911 	cmd->ident = ident;
2912 	cmd->len   = cpu_to_le16(dlen);
2913 
2914 	if (dlen) {
2915 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2916 		memcpy(skb_put(skb, count), data, count);
2917 		data += count;
2918 	}
2919 
2920 	len -= skb->len;
2921 
2922 	/* Continuation fragments (no L2CAP header) */
2923 	frag = &skb_shinfo(skb)->frag_list;
2924 	while (len) {
2925 		count = min_t(unsigned int, conn->mtu, len);
2926 
2927 		*frag = bt_skb_alloc(count, GFP_KERNEL);
2928 		if (!*frag)
2929 			goto fail;
2930 
2931 		memcpy(skb_put(*frag, count), data, count);
2932 
2933 		len  -= count;
2934 		data += count;
2935 
2936 		frag = &(*frag)->next;
2937 	}
2938 
2939 	return skb;
2940 
2941 fail:
2942 	kfree_skb(skb);
2943 	return NULL;
2944 }
2945 
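/* Parse one configuration option from *ptr and advance the pointer past
 * it.  1-, 2- and 4-byte option values are returned by value; anything
 * else is returned as a pointer to the option payload.
 */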
2946 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2947 				     unsigned long *val)
2948 {
2949 	struct l2cap_conf_opt *opt = *ptr;
2950 	int len;
2951 
2952 	len = L2CAP_CONF_OPT_SIZE + opt->len;
2953 	*ptr += len;
2954 
2955 	*type = opt->type;
2956 	*olen = opt->len;
2957 
2958 	switch (opt->len) {
2959 	case 1:
2960 		*val = *((u8 *) opt->val);
2961 		break;
2962 
2963 	case 2:
2964 		*val = get_unaligned_le16(opt->val);
2965 		break;
2966 
2967 	case 4:
2968 		*val = get_unaligned_le32(opt->val);
2969 		break;
2970 
2971 	default:
2972 		*val = (unsigned long) opt->val;
2973 		break;
2974 	}
2975 
2976 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2977 	return len;
2978 }
2979 
2980 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2981 {
2982 	struct l2cap_conf_opt *opt = *ptr;
2983 
2984 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2985 
2986 	opt->type = type;
2987 	opt->len  = len;
2988 
2989 	switch (len) {
2990 	case 1:
2991 		*((u8 *) opt->val)  = val;
2992 		break;
2993 
2994 	case 2:
2995 		put_unaligned_le16(val, opt->val);
2996 		break;
2997 
2998 	case 4:
2999 		put_unaligned_le32(val, opt->val);
3000 		break;
3001 
3002 	default:
3003 		memcpy(opt->val, (void *) val, len);
3004 		break;
3005 	}
3006 
3007 	*ptr += L2CAP_CONF_OPT_SIZE + len;
3008 }
3009 
3010 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3011 {
3012 	struct l2cap_conf_efs efs;
3013 
3014 	switch (chan->mode) {
3015 	case L2CAP_MODE_ERTM:
3016 		efs.id		= chan->local_id;
3017 		efs.stype	= chan->local_stype;
3018 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3019 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3020 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3021 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3022 		break;
3023 
3024 	case L2CAP_MODE_STREAMING:
3025 		efs.id		= 1;
3026 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3027 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3028 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3029 		efs.acc_lat	= 0;
3030 		efs.flush_to	= 0;
3031 		break;
3032 
3033 	default:
3034 		return;
3035 	}
3036 
3037 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3038 			   (unsigned long) &efs);
3039 }
3040 
3041 static void l2cap_ack_timeout(struct work_struct *work)
3042 {
3043 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3044 					       ack_timer.work);
3045 	u16 frames_to_ack;
3046 
3047 	BT_DBG("chan %p", chan);
3048 
3049 	l2cap_chan_lock(chan);
3050 
3051 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3052 				     chan->last_acked_seq);
3053 
3054 	if (frames_to_ack)
3055 		l2cap_send_rr_or_rnr(chan, 0);
3056 
3057 	l2cap_chan_unlock(chan);
3058 	l2cap_chan_put(chan);
3059 }
3060 
3061 int l2cap_ertm_init(struct l2cap_chan *chan)
3062 {
3063 	int err;
3064 
3065 	chan->next_tx_seq = 0;
3066 	chan->expected_tx_seq = 0;
3067 	chan->expected_ack_seq = 0;
3068 	chan->unacked_frames = 0;
3069 	chan->buffer_seq = 0;
3070 	chan->frames_sent = 0;
3071 	chan->last_acked_seq = 0;
3072 	chan->sdu = NULL;
3073 	chan->sdu_last_frag = NULL;
3074 	chan->sdu_len = 0;
3075 
3076 	skb_queue_head_init(&chan->tx_q);
3077 
3078 	chan->local_amp_id = AMP_ID_BREDR;
3079 	chan->move_id = AMP_ID_BREDR;
3080 	chan->move_state = L2CAP_MOVE_STABLE;
3081 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3082 
3083 	if (chan->mode != L2CAP_MODE_ERTM)
3084 		return 0;
3085 
3086 	chan->rx_state = L2CAP_RX_STATE_RECV;
3087 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3088 
3089 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3090 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3091 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3092 
3093 	skb_queue_head_init(&chan->srej_q);
3094 
3095 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3096 	if (err < 0)
3097 		return err;
3098 
3099 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3100 	if (err < 0)
3101 		l2cap_seq_list_free(&chan->srej_list);
3102 
3103 	return err;
3104 }
3105 
3106 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3107 {
3108 	switch (mode) {
3109 	case L2CAP_MODE_STREAMING:
3110 	case L2CAP_MODE_ERTM:
3111 		if (l2cap_mode_supported(mode, remote_feat_mask))
3112 			return mode;
3113 		/* fall through */
3114 	default:
3115 		return L2CAP_MODE_BASIC;
3116 	}
3117 }
3118 
3119 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3120 {
3121 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3122 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3123 }
3124 
3125 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3126 {
3127 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3128 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3129 }
3130 
3131 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3132 				      struct l2cap_conf_rfc *rfc)
3133 {
3134 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3135 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3136 
3137 		/* Class 1 devices have must have ERTM timeouts
3138 		/* Class 1 devices must have ERTM timeouts
3139 		 * default Link Supervision Timeout for AMP
3140 		 * controllers is 10 seconds.
3141 		 *
3142 		 * Class 1 devices use 0xffffffff for their
3143 		 * best-effort flush timeout, so the clamping logic
3144 		 * will result in a timeout that meets the above
3145 		 * requirement.  ERTM timeouts are 16-bit values, so
3146 		 * the maximum timeout is 65.535 seconds.
3147 		 */
3148 
3149 		/* Convert timeout to milliseconds and round */
3150 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3151 
3152 		/* This is the recommended formula for class 2 devices
3153 		 * that start ERTM timers when packets are sent to the
3154 		 * controller.
3155 		 */
3156 		ertm_to = 3 * ertm_to + 500;
3157 
3158 		if (ertm_to > 0xffff)
3159 			ertm_to = 0xffff;
3160 
3161 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3162 		rfc->monitor_timeout = rfc->retrans_timeout;
3163 	} else {
3164 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3165 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3166 	}
3167 }
3168 
3169 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3170 {
3171 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3172 	    __l2cap_ews_supported(chan->conn)) {
3173 		/* use extended control field */
3174 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3175 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3176 	} else {
3177 		chan->tx_win = min_t(u16, chan->tx_win,
3178 				     L2CAP_DEFAULT_TX_WINDOW);
3179 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3180 	}
3181 	chan->ack_win = chan->tx_win;
3182 }
3183 
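/* Build the local configuration request: the MTU option (when it differs
 * from the default) plus, depending on the selected mode and the remote
 * feature mask, RFC, EFS, EWS and FCS options.
 */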
3184 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3185 {
3186 	struct l2cap_conf_req *req = data;
3187 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3188 	void *ptr = req->data;
3189 	u16 size;
3190 
3191 	BT_DBG("chan %p", chan);
3192 
3193 	if (chan->num_conf_req || chan->num_conf_rsp)
3194 		goto done;
3195 
3196 	switch (chan->mode) {
3197 	case L2CAP_MODE_STREAMING:
3198 	case L2CAP_MODE_ERTM:
3199 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3200 			break;
3201 
3202 		if (__l2cap_efs_supported(chan->conn))
3203 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3204 
3205 		/* fall through */
3206 	default:
3207 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3208 		break;
3209 	}
3210 
3211 done:
3212 	if (chan->imtu != L2CAP_DEFAULT_MTU)
3213 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3214 
3215 	switch (chan->mode) {
3216 	case L2CAP_MODE_BASIC:
3217 		if (disable_ertm)
3218 			break;
3219 
3220 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3221 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3222 			break;
3223 
3224 		rfc.mode            = L2CAP_MODE_BASIC;
3225 		rfc.txwin_size      = 0;
3226 		rfc.max_transmit    = 0;
3227 		rfc.retrans_timeout = 0;
3228 		rfc.monitor_timeout = 0;
3229 		rfc.max_pdu_size    = 0;
3230 
3231 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3232 				   (unsigned long) &rfc);
3233 		break;
3234 
3235 	case L2CAP_MODE_ERTM:
3236 		rfc.mode            = L2CAP_MODE_ERTM;
3237 		rfc.max_transmit    = chan->max_tx;
3238 
3239 		__l2cap_set_ertm_timeouts(chan, &rfc);
3240 
3241 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3242 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3243 			     L2CAP_FCS_SIZE);
3244 		rfc.max_pdu_size = cpu_to_le16(size);
3245 
3246 		l2cap_txwin_setup(chan);
3247 
3248 		rfc.txwin_size = min_t(u16, chan->tx_win,
3249 				       L2CAP_DEFAULT_TX_WINDOW);
3250 
3251 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3252 				   (unsigned long) &rfc);
3253 
3254 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3255 			l2cap_add_opt_efs(&ptr, chan);
3256 
3257 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3258 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3259 					   chan->tx_win);
3260 
3261 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3262 			if (chan->fcs == L2CAP_FCS_NONE ||
3263 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3264 				chan->fcs = L2CAP_FCS_NONE;
3265 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3266 						   chan->fcs);
3267 			}
3268 		break;
3269 
3270 	case L2CAP_MODE_STREAMING:
3271 		l2cap_txwin_setup(chan);
3272 		rfc.mode            = L2CAP_MODE_STREAMING;
3273 		rfc.txwin_size      = 0;
3274 		rfc.max_transmit    = 0;
3275 		rfc.retrans_timeout = 0;
3276 		rfc.monitor_timeout = 0;
3277 
3278 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3279 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3280 			     L2CAP_FCS_SIZE);
3281 		rfc.max_pdu_size = cpu_to_le16(size);
3282 
3283 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3284 				   (unsigned long) &rfc);
3285 
3286 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3287 			l2cap_add_opt_efs(&ptr, chan);
3288 
3289 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3290 			if (chan->fcs == L2CAP_FCS_NONE ||
3291 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3292 				chan->fcs = L2CAP_FCS_NONE;
3293 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3294 						   chan->fcs);
3295 			}
3296 		break;
3297 	}
3298 
3299 	req->dcid  = cpu_to_le16(chan->dcid);
3300 	req->flags = cpu_to_le16(0);
3301 
3302 	return ptr - data;
3303 }
3304 
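/* Parse the peer's configuration request and build our response,
 * accepting, adjusting or rejecting each option.  Unknown options that
 * are not hints are echoed back with an L2CAP_CONF_UNKNOWN result.
 */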
3305 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3306 {
3307 	struct l2cap_conf_rsp *rsp = data;
3308 	void *ptr = rsp->data;
3309 	void *req = chan->conf_req;
3310 	int len = chan->conf_len;
3311 	int type, hint, olen;
3312 	unsigned long val;
3313 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3314 	struct l2cap_conf_efs efs;
3315 	u8 remote_efs = 0;
3316 	u16 mtu = L2CAP_DEFAULT_MTU;
3317 	u16 result = L2CAP_CONF_SUCCESS;
3318 	u16 size;
3319 
3320 	BT_DBG("chan %p", chan);
3321 
3322 	while (len >= L2CAP_CONF_OPT_SIZE) {
3323 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3324 
3325 		hint  = type & L2CAP_CONF_HINT;
3326 		type &= L2CAP_CONF_MASK;
3327 
3328 		switch (type) {
3329 		case L2CAP_CONF_MTU:
3330 			mtu = val;
3331 			break;
3332 
3333 		case L2CAP_CONF_FLUSH_TO:
3334 			chan->flush_to = val;
3335 			break;
3336 
3337 		case L2CAP_CONF_QOS:
3338 			break;
3339 
3340 		case L2CAP_CONF_RFC:
3341 			if (olen == sizeof(rfc))
3342 				memcpy(&rfc, (void *) val, olen);
3343 			break;
3344 
3345 		case L2CAP_CONF_FCS:
3346 			if (val == L2CAP_FCS_NONE)
3347 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3348 			break;
3349 
3350 		case L2CAP_CONF_EFS:
3351 			remote_efs = 1;
3352 			if (olen == sizeof(efs))
3353 				memcpy(&efs, (void *) val, olen);
3354 			break;
3355 
3356 		case L2CAP_CONF_EWS:
3357 			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3358 				return -ECONNREFUSED;
3359 
3360 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3361 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3362 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3363 			chan->remote_tx_win = val;
3364 			break;
3365 
3366 		default:
3367 			if (hint)
3368 				break;
3369 
3370 			result = L2CAP_CONF_UNKNOWN;
3371 			*((u8 *) ptr++) = type;
3372 			break;
3373 		}
3374 	}
3375 
3376 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3377 		goto done;
3378 
3379 	switch (chan->mode) {
3380 	case L2CAP_MODE_STREAMING:
3381 	case L2CAP_MODE_ERTM:
3382 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3383 			chan->mode = l2cap_select_mode(rfc.mode,
3384 						       chan->conn->feat_mask);
3385 			break;
3386 		}
3387 
3388 		if (remote_efs) {
3389 			if (__l2cap_efs_supported(chan->conn))
3390 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3391 			else
3392 				return -ECONNREFUSED;
3393 		}
3394 
3395 		if (chan->mode != rfc.mode)
3396 			return -ECONNREFUSED;
3397 
3398 		break;
3399 	}
3400 
3401 done:
3402 	if (chan->mode != rfc.mode) {
3403 		result = L2CAP_CONF_UNACCEPT;
3404 		rfc.mode = chan->mode;
3405 
3406 		if (chan->num_conf_rsp == 1)
3407 			return -ECONNREFUSED;
3408 
3409 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3410 				   (unsigned long) &rfc);
3411 	}
3412 
3413 	if (result == L2CAP_CONF_SUCCESS) {
3414 		/* Configure output options and let the other side know
3415 		 * which ones we don't like. */
3416 
3417 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3418 			result = L2CAP_CONF_UNACCEPT;
3419 		else {
3420 			chan->omtu = mtu;
3421 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3422 		}
3423 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3424 
3425 		if (remote_efs) {
3426 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3427 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3428 			    efs.stype != chan->local_stype) {
3429 
3430 				result = L2CAP_CONF_UNACCEPT;
3431 
3432 				if (chan->num_conf_req >= 1)
3433 					return -ECONNREFUSED;
3434 
3435 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3436 						   sizeof(efs),
3437 						   (unsigned long) &efs);
3438 			} else {
3439 				/* Send PENDING Conf Rsp */
3440 				result = L2CAP_CONF_PENDING;
3441 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3442 			}
3443 		}
3444 
3445 		switch (rfc.mode) {
3446 		case L2CAP_MODE_BASIC:
3447 			chan->fcs = L2CAP_FCS_NONE;
3448 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3449 			break;
3450 
3451 		case L2CAP_MODE_ERTM:
3452 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3453 				chan->remote_tx_win = rfc.txwin_size;
3454 			else
3455 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3456 
3457 			chan->remote_max_tx = rfc.max_transmit;
3458 
3459 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3460 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3461 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3462 			rfc.max_pdu_size = cpu_to_le16(size);
3463 			chan->remote_mps = size;
3464 
3465 			__l2cap_set_ertm_timeouts(chan, &rfc);
3466 
3467 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3468 
3469 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3470 					   sizeof(rfc), (unsigned long) &rfc);
3471 
3472 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3473 				chan->remote_id = efs.id;
3474 				chan->remote_stype = efs.stype;
3475 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3476 				chan->remote_flush_to =
3477 					le32_to_cpu(efs.flush_to);
3478 				chan->remote_acc_lat =
3479 					le32_to_cpu(efs.acc_lat);
3480 				chan->remote_sdu_itime =
3481 					le32_to_cpu(efs.sdu_itime);
3482 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3483 						   sizeof(efs),
3484 						   (unsigned long) &efs);
3485 			}
3486 			break;
3487 
3488 		case L2CAP_MODE_STREAMING:
3489 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3490 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3491 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3492 			rfc.max_pdu_size = cpu_to_le16(size);
3493 			chan->remote_mps = size;
3494 
3495 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3496 
3497 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3498 					   (unsigned long) &rfc);
3499 
3500 			break;
3501 
3502 		default:
3503 			result = L2CAP_CONF_UNACCEPT;
3504 
3505 			memset(&rfc, 0, sizeof(rfc));
3506 			rfc.mode = chan->mode;
3507 		}
3508 
3509 		if (result == L2CAP_CONF_SUCCESS)
3510 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3511 	}
3512 	rsp->scid   = cpu_to_le16(chan->dcid);
3513 	rsp->result = cpu_to_le16(result);
3514 	rsp->flags  = cpu_to_le16(0);
3515 
3516 	return ptr - data;
3517 }
3518 
3519 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3520 				void *data, u16 *result)
3521 {
3522 	struct l2cap_conf_req *req = data;
3523 	void *ptr = req->data;
3524 	int type, olen;
3525 	unsigned long val;
3526 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3527 	struct l2cap_conf_efs efs;
3528 
3529 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3530 
3531 	while (len >= L2CAP_CONF_OPT_SIZE) {
3532 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3533 
3534 		switch (type) {
3535 		case L2CAP_CONF_MTU:
3536 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3537 				*result = L2CAP_CONF_UNACCEPT;
3538 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3539 			} else
3540 				chan->imtu = val;
3541 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3542 			break;
3543 
3544 		case L2CAP_CONF_FLUSH_TO:
3545 			chan->flush_to = val;
3546 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3547 					   2, chan->flush_to);
3548 			break;
3549 
3550 		case L2CAP_CONF_RFC:
3551 			if (olen == sizeof(rfc))
3552 				memcpy(&rfc, (void *)val, olen);
3553 
3554 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3555 			    rfc.mode != chan->mode)
3556 				return -ECONNREFUSED;
3557 
3558 			chan->fcs = 0;
3559 
3560 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3561 					   sizeof(rfc), (unsigned long) &rfc);
3562 			break;
3563 
3564 		case L2CAP_CONF_EWS:
3565 			chan->ack_win = min_t(u16, val, chan->ack_win);
3566 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3567 					   chan->tx_win);
3568 			break;
3569 
3570 		case L2CAP_CONF_EFS:
3571 			if (olen == sizeof(efs))
3572 				memcpy(&efs, (void *)val, olen);
3573 
3574 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3575 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3576 			    efs.stype != chan->local_stype)
3577 				return -ECONNREFUSED;
3578 
3579 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3580 					   (unsigned long) &efs);
3581 			break;
3582 
3583 		case L2CAP_CONF_FCS:
3584 			if (*result == L2CAP_CONF_PENDING)
3585 				if (val == L2CAP_FCS_NONE)
3586 					set_bit(CONF_RECV_NO_FCS,
3587 						&chan->conf_state);
3588 			break;
3589 		}
3590 	}
3591 
3592 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3593 		return -ECONNREFUSED;
3594 
3595 	chan->mode = rfc.mode;
3596 
3597 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3598 		switch (rfc.mode) {
3599 		case L2CAP_MODE_ERTM:
3600 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3601 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3602 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3603 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3604 				chan->ack_win = min_t(u16, chan->ack_win,
3605 						      rfc.txwin_size);
3606 
3607 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3608 				chan->local_msdu = le16_to_cpu(efs.msdu);
3609 				chan->local_sdu_itime =
3610 					le32_to_cpu(efs.sdu_itime);
3611 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3612 				chan->local_flush_to =
3613 					le32_to_cpu(efs.flush_to);
3614 			}
3615 			break;
3616 
3617 		case L2CAP_MODE_STREAMING:
3618 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3619 		}
3620 	}
3621 
3622 	req->dcid   = cpu_to_le16(chan->dcid);
3623 	req->flags  = cpu_to_le16(0);
3624 
3625 	return ptr - data;
3626 }
3627 
3628 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3629 				u16 result, u16 flags)
3630 {
3631 	struct l2cap_conf_rsp *rsp = data;
3632 	void *ptr = rsp->data;
3633 
3634 	BT_DBG("chan %p", chan);
3635 
3636 	rsp->scid   = cpu_to_le16(chan->dcid);
3637 	rsp->result = cpu_to_le16(result);
3638 	rsp->flags  = cpu_to_le16(flags);
3639 
3640 	return ptr - data;
3641 }
3642 
3643 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3644 {
3645 	struct l2cap_le_conn_rsp rsp;
3646 	struct l2cap_conn *conn = chan->conn;
3647 
3648 	BT_DBG("chan %p", chan);
3649 
3650 	rsp.dcid    = cpu_to_le16(chan->scid);
3651 	rsp.mtu     = cpu_to_le16(chan->imtu);
3652 	rsp.mps     = cpu_to_le16(chan->mps);
3653 	rsp.credits = cpu_to_le16(chan->rx_credits);
3654 	rsp.result  = cpu_to_le16(L2CAP_CR_SUCCESS);
3655 
3656 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3657 		       &rsp);
3658 }
3659 
3660 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3661 {
3662 	struct l2cap_conn_rsp rsp;
3663 	struct l2cap_conn *conn = chan->conn;
3664 	u8 buf[128];
3665 	u8 rsp_code;
3666 
3667 	rsp.scid   = cpu_to_le16(chan->dcid);
3668 	rsp.dcid   = cpu_to_le16(chan->scid);
3669 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3670 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3671 
3672 	if (chan->hs_hcon)
3673 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3674 	else
3675 		rsp_code = L2CAP_CONN_RSP;
3676 
3677 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3678 
3679 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3680 
3681 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3682 		return;
3683 
3684 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3685 		       l2cap_build_conf_req(chan, buf), buf);
3686 	chan->num_conf_req++;
3687 }
3688 
3689 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3690 {
3691 	int type, olen;
3692 	unsigned long val;
3693 	/* Use sane default values in case a misbehaving remote device
3694 	 * did not send an RFC or extended window size option.
3695 	 */
3696 	u16 txwin_ext = chan->ack_win;
3697 	struct l2cap_conf_rfc rfc = {
3698 		.mode = chan->mode,
3699 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3700 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3701 		.max_pdu_size = cpu_to_le16(chan->imtu),
3702 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3703 	};
3704 
3705 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3706 
3707 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3708 		return;
3709 
3710 	while (len >= L2CAP_CONF_OPT_SIZE) {
3711 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3712 
3713 		switch (type) {
3714 		case L2CAP_CONF_RFC:
3715 			if (olen == sizeof(rfc))
3716 				memcpy(&rfc, (void *)val, olen);
3717 			break;
3718 		case L2CAP_CONF_EWS:
3719 			txwin_ext = val;
3720 			break;
3721 		}
3722 	}
3723 
3724 	switch (rfc.mode) {
3725 	case L2CAP_MODE_ERTM:
3726 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3727 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3728 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
3729 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3730 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3731 		else
3732 			chan->ack_win = min_t(u16, chan->ack_win,
3733 					      rfc.txwin_size);
3734 		break;
3735 	case L2CAP_MODE_STREAMING:
3736 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3737 	}
3738 }
3739 
3740 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3741 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3742 				    u8 *data)
3743 {
3744 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3745 
3746 	if (cmd_len < sizeof(*rej))
3747 		return -EPROTO;
3748 
3749 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3750 		return 0;
3751 
3752 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3753 	    cmd->ident == conn->info_ident) {
3754 		cancel_delayed_work(&conn->info_timer);
3755 
3756 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3757 		conn->info_ident = 0;
3758 
3759 		l2cap_conn_start(conn);
3760 	}
3761 
3762 	return 0;
3763 }
3764 
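/* Handle an incoming connection request: look up a channel listening on
 * the PSM, check that the link is secure enough, create the new channel
 * and send the connection response (plus an information request if the
 * remote's feature mask is not yet known).
 */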
3765 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3766 					struct l2cap_cmd_hdr *cmd,
3767 					u8 *data, u8 rsp_code, u8 amp_id)
3768 {
3769 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3770 	struct l2cap_conn_rsp rsp;
3771 	struct l2cap_chan *chan = NULL, *pchan;
3772 	int result, status = L2CAP_CS_NO_INFO;
3773 
3774 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3775 	__le16 psm = req->psm;
3776 
3777 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3778 
3779 	/* Check if we have socket listening on psm */
3780 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3781 					 &conn->hcon->dst, ACL_LINK);
3782 	if (!pchan) {
3783 		result = L2CAP_CR_BAD_PSM;
3784 		goto sendresp;
3785 	}
3786 
3787 	mutex_lock(&conn->chan_lock);
3788 	l2cap_chan_lock(pchan);
3789 
3790 	/* Check if the ACL is secure enough (if not SDP) */
3791 	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3792 	    !hci_conn_check_link_mode(conn->hcon)) {
3793 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3794 		result = L2CAP_CR_SEC_BLOCK;
3795 		goto response;
3796 	}
3797 
3798 	result = L2CAP_CR_NO_MEM;
3799 
3800 	/* Check if we already have channel with that dcid */
3801 	if (__l2cap_get_chan_by_dcid(conn, scid))
3802 		goto response;
3803 
3804 	chan = pchan->ops->new_connection(pchan);
3805 	if (!chan)
3806 		goto response;
3807 
3808 	/* For certain devices (ex: HID mouse), support for authentication,
3809 	/* For certain devices (e.g. HID mouse), support for authentication,
3810 	 * pairing and bonding is optional. For such devices, in order to avoid
3811 	 * keeping the ACL alive for too long after L2CAP disconnection, reset the ACL
3812 	 */
3813 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3814 
3815 	bacpy(&chan->src, &conn->hcon->src);
3816 	bacpy(&chan->dst, &conn->hcon->dst);
3817 	chan->src_type = bdaddr_src_type(conn->hcon);
3818 	chan->dst_type = bdaddr_dst_type(conn->hcon);
3819 	chan->psm  = psm;
3820 	chan->dcid = scid;
3821 	chan->local_amp_id = amp_id;
3822 
3823 	__l2cap_chan_add(conn, chan);
3824 
3825 	dcid = chan->scid;
3826 
3827 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3828 
3829 	chan->ident = cmd->ident;
3830 
3831 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3832 		if (l2cap_chan_check_security(chan, false)) {
3833 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3834 				l2cap_state_change(chan, BT_CONNECT2);
3835 				result = L2CAP_CR_PEND;
3836 				status = L2CAP_CS_AUTHOR_PEND;
3837 				chan->ops->defer(chan);
3838 			} else {
3839 				/* Force pending result for AMP controllers.
3840 				 * The connection will succeed after the
3841 				 * physical link is up.
3842 				 */
3843 				if (amp_id == AMP_ID_BREDR) {
3844 					l2cap_state_change(chan, BT_CONFIG);
3845 					result = L2CAP_CR_SUCCESS;
3846 				} else {
3847 					l2cap_state_change(chan, BT_CONNECT2);
3848 					result = L2CAP_CR_PEND;
3849 				}
3850 				status = L2CAP_CS_NO_INFO;
3851 			}
3852 		} else {
3853 			l2cap_state_change(chan, BT_CONNECT2);
3854 			result = L2CAP_CR_PEND;
3855 			status = L2CAP_CS_AUTHEN_PEND;
3856 		}
3857 	} else {
3858 		l2cap_state_change(chan, BT_CONNECT2);
3859 		result = L2CAP_CR_PEND;
3860 		status = L2CAP_CS_NO_INFO;
3861 	}
3862 
3863 response:
3864 	l2cap_chan_unlock(pchan);
3865 	mutex_unlock(&conn->chan_lock);
3866 	l2cap_chan_put(pchan);
3867 
3868 sendresp:
3869 	rsp.scid   = cpu_to_le16(scid);
3870 	rsp.dcid   = cpu_to_le16(dcid);
3871 	rsp.result = cpu_to_le16(result);
3872 	rsp.status = cpu_to_le16(status);
3873 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3874 
3875 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3876 		struct l2cap_info_req info;
3877 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3878 
3879 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3880 		conn->info_ident = l2cap_get_ident(conn);
3881 
3882 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3883 
3884 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3885 			       sizeof(info), &info);
3886 	}
3887 
3888 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3889 	    result == L2CAP_CR_SUCCESS) {
3890 		u8 buf[128];
3891 		set_bit(CONF_REQ_SENT, &chan->conf_state);
3892 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3893 			       l2cap_build_conf_req(chan, buf), buf);
3894 		chan->num_conf_req++;
3895 	}
3896 
3897 	return chan;
3898 }
3899 
3900 static int l2cap_connect_req(struct l2cap_conn *conn,
3901 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3902 {
3903 	struct hci_dev *hdev = conn->hcon->hdev;
3904 	struct hci_conn *hcon = conn->hcon;
3905 
3906 	if (cmd_len < sizeof(struct l2cap_conn_req))
3907 		return -EPROTO;
3908 
3909 	hci_dev_lock(hdev);
3910 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3911 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3912 		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3913 	hci_dev_unlock(hdev);
3914 
3915 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3916 	return 0;
3917 }
3918 
3919 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3920 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3921 				    u8 *data)
3922 {
3923 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3924 	u16 scid, dcid, result, status;
3925 	struct l2cap_chan *chan;
3926 	u8 req[128];
3927 	int err;
3928 
3929 	if (cmd_len < sizeof(*rsp))
3930 		return -EPROTO;
3931 
3932 	scid   = __le16_to_cpu(rsp->scid);
3933 	dcid   = __le16_to_cpu(rsp->dcid);
3934 	result = __le16_to_cpu(rsp->result);
3935 	status = __le16_to_cpu(rsp->status);
3936 
3937 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3938 	       dcid, scid, result, status);
3939 
3940 	mutex_lock(&conn->chan_lock);
3941 
3942 	if (scid) {
3943 		chan = __l2cap_get_chan_by_scid(conn, scid);
3944 		if (!chan) {
3945 			err = -EBADSLT;
3946 			goto unlock;
3947 		}
3948 	} else {
3949 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3950 		if (!chan) {
3951 			err = -EBADSLT;
3952 			goto unlock;
3953 		}
3954 	}
3955 
3956 	err = 0;
3957 
3958 	l2cap_chan_lock(chan);
3959 
3960 	switch (result) {
3961 	case L2CAP_CR_SUCCESS:
3962 		l2cap_state_change(chan, BT_CONFIG);
3963 		chan->ident = 0;
3964 		chan->dcid = dcid;
3965 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3966 
3967 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3968 			break;
3969 
3970 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3971 			       l2cap_build_conf_req(chan, req), req);
3972 		chan->num_conf_req++;
3973 		break;
3974 
3975 	case L2CAP_CR_PEND:
3976 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3977 		break;
3978 
3979 	default:
3980 		l2cap_chan_del(chan, ECONNREFUSED);
3981 		break;
3982 	}
3983 
3984 	l2cap_chan_unlock(chan);
3985 
3986 unlock:
3987 	mutex_unlock(&conn->chan_lock);
3988 
3989 	return err;
3990 }
3991 
3992 static inline void set_default_fcs(struct l2cap_chan *chan)
3993 {
3994 	/* FCS is enabled only in ERTM or streaming mode, if one or both
3995 	 * sides request it.
3996 	 */
3997 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3998 		chan->fcs = L2CAP_FCS_NONE;
3999 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4000 		chan->fcs = L2CAP_FCS_CRC16;
4001 }
4002 
4003 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4004 				    u8 ident, u16 flags)
4005 {
4006 	struct l2cap_conn *conn = chan->conn;
4007 
4008 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4009 	       flags);
4010 
4011 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4012 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4013 
4014 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4015 		       l2cap_build_conf_rsp(chan, data,
4016 					    L2CAP_CONF_SUCCESS, flags), data);
4017 }
4018 
4019 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4020 				   u16 scid, u16 dcid)
4021 {
4022 	struct l2cap_cmd_rej_cid rej;
4023 
4024 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4025 	rej.scid = __cpu_to_le16(scid);
4026 	rej.dcid = __cpu_to_le16(dcid);
4027 
4028 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4029 }
4030 
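/* Handle a Configure Request.  Option data may arrive split across
 * several requests with the continuation flag set, so accumulate it in
 * chan->conf_req and only parse and respond once the final fragment has
 * been received.  When both directions are configured the channel is
 * made ready (ERTM/streaming state is initialised first if needed).
 */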
4031 static inline int l2cap_config_req(struct l2cap_conn *conn,
4032 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4033 				   u8 *data)
4034 {
4035 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4036 	u16 dcid, flags;
4037 	u8 rsp[64];
4038 	struct l2cap_chan *chan;
4039 	int len, err = 0;
4040 
4041 	if (cmd_len < sizeof(*req))
4042 		return -EPROTO;
4043 
4044 	dcid  = __le16_to_cpu(req->dcid);
4045 	flags = __le16_to_cpu(req->flags);
4046 
4047 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4048 
4049 	chan = l2cap_get_chan_by_scid(conn, dcid);
4050 	if (!chan) {
4051 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4052 		return 0;
4053 	}
4054 
4055 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4056 		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4057 				       chan->dcid);
4058 		goto unlock;
4059 	}
4060 
4061 	/* Reject if config buffer is too small. */
4062 	len = cmd_len - sizeof(*req);
4063 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
4064 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4065 			       l2cap_build_conf_rsp(chan, rsp,
4066 			       L2CAP_CONF_REJECT, flags), rsp);
4067 		goto unlock;
4068 	}
4069 
4070 	/* Store config. */
4071 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
4072 	chan->conf_len += len;
4073 
4074 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4075 		/* Incomplete config. Send empty response. */
4076 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4077 			       l2cap_build_conf_rsp(chan, rsp,
4078 			       L2CAP_CONF_SUCCESS, flags), rsp);
4079 		goto unlock;
4080 	}
4081 
4082 	/* Complete config. */
4083 	len = l2cap_parse_conf_req(chan, rsp);
4084 	if (len < 0) {
4085 		l2cap_send_disconn_req(chan, ECONNRESET);
4086 		goto unlock;
4087 	}
4088 
4089 	chan->ident = cmd->ident;
4090 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4091 	chan->num_conf_rsp++;
4092 
4093 	/* Reset config buffer. */
4094 	chan->conf_len = 0;
4095 
4096 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4097 		goto unlock;
4098 
4099 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4100 		set_default_fcs(chan);
4101 
4102 		if (chan->mode == L2CAP_MODE_ERTM ||
4103 		    chan->mode == L2CAP_MODE_STREAMING)
4104 			err = l2cap_ertm_init(chan);
4105 
4106 		if (err < 0)
4107 			l2cap_send_disconn_req(chan, -err);
4108 		else
4109 			l2cap_chan_ready(chan);
4110 
4111 		goto unlock;
4112 	}
4113 
4114 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4115 		u8 buf[64];
4116 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4117 			       l2cap_build_conf_req(chan, buf), buf);
4118 		chan->num_conf_req++;
4119 	}
4120 
4121 	/* Got a Conf Rsp with PENDING result from the remote side; assume
4122 	 * we sent a Conf Rsp PENDING in the code above */
4123 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4124 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4125 
4126 		/* check compatibility */
4127 
4128 		/* Send rsp for BR/EDR channel */
4129 		if (!chan->hs_hcon)
4130 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4131 		else
4132 			chan->ident = cmd->ident;
4133 	}
4134 
4135 unlock:
4136 	l2cap_chan_unlock(chan);
4137 	return err;
4138 }
4139 
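/* Handle a Configure Response.  On success record the remote's options,
 * on a pending or unacceptable result renegotiate, and on any other
 * result tear the channel down.  The channel becomes ready once both the
 * local and remote configuration paths have completed.
 */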
4140 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4141 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4142 				   u8 *data)
4143 {
4144 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4145 	u16 scid, flags, result;
4146 	struct l2cap_chan *chan;
4147 	int len = cmd_len - sizeof(*rsp);
4148 	int err = 0;
4149 
4150 	if (cmd_len < sizeof(*rsp))
4151 		return -EPROTO;
4152 
4153 	scid   = __le16_to_cpu(rsp->scid);
4154 	flags  = __le16_to_cpu(rsp->flags);
4155 	result = __le16_to_cpu(rsp->result);
4156 
4157 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4158 	       result, len);
4159 
4160 	chan = l2cap_get_chan_by_scid(conn, scid);
4161 	if (!chan)
4162 		return 0;
4163 
4164 	switch (result) {
4165 	case L2CAP_CONF_SUCCESS:
4166 		l2cap_conf_rfc_get(chan, rsp->data, len);
4167 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4168 		break;
4169 
4170 	case L2CAP_CONF_PENDING:
4171 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4172 
4173 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4174 			char buf[64];
4175 
4176 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4177 						   buf, &result);
4178 			if (len < 0) {
4179 				l2cap_send_disconn_req(chan, ECONNRESET);
4180 				goto done;
4181 			}
4182 
4183 			if (!chan->hs_hcon) {
4184 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4185 							0);
4186 			} else {
4187 				if (l2cap_check_efs(chan)) {
4188 					amp_create_logical_link(chan);
4189 					chan->ident = cmd->ident;
4190 				}
4191 			}
4192 		}
4193 		goto done;
4194 
4195 	case L2CAP_CONF_UNACCEPT:
4196 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4197 			char req[64];
4198 
4199 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4200 				l2cap_send_disconn_req(chan, ECONNRESET);
4201 				goto done;
4202 			}
4203 
4204 			/* throw out any old stored conf requests */
4205 			result = L2CAP_CONF_SUCCESS;
4206 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4207 						   req, &result);
4208 			if (len < 0) {
4209 				l2cap_send_disconn_req(chan, ECONNRESET);
4210 				goto done;
4211 			}
4212 
4213 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4214 				       L2CAP_CONF_REQ, len, req);
4215 			chan->num_conf_req++;
4216 			if (result != L2CAP_CONF_SUCCESS)
4217 				goto done;
4218 			break;
4219 		}
4220 
4221 	default:
4222 		l2cap_chan_set_err(chan, ECONNRESET);
4223 
4224 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4225 		l2cap_send_disconn_req(chan, ECONNRESET);
4226 		goto done;
4227 	}
4228 
4229 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4230 		goto done;
4231 
4232 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4233 
4234 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4235 		set_default_fcs(chan);
4236 
4237 		if (chan->mode == L2CAP_MODE_ERTM ||
4238 		    chan->mode == L2CAP_MODE_STREAMING)
4239 			err = l2cap_ertm_init(chan);
4240 
4241 		if (err < 0)
4242 			l2cap_send_disconn_req(chan, -err);
4243 		else
4244 			l2cap_chan_ready(chan);
4245 	}
4246 
4247 done:
4248 	l2cap_chan_unlock(chan);
4249 	return err;
4250 }
4251 
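/* Handle a Disconnection Request: acknowledge it with a Disconnection
 * Response and delete the channel with ECONNRESET.
 */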
4252 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4253 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4254 				       u8 *data)
4255 {
4256 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4257 	struct l2cap_disconn_rsp rsp;
4258 	u16 dcid, scid;
4259 	struct l2cap_chan *chan;
4260 
4261 	if (cmd_len != sizeof(*req))
4262 		return -EPROTO;
4263 
4264 	scid = __le16_to_cpu(req->scid);
4265 	dcid = __le16_to_cpu(req->dcid);
4266 
4267 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4268 
4269 	mutex_lock(&conn->chan_lock);
4270 
4271 	chan = __l2cap_get_chan_by_scid(conn, dcid);
4272 	if (!chan) {
4273 		mutex_unlock(&conn->chan_lock);
4274 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4275 		return 0;
4276 	}
4277 
4278 	l2cap_chan_lock(chan);
4279 
4280 	rsp.dcid = cpu_to_le16(chan->scid);
4281 	rsp.scid = cpu_to_le16(chan->dcid);
4282 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4283 
4284 	chan->ops->set_shutdown(chan);
4285 
4286 	l2cap_chan_hold(chan);
4287 	l2cap_chan_del(chan, ECONNRESET);
4288 
4289 	l2cap_chan_unlock(chan);
4290 
4291 	chan->ops->close(chan);
4292 	l2cap_chan_put(chan);
4293 
4294 	mutex_unlock(&conn->chan_lock);
4295 
4296 	return 0;
4297 }
4298 
4299 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4300 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4301 				       u8 *data)
4302 {
4303 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4304 	u16 dcid, scid;
4305 	struct l2cap_chan *chan;
4306 
4307 	if (cmd_len != sizeof(*rsp))
4308 		return -EPROTO;
4309 
4310 	scid = __le16_to_cpu(rsp->scid);
4311 	dcid = __le16_to_cpu(rsp->dcid);
4312 
4313 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4314 
4315 	mutex_lock(&conn->chan_lock);
4316 
4317 	chan = __l2cap_get_chan_by_scid(conn, scid);
4318 	if (!chan) {
4319 		mutex_unlock(&conn->chan_lock);
4320 		return 0;
4321 	}
4322 
4323 	l2cap_chan_lock(chan);
4324 
4325 	l2cap_chan_hold(chan);
4326 	l2cap_chan_del(chan, 0);
4327 
4328 	l2cap_chan_unlock(chan);
4329 
4330 	chan->ops->close(chan);
4331 	l2cap_chan_put(chan);
4332 
4333 	mutex_unlock(&conn->chan_lock);
4334 
4335 	return 0;
4336 }
4337 
4338 static inline int l2cap_information_req(struct l2cap_conn *conn,
4339 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4340 					u8 *data)
4341 {
4342 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4343 	u16 type;
4344 
4345 	if (cmd_len != sizeof(*req))
4346 		return -EPROTO;
4347 
4348 	type = __le16_to_cpu(req->type);
4349 
4350 	BT_DBG("type 0x%4.4x", type);
4351 
4352 	if (type == L2CAP_IT_FEAT_MASK) {
4353 		u8 buf[8];
4354 		u32 feat_mask = l2cap_feat_mask;
4355 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4356 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4357 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4358 		if (!disable_ertm)
4359 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4360 				| L2CAP_FEAT_FCS;
4361 		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4362 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4363 				| L2CAP_FEAT_EXT_WINDOW;
4364 
4365 		put_unaligned_le32(feat_mask, rsp->data);
4366 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4367 			       buf);
4368 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4369 		u8 buf[12];
4370 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4371 
4372 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4373 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4374 		rsp->data[0] = conn->local_fixed_chan;
4375 		memset(rsp->data + 1, 0, 7);
4376 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4377 			       buf);
4378 	} else {
4379 		struct l2cap_info_rsp rsp;
4380 		rsp.type   = cpu_to_le16(type);
4381 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4382 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4383 			       &rsp);
4384 	}
4385 
4386 	return 0;
4387 }
4388 
4389 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4390 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4391 					u8 *data)
4392 {
4393 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4394 	u16 type, result;
4395 
4396 	if (cmd_len < sizeof(*rsp))
4397 		return -EPROTO;
4398 
4399 	type   = __le16_to_cpu(rsp->type);
4400 	result = __le16_to_cpu(rsp->result);
4401 
4402 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4403 
4404 	/* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
4405 	if (cmd->ident != conn->info_ident ||
4406 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4407 		return 0;
4408 
4409 	cancel_delayed_work(&conn->info_timer);
4410 
4411 	if (result != L2CAP_IR_SUCCESS) {
4412 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4413 		conn->info_ident = 0;
4414 
4415 		l2cap_conn_start(conn);
4416 
4417 		return 0;
4418 	}
4419 
4420 	switch (type) {
4421 	case L2CAP_IT_FEAT_MASK:
4422 		conn->feat_mask = get_unaligned_le32(rsp->data);
4423 
4424 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4425 			struct l2cap_info_req req;
4426 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4427 
4428 			conn->info_ident = l2cap_get_ident(conn);
4429 
4430 			l2cap_send_cmd(conn, conn->info_ident,
4431 				       L2CAP_INFO_REQ, sizeof(req), &req);
4432 		} else {
4433 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4434 			conn->info_ident = 0;
4435 
4436 			l2cap_conn_start(conn);
4437 		}
4438 		break;
4439 
4440 	case L2CAP_IT_FIXED_CHAN:
4441 		conn->remote_fixed_chan = rsp->data[0];
4442 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4443 		conn->info_ident = 0;
4444 
4445 		l2cap_conn_start(conn);
4446 		break;
4447 	}
4448 
4449 	return 0;
4450 }
4451 
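/* Handle an A2MP Create Channel Request.  Controller id 0 falls back to
 * a plain BR/EDR connect; otherwise validate the AMP controller and, if
 * a channel is created, associate it with the high-speed link.
 */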
4452 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4453 				    struct l2cap_cmd_hdr *cmd,
4454 				    u16 cmd_len, void *data)
4455 {
4456 	struct l2cap_create_chan_req *req = data;
4457 	struct l2cap_create_chan_rsp rsp;
4458 	struct l2cap_chan *chan;
4459 	struct hci_dev *hdev;
4460 	u16 psm, scid;
4461 
4462 	if (cmd_len != sizeof(*req))
4463 		return -EPROTO;
4464 
4465 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4466 		return -EINVAL;
4467 
4468 	psm = le16_to_cpu(req->psm);
4469 	scid = le16_to_cpu(req->scid);
4470 
4471 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4472 
4473 	/* For controller id 0, make a BR/EDR connection */
4474 	if (req->amp_id == AMP_ID_BREDR) {
4475 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4476 			      req->amp_id);
4477 		return 0;
4478 	}
4479 
4480 	/* Validate AMP controller id */
4481 	hdev = hci_dev_get(req->amp_id);
4482 	if (!hdev)
4483 		goto error;
4484 
4485 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4486 		hci_dev_put(hdev);
4487 		goto error;
4488 	}
4489 
4490 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4491 			     req->amp_id);
4492 	if (chan) {
4493 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4494 		struct hci_conn *hs_hcon;
4495 
4496 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4497 						  &conn->hcon->dst);
4498 		if (!hs_hcon) {
4499 			hci_dev_put(hdev);
4500 			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4501 					       chan->dcid);
4502 			return 0;
4503 		}
4504 
4505 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4506 
4507 		mgr->bredr_chan = chan;
4508 		chan->hs_hcon = hs_hcon;
4509 		chan->fcs = L2CAP_FCS_NONE;
4510 		conn->mtu = hdev->block_mtu;
4511 	}
4512 
4513 	hci_dev_put(hdev);
4514 
4515 	return 0;
4516 
4517 error:
4518 	rsp.dcid = 0;
4519 	rsp.scid = cpu_to_le16(scid);
4520 	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4521 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4522 
4523 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4524 		       sizeof(rsp), &rsp);
4525 
4526 	return 0;
4527 }
4528 
4529 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4530 {
4531 	struct l2cap_move_chan_req req;
4532 	u8 ident;
4533 
4534 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4535 
4536 	ident = l2cap_get_ident(chan->conn);
4537 	chan->ident = ident;
4538 
4539 	req.icid = cpu_to_le16(chan->scid);
4540 	req.dest_amp_id = dest_amp_id;
4541 
4542 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4543 		       &req);
4544 
4545 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4546 }
4547 
4548 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4549 {
4550 	struct l2cap_move_chan_rsp rsp;
4551 
4552 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4553 
4554 	rsp.icid = cpu_to_le16(chan->dcid);
4555 	rsp.result = cpu_to_le16(result);
4556 
4557 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4558 		       sizeof(rsp), &rsp);
4559 }
4560 
4561 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4562 {
4563 	struct l2cap_move_chan_cfm cfm;
4564 
4565 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4566 
4567 	chan->ident = l2cap_get_ident(chan->conn);
4568 
4569 	cfm.icid = cpu_to_le16(chan->scid);
4570 	cfm.result = cpu_to_le16(result);
4571 
4572 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4573 		       sizeof(cfm), &cfm);
4574 
4575 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4576 }
4577 
4578 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4579 {
4580 	struct l2cap_move_chan_cfm cfm;
4581 
4582 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4583 
4584 	cfm.icid = cpu_to_le16(icid);
4585 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4586 
4587 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4588 		       sizeof(cfm), &cfm);
4589 }
4590 
4591 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4592 					 u16 icid)
4593 {
4594 	struct l2cap_move_chan_cfm_rsp rsp;
4595 
4596 	BT_DBG("icid 0x%4.4x", icid);
4597 
4598 	rsp.icid = cpu_to_le16(icid);
4599 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4600 }
4601 
4602 static void __release_logical_link(struct l2cap_chan *chan)
4603 {
4604 	chan->hs_hchan = NULL;
4605 	chan->hs_hcon = NULL;
4606 
4607 	/* Placeholder - release the logical link */
4608 }
4609 
4610 static void l2cap_logical_fail(struct l2cap_chan *chan)
4611 {
4612 	/* Logical link setup failed */
4613 	if (chan->state != BT_CONNECTED) {
4614 		/* Create channel failure, disconnect */
4615 		l2cap_send_disconn_req(chan, ECONNRESET);
4616 		return;
4617 	}
4618 
4619 	switch (chan->move_role) {
4620 	case L2CAP_MOVE_ROLE_RESPONDER:
4621 		l2cap_move_done(chan);
4622 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4623 		break;
4624 	case L2CAP_MOVE_ROLE_INITIATOR:
4625 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4626 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4627 			/* Remote has only sent pending or
4628 			 * success responses, clean up
4629 			 */
4630 			l2cap_move_done(chan);
4631 		}
4632 
4633 		/* Other amp move states imply that the move
4634 		 * has already aborted
4635 		 */
4636 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4637 		break;
4638 	}
4639 }
4640 
4641 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4642 					struct hci_chan *hchan)
4643 {
4644 	struct l2cap_conf_rsp rsp;
4645 
4646 	chan->hs_hchan = hchan;
4647 	chan->hs_hcon->l2cap_data = chan->conn;
4648 
4649 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4650 
4651 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4652 		int err;
4653 
4654 		set_default_fcs(chan);
4655 
4656 		err = l2cap_ertm_init(chan);
4657 		if (err < 0)
4658 			l2cap_send_disconn_req(chan, -err);
4659 		else
4660 			l2cap_chan_ready(chan);
4661 	}
4662 }
4663 
4664 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4665 				      struct hci_chan *hchan)
4666 {
4667 	chan->hs_hcon = hchan->conn;
4668 	chan->hs_hcon->l2cap_data = chan->conn;
4669 
4670 	BT_DBG("move_state %d", chan->move_state);
4671 
4672 	switch (chan->move_state) {
4673 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4674 		/* Move confirm will be sent after a success
4675 		 * response is received
4676 		 */
4677 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4678 		break;
4679 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4680 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4681 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4682 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4683 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4684 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4685 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4686 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4687 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4688 		}
4689 		break;
4690 	default:
4691 		/* Move was not in expected state, free the channel */
4692 		__release_logical_link(chan);
4693 
4694 		chan->move_state = L2CAP_MOVE_STABLE;
4695 	}
4696 }
4697 
4698 /* Call with chan locked */
4699 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4700 		       u8 status)
4701 {
4702 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4703 
4704 	if (status) {
4705 		l2cap_logical_fail(chan);
4706 		__release_logical_link(chan);
4707 		return;
4708 	}
4709 
4710 	if (chan->state != BT_CONNECTED) {
4711 		/* Ignore logical link if channel is on BR/EDR */
4712 		if (chan->local_amp_id != AMP_ID_BREDR)
4713 			l2cap_logical_finish_create(chan, hchan);
4714 	} else {
4715 		l2cap_logical_finish_move(chan, hchan);
4716 	}
4717 }
4718 
4719 void l2cap_move_start(struct l2cap_chan *chan)
4720 {
4721 	BT_DBG("chan %p", chan);
4722 
4723 	if (chan->local_amp_id == AMP_ID_BREDR) {
4724 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4725 			return;
4726 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4727 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4728 		/* Placeholder - start physical link setup */
4729 	} else {
4730 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4731 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4732 		chan->move_id = 0;
4733 		l2cap_move_setup(chan);
4734 		l2cap_send_move_chan_req(chan, 0);
4735 	}
4736 }
4737 
4738 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4739 			    u8 local_amp_id, u8 remote_amp_id)
4740 {
4741 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4742 	       local_amp_id, remote_amp_id);
4743 
4744 	chan->fcs = L2CAP_FCS_NONE;
4745 
4746 	/* Outgoing channel on AMP */
4747 	if (chan->state == BT_CONNECT) {
4748 		if (result == L2CAP_CR_SUCCESS) {
4749 			chan->local_amp_id = local_amp_id;
4750 			l2cap_send_create_chan_req(chan, remote_amp_id);
4751 		} else {
4752 			/* Revert to BR/EDR connect */
4753 			l2cap_send_conn_req(chan);
4754 		}
4755 
4756 		return;
4757 	}
4758 
4759 	/* Incoming channel on AMP */
4760 	if (__l2cap_no_conn_pending(chan)) {
4761 		struct l2cap_conn_rsp rsp;
4762 		char buf[128];
4763 		rsp.scid = cpu_to_le16(chan->dcid);
4764 		rsp.dcid = cpu_to_le16(chan->scid);
4765 
4766 		if (result == L2CAP_CR_SUCCESS) {
4767 			/* Send successful response */
4768 			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4769 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4770 		} else {
4771 			/* Send negative response */
4772 			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4773 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4774 		}
4775 
4776 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4777 			       sizeof(rsp), &rsp);
4778 
4779 		if (result == L2CAP_CR_SUCCESS) {
4780 			l2cap_state_change(chan, BT_CONFIG);
4781 			set_bit(CONF_REQ_SENT, &chan->conf_state);
4782 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4783 				       L2CAP_CONF_REQ,
4784 				       l2cap_build_conf_req(chan, buf), buf);
4785 			chan->num_conf_req++;
4786 		}
4787 	}
4788 }
4789 
4790 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4791 				   u8 remote_amp_id)
4792 {
4793 	l2cap_move_setup(chan);
4794 	chan->move_id = local_amp_id;
4795 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
4796 
4797 	l2cap_send_move_chan_req(chan, remote_amp_id);
4798 }
4799 
4800 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4801 {
4802 	struct hci_chan *hchan = NULL;
4803 
4804 	/* Placeholder - get hci_chan for logical link */
4805 
4806 	if (hchan) {
4807 		if (hchan->state == BT_CONNECTED) {
4808 			/* Logical link is ready to go */
4809 			chan->hs_hcon = hchan->conn;
4810 			chan->hs_hcon->l2cap_data = chan->conn;
4811 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4812 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4813 
4814 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4815 		} else {
4816 			/* Wait for logical link to be ready */
4817 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4818 		}
4819 	} else {
4820 		/* Logical link not available */
4821 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4822 	}
4823 }
4824 
4825 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4826 {
4827 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4828 		u8 rsp_result;
4829 		if (result == -EINVAL)
4830 			rsp_result = L2CAP_MR_BAD_ID;
4831 		else
4832 			rsp_result = L2CAP_MR_NOT_ALLOWED;
4833 
4834 		l2cap_send_move_chan_rsp(chan, rsp_result);
4835 	}
4836 
4837 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
4838 	chan->move_state = L2CAP_MOVE_STABLE;
4839 
4840 	/* Restart data transmission */
4841 	l2cap_ertm_send(chan);
4842 }
4843 
4844 /* Invoke with locked chan */
4845 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4846 {
4847 	u8 local_amp_id = chan->local_amp_id;
4848 	u8 remote_amp_id = chan->remote_amp_id;
4849 
4850 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4851 	       chan, result, local_amp_id, remote_amp_id);
4852 
4853 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4854 		l2cap_chan_unlock(chan);
4855 		return;
4856 	}
4857 
4858 	if (chan->state != BT_CONNECTED) {
4859 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4860 	} else if (result != L2CAP_MR_SUCCESS) {
4861 		l2cap_do_move_cancel(chan, result);
4862 	} else {
4863 		switch (chan->move_role) {
4864 		case L2CAP_MOVE_ROLE_INITIATOR:
4865 			l2cap_do_move_initiate(chan, local_amp_id,
4866 					       remote_amp_id);
4867 			break;
4868 		case L2CAP_MOVE_ROLE_RESPONDER:
4869 			l2cap_do_move_respond(chan, result);
4870 			break;
4871 		default:
4872 			l2cap_do_move_cancel(chan, result);
4873 			break;
4874 		}
4875 	}
4876 }
4877 
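/* Handle a Move Channel Request.  The move is only allowed for dynamic
 * ERTM/streaming channels whose policy permits AMP; move collisions and
 * invalid destination controllers are rejected, otherwise reply with a
 * success or pending result and start the move as responder.
 */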
4878 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4879 					 struct l2cap_cmd_hdr *cmd,
4880 					 u16 cmd_len, void *data)
4881 {
4882 	struct l2cap_move_chan_req *req = data;
4883 	struct l2cap_move_chan_rsp rsp;
4884 	struct l2cap_chan *chan;
4885 	u16 icid = 0;
4886 	u16 result = L2CAP_MR_NOT_ALLOWED;
4887 
4888 	if (cmd_len != sizeof(*req))
4889 		return -EPROTO;
4890 
4891 	icid = le16_to_cpu(req->icid);
4892 
4893 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4894 
4895 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4896 		return -EINVAL;
4897 
4898 	chan = l2cap_get_chan_by_dcid(conn, icid);
4899 	if (!chan) {
4900 		rsp.icid = cpu_to_le16(icid);
4901 		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4902 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4903 			       sizeof(rsp), &rsp);
4904 		return 0;
4905 	}
4906 
4907 	chan->ident = cmd->ident;
4908 
4909 	if (chan->scid < L2CAP_CID_DYN_START ||
4910 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4911 	    (chan->mode != L2CAP_MODE_ERTM &&
4912 	     chan->mode != L2CAP_MODE_STREAMING)) {
4913 		result = L2CAP_MR_NOT_ALLOWED;
4914 		goto send_move_response;
4915 	}
4916 
4917 	if (chan->local_amp_id == req->dest_amp_id) {
4918 		result = L2CAP_MR_SAME_ID;
4919 		goto send_move_response;
4920 	}
4921 
4922 	if (req->dest_amp_id != AMP_ID_BREDR) {
4923 		struct hci_dev *hdev;
4924 		hdev = hci_dev_get(req->dest_amp_id);
4925 		if (!hdev || hdev->dev_type != HCI_AMP ||
4926 		    !test_bit(HCI_UP, &hdev->flags)) {
4927 			if (hdev)
4928 				hci_dev_put(hdev);
4929 
4930 			result = L2CAP_MR_BAD_ID;
4931 			goto send_move_response;
4932 		}
4933 		hci_dev_put(hdev);
4934 	}
4935 
4936 	/* Detect a move collision.  Only send a collision response
4937 	 * if this side has "lost", otherwise proceed with the move.
4938 	 * The winner has the larger bd_addr.
4939 	 */
4940 	if ((__chan_is_moving(chan) ||
4941 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4942 	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4943 		result = L2CAP_MR_COLLISION;
4944 		goto send_move_response;
4945 	}
4946 
4947 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4948 	l2cap_move_setup(chan);
4949 	chan->move_id = req->dest_amp_id;
4950 	icid = chan->dcid;
4951 
4952 	if (req->dest_amp_id == AMP_ID_BREDR) {
4953 		/* Moving to BR/EDR */
4954 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4955 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4956 			result = L2CAP_MR_PEND;
4957 		} else {
4958 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4959 			result = L2CAP_MR_SUCCESS;
4960 		}
4961 	} else {
4962 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4963 		/* Placeholder - uncomment when amp functions are available */
4964 		/*amp_accept_physical(chan, req->dest_amp_id);*/
4965 		result = L2CAP_MR_PEND;
4966 	}
4967 
4968 send_move_response:
4969 	l2cap_send_move_chan_rsp(chan, result);
4970 
4971 	l2cap_chan_unlock(chan);
4972 
4973 	return 0;
4974 }
4975 
4976 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4977 {
4978 	struct l2cap_chan *chan;
4979 	struct hci_chan *hchan = NULL;
4980 
4981 	chan = l2cap_get_chan_by_scid(conn, icid);
4982 	if (!chan) {
4983 		l2cap_send_move_chan_cfm_icid(conn, icid);
4984 		return;
4985 	}
4986 
4987 	__clear_chan_timer(chan);
4988 	if (result == L2CAP_MR_PEND)
4989 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4990 
4991 	switch (chan->move_state) {
4992 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4993 		/* Move confirm will be sent when logical link
4994 		 * is complete.
4995 		 */
4996 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4997 		break;
4998 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4999 		if (result == L2CAP_MR_PEND) {
5000 			break;
5001 		} else if (test_bit(CONN_LOCAL_BUSY,
5002 				    &chan->conn_state)) {
5003 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5004 		} else {
5005 			/* Logical link is up or moving to BR/EDR,
5006 			 * proceed with move
5007 			 */
5008 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5009 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5010 		}
5011 		break;
5012 	case L2CAP_MOVE_WAIT_RSP:
5013 		/* Moving to AMP */
5014 		if (result == L2CAP_MR_SUCCESS) {
5015 			/* Remote is ready, send confirm immediately
5016 			 * after logical link is ready
5017 			 */
5018 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5019 		} else {
5020 			/* Both logical link and move success
5021 			 * are required to confirm
5022 			 */
5023 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5024 		}
5025 
5026 		/* Placeholder - get hci_chan for logical link */
5027 		if (!hchan) {
5028 			/* Logical link not available */
5029 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5030 			break;
5031 		}
5032 
5033 		/* If the logical link is not yet connected, do not
5034 		 * send confirmation.
5035 		 */
5036 		if (hchan->state != BT_CONNECTED)
5037 			break;
5038 
5039 		/* Logical link is already ready to go */
5040 
5041 		chan->hs_hcon = hchan->conn;
5042 		chan->hs_hcon->l2cap_data = chan->conn;
5043 
5044 		if (result == L2CAP_MR_SUCCESS) {
5045 			/* Can confirm now */
5046 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5047 		} else {
5048 			/* Now only need move success
5049 			 * to confirm
5050 			 */
5051 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5052 		}
5053 
5054 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5055 		break;
5056 	default:
5057 		/* Any other amp move state means the move failed. */
5058 		chan->move_id = chan->local_amp_id;
5059 		l2cap_move_done(chan);
5060 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5061 	}
5062 
5063 	l2cap_chan_unlock(chan);
5064 }
5065 
5066 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5067 			    u16 result)
5068 {
5069 	struct l2cap_chan *chan;
5070 
5071 	chan = l2cap_get_chan_by_ident(conn, ident);
5072 	if (!chan) {
5073 		/* Could not locate channel, icid is best guess */
5074 		l2cap_send_move_chan_cfm_icid(conn, icid);
5075 		return;
5076 	}
5077 
5078 	__clear_chan_timer(chan);
5079 
5080 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5081 		if (result == L2CAP_MR_COLLISION) {
5082 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5083 		} else {
5084 			/* Cleanup - cancel move */
5085 			chan->move_id = chan->local_amp_id;
5086 			l2cap_move_done(chan);
5087 		}
5088 	}
5089 
5090 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5091 
5092 	l2cap_chan_unlock(chan);
5093 }
5094 
5095 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5096 				  struct l2cap_cmd_hdr *cmd,
5097 				  u16 cmd_len, void *data)
5098 {
5099 	struct l2cap_move_chan_rsp *rsp = data;
5100 	u16 icid, result;
5101 
5102 	if (cmd_len != sizeof(*rsp))
5103 		return -EPROTO;
5104 
5105 	icid = le16_to_cpu(rsp->icid);
5106 	result = le16_to_cpu(rsp->result);
5107 
5108 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5109 
5110 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5111 		l2cap_move_continue(conn, icid, result);
5112 	else
5113 		l2cap_move_fail(conn, cmd->ident, icid, result);
5114 
5115 	return 0;
5116 }
5117 
5118 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5119 				      struct l2cap_cmd_hdr *cmd,
5120 				      u16 cmd_len, void *data)
5121 {
5122 	struct l2cap_move_chan_cfm *cfm = data;
5123 	struct l2cap_chan *chan;
5124 	u16 icid, result;
5125 
5126 	if (cmd_len != sizeof(*cfm))
5127 		return -EPROTO;
5128 
5129 	icid = le16_to_cpu(cfm->icid);
5130 	result = le16_to_cpu(cfm->result);
5131 
5132 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5133 
5134 	chan = l2cap_get_chan_by_dcid(conn, icid);
5135 	if (!chan) {
5136 		/* Spec requires a response even if the icid was not found */
5137 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5138 		return 0;
5139 	}
5140 
5141 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5142 		if (result == L2CAP_MC_CONFIRMED) {
5143 			chan->local_amp_id = chan->move_id;
5144 			if (chan->local_amp_id == AMP_ID_BREDR)
5145 				__release_logical_link(chan);
5146 		} else {
5147 			chan->move_id = chan->local_amp_id;
5148 		}
5149 
5150 		l2cap_move_done(chan);
5151 	}
5152 
5153 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5154 
5155 	l2cap_chan_unlock(chan);
5156 
5157 	return 0;
5158 }
5159 
5160 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5161 						 struct l2cap_cmd_hdr *cmd,
5162 						 u16 cmd_len, void *data)
5163 {
5164 	struct l2cap_move_chan_cfm_rsp *rsp = data;
5165 	struct l2cap_chan *chan;
5166 	u16 icid;
5167 
5168 	if (cmd_len != sizeof(*rsp))
5169 		return -EPROTO;
5170 
5171 	icid = le16_to_cpu(rsp->icid);
5172 
5173 	BT_DBG("icid 0x%4.4x", icid);
5174 
5175 	chan = l2cap_get_chan_by_scid(conn, icid);
5176 	if (!chan)
5177 		return 0;
5178 
5179 	__clear_chan_timer(chan);
5180 
5181 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5182 		chan->local_amp_id = chan->move_id;
5183 
5184 		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5185 			__release_logical_link(chan);
5186 
5187 		l2cap_move_done(chan);
5188 	}
5189 
5190 	l2cap_chan_unlock(chan);
5191 
5192 	return 0;
5193 }
5194 
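/* Handle an LE Connection Parameter Update Request (master role only).
 * Validate the requested parameters, send the accept/reject response
 * and, if accepted, update the LE connection and notify the management
 * layer.
 */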
5195 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5196 					      struct l2cap_cmd_hdr *cmd,
5197 					      u16 cmd_len, u8 *data)
5198 {
5199 	struct hci_conn *hcon = conn->hcon;
5200 	struct l2cap_conn_param_update_req *req;
5201 	struct l2cap_conn_param_update_rsp rsp;
5202 	u16 min, max, latency, to_multiplier;
5203 	int err;
5204 
5205 	if (hcon->role != HCI_ROLE_MASTER)
5206 		return -EINVAL;
5207 
5208 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5209 		return -EPROTO;
5210 
5211 	req = (struct l2cap_conn_param_update_req *) data;
5212 	min		= __le16_to_cpu(req->min);
5213 	max		= __le16_to_cpu(req->max);
5214 	latency		= __le16_to_cpu(req->latency);
5215 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5216 
5217 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5218 	       min, max, latency, to_multiplier);
5219 
5220 	memset(&rsp, 0, sizeof(rsp));
5221 
5222 	err = hci_check_conn_params(min, max, latency, to_multiplier);
5223 	if (err)
5224 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5225 	else
5226 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5227 
5228 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5229 		       sizeof(rsp), &rsp);
5230 
5231 	if (!err) {
5232 		u8 store_hint;
5233 
5234 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5235 						to_multiplier);
5236 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5237 				    store_hint, min, max, latency,
5238 				    to_multiplier);
5239 
5240 	}
5241 
5242 	return 0;
5243 }
5244 
5245 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5246 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5247 				u8 *data)
5248 {
5249 	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5250 	struct hci_conn *hcon = conn->hcon;
5251 	u16 dcid, mtu, mps, credits, result;
5252 	struct l2cap_chan *chan;
5253 	int err, sec_level;
5254 
5255 	if (cmd_len < sizeof(*rsp))
5256 		return -EPROTO;
5257 
5258 	dcid    = __le16_to_cpu(rsp->dcid);
5259 	mtu     = __le16_to_cpu(rsp->mtu);
5260 	mps     = __le16_to_cpu(rsp->mps);
5261 	credits = __le16_to_cpu(rsp->credits);
5262 	result  = __le16_to_cpu(rsp->result);
5263 
5264 	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23 ||
5265 					   dcid < L2CAP_CID_DYN_START ||
5266 					   dcid > L2CAP_CID_LE_DYN_END))
5267 		return -EPROTO;
5268 
5269 	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5270 	       dcid, mtu, mps, credits, result);
5271 
5272 	mutex_lock(&conn->chan_lock);
5273 
5274 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5275 	if (!chan) {
5276 		err = -EBADSLT;
5277 		goto unlock;
5278 	}
5279 
5280 	err = 0;
5281 
5282 	l2cap_chan_lock(chan);
5283 
5284 	switch (result) {
5285 	case L2CAP_CR_SUCCESS:
5286 		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5287 			err = -EBADSLT;
5288 			break;
5289 		}
5290 
5291 		chan->ident = 0;
5292 		chan->dcid = dcid;
5293 		chan->omtu = mtu;
5294 		chan->remote_mps = mps;
5295 		chan->tx_credits = credits;
5296 		l2cap_chan_ready(chan);
5297 		break;
5298 
5299 	case L2CAP_CR_AUTHENTICATION:
5300 	case L2CAP_CR_ENCRYPTION:
5301 		/* If we already have MITM protection we can't do
5302 		 * anything.
5303 		 */
5304 		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5305 			l2cap_chan_del(chan, ECONNREFUSED);
5306 			break;
5307 		}
5308 
5309 		sec_level = hcon->sec_level + 1;
5310 		if (chan->sec_level < sec_level)
5311 			chan->sec_level = sec_level;
5312 
5313 		/* We'll need to send a new Connect Request */
5314 		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5315 
5316 		smp_conn_security(hcon, chan->sec_level);
5317 		break;
5318 
5319 	default:
5320 		l2cap_chan_del(chan, ECONNREFUSED);
5321 		break;
5322 	}
5323 
5324 	l2cap_chan_unlock(chan);
5325 
5326 unlock:
5327 	mutex_unlock(&conn->chan_lock);
5328 
5329 	return err;
5330 }
5331 
5332 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5333 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5334 				      u8 *data)
5335 {
5336 	int err = 0;
5337 
5338 	switch (cmd->code) {
5339 	case L2CAP_COMMAND_REJ:
5340 		l2cap_command_rej(conn, cmd, cmd_len, data);
5341 		break;
5342 
5343 	case L2CAP_CONN_REQ:
5344 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5345 		break;
5346 
5347 	case L2CAP_CONN_RSP:
5348 	case L2CAP_CREATE_CHAN_RSP:
5349 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5350 		break;
5351 
5352 	case L2CAP_CONF_REQ:
5353 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5354 		break;
5355 
5356 	case L2CAP_CONF_RSP:
5357 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5358 		break;
5359 
5360 	case L2CAP_DISCONN_REQ:
5361 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5362 		break;
5363 
5364 	case L2CAP_DISCONN_RSP:
5365 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5366 		break;
5367 
5368 	case L2CAP_ECHO_REQ:
5369 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5370 		break;
5371 
5372 	case L2CAP_ECHO_RSP:
5373 		break;
5374 
5375 	case L2CAP_INFO_REQ:
5376 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5377 		break;
5378 
5379 	case L2CAP_INFO_RSP:
5380 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5381 		break;
5382 
5383 	case L2CAP_CREATE_CHAN_REQ:
5384 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5385 		break;
5386 
5387 	case L2CAP_MOVE_CHAN_REQ:
5388 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5389 		break;
5390 
5391 	case L2CAP_MOVE_CHAN_RSP:
5392 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5393 		break;
5394 
5395 	case L2CAP_MOVE_CHAN_CFM:
5396 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5397 		break;
5398 
5399 	case L2CAP_MOVE_CHAN_CFM_RSP:
5400 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5401 		break;
5402 
5403 	default:
5404 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5405 		err = -EINVAL;
5406 		break;
5407 	}
5408 
5409 	return err;
5410 }
5411 
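/* Handle an LE Credit Based Connection Request.  Validate the PSM, MTU,
 * MPS and source CID, create a new channel from the listening parent,
 * initialise LE flow control and send the LE Connection Response (or
 * defer it when FLAG_DEFER_SETUP is set).
 */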
5412 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5413 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5414 				u8 *data)
5415 {
5416 	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5417 	struct l2cap_le_conn_rsp rsp;
5418 	struct l2cap_chan *chan, *pchan;
5419 	u16 dcid, scid, credits, mtu, mps;
5420 	__le16 psm;
5421 	u8 result;
5422 
5423 	if (cmd_len != sizeof(*req))
5424 		return -EPROTO;
5425 
5426 	scid = __le16_to_cpu(req->scid);
5427 	mtu  = __le16_to_cpu(req->mtu);
5428 	mps  = __le16_to_cpu(req->mps);
5429 	psm  = req->psm;
5430 	dcid = 0;
5431 	credits = 0;
5432 
5433 	if (mtu < 23 || mps < 23)
5434 		return -EPROTO;
5435 
5436 	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5437 	       scid, mtu, mps);
5438 
5439 	/* Check if we have a socket listening on this PSM */
5440 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5441 					 &conn->hcon->dst, LE_LINK);
5442 	if (!pchan) {
5443 		result = L2CAP_CR_BAD_PSM;
5444 		chan = NULL;
5445 		goto response;
5446 	}
5447 
5448 	mutex_lock(&conn->chan_lock);
5449 	l2cap_chan_lock(pchan);
5450 
5451 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5452 				     SMP_ALLOW_STK)) {
5453 		result = L2CAP_CR_AUTHENTICATION;
5454 		chan = NULL;
5455 		goto response_unlock;
5456 	}
5457 
5458 	/* Check for valid dynamic CID range */
5459 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5460 		result = L2CAP_CR_INVALID_SCID;
5461 		chan = NULL;
5462 		goto response_unlock;
5463 	}
5464 
5465 	/* Check if we already have a channel with that dcid */
5466 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
5467 		result = L2CAP_CR_SCID_IN_USE;
5468 		chan = NULL;
5469 		goto response_unlock;
5470 	}
5471 
5472 	chan = pchan->ops->new_connection(pchan);
5473 	if (!chan) {
5474 		result = L2CAP_CR_NO_MEM;
5475 		goto response_unlock;
5476 	}
5477 
5478 	l2cap_le_flowctl_init(chan);
5479 
5480 	bacpy(&chan->src, &conn->hcon->src);
5481 	bacpy(&chan->dst, &conn->hcon->dst);
5482 	chan->src_type = bdaddr_src_type(conn->hcon);
5483 	chan->dst_type = bdaddr_dst_type(conn->hcon);
5484 	chan->psm  = psm;
5485 	chan->dcid = scid;
5486 	chan->omtu = mtu;
5487 	chan->remote_mps = mps;
5488 	chan->tx_credits = __le16_to_cpu(req->credits);
5489 
5490 	__l2cap_chan_add(conn, chan);
5491 	dcid = chan->scid;
5492 	credits = chan->rx_credits;
5493 
5494 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5495 
5496 	chan->ident = cmd->ident;
5497 
5498 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5499 		l2cap_state_change(chan, BT_CONNECT2);
5500 		/* The following result value is actually not defined
5501 		 * for LE CoC but we use it to let the function know
5502 		 * that it should bail out after doing its cleanup
5503 		 * instead of sending a response.
5504 		 */
5505 		result = L2CAP_CR_PEND;
5506 		chan->ops->defer(chan);
5507 	} else {
5508 		l2cap_chan_ready(chan);
5509 		result = L2CAP_CR_SUCCESS;
5510 	}
5511 
5512 response_unlock:
5513 	l2cap_chan_unlock(pchan);
5514 	mutex_unlock(&conn->chan_lock);
5515 	l2cap_chan_put(pchan);
5516 
5517 	if (result == L2CAP_CR_PEND)
5518 		return 0;
5519 
5520 response:
5521 	if (chan) {
5522 		rsp.mtu = cpu_to_le16(chan->imtu);
5523 		rsp.mps = cpu_to_le16(chan->mps);
5524 	} else {
5525 		rsp.mtu = 0;
5526 		rsp.mps = 0;
5527 	}
5528 
5529 	rsp.dcid    = cpu_to_le16(dcid);
5530 	rsp.credits = cpu_to_le16(credits);
5531 	rsp.result  = cpu_to_le16(result);
5532 
5533 	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5534 
5535 	return 0;
5536 }
5537 
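/* Handle an LE Flow Control Credit packet: add the new credits to the
 * channel (disconnecting on overflow) and resume transmission of any
 * queued SDU fragments.
 */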
5538 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5539 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5540 				   u8 *data)
5541 {
5542 	struct l2cap_le_credits *pkt;
5543 	struct l2cap_chan *chan;
5544 	u16 cid, credits, max_credits;
5545 
5546 	if (cmd_len != sizeof(*pkt))
5547 		return -EPROTO;
5548 
5549 	pkt = (struct l2cap_le_credits *) data;
5550 	cid	= __le16_to_cpu(pkt->cid);
5551 	credits	= __le16_to_cpu(pkt->credits);
5552 
5553 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5554 
5555 	chan = l2cap_get_chan_by_dcid(conn, cid);
5556 	if (!chan)
5557 		return -EBADSLT;
5558 
5559 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5560 	if (credits > max_credits) {
5561 		BT_ERR("LE credits overflow");
5562 		l2cap_send_disconn_req(chan, ECONNRESET);
5563 		l2cap_chan_unlock(chan);
5564 
5565 		/* Return 0 so that we don't trigger an unnecessary
5566 		 * command reject packet.
5567 		 */
5568 		return 0;
5569 	}
5570 
5571 	chan->tx_credits += credits;
5572 
5573 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5574 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5575 		chan->tx_credits--;
5576 	}
5577 
5578 	if (chan->tx_credits)
5579 		chan->ops->resume(chan);
5580 
5581 	l2cap_chan_unlock(chan);
5582 
5583 	return 0;
5584 }
5585 
5586 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5587 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5588 				       u8 *data)
5589 {
5590 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5591 	struct l2cap_chan *chan;
5592 
5593 	if (cmd_len < sizeof(*rej))
5594 		return -EPROTO;
5595 
5596 	mutex_lock(&conn->chan_lock);
5597 
5598 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5599 	if (!chan)
5600 		goto done;
5601 
5602 	l2cap_chan_lock(chan);
5603 	l2cap_chan_del(chan, ECONNREFUSED);
5604 	l2cap_chan_unlock(chan);
5605 
5606 done:
5607 	mutex_unlock(&conn->chan_lock);
5608 	return 0;
5609 }
5610 
5611 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5612 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5613 				   u8 *data)
5614 {
5615 	int err = 0;
5616 
5617 	switch (cmd->code) {
5618 	case L2CAP_COMMAND_REJ:
5619 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
5620 		break;
5621 
5622 	case L2CAP_CONN_PARAM_UPDATE_REQ:
5623 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5624 		break;
5625 
5626 	case L2CAP_CONN_PARAM_UPDATE_RSP:
5627 		break;
5628 
5629 	case L2CAP_LE_CONN_RSP:
5630 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5631 		break;
5632 
5633 	case L2CAP_LE_CONN_REQ:
5634 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5635 		break;
5636 
5637 	case L2CAP_LE_CREDITS:
5638 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
5639 		break;
5640 
5641 	case L2CAP_DISCONN_REQ:
5642 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5643 		break;
5644 
5645 	case L2CAP_DISCONN_RSP:
5646 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5647 		break;
5648 
5649 	default:
5650 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5651 		err = -EINVAL;
5652 		break;
5653 	}
5654 
5655 	return err;
5656 }
5657 
5658 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5659 					struct sk_buff *skb)
5660 {
5661 	struct hci_conn *hcon = conn->hcon;
5662 	struct l2cap_cmd_hdr *cmd;
5663 	u16 len;
5664 	int err;
5665 
5666 	if (hcon->type != LE_LINK)
5667 		goto drop;
5668 
5669 	if (skb->len < L2CAP_CMD_HDR_SIZE)
5670 		goto drop;
5671 
5672 	cmd = (void *) skb->data;
5673 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5674 
5675 	len = le16_to_cpu(cmd->len);
5676 
5677 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5678 
5679 	if (len != skb->len || !cmd->ident) {
5680 		BT_DBG("corrupted command");
5681 		goto drop;
5682 	}
5683 
5684 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5685 	if (err) {
5686 		struct l2cap_cmd_rej_unk rej;
5687 
5688 		BT_ERR("Wrong link type (%d)", err);
5689 
5690 		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5691 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5692 			       sizeof(rej), &rej);
5693 	}
5694 
5695 drop:
5696 	kfree_skb(skb);
5697 }
5698 
5699 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5700 				     struct sk_buff *skb)
5701 {
5702 	struct hci_conn *hcon = conn->hcon;
5703 	u8 *data = skb->data;
5704 	int len = skb->len;
5705 	struct l2cap_cmd_hdr cmd;
5706 	int err;
5707 
5708 	l2cap_raw_recv(conn, skb);
5709 
5710 	if (hcon->type != ACL_LINK)
5711 		goto drop;
5712 
5713 	while (len >= L2CAP_CMD_HDR_SIZE) {
5714 		u16 cmd_len;
5715 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5716 		data += L2CAP_CMD_HDR_SIZE;
5717 		len  -= L2CAP_CMD_HDR_SIZE;
5718 
5719 		cmd_len = le16_to_cpu(cmd.len);
5720 
5721 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5722 		       cmd.ident);
5723 
5724 		if (cmd_len > len || !cmd.ident) {
5725 			BT_DBG("corrupted command");
5726 			break;
5727 		}
5728 
5729 		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5730 		if (err) {
5731 			struct l2cap_cmd_rej_unk rej;
5732 
5733 			BT_ERR("Wrong link type (%d)", err);
5734 
5735 			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5736 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5737 				       sizeof(rej), &rej);
5738 		}
5739 
5740 		data += cmd_len;
5741 		len  -= cmd_len;
5742 	}
5743 
5744 drop:
5745 	kfree_skb(skb);
5746 }
5747 
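/* When CRC-16 FCS is in use, strip the FCS from a received frame and
 * verify it against the locally computed value; return -EBADMSG on a
 * mismatch.
 */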
5748 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
5749 {
5750 	u16 our_fcs, rcv_fcs;
5751 	int hdr_size;
5752 
5753 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5754 		hdr_size = L2CAP_EXT_HDR_SIZE;
5755 	else
5756 		hdr_size = L2CAP_ENH_HDR_SIZE;
5757 
5758 	if (chan->fcs == L2CAP_FCS_CRC16) {
5759 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5760 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5761 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5762 
5763 		if (our_fcs != rcv_fcs)
5764 			return -EBADMSG;
5765 	}
5766 	return 0;
5767 }
5768 
5769 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5770 {
5771 	struct l2cap_ctrl control;
5772 
5773 	BT_DBG("chan %p", chan);
5774 
5775 	memset(&control, 0, sizeof(control));
5776 	control.sframe = 1;
5777 	control.final = 1;
5778 	control.reqseq = chan->buffer_seq;
5779 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
5780 
5781 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5782 		control.super = L2CAP_SUPER_RNR;
5783 		l2cap_send_sframe(chan, &control);
5784 	}
5785 
5786 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5787 	    chan->unacked_frames > 0)
5788 		__set_retrans_timer(chan);
5789 
5790 	/* Send pending iframes */
5791 	l2cap_ertm_send(chan);
5792 
5793 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5794 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5795 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
5796 		 * send it now.
5797 		 */
5798 		control.super = L2CAP_SUPER_RR;
5799 		l2cap_send_sframe(chan, &control);
5800 	}
5801 }
5802 
5803 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5804 			    struct sk_buff **last_frag)
5805 {
5806 	/* skb->len reflects data in skb as well as all fragments
5807 	 * skb->data_len reflects only data in fragments
5808 	 */
5809 	if (!skb_has_frag_list(skb))
5810 		skb_shinfo(skb)->frag_list = new_frag;
5811 
5812 	new_frag->next = NULL;
5813 
5814 	(*last_frag)->next = new_frag;
5815 	*last_frag = new_frag;
5816 
5817 	skb->len += new_frag->len;
5818 	skb->data_len += new_frag->len;
5819 	skb->truesize += new_frag->truesize;
5820 }
5821 
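/* Reassemble a segmented SDU according to the SAR bits of the control
 * field.  Complete SDUs are passed to chan->ops->recv(); on any error
 * the partially assembled SDU is discarded.
 */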
5822 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5823 				struct l2cap_ctrl *control)
5824 {
5825 	int err = -EINVAL;
5826 
5827 	switch (control->sar) {
5828 	case L2CAP_SAR_UNSEGMENTED:
5829 		if (chan->sdu)
5830 			break;
5831 
5832 		err = chan->ops->recv(chan, skb);
5833 		break;
5834 
5835 	case L2CAP_SAR_START:
5836 		if (chan->sdu)
5837 			break;
5838 
5839 		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
5840 			break;
5841 
5842 		chan->sdu_len = get_unaligned_le16(skb->data);
5843 		skb_pull(skb, L2CAP_SDULEN_SIZE);
5844 
5845 		if (chan->sdu_len > chan->imtu) {
5846 			err = -EMSGSIZE;
5847 			break;
5848 		}
5849 
5850 		if (skb->len >= chan->sdu_len)
5851 			break;
5852 
5853 		chan->sdu = skb;
5854 		chan->sdu_last_frag = skb;
5855 
5856 		skb = NULL;
5857 		err = 0;
5858 		break;
5859 
5860 	case L2CAP_SAR_CONTINUE:
5861 		if (!chan->sdu)
5862 			break;
5863 
5864 		append_skb_frag(chan->sdu, skb,
5865 				&chan->sdu_last_frag);
5866 		skb = NULL;
5867 
5868 		if (chan->sdu->len >= chan->sdu_len)
5869 			break;
5870 
5871 		err = 0;
5872 		break;
5873 
5874 	case L2CAP_SAR_END:
5875 		if (!chan->sdu)
5876 			break;
5877 
5878 		append_skb_frag(chan->sdu, skb,
5879 				&chan->sdu_last_frag);
5880 		skb = NULL;
5881 
5882 		if (chan->sdu->len != chan->sdu_len)
5883 			break;
5884 
5885 		err = chan->ops->recv(chan, chan->sdu);
5886 
5887 		if (!err) {
5888 			/* Reassembly complete */
5889 			chan->sdu = NULL;
5890 			chan->sdu_last_frag = NULL;
5891 			chan->sdu_len = 0;
5892 		}
5893 		break;
5894 	}
5895 
5896 	if (err) {
5897 		kfree_skb(skb);
5898 		kfree_skb(chan->sdu);
5899 		chan->sdu = NULL;
5900 		chan->sdu_last_frag = NULL;
5901 		chan->sdu_len = 0;
5902 	}
5903 
5904 	return err;
5905 }
5906 
5907 static int l2cap_resegment(struct l2cap_chan *chan)
5908 {
5909 	/* Placeholder */
5910 	return 0;
5911 }
5912 
5913 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5914 {
5915 	u8 event;
5916 
5917 	if (chan->mode != L2CAP_MODE_ERTM)
5918 		return;
5919 
5920 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5921 	l2cap_tx(chan, NULL, NULL, event);
5922 }
5923 
5924 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5925 {
5926 	int err = 0;
5927 	/* Pass sequential frames to l2cap_reassemble_sdu()
5928 	 * until a gap is encountered.
5929 	 */
5930 
5931 	BT_DBG("chan %p", chan);
5932 
5933 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5934 		struct sk_buff *skb;
5935 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
5936 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
5937 
5938 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5939 
5940 		if (!skb)
5941 			break;
5942 
5943 		skb_unlink(skb, &chan->srej_q);
5944 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5945 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
5946 		if (err)
5947 			break;
5948 	}
5949 
5950 	if (skb_queue_empty(&chan->srej_q)) {
5951 		chan->rx_state = L2CAP_RX_STATE_RECV;
5952 		l2cap_send_ack(chan);
5953 	}
5954 
5955 	return err;
5956 }
5957 
5958 static void l2cap_handle_srej(struct l2cap_chan *chan,
5959 			      struct l2cap_ctrl *control)
5960 {
5961 	struct sk_buff *skb;
5962 
5963 	BT_DBG("chan %p, control %p", chan, control);
5964 
5965 	if (control->reqseq == chan->next_tx_seq) {
5966 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5967 		l2cap_send_disconn_req(chan, ECONNRESET);
5968 		return;
5969 	}
5970 
5971 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5972 
5973 	if (skb == NULL) {
5974 		BT_DBG("Seq %d not available for retransmission",
5975 		       control->reqseq);
5976 		return;
5977 	}
5978 
5979 	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
5980 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5981 		l2cap_send_disconn_req(chan, ECONNRESET);
5982 		return;
5983 	}
5984 
5985 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5986 
5987 	if (control->poll) {
5988 		l2cap_pass_to_tx(chan, control);
5989 
5990 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
5991 		l2cap_retransmit(chan, control);
5992 		l2cap_ertm_send(chan);
5993 
5994 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5995 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
5996 			chan->srej_save_reqseq = control->reqseq;
5997 		}
5998 	} else {
5999 		l2cap_pass_to_tx_fbit(chan, control);
6000 
6001 		if (control->final) {
6002 			if (chan->srej_save_reqseq != control->reqseq ||
6003 			    !test_and_clear_bit(CONN_SREJ_ACT,
6004 						&chan->conn_state))
6005 				l2cap_retransmit(chan, control);
6006 		} else {
6007 			l2cap_retransmit(chan, control);
6008 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6009 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
6010 				chan->srej_save_reqseq = control->reqseq;
6011 			}
6012 		}
6013 	}
6014 }
6015 
6016 static void l2cap_handle_rej(struct l2cap_chan *chan,
6017 			     struct l2cap_ctrl *control)
6018 {
6019 	struct sk_buff *skb;
6020 
6021 	BT_DBG("chan %p, control %p", chan, control);
6022 
6023 	if (control->reqseq == chan->next_tx_seq) {
6024 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6025 		l2cap_send_disconn_req(chan, ECONNRESET);
6026 		return;
6027 	}
6028 
6029 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6030 
6031 	if (chan->max_tx && skb &&
6032 	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6033 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6034 		l2cap_send_disconn_req(chan, ECONNRESET);
6035 		return;
6036 	}
6037 
6038 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6039 
6040 	l2cap_pass_to_tx(chan, control);
6041 
6042 	if (control->final) {
6043 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6044 			l2cap_retransmit_all(chan, control);
6045 	} else {
6046 		l2cap_retransmit_all(chan, control);
6047 		l2cap_ertm_send(chan);
6048 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6049 			set_bit(CONN_REJ_ACT, &chan->conn_state);
6050 	}
6051 }
6052 
6053 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6054 {
6055 	BT_DBG("chan %p, txseq %d", chan, txseq);
6056 
6057 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6058 	       chan->expected_tx_seq);
6059 
6060 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6061 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6062 		    chan->tx_win) {
6063 			/* See notes below regarding "double poll" and
6064 			 * invalid packets.
6065 			 */
6066 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6067 				BT_DBG("Invalid/Ignore - after SREJ");
6068 				return L2CAP_TXSEQ_INVALID_IGNORE;
6069 			} else {
6070 				BT_DBG("Invalid - in window after SREJ sent");
6071 				return L2CAP_TXSEQ_INVALID;
6072 			}
6073 		}
6074 
6075 		if (chan->srej_list.head == txseq) {
6076 			BT_DBG("Expected SREJ");
6077 			return L2CAP_TXSEQ_EXPECTED_SREJ;
6078 		}
6079 
6080 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6081 			BT_DBG("Duplicate SREJ - txseq already stored");
6082 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
6083 		}
6084 
6085 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6086 			BT_DBG("Unexpected SREJ - not requested");
6087 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6088 		}
6089 	}
6090 
6091 	if (chan->expected_tx_seq == txseq) {
6092 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6093 		    chan->tx_win) {
6094 			BT_DBG("Invalid - txseq outside tx window");
6095 			return L2CAP_TXSEQ_INVALID;
6096 		} else {
6097 			BT_DBG("Expected");
6098 			return L2CAP_TXSEQ_EXPECTED;
6099 		}
6100 	}
6101 
6102 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6103 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6104 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
6105 		return L2CAP_TXSEQ_DUPLICATE;
6106 	}
6107 
6108 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6109 		/* A source of invalid packets is a "double poll" condition,
6110 		 * where delays cause us to send multiple poll packets.  If
6111 		 * the remote stack receives and processes both polls,
6112 		 * sequence numbers can wrap around in such a way that a
6113 		 * resent frame has a sequence number that looks like new data
6114 		 * with a sequence gap.  This would trigger an erroneous SREJ
6115 		 * request.
6116 		 *
6117 		 * Fortunately, this is impossible with a tx window no larger
6118 		 * than half of the sequence number space, so such frames can
6119 		 * be safely ignored.
6120 		 *
6121 		 * With a tx window larger than half of the sequence number
6122 		 * space, the frame cannot be distinguished from new data and
6123 		 * must be treated as invalid.  This causes a disconnect.
6124 		 */
6125 
6126 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6127 			BT_DBG("Invalid/Ignore - txseq outside tx window");
6128 			return L2CAP_TXSEQ_INVALID_IGNORE;
6129 		} else {
6130 			BT_DBG("Invalid - txseq outside tx window");
6131 			return L2CAP_TXSEQ_INVALID;
6132 		}
6133 	} else {
6134 		BT_DBG("Unexpected - txseq indicates missing frames");
6135 		return L2CAP_TXSEQ_UNEXPECTED;
6136 	}
6137 }
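
/*
 * Worked example for l2cap_classify_txseq(); all values are hypothetical
 * and assume a 6-bit sequence space (tx_win_max = 63) with tx_win = 32,
 * last_acked_seq = 10 and expected_tx_seq = 14:
 *
 *   txseq = 14: matches expected_tx_seq and is inside the window
 *               -> L2CAP_TXSEQ_EXPECTED
 *   txseq = 12: offset from last_acked_seq (2) is smaller than the
 *               offset of expected_tx_seq (4) -> L2CAP_TXSEQ_DUPLICATE
 *   txseq = 20: inside the window but past expected_tx_seq, so frames
 *               14..19 are missing -> L2CAP_TXSEQ_UNEXPECTED
 *   txseq = 50: offset 40 is outside the 32-frame window; since
 *               tx_win <= (tx_win_max + 1) / 2 the frame is dropped as
 *               L2CAP_TXSEQ_INVALID_IGNORE instead of forcing a
 *               disconnect (see the "double poll" note above).
 */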
6138 
6139 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6140 			       struct l2cap_ctrl *control,
6141 			       struct sk_buff *skb, u8 event)
6142 {
6143 	int err = 0;
6144 	bool skb_in_use = false;
6145 
6146 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6147 	       event);
6148 
6149 	switch (event) {
6150 	case L2CAP_EV_RECV_IFRAME:
6151 		switch (l2cap_classify_txseq(chan, control->txseq)) {
6152 		case L2CAP_TXSEQ_EXPECTED:
6153 			l2cap_pass_to_tx(chan, control);
6154 
6155 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6156 				BT_DBG("Busy, discarding expected seq %d",
6157 				       control->txseq);
6158 				break;
6159 			}
6160 
6161 			chan->expected_tx_seq = __next_seq(chan,
6162 							   control->txseq);
6163 
6164 			chan->buffer_seq = chan->expected_tx_seq;
6165 			skb_in_use = true;
6166 
6167 			err = l2cap_reassemble_sdu(chan, skb, control);
6168 			if (err)
6169 				break;
6170 
6171 			if (control->final) {
6172 				if (!test_and_clear_bit(CONN_REJ_ACT,
6173 							&chan->conn_state)) {
6174 					control->final = 0;
6175 					l2cap_retransmit_all(chan, control);
6176 					l2cap_ertm_send(chan);
6177 				}
6178 			}
6179 
6180 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6181 				l2cap_send_ack(chan);
6182 			break;
6183 		case L2CAP_TXSEQ_UNEXPECTED:
6184 			l2cap_pass_to_tx(chan, control);
6185 
6186 			/* Can't issue SREJ frames in the local busy state.
6187 			 * Drop this frame; it will be seen as missing
6188 			 * when local busy is exited.
6189 			 */
6190 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6191 				BT_DBG("Busy, discarding unexpected seq %d",
6192 				       control->txseq);
6193 				break;
6194 			}
6195 
6196 			/* There was a gap in the sequence, so an SREJ
6197 			 * must be sent for each missing frame.  The
6198 			 * current frame is stored for later use.
6199 			 */
6200 			skb_queue_tail(&chan->srej_q, skb);
6201 			skb_in_use = true;
6202 			BT_DBG("Queued %p (queue len %d)", skb,
6203 			       skb_queue_len(&chan->srej_q));
6204 
6205 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6206 			l2cap_seq_list_clear(&chan->srej_list);
6207 			l2cap_send_srej(chan, control->txseq);
6208 
6209 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6210 			break;
6211 		case L2CAP_TXSEQ_DUPLICATE:
6212 			l2cap_pass_to_tx(chan, control);
6213 			break;
6214 		case L2CAP_TXSEQ_INVALID_IGNORE:
6215 			break;
6216 		case L2CAP_TXSEQ_INVALID:
6217 		default:
6218 			l2cap_send_disconn_req(chan, ECONNRESET);
6219 			break;
6220 		}
6221 		break;
6222 	case L2CAP_EV_RECV_RR:
6223 		l2cap_pass_to_tx(chan, control);
6224 		if (control->final) {
6225 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6226 
6227 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6228 			    !__chan_is_moving(chan)) {
6229 				control->final = 0;
6230 				l2cap_retransmit_all(chan, control);
6231 			}
6232 
6233 			l2cap_ertm_send(chan);
6234 		} else if (control->poll) {
6235 			l2cap_send_i_or_rr_or_rnr(chan);
6236 		} else {
6237 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6238 					       &chan->conn_state) &&
6239 			    chan->unacked_frames)
6240 				__set_retrans_timer(chan);
6241 
6242 			l2cap_ertm_send(chan);
6243 		}
6244 		break;
6245 	case L2CAP_EV_RECV_RNR:
6246 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6247 		l2cap_pass_to_tx(chan, control);
6248 		if (control && control->poll) {
6249 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6250 			l2cap_send_rr_or_rnr(chan, 0);
6251 		}
6252 		__clear_retrans_timer(chan);
6253 		l2cap_seq_list_clear(&chan->retrans_list);
6254 		break;
6255 	case L2CAP_EV_RECV_REJ:
6256 		l2cap_handle_rej(chan, control);
6257 		break;
6258 	case L2CAP_EV_RECV_SREJ:
6259 		l2cap_handle_srej(chan, control);
6260 		break;
6261 	default:
6262 		break;
6263 	}
6264 
6265 	if (skb && !skb_in_use) {
6266 		BT_DBG("Freeing %p", skb);
6267 		kfree_skb(skb);
6268 	}
6269 
6270 	return err;
6271 }
6272 
6273 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6274 				    struct l2cap_ctrl *control,
6275 				    struct sk_buff *skb, u8 event)
6276 {
6277 	int err = 0;
6278 	u16 txseq = control->txseq;
6279 	bool skb_in_use = false;
6280 
6281 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6282 	       event);
6283 
6284 	switch (event) {
6285 	case L2CAP_EV_RECV_IFRAME:
6286 		switch (l2cap_classify_txseq(chan, txseq)) {
6287 		case L2CAP_TXSEQ_EXPECTED:
6288 			/* Keep frame for reassembly later */
6289 			l2cap_pass_to_tx(chan, control);
6290 			skb_queue_tail(&chan->srej_q, skb);
6291 			skb_in_use = true;
6292 			BT_DBG("Queued %p (queue len %d)", skb,
6293 			       skb_queue_len(&chan->srej_q));
6294 
6295 			chan->expected_tx_seq = __next_seq(chan, txseq);
6296 			break;
6297 		case L2CAP_TXSEQ_EXPECTED_SREJ:
6298 			l2cap_seq_list_pop(&chan->srej_list);
6299 
6300 			l2cap_pass_to_tx(chan, control);
6301 			skb_queue_tail(&chan->srej_q, skb);
6302 			skb_in_use = true;
6303 			BT_DBG("Queued %p (queue len %d)", skb,
6304 			       skb_queue_len(&chan->srej_q));
6305 
6306 			err = l2cap_rx_queued_iframes(chan);
6307 			if (err)
6308 				break;
6309 
6310 			break;
6311 		case L2CAP_TXSEQ_UNEXPECTED:
6312 			/* Got a frame that can't be reassembled yet.
6313 			 * Save it for later, and send SREJs to cover
6314 			 * the missing frames.
6315 			 */
6316 			skb_queue_tail(&chan->srej_q, skb);
6317 			skb_in_use = true;
6318 			BT_DBG("Queued %p (queue len %d)", skb,
6319 			       skb_queue_len(&chan->srej_q));
6320 
6321 			l2cap_pass_to_tx(chan, control);
6322 			l2cap_send_srej(chan, control->txseq);
6323 			break;
6324 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6325 			/* This frame was requested with an SREJ, but
6326 			 * some expected retransmitted frames are
6327 			 * missing.  Request retransmission of missing
6328 			 * SREJ'd frames.
6329 			 */
6330 			skb_queue_tail(&chan->srej_q, skb);
6331 			skb_in_use = true;
6332 			BT_DBG("Queued %p (queue len %d)", skb,
6333 			       skb_queue_len(&chan->srej_q));
6334 
6335 			l2cap_pass_to_tx(chan, control);
6336 			l2cap_send_srej_list(chan, control->txseq);
6337 			break;
6338 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
6339 			/* We've already queued this frame.  Drop this copy. */
6340 			l2cap_pass_to_tx(chan, control);
6341 			break;
6342 		case L2CAP_TXSEQ_DUPLICATE:
6343 			/* Expecting a later sequence number, so this frame
6344 			 * was already received.  Ignore it completely.
6345 			 */
6346 			break;
6347 		case L2CAP_TXSEQ_INVALID_IGNORE:
6348 			break;
6349 		case L2CAP_TXSEQ_INVALID:
6350 		default:
6351 			l2cap_send_disconn_req(chan, ECONNRESET);
6352 			break;
6353 		}
6354 		break;
6355 	case L2CAP_EV_RECV_RR:
6356 		l2cap_pass_to_tx(chan, control);
6357 		if (control->final) {
6358 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6359 
6360 			if (!test_and_clear_bit(CONN_REJ_ACT,
6361 						&chan->conn_state)) {
6362 				control->final = 0;
6363 				l2cap_retransmit_all(chan, control);
6364 			}
6365 
6366 			l2cap_ertm_send(chan);
6367 		} else if (control->poll) {
6368 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6369 					       &chan->conn_state) &&
6370 			    chan->unacked_frames) {
6371 				__set_retrans_timer(chan);
6372 			}
6373 
6374 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6375 			l2cap_send_srej_tail(chan);
6376 		} else {
6377 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6378 					       &chan->conn_state) &&
6379 			    chan->unacked_frames)
6380 				__set_retrans_timer(chan);
6381 
6382 			l2cap_send_ack(chan);
6383 		}
6384 		break;
6385 	case L2CAP_EV_RECV_RNR:
6386 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6387 		l2cap_pass_to_tx(chan, control);
6388 		if (control->poll) {
6389 			l2cap_send_srej_tail(chan);
6390 		} else {
6391 			struct l2cap_ctrl rr_control;
6392 			memset(&rr_control, 0, sizeof(rr_control));
6393 			rr_control.sframe = 1;
6394 			rr_control.super = L2CAP_SUPER_RR;
6395 			rr_control.reqseq = chan->buffer_seq;
6396 			l2cap_send_sframe(chan, &rr_control);
6397 		}
6398 
6399 		break;
6400 	case L2CAP_EV_RECV_REJ:
6401 		l2cap_handle_rej(chan, control);
6402 		break;
6403 	case L2CAP_EV_RECV_SREJ:
6404 		l2cap_handle_srej(chan, control);
6405 		break;
6406 	}
6407 
6408 	if (skb && !skb_in_use) {
6409 		BT_DBG("Freeing %p", skb);
6410 		kfree_skb(skb);
6411 	}
6412 
6413 	return err;
6414 }
6415 
6416 static int l2cap_finish_move(struct l2cap_chan *chan)
6417 {
6418 	BT_DBG("chan %p", chan);
6419 
6420 	chan->rx_state = L2CAP_RX_STATE_RECV;
6421 
6422 	if (chan->hs_hcon)
6423 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6424 	else
6425 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6426 
6427 	return l2cap_resegment(chan);
6428 }
6429 
6430 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6431 				 struct l2cap_ctrl *control,
6432 				 struct sk_buff *skb, u8 event)
6433 {
6434 	int err;
6435 
6436 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6437 	       event);
6438 
6439 	if (!control->poll)
6440 		return -EPROTO;
6441 
6442 	l2cap_process_reqseq(chan, control->reqseq);
6443 
6444 	if (!skb_queue_empty(&chan->tx_q))
6445 		chan->tx_send_head = skb_peek(&chan->tx_q);
6446 	else
6447 		chan->tx_send_head = NULL;
6448 
6449 	/* Rewind next_tx_seq to the point expected
6450 	 * by the receiver.
6451 	 */
6452 	chan->next_tx_seq = control->reqseq;
6453 	chan->unacked_frames = 0;
6454 
6455 	err = l2cap_finish_move(chan);
6456 	if (err)
6457 		return err;
6458 
6459 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
6460 	l2cap_send_i_or_rr_or_rnr(chan);
6461 
6462 	if (event == L2CAP_EV_RECV_IFRAME)
6463 		return -EPROTO;
6464 
6465 	return l2cap_rx_state_recv(chan, control, NULL, event);
6466 }
6467 
6468 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6469 				 struct l2cap_ctrl *control,
6470 				 struct sk_buff *skb, u8 event)
6471 {
6472 	int err;
6473 
6474 	if (!control->final)
6475 		return -EPROTO;
6476 
6477 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6478 
6479 	chan->rx_state = L2CAP_RX_STATE_RECV;
6480 	l2cap_process_reqseq(chan, control->reqseq);
6481 
6482 	if (!skb_queue_empty(&chan->tx_q))
6483 		chan->tx_send_head = skb_peek(&chan->tx_q);
6484 	else
6485 		chan->tx_send_head = NULL;
6486 
6487 	/* Rewind next_tx_seq to the point expected
6488 	 * by the receiver.
6489 	 */
6490 	chan->next_tx_seq = control->reqseq;
6491 	chan->unacked_frames = 0;
6492 
6493 	if (chan->hs_hcon)
6494 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6495 	else
6496 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6497 
6498 	err = l2cap_resegment(chan);
6499 
6500 	if (!err)
6501 		err = l2cap_rx_state_recv(chan, control, skb, event);
6502 
6503 	return err;
6504 }
6505 
6506 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6507 {
6508 	/* Make sure reqseq is for a packet that has been sent but not acked */
6509 	u16 unacked;
6510 
6511 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6512 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6513 }
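
/*
 * Illustrative numbers for __valid_reqseq(): with next_tx_seq = 20 and
 * expected_ack_seq = 15 there are five unacked frames (15..19).  A reqseq
 * of 15..20 yields an offset from next_tx_seq of 5..0 and is accepted;
 * any other value would acknowledge a frame that was never sent, so the
 * caller disconnects the channel.
 */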
6514 
6515 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6516 		    struct sk_buff *skb, u8 event)
6517 {
6518 	int err = 0;
6519 
6520 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6521 	       control, skb, event, chan->rx_state);
6522 
6523 	if (__valid_reqseq(chan, control->reqseq)) {
6524 		switch (chan->rx_state) {
6525 		case L2CAP_RX_STATE_RECV:
6526 			err = l2cap_rx_state_recv(chan, control, skb, event);
6527 			break;
6528 		case L2CAP_RX_STATE_SREJ_SENT:
6529 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6530 						       event);
6531 			break;
6532 		case L2CAP_RX_STATE_WAIT_P:
6533 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6534 			break;
6535 		case L2CAP_RX_STATE_WAIT_F:
6536 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6537 			break;
6538 		default:
6539 			/* shut it down */
6540 			break;
6541 		}
6542 	} else {
6543 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
6544 		       control->reqseq, chan->next_tx_seq,
6545 		       chan->expected_ack_seq);
6546 		l2cap_send_disconn_req(chan, ECONNRESET);
6547 	}
6548 
6549 	return err;
6550 }
6551 
6552 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6553 			   struct sk_buff *skb)
6554 {
6555 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6556 	       chan->rx_state);
6557 
6558 	if (l2cap_classify_txseq(chan, control->txseq) ==
6559 	    L2CAP_TXSEQ_EXPECTED) {
6560 		l2cap_pass_to_tx(chan, control);
6561 
6562 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6563 		       __next_seq(chan, chan->buffer_seq));
6564 
6565 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6566 
6567 		l2cap_reassemble_sdu(chan, skb, control);
6568 	} else {
6569 		if (chan->sdu) {
6570 			kfree_skb(chan->sdu);
6571 			chan->sdu = NULL;
6572 		}
6573 		chan->sdu_last_frag = NULL;
6574 		chan->sdu_len = 0;
6575 
6576 		if (skb) {
6577 			BT_DBG("Freeing %p", skb);
6578 			kfree_skb(skb);
6579 		}
6580 	}
6581 
6582 	chan->last_acked_seq = control->txseq;
6583 	chan->expected_tx_seq = __next_seq(chan, control->txseq);
6584 
6585 	return 0;
6586 }
6587 
6588 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6589 {
6590 	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
6591 	u16 len;
6592 	u8 event;
6593 
6594 	__unpack_control(chan, skb);
6595 
6596 	len = skb->len;
6597 
6598 	/*
6599 	 * We can just drop the corrupted I-frame here.
6600 	 * The receive state machine will treat it as missing, start the
6601 	 * proper recovery procedure and ask for retransmission.
6602 	 */
6603 	if (l2cap_check_fcs(chan, skb))
6604 		goto drop;
6605 
6606 	if (!control->sframe && control->sar == L2CAP_SAR_START)
6607 		len -= L2CAP_SDULEN_SIZE;
6608 
6609 	if (chan->fcs == L2CAP_FCS_CRC16)
6610 		len -= L2CAP_FCS_SIZE;
6611 
6612 	if (len > chan->mps) {
6613 		l2cap_send_disconn_req(chan, ECONNRESET);
6614 		goto drop;
6615 	}
6616 
6617 	if ((chan->mode == L2CAP_MODE_ERTM ||
6618 	     chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb))
6619 		goto drop;
6620 
6621 	if (!control->sframe) {
6622 		int err;
6623 
6624 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6625 		       control->sar, control->reqseq, control->final,
6626 		       control->txseq);
6627 
6628 		/* Validate F-bit - F=0 always valid, F=1 only
6629 		 * valid in TX WAIT_F
6630 		 */
6631 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6632 			goto drop;
6633 
6634 		if (chan->mode != L2CAP_MODE_STREAMING) {
6635 			event = L2CAP_EV_RECV_IFRAME;
6636 			err = l2cap_rx(chan, control, skb, event);
6637 		} else {
6638 			err = l2cap_stream_rx(chan, control, skb);
6639 		}
6640 
6641 		if (err)
6642 			l2cap_send_disconn_req(chan, ECONNRESET);
6643 	} else {
6644 		const u8 rx_func_to_event[4] = {
6645 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6646 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6647 		};
6648 
6649 		/* Only I-frames are expected in streaming mode */
6650 		if (chan->mode == L2CAP_MODE_STREAMING)
6651 			goto drop;
6652 
6653 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6654 		       control->reqseq, control->final, control->poll,
6655 		       control->super);
6656 
6657 		if (len != 0) {
6658 			BT_ERR("Trailing bytes: %d in sframe", len);
6659 			l2cap_send_disconn_req(chan, ECONNRESET);
6660 			goto drop;
6661 		}
6662 
6663 		/* Validate F and P bits */
6664 		if (control->final && (control->poll ||
6665 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6666 			goto drop;
6667 
6668 		event = rx_func_to_event[control->super];
6669 		if (l2cap_rx(chan, control, skb, event))
6670 			l2cap_send_disconn_req(chan, ECONNRESET);
6671 	}
6672 
6673 	return 0;
6674 
6675 drop:
6676 	kfree_skb(skb);
6677 	return 0;
6678 }
6679 
6680 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6681 {
6682 	struct l2cap_conn *conn = chan->conn;
6683 	struct l2cap_le_credits pkt;
6684 	u16 return_credits;
6685 
6686 	/* We return more credits to the sender only after the amount of
6687 	 * credits falls below half of the initial amount.
6688 	 */
6689 	if (chan->rx_credits >= (le_max_credits + 1) / 2)
6690 		return;
6691 
6692 	return_credits = le_max_credits - chan->rx_credits;
6693 
6694 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6695 
6696 	chan->rx_credits += return_credits;
6697 
6698 	pkt.cid     = cpu_to_le16(chan->scid);
6699 	pkt.credits = cpu_to_le16(return_credits);
6700 
6701 	chan->ident = l2cap_get_ident(conn);
6702 
6703 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6704 }
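
/*
 * Illustrative credit top-up, assuming le_max_credits is 10 purely for
 * the example.  Credits are only returned once rx_credits drops below
 * (le_max_credits + 1) / 2:
 *
 *   rx_credits 10 -> 9 -> ... -> 5 : no L2CAP_LE_CREDITS PDU is sent
 *   rx_credits 4                   : return_credits = 10 - 4 = 6, an LE
 *                                    credits packet for 6 credits is sent
 *                                    and rx_credits goes back to 10
 *
 * This keeps signalling traffic down while never letting the peer run
 * completely dry as long as received PDUs keep being consumed.
 */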
6705 
6706 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6707 {
6708 	int err;
6709 
6710 	if (!chan->rx_credits) {
6711 		BT_ERR("No credits to receive LE L2CAP data");
6712 		l2cap_send_disconn_req(chan, ECONNRESET);
6713 		return -ENOBUFS;
6714 	}
6715 
6716 	if (chan->imtu < skb->len) {
6717 		BT_ERR("Too big LE L2CAP PDU");
6718 		return -ENOBUFS;
6719 	}
6720 
6721 	chan->rx_credits--;
6722 	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6723 
6724 	l2cap_chan_le_send_credits(chan);
6725 
6726 	err = 0;
6727 
6728 	if (!chan->sdu) {
6729 		u16 sdu_len;
6730 
6731 		sdu_len = get_unaligned_le16(skb->data);
6732 		skb_pull(skb, L2CAP_SDULEN_SIZE);
6733 
6734 		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6735 		       sdu_len, skb->len, chan->imtu);
6736 
6737 		if (sdu_len > chan->imtu) {
6738 			BT_ERR("Too big LE L2CAP SDU length received");
6739 			err = -EMSGSIZE;
6740 			goto failed;
6741 		}
6742 
6743 		if (skb->len > sdu_len) {
6744 			BT_ERR("Too much LE L2CAP data received");
6745 			err = -EINVAL;
6746 			goto failed;
6747 		}
6748 
6749 		if (skb->len == sdu_len)
6750 			return chan->ops->recv(chan, skb);
6751 
6752 		chan->sdu = skb;
6753 		chan->sdu_len = sdu_len;
6754 		chan->sdu_last_frag = skb;
6755 
6756 		return 0;
6757 	}
6758 
6759 	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6760 	       chan->sdu->len, skb->len, chan->sdu_len);
6761 
6762 	if (chan->sdu->len + skb->len > chan->sdu_len) {
6763 		BT_ERR("Too much LE L2CAP data received");
6764 		err = -EINVAL;
6765 		goto failed;
6766 	}
6767 
6768 	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6769 	skb = NULL;
6770 
6771 	if (chan->sdu->len == chan->sdu_len) {
6772 		err = chan->ops->recv(chan, chan->sdu);
6773 		if (!err) {
6774 			chan->sdu = NULL;
6775 			chan->sdu_last_frag = NULL;
6776 			chan->sdu_len = 0;
6777 		}
6778 	}
6779 
6780 failed:
6781 	if (err) {
6782 		kfree_skb(skb);
6783 		kfree_skb(chan->sdu);
6784 		chan->sdu = NULL;
6785 		chan->sdu_last_frag = NULL;
6786 		chan->sdu_len = 0;
6787 	}
6788 
6789 	/* We can't return an error here since we took care of the skb
6790 	 * freeing internally. An error return would cause the caller to
6791 	 * do a double-free of the skb.
6792 	 */
6793 	return 0;
6794 }
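
/*
 * Illustrative LE flow control reassembly with hypothetical sizes: only
 * the first PDU of an SDU carries the 2-byte SDU length.  With an MPS of
 * 50 octets and an SDU of 100 octets:
 *
 *   PDU 1: 2-byte SDU length (100) + 48 data -> stored in chan->sdu
 *   PDU 2: 50 data                           -> appended
 *   PDU 3: 2 data                            -> appended; sdu->len == 100,
 *                                               SDU handed to chan->ops->recv()
 *
 * Each PDU also consumes one credit, so the peer needs at least three
 * credits to send this SDU.
 */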
6795 
6796 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6797 			       struct sk_buff *skb)
6798 {
6799 	struct l2cap_chan *chan;
6800 
6801 	chan = l2cap_get_chan_by_scid(conn, cid);
6802 	if (!chan) {
6803 		if (cid == L2CAP_CID_A2MP) {
6804 			chan = a2mp_channel_create(conn, skb);
6805 			if (!chan) {
6806 				kfree_skb(skb);
6807 				return;
6808 			}
6809 
6810 			l2cap_chan_lock(chan);
6811 		} else {
6812 			BT_DBG("unknown cid 0x%4.4x", cid);
6813 			/* Drop packet and return */
6814 			kfree_skb(skb);
6815 			return;
6816 		}
6817 	}
6818 
6819 	BT_DBG("chan %p, len %d", chan, skb->len);
6820 
6821 	/* If we receive data on a fixed channel before the info req/rsp
6822 	 * procedure is done, simply assume that the channel is supported
6823 	 * and mark it as ready.
6824 	 */
6825 	if (chan->chan_type == L2CAP_CHAN_FIXED)
6826 		l2cap_chan_ready(chan);
6827 
6828 	if (chan->state != BT_CONNECTED)
6829 		goto drop;
6830 
6831 	switch (chan->mode) {
6832 	case L2CAP_MODE_LE_FLOWCTL:
6833 		if (l2cap_le_data_rcv(chan, skb) < 0)
6834 			goto drop;
6835 
6836 		goto done;
6837 
6838 	case L2CAP_MODE_BASIC:
6839 		/* If the socket recv buffer overflows we drop data here,
6840 		 * which is *bad* because L2CAP has to be reliable.
6841 		 * But we don't have any other choice: L2CAP doesn't
6842 		 * provide a flow control mechanism. */
6843 
6844 		if (chan->imtu < skb->len) {
6845 			BT_ERR("Dropping L2CAP data: receive buffer overflow");
6846 			goto drop;
6847 		}
6848 
6849 		if (!chan->ops->recv(chan, skb))
6850 			goto done;
6851 		break;
6852 
6853 	case L2CAP_MODE_ERTM:
6854 	case L2CAP_MODE_STREAMING:
6855 		l2cap_data_rcv(chan, skb);
6856 		goto done;
6857 
6858 	default:
6859 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6860 		break;
6861 	}
6862 
6863 drop:
6864 	kfree_skb(skb);
6865 
6866 done:
6867 	l2cap_chan_unlock(chan);
6868 }
6869 
6870 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6871 				  struct sk_buff *skb)
6872 {
6873 	struct hci_conn *hcon = conn->hcon;
6874 	struct l2cap_chan *chan;
6875 
6876 	if (hcon->type != ACL_LINK)
6877 		goto free_skb;
6878 
6879 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6880 					ACL_LINK);
6881 	if (!chan)
6882 		goto free_skb;
6883 
6884 	BT_DBG("chan %p, len %d", chan, skb->len);
6885 
6886 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6887 		goto drop;
6888 
6889 	if (chan->imtu < skb->len)
6890 		goto drop;
6891 
6892 	/* Store remote BD_ADDR and PSM for msg_name */
6893 	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
6894 	bt_cb(skb)->l2cap.psm = psm;
6895 
6896 	if (!chan->ops->recv(chan, skb)) {
6897 		l2cap_chan_put(chan);
6898 		return;
6899 	}
6900 
6901 drop:
6902 	l2cap_chan_put(chan);
6903 free_skb:
6904 	kfree_skb(skb);
6905 }
6906 
6907 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6908 {
6909 	struct l2cap_hdr *lh = (void *) skb->data;
6910 	struct hci_conn *hcon = conn->hcon;
6911 	u16 cid, len;
6912 	__le16 psm;
6913 
6914 	if (hcon->state != BT_CONNECTED) {
6915 		BT_DBG("queueing pending rx skb");
6916 		skb_queue_tail(&conn->pending_rx, skb);
6917 		return;
6918 	}
6919 
6920 	skb_pull(skb, L2CAP_HDR_SIZE);
6921 	cid = __le16_to_cpu(lh->cid);
6922 	len = __le16_to_cpu(lh->len);
6923 
6924 	if (len != skb->len) {
6925 		kfree_skb(skb);
6926 		return;
6927 	}
6928 
6929 	/* Since we can't actively block incoming LE connections we must
6930 	 * at least ensure that we ignore incoming data from them.
6931 	 */
6932 	if (hcon->type == LE_LINK &&
6933 	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
6934 				   bdaddr_dst_type(hcon))) {
6935 		kfree_skb(skb);
6936 		return;
6937 	}
6938 
6939 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
6940 
6941 	switch (cid) {
6942 	case L2CAP_CID_SIGNALING:
6943 		l2cap_sig_channel(conn, skb);
6944 		break;
6945 
6946 	case L2CAP_CID_CONN_LESS:
6947 		psm = get_unaligned((__le16 *) skb->data);
6948 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
6949 		l2cap_conless_channel(conn, psm, skb);
6950 		break;
6951 
6952 	case L2CAP_CID_LE_SIGNALING:
6953 		l2cap_le_sig_channel(conn, skb);
6954 		break;
6955 
6956 	default:
6957 		l2cap_data_channel(conn, cid, skb);
6958 		break;
6959 	}
6960 }
6961 
6962 static void process_pending_rx(struct work_struct *work)
6963 {
6964 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6965 					       pending_rx_work);
6966 	struct sk_buff *skb;
6967 
6968 	BT_DBG("");
6969 
6970 	while ((skb = skb_dequeue(&conn->pending_rx)))
6971 		l2cap_recv_frame(conn, skb);
6972 }
6973 
6974 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6975 {
6976 	struct l2cap_conn *conn = hcon->l2cap_data;
6977 	struct hci_chan *hchan;
6978 
6979 	if (conn)
6980 		return conn;
6981 
6982 	hchan = hci_chan_create(hcon);
6983 	if (!hchan)
6984 		return NULL;
6985 
6986 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
6987 	if (!conn) {
6988 		hci_chan_del(hchan);
6989 		return NULL;
6990 	}
6991 
6992 	kref_init(&conn->ref);
6993 	hcon->l2cap_data = conn;
6994 	conn->hcon = hci_conn_get(hcon);
6995 	conn->hchan = hchan;
6996 
6997 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
6998 
6999 	switch (hcon->type) {
7000 	case LE_LINK:
7001 		if (hcon->hdev->le_mtu) {
7002 			conn->mtu = hcon->hdev->le_mtu;
7003 			break;
7004 		}
7005 		/* fall through */
7006 	default:
7007 		conn->mtu = hcon->hdev->acl_mtu;
7008 		break;
7009 	}
7010 
7011 	conn->feat_mask = 0;
7012 
7013 	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7014 
7015 	if (hcon->type == ACL_LINK &&
7016 	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7017 		conn->local_fixed_chan |= L2CAP_FC_A2MP;
7018 
7019 	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7020 	    (bredr_sc_enabled(hcon->hdev) ||
7021 	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7022 		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7023 
7024 	mutex_init(&conn->ident_lock);
7025 	mutex_init(&conn->chan_lock);
7026 
7027 	INIT_LIST_HEAD(&conn->chan_l);
7028 	INIT_LIST_HEAD(&conn->users);
7029 
7030 	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7031 
7032 	skb_queue_head_init(&conn->pending_rx);
7033 	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7034 	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7035 
7036 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7037 
7038 	return conn;
7039 }
7040 
7041 static bool is_valid_psm(u16 psm, u8 dst_type) {
7042 	if (!psm)
7043 		return false;
7044 
7045 	if (bdaddr_type_is_le(dst_type))
7046 		return (psm <= 0x00ff);
7047 
7048 	/* PSM must be odd and lsb of upper byte must be 0 */
7049 	return ((psm & 0x0101) == 0x0001);
7050 }
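
/*
 * A few example PSM values for is_valid_psm(), chosen for illustration
 * only:
 *
 *   0x0001 (SDP)   BR/EDR: odd, bit 8 clear       -> valid
 *   0x1001         BR/EDR: odd, bit 8 clear       -> valid
 *   0x0002         BR/EDR: even                   -> invalid
 *   0x0101         BR/EDR: bit 8 of the PSM set   -> invalid
 *   0x0080         LE:     fits in a single octet -> valid
 *   0x0100         LE:     larger than 0x00ff     -> invalid
 */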
7051 
7052 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7053 		       bdaddr_t *dst, u8 dst_type)
7054 {
7055 	struct l2cap_conn *conn;
7056 	struct hci_conn *hcon;
7057 	struct hci_dev *hdev;
7058 	int err;
7059 
7060 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7061 	       dst_type, __le16_to_cpu(psm));
7062 
7063 	hdev = hci_get_route(dst, &chan->src);
7064 	if (!hdev)
7065 		return -EHOSTUNREACH;
7066 
7067 	hci_dev_lock(hdev);
7068 
7069 	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7070 	    chan->chan_type != L2CAP_CHAN_RAW) {
7071 		err = -EINVAL;
7072 		goto done;
7073 	}
7074 
7075 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7076 		err = -EINVAL;
7077 		goto done;
7078 	}
7079 
7080 	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7081 		err = -EINVAL;
7082 		goto done;
7083 	}
7084 
7085 	switch (chan->mode) {
7086 	case L2CAP_MODE_BASIC:
7087 		break;
7088 	case L2CAP_MODE_LE_FLOWCTL:
7089 		l2cap_le_flowctl_init(chan);
7090 		break;
7091 	case L2CAP_MODE_ERTM:
7092 	case L2CAP_MODE_STREAMING:
7093 		if (!disable_ertm)
7094 			break;
7095 		/* fall through */
7096 	default:
7097 		err = -EOPNOTSUPP;
7098 		goto done;
7099 	}
7100 
7101 	switch (chan->state) {
7102 	case BT_CONNECT:
7103 	case BT_CONNECT2:
7104 	case BT_CONFIG:
7105 		/* Already connecting */
7106 		err = 0;
7107 		goto done;
7108 
7109 	case BT_CONNECTED:
7110 		/* Already connected */
7111 		err = -EISCONN;
7112 		goto done;
7113 
7114 	case BT_OPEN:
7115 	case BT_BOUND:
7116 		/* Can connect */
7117 		break;
7118 
7119 	default:
7120 		err = -EBADFD;
7121 		goto done;
7122 	}
7123 
7124 	/* Set destination address and psm */
7125 	bacpy(&chan->dst, dst);
7126 	chan->dst_type = dst_type;
7127 
7128 	chan->psm = psm;
7129 	chan->dcid = cid;
7130 
7131 	if (bdaddr_type_is_le(dst_type)) {
7132 		/* Convert from L2CAP channel address type to HCI address type
7133 		 */
7134 		if (dst_type == BDADDR_LE_PUBLIC)
7135 			dst_type = ADDR_LE_DEV_PUBLIC;
7136 		else
7137 			dst_type = ADDR_LE_DEV_RANDOM;
7138 
7139 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7140 			hcon = hci_connect_le(hdev, dst, dst_type,
7141 					      chan->sec_level,
7142 					      HCI_LE_CONN_TIMEOUT,
7143 					      HCI_ROLE_SLAVE);
7144 		else
7145 			hcon = hci_connect_le_scan(hdev, dst, dst_type,
7146 						   chan->sec_level,
7147 						   HCI_LE_CONN_TIMEOUT);
7148 
7149 	} else {
7150 		u8 auth_type = l2cap_get_auth_type(chan);
7151 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7152 	}
7153 
7154 	if (IS_ERR(hcon)) {
7155 		err = PTR_ERR(hcon);
7156 		goto done;
7157 	}
7158 
7159 	conn = l2cap_conn_add(hcon);
7160 	if (!conn) {
7161 		hci_conn_drop(hcon);
7162 		err = -ENOMEM;
7163 		goto done;
7164 	}
7165 
7166 	mutex_lock(&conn->chan_lock);
7167 	l2cap_chan_lock(chan);
7168 
7169 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7170 		hci_conn_drop(hcon);
7171 		err = -EBUSY;
7172 		goto chan_unlock;
7173 	}
7174 
7175 	/* Update source addr of the socket */
7176 	bacpy(&chan->src, &hcon->src);
7177 	chan->src_type = bdaddr_src_type(hcon);
7178 
7179 	__l2cap_chan_add(conn, chan);
7180 
7181 	/* l2cap_chan_add takes its own ref so we can drop this one */
7182 	hci_conn_drop(hcon);
7183 
7184 	l2cap_state_change(chan, BT_CONNECT);
7185 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7186 
7187 	/* Release chan->sport so that it can be reused by other
7188 	 * sockets (as it's only used for listening sockets).
7189 	 */
7190 	write_lock(&chan_list_lock);
7191 	chan->sport = 0;
7192 	write_unlock(&chan_list_lock);
7193 
7194 	if (hcon->state == BT_CONNECTED) {
7195 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7196 			__clear_chan_timer(chan);
7197 			if (l2cap_chan_check_security(chan, true))
7198 				l2cap_state_change(chan, BT_CONNECTED);
7199 		} else
7200 			l2cap_do_start(chan);
7201 	}
7202 
7203 	err = 0;
7204 
7205 chan_unlock:
7206 	l2cap_chan_unlock(chan);
7207 	mutex_unlock(&conn->chan_lock);
7208 done:
7209 	hci_dev_unlock(hdev);
7210 	hci_dev_put(hdev);
7211 	return err;
7212 }
7213 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
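
/*
 * A possible in-kernel caller of the exported l2cap_chan_connect() might
 * look roughly like the sketch below.  The setup is abbreviated, and
 * my_chan_ops, peer_bdaddr and the PSM value are placeholders rather than
 * values taken from this file:
 *
 *	struct l2cap_chan *chan = l2cap_chan_create();
 *	int err;
 *
 *	if (!chan)
 *		return -ENOMEM;
 *	chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
 *	chan->mode = L2CAP_MODE_BASIC;
 *	chan->ops = &my_chan_ops;	(caller-provided l2cap_ops callbacks)
 *
 *	err = l2cap_chan_connect(chan, cpu_to_le16(0x1001), 0,
 *				 &peer_bdaddr, BDADDR_BREDR);
 *
 * On success the channel typically moves to BT_CONNECT and the caller is
 * notified of further progress through its l2cap_ops callbacks.
 */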
7214 
7215 /* ---- L2CAP interface with lower layer (HCI) ---- */
7216 
7217 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7218 {
7219 	int exact = 0, lm1 = 0, lm2 = 0;
7220 	struct l2cap_chan *c;
7221 
7222 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7223 
7224 	/* Find listening sockets and check their link_mode */
7225 	read_lock(&chan_list_lock);
7226 	list_for_each_entry(c, &chan_list, global_l) {
7227 		if (c->state != BT_LISTEN)
7228 			continue;
7229 
7230 		if (!bacmp(&c->src, &hdev->bdaddr)) {
7231 			lm1 |= HCI_LM_ACCEPT;
7232 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7233 				lm1 |= HCI_LM_MASTER;
7234 			exact++;
7235 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
7236 			lm2 |= HCI_LM_ACCEPT;
7237 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7238 				lm2 |= HCI_LM_MASTER;
7239 		}
7240 	}
7241 	read_unlock(&chan_list_lock);
7242 
7243 	return exact ? lm1 : lm2;
7244 }
7245 
7246 /* Find the next fixed channel in BT_LISTEN state, continuing iteration
7247  * from an existing channel in the list or from the beginning of the
7248  * global list (by passing NULL as first parameter).
7249  */
7250 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7251 						  struct hci_conn *hcon)
7252 {
7253 	u8 src_type = bdaddr_src_type(hcon);
7254 
7255 	read_lock(&chan_list_lock);
7256 
7257 	if (c)
7258 		c = list_next_entry(c, global_l);
7259 	else
7260 		c = list_entry(chan_list.next, typeof(*c), global_l);
7261 
7262 	list_for_each_entry_from(c, &chan_list, global_l) {
7263 		if (c->chan_type != L2CAP_CHAN_FIXED)
7264 			continue;
7265 		if (c->state != BT_LISTEN)
7266 			continue;
7267 		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7268 			continue;
7269 		if (src_type != c->src_type)
7270 			continue;
7271 
7272 		l2cap_chan_hold(c);
7273 		read_unlock(&chan_list_lock);
7274 		return c;
7275 	}
7276 
7277 	read_unlock(&chan_list_lock);
7278 
7279 	return NULL;
7280 }
7281 
7282 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7283 {
7284 	struct hci_dev *hdev = hcon->hdev;
7285 	struct l2cap_conn *conn;
7286 	struct l2cap_chan *pchan;
7287 	u8 dst_type;
7288 
7289 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7290 		return;
7291 
7292 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7293 
7294 	if (status) {
7295 		l2cap_conn_del(hcon, bt_to_errno(status));
7296 		return;
7297 	}
7298 
7299 	conn = l2cap_conn_add(hcon);
7300 	if (!conn)
7301 		return;
7302 
7303 	dst_type = bdaddr_dst_type(hcon);
7304 
7305 	/* If device is blocked, do not create channels for it */
7306 	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7307 		return;
7308 
7309 	/* Find fixed channels and notify them of the new connection. We
7310 	 * use multiple individual lookups, continuing each time where
7311 	 * we left off, because the list lock would prevent calling the
7312 	 * potentially sleeping l2cap_chan_lock() function.
7313 	 */
7314 	pchan = l2cap_global_fixed_chan(NULL, hcon);
7315 	while (pchan) {
7316 		struct l2cap_chan *chan, *next;
7317 
7318 		/* Client fixed channels should override server ones */
7319 		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7320 			goto next;
7321 
7322 		l2cap_chan_lock(pchan);
7323 		chan = pchan->ops->new_connection(pchan);
7324 		if (chan) {
7325 			bacpy(&chan->src, &hcon->src);
7326 			bacpy(&chan->dst, &hcon->dst);
7327 			chan->src_type = bdaddr_src_type(hcon);
7328 			chan->dst_type = dst_type;
7329 
7330 			__l2cap_chan_add(conn, chan);
7331 		}
7332 
7333 		l2cap_chan_unlock(pchan);
7334 next:
7335 		next = l2cap_global_fixed_chan(pchan, hcon);
7336 		l2cap_chan_put(pchan);
7337 		pchan = next;
7338 	}
7339 
7340 	l2cap_conn_ready(conn);
7341 }
7342 
7343 int l2cap_disconn_ind(struct hci_conn *hcon)
7344 {
7345 	struct l2cap_conn *conn = hcon->l2cap_data;
7346 
7347 	BT_DBG("hcon %p", hcon);
7348 
7349 	if (!conn)
7350 		return HCI_ERROR_REMOTE_USER_TERM;
7351 	return conn->disc_reason;
7352 }
7353 
7354 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7355 {
7356 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7357 		return;
7358 
7359 	BT_DBG("hcon %p reason %d", hcon, reason);
7360 
7361 	l2cap_conn_del(hcon, bt_to_errno(reason));
7362 }
7363 
7364 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7365 {
7366 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7367 		return;
7368 
7369 	if (encrypt == 0x00) {
7370 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
7371 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7372 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
7373 			   chan->sec_level == BT_SECURITY_FIPS)
7374 			l2cap_chan_close(chan, ECONNREFUSED);
7375 	} else {
7376 		if (chan->sec_level == BT_SECURITY_MEDIUM)
7377 			__clear_chan_timer(chan);
7378 	}
7379 }
7380 
7381 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7382 {
7383 	struct l2cap_conn *conn = hcon->l2cap_data;
7384 	struct l2cap_chan *chan;
7385 
7386 	if (!conn)
7387 		return;
7388 
7389 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7390 
7391 	mutex_lock(&conn->chan_lock);
7392 
7393 	list_for_each_entry(chan, &conn->chan_l, list) {
7394 		l2cap_chan_lock(chan);
7395 
7396 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7397 		       state_to_string(chan->state));
7398 
7399 		if (chan->scid == L2CAP_CID_A2MP) {
7400 			l2cap_chan_unlock(chan);
7401 			continue;
7402 		}
7403 
7404 		if (!status && encrypt)
7405 			chan->sec_level = hcon->sec_level;
7406 
7407 		if (!__l2cap_no_conn_pending(chan)) {
7408 			l2cap_chan_unlock(chan);
7409 			continue;
7410 		}
7411 
7412 		if (!status && (chan->state == BT_CONNECTED ||
7413 				chan->state == BT_CONFIG)) {
7414 			chan->ops->resume(chan);
7415 			l2cap_check_encryption(chan, encrypt);
7416 			l2cap_chan_unlock(chan);
7417 			continue;
7418 		}
7419 
7420 		if (chan->state == BT_CONNECT) {
7421 			if (!status)
7422 				l2cap_start_connection(chan);
7423 			else
7424 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7425 		} else if (chan->state == BT_CONNECT2 &&
7426 			   chan->mode != L2CAP_MODE_LE_FLOWCTL) {
7427 			struct l2cap_conn_rsp rsp;
7428 			__u16 res, stat;
7429 
7430 			if (!status) {
7431 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7432 					res = L2CAP_CR_PEND;
7433 					stat = L2CAP_CS_AUTHOR_PEND;
7434 					chan->ops->defer(chan);
7435 				} else {
7436 					l2cap_state_change(chan, BT_CONFIG);
7437 					res = L2CAP_CR_SUCCESS;
7438 					stat = L2CAP_CS_NO_INFO;
7439 				}
7440 			} else {
7441 				l2cap_state_change(chan, BT_DISCONN);
7442 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7443 				res = L2CAP_CR_SEC_BLOCK;
7444 				stat = L2CAP_CS_NO_INFO;
7445 			}
7446 
7447 			rsp.scid   = cpu_to_le16(chan->dcid);
7448 			rsp.dcid   = cpu_to_le16(chan->scid);
7449 			rsp.result = cpu_to_le16(res);
7450 			rsp.status = cpu_to_le16(stat);
7451 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
7452 				       sizeof(rsp), &rsp);
7453 
7454 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7455 			    res == L2CAP_CR_SUCCESS) {
7456 				char buf[128];
7457 				set_bit(CONF_REQ_SENT, &chan->conf_state);
7458 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
7459 					       L2CAP_CONF_REQ,
7460 					       l2cap_build_conf_req(chan, buf),
7461 					       buf);
7462 				chan->num_conf_req++;
7463 			}
7464 		}
7465 
7466 		l2cap_chan_unlock(chan);
7467 	}
7468 
7469 	mutex_unlock(&conn->chan_lock);
7470 }
7471 
7472 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7473 {
7474 	struct l2cap_conn *conn = hcon->l2cap_data;
7475 	struct l2cap_hdr *hdr;
7476 	int len;
7477 
7478 	/* For an AMP controller, do not create an l2cap conn */
7479 	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
7480 		goto drop;
7481 
7482 	if (!conn)
7483 		conn = l2cap_conn_add(hcon);
7484 
7485 	if (!conn)
7486 		goto drop;
7487 
7488 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7489 
7490 	switch (flags) {
7491 	case ACL_START:
7492 	case ACL_START_NO_FLUSH:
7493 	case ACL_COMPLETE:
7494 		if (conn->rx_len) {
7495 			BT_ERR("Unexpected start frame (len %d)", skb->len);
7496 			kfree_skb(conn->rx_skb);
7497 			conn->rx_skb = NULL;
7498 			conn->rx_len = 0;
7499 			l2cap_conn_unreliable(conn, ECOMM);
7500 		}
7501 
7502 		/* A start fragment always begins with the Basic L2CAP header */
7503 		if (skb->len < L2CAP_HDR_SIZE) {
7504 			BT_ERR("Frame is too short (len %d)", skb->len);
7505 			l2cap_conn_unreliable(conn, ECOMM);
7506 			goto drop;
7507 		}
7508 
7509 		hdr = (struct l2cap_hdr *) skb->data;
7510 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7511 
7512 		if (len == skb->len) {
7513 			/* Complete frame received */
7514 			l2cap_recv_frame(conn, skb);
7515 			return;
7516 		}
7517 
7518 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7519 
7520 		if (skb->len > len) {
7521 			BT_ERR("Frame is too long (len %d, expected len %d)",
7522 			       skb->len, len);
7523 			l2cap_conn_unreliable(conn, ECOMM);
7524 			goto drop;
7525 		}
7526 
7527 		/* Allocate skb for the complete frame (with header) */
7528 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7529 		if (!conn->rx_skb)
7530 			goto drop;
7531 
7532 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7533 					  skb->len);
7534 		conn->rx_len = len - skb->len;
7535 		break;
7536 
7537 	case ACL_CONT:
7538 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7539 
7540 		if (!conn->rx_len) {
7541 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7542 			l2cap_conn_unreliable(conn, ECOMM);
7543 			goto drop;
7544 		}
7545 
7546 		if (skb->len > conn->rx_len) {
7547 			BT_ERR("Fragment is too long (len %d, expected %d)",
7548 			       skb->len, conn->rx_len);
7549 			kfree_skb(conn->rx_skb);
7550 			conn->rx_skb = NULL;
7551 			conn->rx_len = 0;
7552 			l2cap_conn_unreliable(conn, ECOMM);
7553 			goto drop;
7554 		}
7555 
7556 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7557 					  skb->len);
7558 		conn->rx_len -= skb->len;
7559 
7560 		if (!conn->rx_len) {
7561 			/* Complete frame received. l2cap_recv_frame
7562 			 * takes ownership of the skb, so clear the
7563 			 * connection's rx_skb pointer first.
7564 			 */
7565 			struct sk_buff *rx_skb = conn->rx_skb;
7566 			conn->rx_skb = NULL;
7567 			l2cap_recv_frame(conn, rx_skb);
7568 		}
7569 		break;
7570 	}
7571 
7572 drop:
7573 	kfree_skb(skb);
7574 }
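
/*
 * Illustrative fragment sequence for the handling above, with
 * hypothetical lengths: an L2CAP frame of 300 payload octets plus the
 * 4-octet basic header (304 total) arriving in three HCI ACL fragments:
 *
 *   ACL_START, 100 octets: header says len 300, so rx_skb is allocated
 *                          for 304 octets and rx_len becomes 204
 *   ACL_CONT,  150 octets: copied in, rx_len becomes 54
 *   ACL_CONT,   54 octets: copied in, rx_len hits 0 and the complete
 *                          frame is handed to l2cap_recv_frame()
 *
 * A start fragment while rx_len is non-zero, or any fragment longer than
 * expected, marks the connection unreliable and drops the partial frame.
 */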
7575 
7576 static struct hci_cb l2cap_cb = {
7577 	.name		= "L2CAP",
7578 	.connect_cfm	= l2cap_connect_cfm,
7579 	.disconn_cfm	= l2cap_disconn_cfm,
7580 	.security_cfm	= l2cap_security_cfm,
7581 };
7582 
7583 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7584 {
7585 	struct l2cap_chan *c;
7586 
7587 	read_lock(&chan_list_lock);
7588 
7589 	list_for_each_entry(c, &chan_list, global_l) {
7590 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7591 			   &c->src, c->src_type, &c->dst, c->dst_type,
7592 			   c->state, __le16_to_cpu(c->psm),
7593 			   c->scid, c->dcid, c->imtu, c->omtu,
7594 			   c->sec_level, c->mode);
7595 	}
7596 
7597 	read_unlock(&chan_list_lock);
7598 
7599 	return 0;
7600 }
7601 
7602 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7603 {
7604 	return single_open(file, l2cap_debugfs_show, inode->i_private);
7605 }
7606 
7607 static const struct file_operations l2cap_debugfs_fops = {
7608 	.open		= l2cap_debugfs_open,
7609 	.read		= seq_read,
7610 	.llseek		= seq_lseek,
7611 	.release	= single_release,
7612 };
7613 
7614 static struct dentry *l2cap_debugfs;
7615 
7616 int __init l2cap_init(void)
7617 {
7618 	int err;
7619 
7620 	err = l2cap_init_sockets();
7621 	if (err < 0)
7622 		return err;
7623 
7624 	hci_register_cb(&l2cap_cb);
7625 
7626 	if (IS_ERR_OR_NULL(bt_debugfs))
7627 		return 0;
7628 
7629 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7630 					    NULL, &l2cap_debugfs_fops);
7631 
7632 	debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7633 			   &le_max_credits);
7634 	debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7635 			   &le_default_mps);
7636 
7637 	return 0;
7638 }
7639 
7640 void l2cap_exit(void)
7641 {
7642 	debugfs_remove(l2cap_debugfs);
7643 	hci_unregister_cb(&l2cap_cb);
7644 	l2cap_cleanup_sockets();
7645 }
7646 
7647 module_param(disable_ertm, bool, 0644);
7648 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
7649