xref: /openbmc/linux/net/bluetooth/l2cap_core.c (revision 3557b3fd)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
50 
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
53 
54 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
55 				       u8 code, u8 ident, u16 dlen, void *data);
56 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
57 			   void *data);
58 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
59 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
60 
61 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
62 		     struct sk_buff_head *skbs, u8 event);
63 
64 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
65 {
66 	if (link_type == LE_LINK) {
67 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
68 			return BDADDR_LE_PUBLIC;
69 		else
70 			return BDADDR_LE_RANDOM;
71 	}
72 
73 	return BDADDR_BREDR;
74 }
75 
76 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
77 {
78 	return bdaddr_type(hcon->type, hcon->src_type);
79 }
80 
81 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
82 {
83 	return bdaddr_type(hcon->type, hcon->dst_type);
84 }
85 
86 /* ---- L2CAP channels ---- */
87 
88 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
89 						   u16 cid)
90 {
91 	struct l2cap_chan *c;
92 
93 	list_for_each_entry(c, &conn->chan_l, list) {
94 		if (c->dcid == cid)
95 			return c;
96 	}
97 	return NULL;
98 }
99 
100 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
101 						   u16 cid)
102 {
103 	struct l2cap_chan *c;
104 
105 	list_for_each_entry(c, &conn->chan_l, list) {
106 		if (c->scid == cid)
107 			return c;
108 	}
109 	return NULL;
110 }
111 
112 /* Find channel with given SCID.
113  * Returns locked channel. */
114 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
115 						 u16 cid)
116 {
117 	struct l2cap_chan *c;
118 
119 	mutex_lock(&conn->chan_lock);
120 	c = __l2cap_get_chan_by_scid(conn, cid);
121 	if (c)
122 		l2cap_chan_lock(c);
123 	mutex_unlock(&conn->chan_lock);
124 
125 	return c;
126 }
127 
128 /* Find channel with given DCID.
129  * Returns locked channel.
130  */
131 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
132 						 u16 cid)
133 {
134 	struct l2cap_chan *c;
135 
136 	mutex_lock(&conn->chan_lock);
137 	c = __l2cap_get_chan_by_dcid(conn, cid);
138 	if (c)
139 		l2cap_chan_lock(c);
140 	mutex_unlock(&conn->chan_lock);
141 
142 	return c;
143 }
144 
145 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
146 						    u8 ident)
147 {
148 	struct l2cap_chan *c;
149 
150 	list_for_each_entry(c, &conn->chan_l, list) {
151 		if (c->ident == ident)
152 			return c;
153 	}
154 	return NULL;
155 }
156 
157 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
158 						  u8 ident)
159 {
160 	struct l2cap_chan *c;
161 
162 	mutex_lock(&conn->chan_lock);
163 	c = __l2cap_get_chan_by_ident(conn, ident);
164 	if (c)
165 		l2cap_chan_lock(c);
166 	mutex_unlock(&conn->chan_lock);
167 
168 	return c;
169 }
170 
171 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
172 {
173 	struct l2cap_chan *c;
174 
175 	list_for_each_entry(c, &chan_list, global_l) {
176 		if (c->sport == psm && !bacmp(&c->src, src))
177 			return c;
178 	}
179 	return NULL;
180 }
181 
182 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
183 {
184 	int err;
185 
186 	write_lock(&chan_list_lock);
187 
188 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
189 		err = -EADDRINUSE;
190 		goto done;
191 	}
192 
193 	if (psm) {
194 		chan->psm = psm;
195 		chan->sport = psm;
196 		err = 0;
197 	} else {
198 		u16 p, start, end, incr;
199 
200 		if (chan->src_type == BDADDR_BREDR) {
201 			start = L2CAP_PSM_DYN_START;
202 			end = L2CAP_PSM_AUTO_END;
203 			incr = 2;
204 		} else {
205 			start = L2CAP_PSM_LE_DYN_START;
206 			end = L2CAP_PSM_LE_DYN_END;
207 			incr = 1;
208 		}
209 
210 		err = -EINVAL;
211 		for (p = start; p <= end; p += incr)
212 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
213 				chan->psm   = cpu_to_le16(p);
214 				chan->sport = cpu_to_le16(p);
215 				err = 0;
216 				break;
217 			}
218 	}
219 
220 done:
221 	write_unlock(&chan_list_lock);
222 	return err;
223 }
224 EXPORT_SYMBOL_GPL(l2cap_add_psm);
225 
226 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
227 {
228 	write_lock(&chan_list_lock);
229 
230 	/* Override the defaults (which are for conn-oriented) */
231 	chan->omtu = L2CAP_DEFAULT_MTU;
232 	chan->chan_type = L2CAP_CHAN_FIXED;
233 
234 	chan->scid = scid;
235 
236 	write_unlock(&chan_list_lock);
237 
238 	return 0;
239 }
240 
241 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
242 {
243 	u16 cid, dyn_end;
244 
245 	if (conn->hcon->type == LE_LINK)
246 		dyn_end = L2CAP_CID_LE_DYN_END;
247 	else
248 		dyn_end = L2CAP_CID_DYN_END;
249 
250 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
251 		if (!__l2cap_get_chan_by_scid(conn, cid))
252 			return cid;
253 	}
254 
255 	return 0;
256 }
257 
258 static void l2cap_state_change(struct l2cap_chan *chan, int state)
259 {
260 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
261 	       state_to_string(state));
262 
263 	chan->state = state;
264 	chan->ops->state_change(chan, state, 0);
265 }
266 
267 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
268 						int state, int err)
269 {
270 	chan->state = state;
271 	chan->ops->state_change(chan, chan->state, err);
272 }
273 
274 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
275 {
276 	chan->ops->state_change(chan, chan->state, err);
277 }
278 
279 static void __set_retrans_timer(struct l2cap_chan *chan)
280 {
281 	if (!delayed_work_pending(&chan->monitor_timer) &&
282 	    chan->retrans_timeout) {
283 		l2cap_set_timer(chan, &chan->retrans_timer,
284 				msecs_to_jiffies(chan->retrans_timeout));
285 	}
286 }
287 
288 static void __set_monitor_timer(struct l2cap_chan *chan)
289 {
290 	__clear_retrans_timer(chan);
291 	if (chan->monitor_timeout) {
292 		l2cap_set_timer(chan, &chan->monitor_timer,
293 				msecs_to_jiffies(chan->monitor_timeout));
294 	}
295 }
296 
297 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
298 					       u16 seq)
299 {
300 	struct sk_buff *skb;
301 
302 	skb_queue_walk(head, skb) {
303 		if (bt_cb(skb)->l2cap.txseq == seq)
304 			return skb;
305 	}
306 
307 	return NULL;
308 }
309 
310 /* ---- L2CAP sequence number lists ---- */
311 
312 /* For ERTM, ordered lists of sequence numbers must be tracked for
313  * SREJ requests that are received and for frames that are to be
314  * retransmitted. These seq_list functions implement a singly-linked
315  * list in an array, where membership in the list can also be checked
316  * in constant time. Items can also be added to the tail of the list
317  * and removed from the head in constant time, without further memory
318  * allocs or frees.
319  */
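
/* A minimal usage sketch, assuming the srej_list and tx_win fields used by
 * the ERTM code later in this file: the list is sized from the negotiated
 * transmit window and then treated as a FIFO of sequence numbers.
 *
 *	if (l2cap_seq_list_init(&chan->srej_list, chan->tx_win))
 *		return -ENOMEM;
 *	if (!l2cap_seq_list_contains(&chan->srej_list, txseq))
 *		l2cap_seq_list_append(&chan->srej_list, txseq);
 *	while (chan->srej_list.head != L2CAP_SEQ_LIST_CLEAR)
 *		handle_seq(l2cap_seq_list_pop(&chan->srej_list));
 *	l2cap_seq_list_free(&chan->srej_list);
 *
 * handle_seq() is a hypothetical consumer used only for illustration.
 */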
320 
321 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
322 {
323 	size_t alloc_size, i;
324 
325 	/* Allocated size is a power of 2 to map sequence numbers
326 	 * (which may be up to 14 bits) into a smaller array that is
327 	 * sized for the negotiated ERTM transmit window.
328 	 */
329 	alloc_size = roundup_pow_of_two(size);
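	/* For example, the default transmit window of 63 rounds up to an
	 * alloc_size of 64, so the mask below is 0x3f and a 14-bit sequence
	 * number n maps to slot (n & 0x3f).
	 */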
330 
331 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
332 	if (!seq_list->list)
333 		return -ENOMEM;
334 
335 	seq_list->mask = alloc_size - 1;
336 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
337 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
338 	for (i = 0; i < alloc_size; i++)
339 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
340 
341 	return 0;
342 }
343 
344 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
345 {
346 	kfree(seq_list->list);
347 }
348 
349 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
350 					   u16 seq)
351 {
352 	/* Constant-time check for list membership */
353 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
354 }
355 
356 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
357 {
358 	u16 seq = seq_list->head;
359 	u16 mask = seq_list->mask;
360 
361 	seq_list->head = seq_list->list[seq & mask];
362 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
363 
364 	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
365 		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
366 		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
367 	}
368 
369 	return seq;
370 }
371 
372 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
373 {
374 	u16 i;
375 
376 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
377 		return;
378 
379 	for (i = 0; i <= seq_list->mask; i++)
380 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
381 
382 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
383 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
384 }
385 
386 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
387 {
388 	u16 mask = seq_list->mask;
389 
390 	/* All appends happen in constant time */
391 
392 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
393 		return;
394 
395 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
396 		seq_list->head = seq;
397 	else
398 		seq_list->list[seq_list->tail & mask] = seq;
399 
400 	seq_list->tail = seq;
401 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
402 }
403 
404 static void l2cap_chan_timeout(struct work_struct *work)
405 {
406 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
407 					       chan_timer.work);
408 	struct l2cap_conn *conn = chan->conn;
409 	int reason;
410 
411 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
412 
413 	mutex_lock(&conn->chan_lock);
414 	l2cap_chan_lock(chan);
415 
416 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
417 		reason = ECONNREFUSED;
418 	else if (chan->state == BT_CONNECT &&
419 		 chan->sec_level != BT_SECURITY_SDP)
420 		reason = ECONNREFUSED;
421 	else
422 		reason = ETIMEDOUT;
423 
424 	l2cap_chan_close(chan, reason);
425 
426 	l2cap_chan_unlock(chan);
427 
428 	chan->ops->close(chan);
429 	mutex_unlock(&conn->chan_lock);
430 
431 	l2cap_chan_put(chan);
432 }
433 
434 struct l2cap_chan *l2cap_chan_create(void)
435 {
436 	struct l2cap_chan *chan;
437 
438 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
439 	if (!chan)
440 		return NULL;
441 
442 	mutex_init(&chan->lock);
443 
444 	/* Set default lock nesting level */
445 	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
446 
447 	write_lock(&chan_list_lock);
448 	list_add(&chan->global_l, &chan_list);
449 	write_unlock(&chan_list_lock);
450 
451 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
452 
453 	chan->state = BT_OPEN;
454 
455 	kref_init(&chan->kref);
456 
457 	/* This flag is cleared in l2cap_chan_ready() */
458 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
459 
460 	BT_DBG("chan %p", chan);
461 
462 	return chan;
463 }
464 EXPORT_SYMBOL_GPL(l2cap_chan_create);
465 
466 static void l2cap_chan_destroy(struct kref *kref)
467 {
468 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
469 
470 	BT_DBG("chan %p", chan);
471 
472 	write_lock(&chan_list_lock);
473 	list_del(&chan->global_l);
474 	write_unlock(&chan_list_lock);
475 
476 	kfree(chan);
477 }
478 
479 void l2cap_chan_hold(struct l2cap_chan *c)
480 {
481 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
482 
483 	kref_get(&c->kref);
484 }
485 
486 void l2cap_chan_put(struct l2cap_chan *c)
487 {
488 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
489 
490 	kref_put(&c->kref, l2cap_chan_destroy);
491 }
492 EXPORT_SYMBOL_GPL(l2cap_chan_put);
493 
494 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
495 {
496 	chan->fcs  = L2CAP_FCS_CRC16;
497 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
498 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
499 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
500 	chan->remote_max_tx = chan->max_tx;
501 	chan->remote_tx_win = chan->tx_win;
502 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
503 	chan->sec_level = BT_SECURITY_LOW;
504 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
505 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
506 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
507 	chan->conf_state = 0;
508 
509 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
510 }
511 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
512 
513 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
514 {
515 	chan->sdu = NULL;
516 	chan->sdu_last_frag = NULL;
517 	chan->sdu_len = 0;
518 	chan->tx_credits = 0;
519 	/* Derive MPS from connection MTU to stop HCI fragmentation */
520 	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
521 	/* Give enough credits for a full packet */
522 	chan->rx_credits = (chan->imtu / chan->mps) + 1;
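	/* Worked example with assumed numbers: an imtu of 672 on a link
	 * whose conn->mtu is 247 gives mps = min(672, 247 - 4) = 243 and
	 * rx_credits = 672 / 243 + 1 = 3, enough for one full SDU.
	 */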
523 
524 	skb_queue_head_init(&chan->tx_q);
525 }
526 
527 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
528 {
529 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
530 	       __le16_to_cpu(chan->psm), chan->dcid);
531 
532 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
533 
534 	chan->conn = conn;
535 
536 	switch (chan->chan_type) {
537 	case L2CAP_CHAN_CONN_ORIENTED:
538 		/* Alloc CID for connection-oriented socket */
539 		chan->scid = l2cap_alloc_cid(conn);
540 		if (conn->hcon->type == ACL_LINK)
541 			chan->omtu = L2CAP_DEFAULT_MTU;
542 		break;
543 
544 	case L2CAP_CHAN_CONN_LESS:
545 		/* Connectionless socket */
546 		chan->scid = L2CAP_CID_CONN_LESS;
547 		chan->dcid = L2CAP_CID_CONN_LESS;
548 		chan->omtu = L2CAP_DEFAULT_MTU;
549 		break;
550 
551 	case L2CAP_CHAN_FIXED:
552 		/* Caller will set CID and CID specific MTU values */
553 		break;
554 
555 	default:
556 		/* Raw socket can send/recv signalling messages only */
557 		chan->scid = L2CAP_CID_SIGNALING;
558 		chan->dcid = L2CAP_CID_SIGNALING;
559 		chan->omtu = L2CAP_DEFAULT_MTU;
560 	}
561 
562 	chan->local_id		= L2CAP_BESTEFFORT_ID;
563 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
564 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
565 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
566 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
567 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
568 
569 	l2cap_chan_hold(chan);
570 
571 	/* Only keep a reference for fixed channels if they requested it */
572 	if (chan->chan_type != L2CAP_CHAN_FIXED ||
573 	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
574 		hci_conn_hold(conn->hcon);
575 
576 	list_add(&chan->list, &conn->chan_l);
577 }
578 
579 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
580 {
581 	mutex_lock(&conn->chan_lock);
582 	__l2cap_chan_add(conn, chan);
583 	mutex_unlock(&conn->chan_lock);
584 }
585 
586 void l2cap_chan_del(struct l2cap_chan *chan, int err)
587 {
588 	struct l2cap_conn *conn = chan->conn;
589 
590 	__clear_chan_timer(chan);
591 
592 	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
593 	       state_to_string(chan->state));
594 
595 	chan->ops->teardown(chan, err);
596 
597 	if (conn) {
598 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
599 		/* Delete from channel list */
600 		list_del(&chan->list);
601 
602 		l2cap_chan_put(chan);
603 
604 		chan->conn = NULL;
605 
606 		/* Reference was only held for non-fixed channels or
607 		 * fixed channels that explicitly requested it using the
608 		 * FLAG_HOLD_HCI_CONN flag.
609 		 */
610 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
611 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
612 			hci_conn_drop(conn->hcon);
613 
614 		if (mgr && mgr->bredr_chan == chan)
615 			mgr->bredr_chan = NULL;
616 	}
617 
618 	if (chan->hs_hchan) {
619 		struct hci_chan *hs_hchan = chan->hs_hchan;
620 
621 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
622 		amp_disconnect_logical_link(hs_hchan);
623 	}
624 
625 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
626 		return;
627 
628 	switch (chan->mode) {
629 	case L2CAP_MODE_BASIC:
630 		break;
631 
632 	case L2CAP_MODE_LE_FLOWCTL:
633 		skb_queue_purge(&chan->tx_q);
634 		break;
635 
636 	case L2CAP_MODE_ERTM:
637 		__clear_retrans_timer(chan);
638 		__clear_monitor_timer(chan);
639 		__clear_ack_timer(chan);
640 
641 		skb_queue_purge(&chan->srej_q);
642 
643 		l2cap_seq_list_free(&chan->srej_list);
644 		l2cap_seq_list_free(&chan->retrans_list);
645 
646 		/* fall through */
647 
648 	case L2CAP_MODE_STREAMING:
649 		skb_queue_purge(&chan->tx_q);
650 		break;
651 	}
652 
653 	return;
654 }
655 EXPORT_SYMBOL_GPL(l2cap_chan_del);
656 
657 static void l2cap_conn_update_id_addr(struct work_struct *work)
658 {
659 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
660 					       id_addr_update_work);
661 	struct hci_conn *hcon = conn->hcon;
662 	struct l2cap_chan *chan;
663 
664 	mutex_lock(&conn->chan_lock);
665 
666 	list_for_each_entry(chan, &conn->chan_l, list) {
667 		l2cap_chan_lock(chan);
668 		bacpy(&chan->dst, &hcon->dst);
669 		chan->dst_type = bdaddr_dst_type(hcon);
670 		l2cap_chan_unlock(chan);
671 	}
672 
673 	mutex_unlock(&conn->chan_lock);
674 }
675 
676 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
677 {
678 	struct l2cap_conn *conn = chan->conn;
679 	struct l2cap_le_conn_rsp rsp;
680 	u16 result;
681 
682 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
683 		result = L2CAP_CR_LE_AUTHORIZATION;
684 	else
685 		result = L2CAP_CR_LE_BAD_PSM;
686 
687 	l2cap_state_change(chan, BT_DISCONN);
688 
689 	rsp.dcid    = cpu_to_le16(chan->scid);
690 	rsp.mtu     = cpu_to_le16(chan->imtu);
691 	rsp.mps     = cpu_to_le16(chan->mps);
692 	rsp.credits = cpu_to_le16(chan->rx_credits);
693 	rsp.result  = cpu_to_le16(result);
694 
695 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
696 		       &rsp);
697 }
698 
699 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
700 {
701 	struct l2cap_conn *conn = chan->conn;
702 	struct l2cap_conn_rsp rsp;
703 	u16 result;
704 
705 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
706 		result = L2CAP_CR_SEC_BLOCK;
707 	else
708 		result = L2CAP_CR_BAD_PSM;
709 
710 	l2cap_state_change(chan, BT_DISCONN);
711 
712 	rsp.scid   = cpu_to_le16(chan->dcid);
713 	rsp.dcid   = cpu_to_le16(chan->scid);
714 	rsp.result = cpu_to_le16(result);
715 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
716 
717 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
718 }
719 
720 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
721 {
722 	struct l2cap_conn *conn = chan->conn;
723 
724 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
725 
726 	switch (chan->state) {
727 	case BT_LISTEN:
728 		chan->ops->teardown(chan, 0);
729 		break;
730 
731 	case BT_CONNECTED:
732 	case BT_CONFIG:
733 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
734 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
735 			l2cap_send_disconn_req(chan, reason);
736 		} else
737 			l2cap_chan_del(chan, reason);
738 		break;
739 
740 	case BT_CONNECT2:
741 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
742 			if (conn->hcon->type == ACL_LINK)
743 				l2cap_chan_connect_reject(chan);
744 			else if (conn->hcon->type == LE_LINK)
745 				l2cap_chan_le_connect_reject(chan);
746 		}
747 
748 		l2cap_chan_del(chan, reason);
749 		break;
750 
751 	case BT_CONNECT:
752 	case BT_DISCONN:
753 		l2cap_chan_del(chan, reason);
754 		break;
755 
756 	default:
757 		chan->ops->teardown(chan, 0);
758 		break;
759 	}
760 }
761 EXPORT_SYMBOL(l2cap_chan_close);
762 
763 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
764 {
765 	switch (chan->chan_type) {
766 	case L2CAP_CHAN_RAW:
767 		switch (chan->sec_level) {
768 		case BT_SECURITY_HIGH:
769 		case BT_SECURITY_FIPS:
770 			return HCI_AT_DEDICATED_BONDING_MITM;
771 		case BT_SECURITY_MEDIUM:
772 			return HCI_AT_DEDICATED_BONDING;
773 		default:
774 			return HCI_AT_NO_BONDING;
775 		}
776 		break;
777 	case L2CAP_CHAN_CONN_LESS:
778 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
779 			if (chan->sec_level == BT_SECURITY_LOW)
780 				chan->sec_level = BT_SECURITY_SDP;
781 		}
782 		if (chan->sec_level == BT_SECURITY_HIGH ||
783 		    chan->sec_level == BT_SECURITY_FIPS)
784 			return HCI_AT_NO_BONDING_MITM;
785 		else
786 			return HCI_AT_NO_BONDING;
787 		break;
788 	case L2CAP_CHAN_CONN_ORIENTED:
789 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
790 			if (chan->sec_level == BT_SECURITY_LOW)
791 				chan->sec_level = BT_SECURITY_SDP;
792 
793 			if (chan->sec_level == BT_SECURITY_HIGH ||
794 			    chan->sec_level == BT_SECURITY_FIPS)
795 				return HCI_AT_NO_BONDING_MITM;
796 			else
797 				return HCI_AT_NO_BONDING;
798 		}
799 		/* fall through */
800 	default:
801 		switch (chan->sec_level) {
802 		case BT_SECURITY_HIGH:
803 		case BT_SECURITY_FIPS:
804 			return HCI_AT_GENERAL_BONDING_MITM;
805 		case BT_SECURITY_MEDIUM:
806 			return HCI_AT_GENERAL_BONDING;
807 		default:
808 			return HCI_AT_NO_BONDING;
809 		}
810 		break;
811 	}
812 }
813 
814 /* Service level security */
815 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
816 {
817 	struct l2cap_conn *conn = chan->conn;
818 	__u8 auth_type;
819 
820 	if (conn->hcon->type == LE_LINK)
821 		return smp_conn_security(conn->hcon, chan->sec_level);
822 
823 	auth_type = l2cap_get_auth_type(chan);
824 
825 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
826 				 initiator);
827 }
828 
829 static u8 l2cap_get_ident(struct l2cap_conn *conn)
830 {
831 	u8 id;
832 
833 	/* Get next available identifier.
834 	 *    1 - 128 are used by kernel.
835 	 *  129 - 199 are reserved.
836 	 *  200 - 254 are used by utilities like l2ping, etc.
837 	 */
838 
839 	mutex_lock(&conn->ident_lock);
840 
841 	if (++conn->tx_ident > 128)
842 		conn->tx_ident = 1;
843 
844 	id = conn->tx_ident;
845 
846 	mutex_unlock(&conn->ident_lock);
847 
848 	return id;
849 }
850 
851 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
852 			   void *data)
853 {
854 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
855 	u8 flags;
856 
857 	BT_DBG("code 0x%2.2x", code);
858 
859 	if (!skb)
860 		return;
861 
862 	/* Use NO_FLUSH if supported or we have an LE link (which does
863 	 * not support auto-flushing packets) */
864 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
865 	    conn->hcon->type == LE_LINK)
866 		flags = ACL_START_NO_FLUSH;
867 	else
868 		flags = ACL_START;
869 
870 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
871 	skb->priority = HCI_PRIO_MAX;
872 
873 	hci_send_acl(conn->hchan, skb, flags);
874 }
875 
876 static bool __chan_is_moving(struct l2cap_chan *chan)
877 {
878 	return chan->move_state != L2CAP_MOVE_STABLE &&
879 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
880 }
881 
882 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
883 {
884 	struct hci_conn *hcon = chan->conn->hcon;
885 	u16 flags;
886 
887 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
888 	       skb->priority);
889 
890 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
891 		if (chan->hs_hchan)
892 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
893 		else
894 			kfree_skb(skb);
895 
896 		return;
897 	}
898 
899 	/* Use NO_FLUSH for LE links (where this is the only option) or
900 	 * if the BR/EDR link supports it and flushing has not been
901 	 * explicitly requested (through FLAG_FLUSHABLE).
902 	 */
903 	if (hcon->type == LE_LINK ||
904 	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
905 	     lmp_no_flush_capable(hcon->hdev)))
906 		flags = ACL_START_NO_FLUSH;
907 	else
908 		flags = ACL_START;
909 
910 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
911 	hci_send_acl(chan->conn->hchan, skb, flags);
912 }
913 
914 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
915 {
916 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
917 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
918 
919 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
920 		/* S-Frame */
921 		control->sframe = 1;
922 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
923 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
924 
925 		control->sar = 0;
926 		control->txseq = 0;
927 	} else {
928 		/* I-Frame */
929 		control->sframe = 0;
930 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
931 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
932 
933 		control->poll = 0;
934 		control->super = 0;
935 	}
936 }
937 
938 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
939 {
940 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
941 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
942 
943 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
944 		/* S-Frame */
945 		control->sframe = 1;
946 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
947 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
948 
949 		control->sar = 0;
950 		control->txseq = 0;
951 	} else {
952 		/* I-Frame */
953 		control->sframe = 0;
954 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
955 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
956 
957 		control->poll = 0;
958 		control->super = 0;
959 	}
960 }
961 
962 static inline void __unpack_control(struct l2cap_chan *chan,
963 				    struct sk_buff *skb)
964 {
965 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
966 		__unpack_extended_control(get_unaligned_le32(skb->data),
967 					  &bt_cb(skb)->l2cap);
968 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
969 	} else {
970 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
971 					  &bt_cb(skb)->l2cap);
972 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
973 	}
974 }
975 
976 static u32 __pack_extended_control(struct l2cap_ctrl *control)
977 {
978 	u32 packed;
979 
980 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
981 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
982 
983 	if (control->sframe) {
984 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
985 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
986 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
987 	} else {
988 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
989 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
990 	}
991 
992 	return packed;
993 }
994 
995 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
996 {
997 	u16 packed;
998 
999 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1000 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1001 
1002 	if (control->sframe) {
1003 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1004 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1005 		packed |= L2CAP_CTRL_FRAME_TYPE;
1006 	} else {
1007 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1008 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1009 	}
1010 
1011 	return packed;
1012 }
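
/* Worked example: an RR S-frame with reqseq 5, the final bit set and the
 * poll bit clear packs to (5 << L2CAP_CTRL_REQSEQ_SHIFT) |
 * (1 << L2CAP_CTRL_FINAL_SHIFT) | L2CAP_CTRL_FRAME_TYPE = 0x0581, assuming
 * the usual shift values of 8 for reqseq and 7 for the final bit.
 */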
1013 
1014 static inline void __pack_control(struct l2cap_chan *chan,
1015 				  struct l2cap_ctrl *control,
1016 				  struct sk_buff *skb)
1017 {
1018 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1019 		put_unaligned_le32(__pack_extended_control(control),
1020 				   skb->data + L2CAP_HDR_SIZE);
1021 	} else {
1022 		put_unaligned_le16(__pack_enhanced_control(control),
1023 				   skb->data + L2CAP_HDR_SIZE);
1024 	}
1025 }
1026 
1027 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1028 {
1029 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1030 		return L2CAP_EXT_HDR_SIZE;
1031 	else
1032 		return L2CAP_ENH_HDR_SIZE;
1033 }
1034 
1035 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1036 					       u32 control)
1037 {
1038 	struct sk_buff *skb;
1039 	struct l2cap_hdr *lh;
1040 	int hlen = __ertm_hdr_size(chan);
1041 
1042 	if (chan->fcs == L2CAP_FCS_CRC16)
1043 		hlen += L2CAP_FCS_SIZE;
1044 
1045 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
1046 
1047 	if (!skb)
1048 		return ERR_PTR(-ENOMEM);
1049 
1050 	lh = skb_put(skb, L2CAP_HDR_SIZE);
1051 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1052 	lh->cid = cpu_to_le16(chan->dcid);
1053 
1054 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1055 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1056 	else
1057 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1058 
1059 	if (chan->fcs == L2CAP_FCS_CRC16) {
1060 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1061 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1062 	}
1063 
1064 	skb->priority = HCI_PRIO_MAX;
1065 	return skb;
1066 }
1067 
1068 static void l2cap_send_sframe(struct l2cap_chan *chan,
1069 			      struct l2cap_ctrl *control)
1070 {
1071 	struct sk_buff *skb;
1072 	u32 control_field;
1073 
1074 	BT_DBG("chan %p, control %p", chan, control);
1075 
1076 	if (!control->sframe)
1077 		return;
1078 
1079 	if (__chan_is_moving(chan))
1080 		return;
1081 
1082 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1083 	    !control->poll)
1084 		control->final = 1;
1085 
1086 	if (control->super == L2CAP_SUPER_RR)
1087 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
1088 	else if (control->super == L2CAP_SUPER_RNR)
1089 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1090 
1091 	if (control->super != L2CAP_SUPER_SREJ) {
1092 		chan->last_acked_seq = control->reqseq;
1093 		__clear_ack_timer(chan);
1094 	}
1095 
1096 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1097 	       control->final, control->poll, control->super);
1098 
1099 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1100 		control_field = __pack_extended_control(control);
1101 	else
1102 		control_field = __pack_enhanced_control(control);
1103 
1104 	skb = l2cap_create_sframe_pdu(chan, control_field);
1105 	if (!IS_ERR(skb))
1106 		l2cap_do_send(chan, skb);
1107 }
1108 
1109 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1110 {
1111 	struct l2cap_ctrl control;
1112 
1113 	BT_DBG("chan %p, poll %d", chan, poll);
1114 
1115 	memset(&control, 0, sizeof(control));
1116 	control.sframe = 1;
1117 	control.poll = poll;
1118 
1119 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1120 		control.super = L2CAP_SUPER_RNR;
1121 	else
1122 		control.super = L2CAP_SUPER_RR;
1123 
1124 	control.reqseq = chan->buffer_seq;
1125 	l2cap_send_sframe(chan, &control);
1126 }
1127 
1128 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1129 {
1130 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1131 		return true;
1132 
1133 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1134 }
1135 
1136 static bool __amp_capable(struct l2cap_chan *chan)
1137 {
1138 	struct l2cap_conn *conn = chan->conn;
1139 	struct hci_dev *hdev;
1140 	bool amp_available = false;
1141 
1142 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1143 		return false;
1144 
1145 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1146 		return false;
1147 
1148 	read_lock(&hci_dev_list_lock);
1149 	list_for_each_entry(hdev, &hci_dev_list, list) {
1150 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1151 		    test_bit(HCI_UP, &hdev->flags)) {
1152 			amp_available = true;
1153 			break;
1154 		}
1155 	}
1156 	read_unlock(&hci_dev_list_lock);
1157 
1158 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1159 		return amp_available;
1160 
1161 	return false;
1162 }
1163 
1164 static bool l2cap_check_efs(struct l2cap_chan *chan)
1165 {
1166 	/* Check EFS parameters */
1167 	return true;
1168 }
1169 
1170 void l2cap_send_conn_req(struct l2cap_chan *chan)
1171 {
1172 	struct l2cap_conn *conn = chan->conn;
1173 	struct l2cap_conn_req req;
1174 
1175 	req.scid = cpu_to_le16(chan->scid);
1176 	req.psm  = chan->psm;
1177 
1178 	chan->ident = l2cap_get_ident(conn);
1179 
1180 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1181 
1182 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1183 }
1184 
1185 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1186 {
1187 	struct l2cap_create_chan_req req;
1188 	req.scid = cpu_to_le16(chan->scid);
1189 	req.psm  = chan->psm;
1190 	req.amp_id = amp_id;
1191 
1192 	chan->ident = l2cap_get_ident(chan->conn);
1193 
1194 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1195 		       sizeof(req), &req);
1196 }
1197 
1198 static void l2cap_move_setup(struct l2cap_chan *chan)
1199 {
1200 	struct sk_buff *skb;
1201 
1202 	BT_DBG("chan %p", chan);
1203 
1204 	if (chan->mode != L2CAP_MODE_ERTM)
1205 		return;
1206 
1207 	__clear_retrans_timer(chan);
1208 	__clear_monitor_timer(chan);
1209 	__clear_ack_timer(chan);
1210 
1211 	chan->retry_count = 0;
1212 	skb_queue_walk(&chan->tx_q, skb) {
1213 		if (bt_cb(skb)->l2cap.retries)
1214 			bt_cb(skb)->l2cap.retries = 1;
1215 		else
1216 			break;
1217 	}
1218 
1219 	chan->expected_tx_seq = chan->buffer_seq;
1220 
1221 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1222 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1223 	l2cap_seq_list_clear(&chan->retrans_list);
1224 	l2cap_seq_list_clear(&chan->srej_list);
1225 	skb_queue_purge(&chan->srej_q);
1226 
1227 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1228 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1229 
1230 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1231 }
1232 
1233 static void l2cap_move_done(struct l2cap_chan *chan)
1234 {
1235 	u8 move_role = chan->move_role;
1236 	BT_DBG("chan %p", chan);
1237 
1238 	chan->move_state = L2CAP_MOVE_STABLE;
1239 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1240 
1241 	if (chan->mode != L2CAP_MODE_ERTM)
1242 		return;
1243 
1244 	switch (move_role) {
1245 	case L2CAP_MOVE_ROLE_INITIATOR:
1246 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1247 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1248 		break;
1249 	case L2CAP_MOVE_ROLE_RESPONDER:
1250 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1251 		break;
1252 	}
1253 }
1254 
1255 static void l2cap_chan_ready(struct l2cap_chan *chan)
1256 {
1257 	/* The channel may have already been flagged as connected in
1258 	 * case of receiving data before the L2CAP info req/rsp
1259 	 * procedure is complete.
1260 	 */
1261 	if (chan->state == BT_CONNECTED)
1262 		return;
1263 
1264 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1265 	chan->conf_state = 0;
1266 	__clear_chan_timer(chan);
1267 
1268 	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1269 		chan->ops->suspend(chan);
1270 
1271 	chan->state = BT_CONNECTED;
1272 
1273 	chan->ops->ready(chan);
1274 }
1275 
1276 static void l2cap_le_connect(struct l2cap_chan *chan)
1277 {
1278 	struct l2cap_conn *conn = chan->conn;
1279 	struct l2cap_le_conn_req req;
1280 
1281 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1282 		return;
1283 
1284 	l2cap_le_flowctl_init(chan);
1285 
1286 	req.psm     = chan->psm;
1287 	req.scid    = cpu_to_le16(chan->scid);
1288 	req.mtu     = cpu_to_le16(chan->imtu);
1289 	req.mps     = cpu_to_le16(chan->mps);
1290 	req.credits = cpu_to_le16(chan->rx_credits);
1291 
1292 	chan->ident = l2cap_get_ident(conn);
1293 
1294 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1295 		       sizeof(req), &req);
1296 }
1297 
1298 static void l2cap_le_start(struct l2cap_chan *chan)
1299 {
1300 	struct l2cap_conn *conn = chan->conn;
1301 
1302 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1303 		return;
1304 
1305 	if (!chan->psm) {
1306 		l2cap_chan_ready(chan);
1307 		return;
1308 	}
1309 
1310 	if (chan->state == BT_CONNECT)
1311 		l2cap_le_connect(chan);
1312 }
1313 
1314 static void l2cap_start_connection(struct l2cap_chan *chan)
1315 {
1316 	if (__amp_capable(chan)) {
1317 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1318 		a2mp_discover_amp(chan);
1319 	} else if (chan->conn->hcon->type == LE_LINK) {
1320 		l2cap_le_start(chan);
1321 	} else {
1322 		l2cap_send_conn_req(chan);
1323 	}
1324 }
1325 
1326 static void l2cap_request_info(struct l2cap_conn *conn)
1327 {
1328 	struct l2cap_info_req req;
1329 
1330 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1331 		return;
1332 
1333 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1334 
1335 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1336 	conn->info_ident = l2cap_get_ident(conn);
1337 
1338 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1339 
1340 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1341 		       sizeof(req), &req);
1342 }
1343 
1344 static void l2cap_do_start(struct l2cap_chan *chan)
1345 {
1346 	struct l2cap_conn *conn = chan->conn;
1347 
1348 	if (conn->hcon->type == LE_LINK) {
1349 		l2cap_le_start(chan);
1350 		return;
1351 	}
1352 
1353 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1354 		l2cap_request_info(conn);
1355 		return;
1356 	}
1357 
1358 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1359 		return;
1360 
1361 	if (l2cap_chan_check_security(chan, true) &&
1362 	    __l2cap_no_conn_pending(chan))
1363 		l2cap_start_connection(chan);
1364 }
1365 
1366 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1367 {
1368 	u32 local_feat_mask = l2cap_feat_mask;
1369 	if (!disable_ertm)
1370 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1371 
1372 	switch (mode) {
1373 	case L2CAP_MODE_ERTM:
1374 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1375 	case L2CAP_MODE_STREAMING:
1376 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1377 	default:
1378 		return 0x00;
1379 	}
1380 }
1381 
1382 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1383 {
1384 	struct l2cap_conn *conn = chan->conn;
1385 	struct l2cap_disconn_req req;
1386 
1387 	if (!conn)
1388 		return;
1389 
1390 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1391 		__clear_retrans_timer(chan);
1392 		__clear_monitor_timer(chan);
1393 		__clear_ack_timer(chan);
1394 	}
1395 
1396 	if (chan->scid == L2CAP_CID_A2MP) {
1397 		l2cap_state_change(chan, BT_DISCONN);
1398 		return;
1399 	}
1400 
1401 	req.dcid = cpu_to_le16(chan->dcid);
1402 	req.scid = cpu_to_le16(chan->scid);
1403 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1404 		       sizeof(req), &req);
1405 
1406 	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1407 }
1408 
1409 /* ---- L2CAP connections ---- */
1410 static void l2cap_conn_start(struct l2cap_conn *conn)
1411 {
1412 	struct l2cap_chan *chan, *tmp;
1413 
1414 	BT_DBG("conn %p", conn);
1415 
1416 	mutex_lock(&conn->chan_lock);
1417 
1418 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1419 		l2cap_chan_lock(chan);
1420 
1421 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1422 			l2cap_chan_ready(chan);
1423 			l2cap_chan_unlock(chan);
1424 			continue;
1425 		}
1426 
1427 		if (chan->state == BT_CONNECT) {
1428 			if (!l2cap_chan_check_security(chan, true) ||
1429 			    !__l2cap_no_conn_pending(chan)) {
1430 				l2cap_chan_unlock(chan);
1431 				continue;
1432 			}
1433 
1434 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1435 			    && test_bit(CONF_STATE2_DEVICE,
1436 					&chan->conf_state)) {
1437 				l2cap_chan_close(chan, ECONNRESET);
1438 				l2cap_chan_unlock(chan);
1439 				continue;
1440 			}
1441 
1442 			l2cap_start_connection(chan);
1443 
1444 		} else if (chan->state == BT_CONNECT2) {
1445 			struct l2cap_conn_rsp rsp;
1446 			char buf[128];
1447 			rsp.scid = cpu_to_le16(chan->dcid);
1448 			rsp.dcid = cpu_to_le16(chan->scid);
1449 
1450 			if (l2cap_chan_check_security(chan, false)) {
1451 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1452 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1453 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1454 					chan->ops->defer(chan);
1455 
1456 				} else {
1457 					l2cap_state_change(chan, BT_CONFIG);
1458 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1459 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1460 				}
1461 			} else {
1462 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1463 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1464 			}
1465 
1466 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1467 				       sizeof(rsp), &rsp);
1468 
1469 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1470 			    rsp.result != L2CAP_CR_SUCCESS) {
1471 				l2cap_chan_unlock(chan);
1472 				continue;
1473 			}
1474 
1475 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1476 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1477 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1478 			chan->num_conf_req++;
1479 		}
1480 
1481 		l2cap_chan_unlock(chan);
1482 	}
1483 
1484 	mutex_unlock(&conn->chan_lock);
1485 }
1486 
1487 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1488 {
1489 	struct hci_conn *hcon = conn->hcon;
1490 	struct hci_dev *hdev = hcon->hdev;
1491 
1492 	BT_DBG("%s conn %p", hdev->name, conn);
1493 
1494 	/* For outgoing pairing which doesn't necessarily have an
1495 	 * associated socket (e.g. mgmt_pair_device).
1496 	 */
1497 	if (hcon->out)
1498 		smp_conn_security(hcon, hcon->pending_sec_level);
1499 
1500 	/* For LE slave connections, make sure the connection interval
1501 	 * is in the range of the minimum and maximum interval that has
1502 	 * been configured for this connection. If not, then trigger
1503 	 * the connection update procedure.
1504 	 */
1505 	if (hcon->role == HCI_ROLE_SLAVE &&
1506 	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1507 	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1508 		struct l2cap_conn_param_update_req req;
1509 
1510 		req.min = cpu_to_le16(hcon->le_conn_min_interval);
1511 		req.max = cpu_to_le16(hcon->le_conn_max_interval);
1512 		req.latency = cpu_to_le16(hcon->le_conn_latency);
1513 		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1514 
1515 		l2cap_send_cmd(conn, l2cap_get_ident(conn),
1516 			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1517 	}
1518 }
1519 
1520 static void l2cap_conn_ready(struct l2cap_conn *conn)
1521 {
1522 	struct l2cap_chan *chan;
1523 	struct hci_conn *hcon = conn->hcon;
1524 
1525 	BT_DBG("conn %p", conn);
1526 
1527 	if (hcon->type == ACL_LINK)
1528 		l2cap_request_info(conn);
1529 
1530 	mutex_lock(&conn->chan_lock);
1531 
1532 	list_for_each_entry(chan, &conn->chan_l, list) {
1533 
1534 		l2cap_chan_lock(chan);
1535 
1536 		if (chan->scid == L2CAP_CID_A2MP) {
1537 			l2cap_chan_unlock(chan);
1538 			continue;
1539 		}
1540 
1541 		if (hcon->type == LE_LINK) {
1542 			l2cap_le_start(chan);
1543 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1544 			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1545 				l2cap_chan_ready(chan);
1546 		} else if (chan->state == BT_CONNECT) {
1547 			l2cap_do_start(chan);
1548 		}
1549 
1550 		l2cap_chan_unlock(chan);
1551 	}
1552 
1553 	mutex_unlock(&conn->chan_lock);
1554 
1555 	if (hcon->type == LE_LINK)
1556 		l2cap_le_conn_ready(conn);
1557 
1558 	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1559 }
1560 
1561 /* Notify sockets that we cannot guarantee reliability anymore */
1562 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1563 {
1564 	struct l2cap_chan *chan;
1565 
1566 	BT_DBG("conn %p", conn);
1567 
1568 	mutex_lock(&conn->chan_lock);
1569 
1570 	list_for_each_entry(chan, &conn->chan_l, list) {
1571 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1572 			l2cap_chan_set_err(chan, err);
1573 	}
1574 
1575 	mutex_unlock(&conn->chan_lock);
1576 }
1577 
1578 static void l2cap_info_timeout(struct work_struct *work)
1579 {
1580 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1581 					       info_timer.work);
1582 
1583 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1584 	conn->info_ident = 0;
1585 
1586 	l2cap_conn_start(conn);
1587 }
1588 
1589 /*
1590  * l2cap_user
1591  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1592  * callback is called during registration. The ->remove callback is called
1593  * during unregistration.
1594  * An l2cap_user object is unregistered either explicitly or when the
1595  * underlying l2cap_conn object is deleted. This guarantees that conn->hcon,
1596  * conn->hchan, etc. are valid as long as the remove callback hasn't been called.
1597  * External modules must own a reference to the l2cap_conn object if they intend
1598  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1599  * any time if they don't.
1600  */
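
/* Minimal registration sketch for a hypothetical external module; my_probe,
 * my_remove and my_user are made-up names used only for illustration:
 *
 *	static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		return 0;	// conn->hchan may be used from here on
 *	}
 *
 *	static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		// conn->hcon and conn->hchan are no longer valid afterwards
 *	}
 *
 *	static struct l2cap_user my_user = {
 *		.list   = LIST_HEAD_INIT(my_user.list),
 *		.probe  = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	conn = l2cap_conn_get(conn);
 *	if (l2cap_register_user(conn, &my_user))
 *		l2cap_conn_put(conn);
 *
 * Unregistration is the mirror image: l2cap_unregister_user(conn, &my_user)
 * followed by l2cap_conn_put(conn), per the ownership rule described above.
 */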
1601 
1602 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1603 {
1604 	struct hci_dev *hdev = conn->hcon->hdev;
1605 	int ret;
1606 
1607 	/* We need to check whether l2cap_conn is registered. If it is not, we
1608 	 * must not register the l2cap_user. l2cap_conn_del() unregisters
1609 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1610 	 * relies on the parent hci_conn object to be locked. This itself relies
1611 	 * on the hci_dev object to be locked. So we must lock the hci device
1612 	 * here, too. */
1613 
1614 	hci_dev_lock(hdev);
1615 
1616 	if (!list_empty(&user->list)) {
1617 		ret = -EINVAL;
1618 		goto out_unlock;
1619 	}
1620 
1621 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1622 	if (!conn->hchan) {
1623 		ret = -ENODEV;
1624 		goto out_unlock;
1625 	}
1626 
1627 	ret = user->probe(conn, user);
1628 	if (ret)
1629 		goto out_unlock;
1630 
1631 	list_add(&user->list, &conn->users);
1632 	ret = 0;
1633 
1634 out_unlock:
1635 	hci_dev_unlock(hdev);
1636 	return ret;
1637 }
1638 EXPORT_SYMBOL(l2cap_register_user);
1639 
1640 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1641 {
1642 	struct hci_dev *hdev = conn->hcon->hdev;
1643 
1644 	hci_dev_lock(hdev);
1645 
1646 	if (list_empty(&user->list))
1647 		goto out_unlock;
1648 
1649 	list_del_init(&user->list);
1650 	user->remove(conn, user);
1651 
1652 out_unlock:
1653 	hci_dev_unlock(hdev);
1654 }
1655 EXPORT_SYMBOL(l2cap_unregister_user);
1656 
1657 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1658 {
1659 	struct l2cap_user *user;
1660 
1661 	while (!list_empty(&conn->users)) {
1662 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1663 		list_del_init(&user->list);
1664 		user->remove(conn, user);
1665 	}
1666 }
1667 
1668 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1669 {
1670 	struct l2cap_conn *conn = hcon->l2cap_data;
1671 	struct l2cap_chan *chan, *l;
1672 
1673 	if (!conn)
1674 		return;
1675 
1676 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1677 
1678 	kfree_skb(conn->rx_skb);
1679 
1680 	skb_queue_purge(&conn->pending_rx);
1681 
1682 	/* We cannot call flush_work(&conn->pending_rx_work) here since we
1683 	 * might block if we are running on a worker from the same workqueue
1684 	 * pending_rx_work is waiting on.
1685 	 */
1686 	if (work_pending(&conn->pending_rx_work))
1687 		cancel_work_sync(&conn->pending_rx_work);
1688 
1689 	if (work_pending(&conn->id_addr_update_work))
1690 		cancel_work_sync(&conn->id_addr_update_work);
1691 
1692 	l2cap_unregister_all_users(conn);
1693 
1694 	/* Force the connection to be immediately dropped */
1695 	hcon->disc_timeout = 0;
1696 
1697 	mutex_lock(&conn->chan_lock);
1698 
1699 	/* Kill channels */
1700 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1701 		l2cap_chan_hold(chan);
1702 		l2cap_chan_lock(chan);
1703 
1704 		l2cap_chan_del(chan, err);
1705 
1706 		l2cap_chan_unlock(chan);
1707 
1708 		chan->ops->close(chan);
1709 		l2cap_chan_put(chan);
1710 	}
1711 
1712 	mutex_unlock(&conn->chan_lock);
1713 
1714 	hci_chan_del(conn->hchan);
1715 
1716 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1717 		cancel_delayed_work_sync(&conn->info_timer);
1718 
1719 	hcon->l2cap_data = NULL;
1720 	conn->hchan = NULL;
1721 	l2cap_conn_put(conn);
1722 }
1723 
1724 static void l2cap_conn_free(struct kref *ref)
1725 {
1726 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1727 
1728 	hci_conn_put(conn->hcon);
1729 	kfree(conn);
1730 }
1731 
1732 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1733 {
1734 	kref_get(&conn->ref);
1735 	return conn;
1736 }
1737 EXPORT_SYMBOL(l2cap_conn_get);
1738 
1739 void l2cap_conn_put(struct l2cap_conn *conn)
1740 {
1741 	kref_put(&conn->ref, l2cap_conn_free);
1742 }
1743 EXPORT_SYMBOL(l2cap_conn_put);
1744 
1745 /* ---- Socket interface ---- */
1746 
1747 /* Find socket with psm and source / destination bdaddr.
1748  * Returns closest match.
1749  */
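/* For example, an incoming connection request is matched against a channel
 * bound to the exact (src, dst) pair first; failing that, a listener bound
 * to BDADDR_ANY for either address is returned as the closest match.
 */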
1750 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1751 						   bdaddr_t *src,
1752 						   bdaddr_t *dst,
1753 						   u8 link_type)
1754 {
1755 	struct l2cap_chan *c, *c1 = NULL;
1756 
1757 	read_lock(&chan_list_lock);
1758 
1759 	list_for_each_entry(c, &chan_list, global_l) {
1760 		if (state && c->state != state)
1761 			continue;
1762 
1763 		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1764 			continue;
1765 
1766 		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1767 			continue;
1768 
1769 		if (c->psm == psm) {
1770 			int src_match, dst_match;
1771 			int src_any, dst_any;
1772 
1773 			/* Exact match. */
1774 			src_match = !bacmp(&c->src, src);
1775 			dst_match = !bacmp(&c->dst, dst);
1776 			if (src_match && dst_match) {
1777 				l2cap_chan_hold(c);
1778 				read_unlock(&chan_list_lock);
1779 				return c;
1780 			}
1781 
1782 			/* Closest match */
1783 			src_any = !bacmp(&c->src, BDADDR_ANY);
1784 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
1785 			if ((src_match && dst_any) || (src_any && dst_match) ||
1786 			    (src_any && dst_any))
1787 				c1 = c;
1788 		}
1789 	}
1790 
1791 	if (c1)
1792 		l2cap_chan_hold(c1);
1793 
1794 	read_unlock(&chan_list_lock);
1795 
1796 	return c1;
1797 }
1798 
1799 static void l2cap_monitor_timeout(struct work_struct *work)
1800 {
1801 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1802 					       monitor_timer.work);
1803 
1804 	BT_DBG("chan %p", chan);
1805 
1806 	l2cap_chan_lock(chan);
1807 
1808 	if (!chan->conn) {
1809 		l2cap_chan_unlock(chan);
1810 		l2cap_chan_put(chan);
1811 		return;
1812 	}
1813 
1814 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1815 
1816 	l2cap_chan_unlock(chan);
1817 	l2cap_chan_put(chan);
1818 }
1819 
1820 static void l2cap_retrans_timeout(struct work_struct *work)
1821 {
1822 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1823 					       retrans_timer.work);
1824 
1825 	BT_DBG("chan %p", chan);
1826 
1827 	l2cap_chan_lock(chan);
1828 
1829 	if (!chan->conn) {
1830 		l2cap_chan_unlock(chan);
1831 		l2cap_chan_put(chan);
1832 		return;
1833 	}
1834 
1835 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1836 	l2cap_chan_unlock(chan);
1837 	l2cap_chan_put(chan);
1838 }
1839 
1840 static void l2cap_streaming_send(struct l2cap_chan *chan,
1841 				 struct sk_buff_head *skbs)
1842 {
1843 	struct sk_buff *skb;
1844 	struct l2cap_ctrl *control;
1845 
1846 	BT_DBG("chan %p, skbs %p", chan, skbs);
1847 
1848 	if (__chan_is_moving(chan))
1849 		return;
1850 
1851 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
1852 
1853 	while (!skb_queue_empty(&chan->tx_q)) {
1854 
1855 		skb = skb_dequeue(&chan->tx_q);
1856 
1857 		bt_cb(skb)->l2cap.retries = 1;
1858 		control = &bt_cb(skb)->l2cap;
1859 
1860 		control->reqseq = 0;
1861 		control->txseq = chan->next_tx_seq;
1862 
1863 		__pack_control(chan, control, skb);
1864 
1865 		if (chan->fcs == L2CAP_FCS_CRC16) {
1866 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1867 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1868 		}
1869 
1870 		l2cap_do_send(chan, skb);
1871 
1872 		BT_DBG("Sent txseq %u", control->txseq);
1873 
1874 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1875 		chan->frames_sent++;
1876 	}
1877 }
1878 
1879 static int l2cap_ertm_send(struct l2cap_chan *chan)
1880 {
1881 	struct sk_buff *skb, *tx_skb;
1882 	struct l2cap_ctrl *control;
1883 	int sent = 0;
1884 
1885 	BT_DBG("chan %p", chan);
1886 
1887 	if (chan->state != BT_CONNECTED)
1888 		return -ENOTCONN;
1889 
1890 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1891 		return 0;
1892 
1893 	if (__chan_is_moving(chan))
1894 		return 0;
1895 
1896 	while (chan->tx_send_head &&
1897 	       chan->unacked_frames < chan->remote_tx_win &&
1898 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
1899 
1900 		skb = chan->tx_send_head;
1901 
1902 		bt_cb(skb)->l2cap.retries = 1;
1903 		control = &bt_cb(skb)->l2cap;
1904 
1905 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1906 			control->final = 1;
1907 
1908 		control->reqseq = chan->buffer_seq;
1909 		chan->last_acked_seq = chan->buffer_seq;
1910 		control->txseq = chan->next_tx_seq;
1911 
1912 		__pack_control(chan, control, skb);
1913 
1914 		if (chan->fcs == L2CAP_FCS_CRC16) {
1915 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1916 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1917 		}
1918 
1919 		/* Clone after data has been modified. Data is assumed to be
1920 		 * read-only (for locking purposes) on cloned sk_buffs.
1921 		 */
1922 		tx_skb = skb_clone(skb, GFP_KERNEL);
1923 
1924 		if (!tx_skb)
1925 			break;
1926 
1927 		__set_retrans_timer(chan);
1928 
1929 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1930 		chan->unacked_frames++;
1931 		chan->frames_sent++;
1932 		sent++;
1933 
1934 		if (skb_queue_is_last(&chan->tx_q, skb))
1935 			chan->tx_send_head = NULL;
1936 		else
1937 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1938 
1939 		l2cap_do_send(chan, tx_skb);
1940 		BT_DBG("Sent txseq %u", control->txseq);
1941 	}
1942 
1943 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1944 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
1945 
1946 	return sent;
1947 }
1948 
1949 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1950 {
1951 	struct l2cap_ctrl control;
1952 	struct sk_buff *skb;
1953 	struct sk_buff *tx_skb;
1954 	u16 seq;
1955 
1956 	BT_DBG("chan %p", chan);
1957 
1958 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1959 		return;
1960 
1961 	if (__chan_is_moving(chan))
1962 		return;
1963 
1964 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1965 		seq = l2cap_seq_list_pop(&chan->retrans_list);
1966 
1967 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1968 		if (!skb) {
1969 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
1970 			       seq);
1971 			continue;
1972 		}
1973 
1974 		bt_cb(skb)->l2cap.retries++;
1975 		control = bt_cb(skb)->l2cap;
1976 
1977 		if (chan->max_tx != 0 &&
1978 		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
1979 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1980 			l2cap_send_disconn_req(chan, ECONNRESET);
1981 			l2cap_seq_list_clear(&chan->retrans_list);
1982 			break;
1983 		}
1984 
1985 		control.reqseq = chan->buffer_seq;
1986 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1987 			control.final = 1;
1988 		else
1989 			control.final = 0;
1990 
1991 		if (skb_cloned(skb)) {
1992 			/* Cloned sk_buffs are read-only, so we need a
1993 			 * writeable copy
1994 			 */
1995 			tx_skb = skb_copy(skb, GFP_KERNEL);
1996 		} else {
1997 			tx_skb = skb_clone(skb, GFP_KERNEL);
1998 		}
1999 
2000 		if (!tx_skb) {
2001 			l2cap_seq_list_clear(&chan->retrans_list);
2002 			break;
2003 		}
2004 
2005 		/* Update skb contents */
2006 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2007 			put_unaligned_le32(__pack_extended_control(&control),
2008 					   tx_skb->data + L2CAP_HDR_SIZE);
2009 		} else {
2010 			put_unaligned_le16(__pack_enhanced_control(&control),
2011 					   tx_skb->data + L2CAP_HDR_SIZE);
2012 		}
2013 
2014 		/* Update FCS */
2015 		if (chan->fcs == L2CAP_FCS_CRC16) {
2016 			u16 fcs = crc16(0, (u8 *) tx_skb->data,
2017 					tx_skb->len - L2CAP_FCS_SIZE);
2018 			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2019 						L2CAP_FCS_SIZE);
2020 		}
2021 
2022 		l2cap_do_send(chan, tx_skb);
2023 
2024 		BT_DBG("Resent txseq %u", control.txseq);
2025 
2026 		chan->last_acked_seq = chan->buffer_seq;
2027 	}
2028 }
2029 
2030 static void l2cap_retransmit(struct l2cap_chan *chan,
2031 			     struct l2cap_ctrl *control)
2032 {
2033 	BT_DBG("chan %p, control %p", chan, control);
2034 
2035 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2036 	l2cap_ertm_resend(chan);
2037 }
2038 
2039 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2040 				 struct l2cap_ctrl *control)
2041 {
2042 	struct sk_buff *skb;
2043 
2044 	BT_DBG("chan %p, control %p", chan, control);
2045 
2046 	if (control->poll)
2047 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2048 
2049 	l2cap_seq_list_clear(&chan->retrans_list);
2050 
2051 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2052 		return;
2053 
2054 	if (chan->unacked_frames) {
2055 		skb_queue_walk(&chan->tx_q, skb) {
2056 			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2057 			    skb == chan->tx_send_head)
2058 				break;
2059 		}
2060 
2061 		skb_queue_walk_from(&chan->tx_q, skb) {
2062 			if (skb == chan->tx_send_head)
2063 				break;
2064 
2065 			l2cap_seq_list_append(&chan->retrans_list,
2066 					      bt_cb(skb)->l2cap.txseq);
2067 		}
2068 
2069 		l2cap_ertm_resend(chan);
2070 	}
2071 }
2072 
2073 static void l2cap_send_ack(struct l2cap_chan *chan)
2074 {
2075 	struct l2cap_ctrl control;
2076 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2077 					 chan->last_acked_seq);
2078 	int threshold;
2079 
2080 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2081 	       chan, chan->last_acked_seq, chan->buffer_seq);
2082 
2083 	memset(&control, 0, sizeof(control));
2084 	control.sframe = 1;
2085 
2086 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2087 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2088 		__clear_ack_timer(chan);
2089 		control.super = L2CAP_SUPER_RNR;
2090 		control.reqseq = chan->buffer_seq;
2091 		l2cap_send_sframe(chan, &control);
2092 	} else {
2093 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2094 			l2cap_ertm_send(chan);
2095 			/* If any i-frames were sent, they included an ack */
2096 			if (chan->buffer_seq == chan->last_acked_seq)
2097 				frames_to_ack = 0;
2098 		}
2099 
2100 		/* Ack now if the window is 3/4ths full.
2101 		 * Calculate without mul or div
2102 		 */
2103 		threshold = chan->ack_win;
2104 		threshold += threshold << 1;
2105 		threshold >>= 2;
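		/* Worked example with illustrative values: for an ack_win of
		 * 8, threshold becomes 8 + 16 = 24, then 24 >> 2 = 6, i.e.
		 * three quarters of the window, so an ack is forced once six
		 * frames are waiting to be acknowledged.
		 */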
2106 
2107 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2108 		       threshold);
2109 
2110 		if (frames_to_ack >= threshold) {
2111 			__clear_ack_timer(chan);
2112 			control.super = L2CAP_SUPER_RR;
2113 			control.reqseq = chan->buffer_seq;
2114 			l2cap_send_sframe(chan, &control);
2115 			frames_to_ack = 0;
2116 		}
2117 
2118 		if (frames_to_ack)
2119 			__set_ack_timer(chan);
2120 	}
2121 }
2122 
2123 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2124 					 struct msghdr *msg, int len,
2125 					 int count, struct sk_buff *skb)
2126 {
2127 	struct l2cap_conn *conn = chan->conn;
2128 	struct sk_buff **frag;
2129 	int sent = 0;
2130 
2131 	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2132 		return -EFAULT;
2133 
2134 	sent += count;
2135 	len  -= count;
2136 
2137 	/* Continuation fragments (no L2CAP header) */
2138 	frag = &skb_shinfo(skb)->frag_list;
2139 	while (len) {
2140 		struct sk_buff *tmp;
2141 
2142 		count = min_t(unsigned int, conn->mtu, len);
2143 
2144 		tmp = chan->ops->alloc_skb(chan, 0, count,
2145 					   msg->msg_flags & MSG_DONTWAIT);
2146 		if (IS_ERR(tmp))
2147 			return PTR_ERR(tmp);
2148 
2149 		*frag = tmp;
2150 
2151 		if (!copy_from_iter_full(skb_put(*frag, count), count,
2152 				   &msg->msg_iter))
2153 			return -EFAULT;
2154 
2155 		sent += count;
2156 		len  -= count;
2157 
2158 		skb->len += (*frag)->len;
2159 		skb->data_len += (*frag)->len;
2160 
2161 		frag = &(*frag)->next;
2162 	}
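	/* Illustrative example: for a 1021 byte basic-mode payload with a
	 * 672 byte conn->mtu, the initial copy at the top of this function
	 * pulls in the 668 bytes the caller sized for the main skb (mtu
	 * minus the 4 byte L2CAP header) and the loop above adds a single
	 * 353 byte continuation fragment.
	 */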
2163 
2164 	return sent;
2165 }
2166 
2167 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2168 						 struct msghdr *msg, size_t len)
2169 {
2170 	struct l2cap_conn *conn = chan->conn;
2171 	struct sk_buff *skb;
2172 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2173 	struct l2cap_hdr *lh;
2174 
2175 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2176 	       __le16_to_cpu(chan->psm), len);
2177 
2178 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2179 
2180 	skb = chan->ops->alloc_skb(chan, hlen, count,
2181 				   msg->msg_flags & MSG_DONTWAIT);
2182 	if (IS_ERR(skb))
2183 		return skb;
2184 
2185 	/* Create L2CAP header */
2186 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2187 	lh->cid = cpu_to_le16(chan->dcid);
2188 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2189 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2190 
2191 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2192 	if (unlikely(err < 0)) {
2193 		kfree_skb(skb);
2194 		return ERR_PTR(err);
2195 	}
2196 	return skb;
2197 }
2198 
2199 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2200 					      struct msghdr *msg, size_t len)
2201 {
2202 	struct l2cap_conn *conn = chan->conn;
2203 	struct sk_buff *skb;
2204 	int err, count;
2205 	struct l2cap_hdr *lh;
2206 
2207 	BT_DBG("chan %p len %zu", chan, len);
2208 
2209 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2210 
2211 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2212 				   msg->msg_flags & MSG_DONTWAIT);
2213 	if (IS_ERR(skb))
2214 		return skb;
2215 
2216 	/* Create L2CAP header */
2217 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2218 	lh->cid = cpu_to_le16(chan->dcid);
2219 	lh->len = cpu_to_le16(len);
2220 
2221 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2222 	if (unlikely(err < 0)) {
2223 		kfree_skb(skb);
2224 		return ERR_PTR(err);
2225 	}
2226 	return skb;
2227 }
2228 
2229 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2230 					       struct msghdr *msg, size_t len,
2231 					       u16 sdulen)
2232 {
2233 	struct l2cap_conn *conn = chan->conn;
2234 	struct sk_buff *skb;
2235 	int err, count, hlen;
2236 	struct l2cap_hdr *lh;
2237 
2238 	BT_DBG("chan %p len %zu", chan, len);
2239 
2240 	if (!conn)
2241 		return ERR_PTR(-ENOTCONN);
2242 
2243 	hlen = __ertm_hdr_size(chan);
2244 
2245 	if (sdulen)
2246 		hlen += L2CAP_SDULEN_SIZE;
2247 
2248 	if (chan->fcs == L2CAP_FCS_CRC16)
2249 		hlen += L2CAP_FCS_SIZE;
2250 
2251 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2252 
2253 	skb = chan->ops->alloc_skb(chan, hlen, count,
2254 				   msg->msg_flags & MSG_DONTWAIT);
2255 	if (IS_ERR(skb))
2256 		return skb;
2257 
2258 	/* Create L2CAP header */
2259 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2260 	lh->cid = cpu_to_le16(chan->dcid);
2261 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2262 
2263 	/* Control header is populated later */
2264 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2265 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2266 	else
2267 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2268 
2269 	if (sdulen)
2270 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2271 
2272 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2273 	if (unlikely(err < 0)) {
2274 		kfree_skb(skb);
2275 		return ERR_PTR(err);
2276 	}
2277 
2278 	bt_cb(skb)->l2cap.fcs = chan->fcs;
2279 	bt_cb(skb)->l2cap.retries = 0;
2280 	return skb;
2281 }
2282 
2283 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2284 			     struct sk_buff_head *seg_queue,
2285 			     struct msghdr *msg, size_t len)
2286 {
2287 	struct sk_buff *skb;
2288 	u16 sdu_len;
2289 	size_t pdu_len;
2290 	u8 sar;
2291 
2292 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2293 
2294 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2295 	 * so fragmented skbs are not used.  The HCI layer's handling
2296 	 * of fragmented skbs is not compatible with ERTM's queueing.
2297 	 */
2298 
2299 	/* PDU size is derived from the HCI MTU */
2300 	pdu_len = chan->conn->mtu;
2301 
2302 	/* Constrain PDU size for BR/EDR connections */
2303 	if (!chan->hs_hcon)
2304 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2305 
2306 	/* Adjust for largest possible L2CAP overhead. */
2307 	if (chan->fcs)
2308 		pdu_len -= L2CAP_FCS_SIZE;
2309 
2310 	pdu_len -= __ertm_hdr_size(chan);
2311 
2312 	/* Remote device may have requested smaller PDUs */
2313 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2314 
2315 	if (len <= pdu_len) {
2316 		sar = L2CAP_SAR_UNSEGMENTED;
2317 		sdu_len = 0;
2318 		pdu_len = len;
2319 	} else {
2320 		sar = L2CAP_SAR_START;
2321 		sdu_len = len;
2322 	}
2323 
2324 	while (len > 0) {
2325 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2326 
2327 		if (IS_ERR(skb)) {
2328 			__skb_queue_purge(seg_queue);
2329 			return PTR_ERR(skb);
2330 		}
2331 
2332 		bt_cb(skb)->l2cap.sar = sar;
2333 		__skb_queue_tail(seg_queue, skb);
2334 
2335 		len -= pdu_len;
2336 		if (sdu_len)
2337 			sdu_len = 0;
2338 
2339 		if (len <= pdu_len) {
2340 			sar = L2CAP_SAR_END;
2341 			pdu_len = len;
2342 		} else {
2343 			sar = L2CAP_SAR_CONTINUE;
2344 		}
2345 	}
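	/* Illustrative example: segmenting a 2500 byte SDU with a 1000 byte
	 * pdu_len yields a 1000 byte SAR_START PDU carrying the SDU length,
	 * a 1000 byte SAR_CONTINUE PDU, and a final 500 byte SAR_END PDU.
	 */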
2346 
2347 	return 0;
2348 }
2349 
2350 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2351 						   struct msghdr *msg,
2352 						   size_t len, u16 sdulen)
2353 {
2354 	struct l2cap_conn *conn = chan->conn;
2355 	struct sk_buff *skb;
2356 	int err, count, hlen;
2357 	struct l2cap_hdr *lh;
2358 
2359 	BT_DBG("chan %p len %zu", chan, len);
2360 
2361 	if (!conn)
2362 		return ERR_PTR(-ENOTCONN);
2363 
2364 	hlen = L2CAP_HDR_SIZE;
2365 
2366 	if (sdulen)
2367 		hlen += L2CAP_SDULEN_SIZE;
2368 
2369 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2370 
2371 	skb = chan->ops->alloc_skb(chan, hlen, count,
2372 				   msg->msg_flags & MSG_DONTWAIT);
2373 	if (IS_ERR(skb))
2374 		return skb;
2375 
2376 	/* Create L2CAP header */
2377 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2378 	lh->cid = cpu_to_le16(chan->dcid);
2379 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2380 
2381 	if (sdulen)
2382 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2383 
2384 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2385 	if (unlikely(err < 0)) {
2386 		kfree_skb(skb);
2387 		return ERR_PTR(err);
2388 	}
2389 
2390 	return skb;
2391 }
2392 
2393 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2394 				struct sk_buff_head *seg_queue,
2395 				struct msghdr *msg, size_t len)
2396 {
2397 	struct sk_buff *skb;
2398 	size_t pdu_len;
2399 	u16 sdu_len;
2400 
2401 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2402 
2403 	sdu_len = len;
2404 	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2405 
2406 	while (len > 0) {
2407 		if (len <= pdu_len)
2408 			pdu_len = len;
2409 
2410 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2411 		if (IS_ERR(skb)) {
2412 			__skb_queue_purge(seg_queue);
2413 			return PTR_ERR(skb);
2414 		}
2415 
2416 		__skb_queue_tail(seg_queue, skb);
2417 
2418 		len -= pdu_len;
2419 
2420 		if (sdu_len) {
2421 			sdu_len = 0;
2422 			pdu_len += L2CAP_SDULEN_SIZE;
2423 		}
2424 	}
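	/* Illustrative example: with a remote_mps of 100, a 250 byte SDU
	 * becomes a first PDU of 2 bytes of SDU length plus 98 bytes of
	 * data, a second PDU of 100 bytes, and a final PDU of 52 bytes;
	 * only the first PDU carries the SDU length field.
	 */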
2425 
2426 	return 0;
2427 }
2428 
2429 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2430 {
2431 	int sent = 0;
2432 
2433 	BT_DBG("chan %p", chan);
2434 
2435 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2436 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2437 		chan->tx_credits--;
2438 		sent++;
2439 	}
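	/* For example, with three tx_credits and five PDUs queued, three
	 * PDUs go out, credits drop to zero, and the remaining two stay in
	 * tx_q until the peer grants more credits.
	 */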
2440 
2441 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2442 	       skb_queue_len(&chan->tx_q));
2443 }
2444 
2445 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2446 {
2447 	struct sk_buff *skb;
2448 	int err;
2449 	struct sk_buff_head seg_queue;
2450 
2451 	if (!chan->conn)
2452 		return -ENOTCONN;
2453 
2454 	/* Connectionless channel */
2455 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2456 		skb = l2cap_create_connless_pdu(chan, msg, len);
2457 		if (IS_ERR(skb))
2458 			return PTR_ERR(skb);
2459 
2460 		/* The channel lock is released before requesting a new skb and
2461 		 * then reacquired, so we need to recheck the channel state.
2462 		 */
2463 		if (chan->state != BT_CONNECTED) {
2464 			kfree_skb(skb);
2465 			return -ENOTCONN;
2466 		}
2467 
2468 		l2cap_do_send(chan, skb);
2469 		return len;
2470 	}
2471 
2472 	switch (chan->mode) {
2473 	case L2CAP_MODE_LE_FLOWCTL:
2474 		/* Check outgoing MTU */
2475 		if (len > chan->omtu)
2476 			return -EMSGSIZE;
2477 
2478 		__skb_queue_head_init(&seg_queue);
2479 
2480 		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2481 
2482 		if (chan->state != BT_CONNECTED) {
2483 			__skb_queue_purge(&seg_queue);
2484 			err = -ENOTCONN;
2485 		}
2486 
2487 		if (err)
2488 			return err;
2489 
2490 		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2491 
2492 		l2cap_le_flowctl_send(chan);
2493 
2494 		if (!chan->tx_credits)
2495 			chan->ops->suspend(chan);
2496 
2497 		err = len;
2498 
2499 		break;
2500 
2501 	case L2CAP_MODE_BASIC:
2502 		/* Check outgoing MTU */
2503 		if (len > chan->omtu)
2504 			return -EMSGSIZE;
2505 
2506 		/* Create a basic PDU */
2507 		skb = l2cap_create_basic_pdu(chan, msg, len);
2508 		if (IS_ERR(skb))
2509 			return PTR_ERR(skb);
2510 
2511 		/* The channel lock is released before requesting a new skb and
2512 		 * then reacquired, so we need to recheck the channel state.
2513 		 */
2514 		if (chan->state != BT_CONNECTED) {
2515 			kfree_skb(skb);
2516 			return -ENOTCONN;
2517 		}
2518 
2519 		l2cap_do_send(chan, skb);
2520 		err = len;
2521 		break;
2522 
2523 	case L2CAP_MODE_ERTM:
2524 	case L2CAP_MODE_STREAMING:
2525 		/* Check outgoing MTU */
2526 		if (len > chan->omtu) {
2527 			err = -EMSGSIZE;
2528 			break;
2529 		}
2530 
2531 		__skb_queue_head_init(&seg_queue);
2532 
2533 		/* Do segmentation before calling in to the state machine,
2534 		 * since it's possible to block while waiting for memory
2535 		 * allocation.
2536 		 */
2537 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2538 
2539 		/* The channel could have been closed while segmenting,
2540 		 * check that it is still connected.
2541 		 */
2542 		if (chan->state != BT_CONNECTED) {
2543 			__skb_queue_purge(&seg_queue);
2544 			err = -ENOTCONN;
2545 		}
2546 
2547 		if (err)
2548 			break;
2549 
2550 		if (chan->mode == L2CAP_MODE_ERTM)
2551 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2552 		else
2553 			l2cap_streaming_send(chan, &seg_queue);
2554 
2555 		err = len;
2556 
2557 		/* If the skbs were not queued for sending, they'll still be in
2558 		 * seg_queue and need to be purged.
2559 		 */
2560 		__skb_queue_purge(&seg_queue);
2561 		break;
2562 
2563 	default:
2564 		BT_DBG("bad mode 0x%2.2x", chan->mode);
2565 		err = -EBADFD;
2566 	}
2567 
2568 	return err;
2569 }
2570 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2571 
2572 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2573 {
2574 	struct l2cap_ctrl control;
2575 	u16 seq;
2576 
2577 	BT_DBG("chan %p, txseq %u", chan, txseq);
2578 
2579 	memset(&control, 0, sizeof(control));
2580 	control.sframe = 1;
2581 	control.super = L2CAP_SUPER_SREJ;
2582 
2583 	for (seq = chan->expected_tx_seq; seq != txseq;
2584 	     seq = __next_seq(chan, seq)) {
2585 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2586 			control.reqseq = seq;
2587 			l2cap_send_sframe(chan, &control);
2588 			l2cap_seq_list_append(&chan->srej_list, seq);
2589 		}
2590 	}
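	/* Example, assuming nothing is buffered in srej_q: if
	 * expected_tx_seq is 5 and txseq 8 arrives, SREJ frames are sent
	 * for the missing sequence numbers 5, 6 and 7, each of which is
	 * also appended to srej_list.
	 */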
2591 
2592 	chan->expected_tx_seq = __next_seq(chan, txseq);
2593 }
2594 
2595 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2596 {
2597 	struct l2cap_ctrl control;
2598 
2599 	BT_DBG("chan %p", chan);
2600 
2601 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2602 		return;
2603 
2604 	memset(&control, 0, sizeof(control));
2605 	control.sframe = 1;
2606 	control.super = L2CAP_SUPER_SREJ;
2607 	control.reqseq = chan->srej_list.tail;
2608 	l2cap_send_sframe(chan, &control);
2609 }
2610 
2611 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2612 {
2613 	struct l2cap_ctrl control;
2614 	u16 initial_head;
2615 	u16 seq;
2616 
2617 	BT_DBG("chan %p, txseq %u", chan, txseq);
2618 
2619 	memset(&control, 0, sizeof(control));
2620 	control.sframe = 1;
2621 	control.super = L2CAP_SUPER_SREJ;
2622 
2623 	/* Capture initial list head to allow only one pass through the list. */
2624 	initial_head = chan->srej_list.head;
2625 
2626 	do {
2627 		seq = l2cap_seq_list_pop(&chan->srej_list);
2628 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2629 			break;
2630 
2631 		control.reqseq = seq;
2632 		l2cap_send_sframe(chan, &control);
2633 		l2cap_seq_list_append(&chan->srej_list, seq);
2634 	} while (chan->srej_list.head != initial_head);
2635 }
2636 
2637 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2638 {
2639 	struct sk_buff *acked_skb;
2640 	u16 ackseq;
2641 
2642 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2643 
2644 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2645 		return;
2646 
2647 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2648 	       chan->expected_ack_seq, chan->unacked_frames);
2649 
2650 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2651 	     ackseq = __next_seq(chan, ackseq)) {
2652 
2653 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2654 		if (acked_skb) {
2655 			skb_unlink(acked_skb, &chan->tx_q);
2656 			kfree_skb(acked_skb);
2657 			chan->unacked_frames--;
2658 		}
2659 	}
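	/* Example: if expected_ack_seq is 2 and reqseq 5 arrives, frames 2,
	 * 3 and 4 are unlinked from tx_q and freed, so unacked_frames drops
	 * by three.
	 */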
2660 
2661 	chan->expected_ack_seq = reqseq;
2662 
2663 	if (chan->unacked_frames == 0)
2664 		__clear_retrans_timer(chan);
2665 
2666 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2667 }
2668 
2669 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2670 {
2671 	BT_DBG("chan %p", chan);
2672 
2673 	chan->expected_tx_seq = chan->buffer_seq;
2674 	l2cap_seq_list_clear(&chan->srej_list);
2675 	skb_queue_purge(&chan->srej_q);
2676 	chan->rx_state = L2CAP_RX_STATE_RECV;
2677 }
2678 
2679 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2680 				struct l2cap_ctrl *control,
2681 				struct sk_buff_head *skbs, u8 event)
2682 {
2683 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2684 	       event);
2685 
2686 	switch (event) {
2687 	case L2CAP_EV_DATA_REQUEST:
2688 		if (chan->tx_send_head == NULL)
2689 			chan->tx_send_head = skb_peek(skbs);
2690 
2691 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2692 		l2cap_ertm_send(chan);
2693 		break;
2694 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2695 		BT_DBG("Enter LOCAL_BUSY");
2696 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2697 
2698 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2699 			/* The SREJ_SENT state must be aborted if we are to
2700 			 * enter the LOCAL_BUSY state.
2701 			 */
2702 			l2cap_abort_rx_srej_sent(chan);
2703 		}
2704 
2705 		l2cap_send_ack(chan);
2706 
2707 		break;
2708 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2709 		BT_DBG("Exit LOCAL_BUSY");
2710 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2711 
2712 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2713 			struct l2cap_ctrl local_control;
2714 
2715 			memset(&local_control, 0, sizeof(local_control));
2716 			local_control.sframe = 1;
2717 			local_control.super = L2CAP_SUPER_RR;
2718 			local_control.poll = 1;
2719 			local_control.reqseq = chan->buffer_seq;
2720 			l2cap_send_sframe(chan, &local_control);
2721 
2722 			chan->retry_count = 1;
2723 			__set_monitor_timer(chan);
2724 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2725 		}
2726 		break;
2727 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2728 		l2cap_process_reqseq(chan, control->reqseq);
2729 		break;
2730 	case L2CAP_EV_EXPLICIT_POLL:
2731 		l2cap_send_rr_or_rnr(chan, 1);
2732 		chan->retry_count = 1;
2733 		__set_monitor_timer(chan);
2734 		__clear_ack_timer(chan);
2735 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2736 		break;
2737 	case L2CAP_EV_RETRANS_TO:
2738 		l2cap_send_rr_or_rnr(chan, 1);
2739 		chan->retry_count = 1;
2740 		__set_monitor_timer(chan);
2741 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2742 		break;
2743 	case L2CAP_EV_RECV_FBIT:
2744 		/* Nothing to process */
2745 		break;
2746 	default:
2747 		break;
2748 	}
2749 }
2750 
2751 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2752 				  struct l2cap_ctrl *control,
2753 				  struct sk_buff_head *skbs, u8 event)
2754 {
2755 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2756 	       event);
2757 
2758 	switch (event) {
2759 	case L2CAP_EV_DATA_REQUEST:
2760 		if (chan->tx_send_head == NULL)
2761 			chan->tx_send_head = skb_peek(skbs);
2762 		/* Queue data, but don't send. */
2763 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2764 		break;
2765 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2766 		BT_DBG("Enter LOCAL_BUSY");
2767 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2768 
2769 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2770 			/* The SREJ_SENT state must be aborted if we are to
2771 			 * enter the LOCAL_BUSY state.
2772 			 */
2773 			l2cap_abort_rx_srej_sent(chan);
2774 		}
2775 
2776 		l2cap_send_ack(chan);
2777 
2778 		break;
2779 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2780 		BT_DBG("Exit LOCAL_BUSY");
2781 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2782 
2783 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2784 			struct l2cap_ctrl local_control;
2785 			memset(&local_control, 0, sizeof(local_control));
2786 			local_control.sframe = 1;
2787 			local_control.super = L2CAP_SUPER_RR;
2788 			local_control.poll = 1;
2789 			local_control.reqseq = chan->buffer_seq;
2790 			l2cap_send_sframe(chan, &local_control);
2791 
2792 			chan->retry_count = 1;
2793 			__set_monitor_timer(chan);
2794 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2795 		}
2796 		break;
2797 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2798 		l2cap_process_reqseq(chan, control->reqseq);
2799 
2800 		/* Fall through */
2801 
2802 	case L2CAP_EV_RECV_FBIT:
2803 		if (control && control->final) {
2804 			__clear_monitor_timer(chan);
2805 			if (chan->unacked_frames > 0)
2806 				__set_retrans_timer(chan);
2807 			chan->retry_count = 0;
2808 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2809 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2810 		}
2811 		break;
2812 	case L2CAP_EV_EXPLICIT_POLL:
2813 		/* Ignore */
2814 		break;
2815 	case L2CAP_EV_MONITOR_TO:
2816 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2817 			l2cap_send_rr_or_rnr(chan, 1);
2818 			__set_monitor_timer(chan);
2819 			chan->retry_count++;
2820 		} else {
2821 			l2cap_send_disconn_req(chan, ECONNABORTED);
2822 		}
2823 		break;
2824 	default:
2825 		break;
2826 	}
2827 }
2828 
2829 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2830 		     struct sk_buff_head *skbs, u8 event)
2831 {
2832 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2833 	       chan, control, skbs, event, chan->tx_state);
2834 
2835 	switch (chan->tx_state) {
2836 	case L2CAP_TX_STATE_XMIT:
2837 		l2cap_tx_state_xmit(chan, control, skbs, event);
2838 		break;
2839 	case L2CAP_TX_STATE_WAIT_F:
2840 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2841 		break;
2842 	default:
2843 		/* Ignore event */
2844 		break;
2845 	}
2846 }
2847 
2848 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2849 			     struct l2cap_ctrl *control)
2850 {
2851 	BT_DBG("chan %p, control %p", chan, control);
2852 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2853 }
2854 
2855 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2856 				  struct l2cap_ctrl *control)
2857 {
2858 	BT_DBG("chan %p, control %p", chan, control);
2859 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2860 }
2861 
2862 /* Copy frame to all raw sockets on that connection */
2863 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2864 {
2865 	struct sk_buff *nskb;
2866 	struct l2cap_chan *chan;
2867 
2868 	BT_DBG("conn %p", conn);
2869 
2870 	mutex_lock(&conn->chan_lock);
2871 
2872 	list_for_each_entry(chan, &conn->chan_l, list) {
2873 		if (chan->chan_type != L2CAP_CHAN_RAW)
2874 			continue;
2875 
2876 		/* Don't send frame to the channel it came from */
2877 		if (bt_cb(skb)->l2cap.chan == chan)
2878 			continue;
2879 
2880 		nskb = skb_clone(skb, GFP_KERNEL);
2881 		if (!nskb)
2882 			continue;
2883 		if (chan->ops->recv(chan, nskb))
2884 			kfree_skb(nskb);
2885 	}
2886 
2887 	mutex_unlock(&conn->chan_lock);
2888 }
2889 
2890 /* ---- L2CAP signalling commands ---- */
2891 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2892 				       u8 ident, u16 dlen, void *data)
2893 {
2894 	struct sk_buff *skb, **frag;
2895 	struct l2cap_cmd_hdr *cmd;
2896 	struct l2cap_hdr *lh;
2897 	int len, count;
2898 
2899 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2900 	       conn, code, ident, dlen);
2901 
2902 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2903 		return NULL;
2904 
2905 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2906 	count = min_t(unsigned int, conn->mtu, len);
2907 
2908 	skb = bt_skb_alloc(count, GFP_KERNEL);
2909 	if (!skb)
2910 		return NULL;
2911 
2912 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2913 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2914 
2915 	if (conn->hcon->type == LE_LINK)
2916 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2917 	else
2918 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2919 
2920 	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
2921 	cmd->code  = code;
2922 	cmd->ident = ident;
2923 	cmd->len   = cpu_to_le16(dlen);
2924 
2925 	if (dlen) {
2926 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2927 		skb_put_data(skb, data, count);
2928 		data += count;
2929 	}
2930 
2931 	len -= skb->len;
2932 
2933 	/* Continuation fragments (no L2CAP header) */
2934 	frag = &skb_shinfo(skb)->frag_list;
2935 	while (len) {
2936 		count = min_t(unsigned int, conn->mtu, len);
2937 
2938 		*frag = bt_skb_alloc(count, GFP_KERNEL);
2939 		if (!*frag)
2940 			goto fail;
2941 
2942 		skb_put_data(*frag, data, count);
2943 
2944 		len  -= count;
2945 		data += count;
2946 
2947 		frag = &(*frag)->next;
2948 	}
2949 
2950 	return skb;
2951 
2952 fail:
2953 	kfree_skb(skb);
2954 	return NULL;
2955 }
2956 
2957 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2958 				     unsigned long *val)
2959 {
2960 	struct l2cap_conf_opt *opt = *ptr;
2961 	int len;
2962 
2963 	len = L2CAP_CONF_OPT_SIZE + opt->len;
2964 	*ptr += len;
2965 
2966 	*type = opt->type;
2967 	*olen = opt->len;
2968 
2969 	switch (opt->len) {
2970 	case 1:
2971 		*val = *((u8 *) opt->val);
2972 		break;
2973 
2974 	case 2:
2975 		*val = get_unaligned_le16(opt->val);
2976 		break;
2977 
2978 	case 4:
2979 		*val = get_unaligned_le32(opt->val);
2980 		break;
2981 
2982 	default:
2983 		*val = (unsigned long) opt->val;
2984 		break;
2985 	}
2986 
2987 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2988 	return len;
2989 }
2990 
2991 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
2992 {
2993 	struct l2cap_conf_opt *opt = *ptr;
2994 
2995 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2996 
2997 	if (size < L2CAP_CONF_OPT_SIZE + len)
2998 		return;
2999 
3000 	opt->type = type;
3001 	opt->len  = len;
3002 
3003 	switch (len) {
3004 	case 1:
3005 		*((u8 *) opt->val)  = val;
3006 		break;
3007 
3008 	case 2:
3009 		put_unaligned_le16(val, opt->val);
3010 		break;
3011 
3012 	case 4:
3013 		put_unaligned_le32(val, opt->val);
3014 		break;
3015 
3016 	default:
3017 		memcpy(opt->val, (void *) val, len);
3018 		break;
3019 	}
3020 
3021 	*ptr += L2CAP_CONF_OPT_SIZE + len;
3022 }
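
/* Usage example with illustrative values: adding an MTU option of 672 via
 * l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, 672, ...) emits the bytes
 * 01 02 a0 02 (type, length, little-endian value) and advances *ptr by
 * L2CAP_CONF_OPT_SIZE + 2.
 */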
3023 
3024 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3025 {
3026 	struct l2cap_conf_efs efs;
3027 
3028 	switch (chan->mode) {
3029 	case L2CAP_MODE_ERTM:
3030 		efs.id		= chan->local_id;
3031 		efs.stype	= chan->local_stype;
3032 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3033 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3034 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3035 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3036 		break;
3037 
3038 	case L2CAP_MODE_STREAMING:
3039 		efs.id		= 1;
3040 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3041 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3042 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3043 		efs.acc_lat	= 0;
3044 		efs.flush_to	= 0;
3045 		break;
3046 
3047 	default:
3048 		return;
3049 	}
3050 
3051 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3052 			   (unsigned long) &efs, size);
3053 }
3054 
3055 static void l2cap_ack_timeout(struct work_struct *work)
3056 {
3057 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3058 					       ack_timer.work);
3059 	u16 frames_to_ack;
3060 
3061 	BT_DBG("chan %p", chan);
3062 
3063 	l2cap_chan_lock(chan);
3064 
3065 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3066 				     chan->last_acked_seq);
3067 
3068 	if (frames_to_ack)
3069 		l2cap_send_rr_or_rnr(chan, 0);
3070 
3071 	l2cap_chan_unlock(chan);
3072 	l2cap_chan_put(chan);
3073 }
3074 
3075 int l2cap_ertm_init(struct l2cap_chan *chan)
3076 {
3077 	int err;
3078 
3079 	chan->next_tx_seq = 0;
3080 	chan->expected_tx_seq = 0;
3081 	chan->expected_ack_seq = 0;
3082 	chan->unacked_frames = 0;
3083 	chan->buffer_seq = 0;
3084 	chan->frames_sent = 0;
3085 	chan->last_acked_seq = 0;
3086 	chan->sdu = NULL;
3087 	chan->sdu_last_frag = NULL;
3088 	chan->sdu_len = 0;
3089 
3090 	skb_queue_head_init(&chan->tx_q);
3091 
3092 	chan->local_amp_id = AMP_ID_BREDR;
3093 	chan->move_id = AMP_ID_BREDR;
3094 	chan->move_state = L2CAP_MOVE_STABLE;
3095 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3096 
3097 	if (chan->mode != L2CAP_MODE_ERTM)
3098 		return 0;
3099 
3100 	chan->rx_state = L2CAP_RX_STATE_RECV;
3101 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3102 
3103 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3104 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3105 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3106 
3107 	skb_queue_head_init(&chan->srej_q);
3108 
3109 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3110 	if (err < 0)
3111 		return err;
3112 
3113 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3114 	if (err < 0)
3115 		l2cap_seq_list_free(&chan->srej_list);
3116 
3117 	return err;
3118 }
3119 
3120 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3121 {
3122 	switch (mode) {
3123 	case L2CAP_MODE_STREAMING:
3124 	case L2CAP_MODE_ERTM:
3125 		if (l2cap_mode_supported(mode, remote_feat_mask))
3126 			return mode;
3127 		/* fall through */
3128 	default:
3129 		return L2CAP_MODE_BASIC;
3130 	}
3131 }
3132 
3133 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3134 {
3135 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3136 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3137 }
3138 
3139 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3140 {
3141 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3142 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3143 }
3144 
3145 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3146 				      struct l2cap_conf_rfc *rfc)
3147 {
3148 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3149 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3150 
3151 		/* Class 1 devices must have ERTM timeouts
3152 		 * exceeding the Link Supervision Timeout.  The
3153 		 * default Link Supervision Timeout for AMP
3154 		 * controllers is 10 seconds.
3155 		 *
3156 		 * Class 1 devices use 0xffffffff for their
3157 		 * best-effort flush timeout, so the clamping logic
3158 		 * will result in a timeout that meets the above
3159 		 * requirement.  ERTM timeouts are 16-bit values, so
3160 		 * the maximum timeout is 65.535 seconds.
3161 		 */
3162 
3163 		/* Convert timeout to milliseconds and round */
3164 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3165 
3166 		/* This is the recommended formula for class 2 devices
3167 		 * that start ERTM timers when packets are sent to the
3168 		 * controller.
3169 		 */
3170 		ertm_to = 3 * ertm_to + 500;
3171 
3172 		if (ertm_to > 0xffff)
3173 			ertm_to = 0xffff;
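		/* Worked example, assuming amp_be_flush_to is reported in
		 * microseconds (the division by 1000 above converts it to
		 * milliseconds): a 100000 us flush timeout becomes 100 ms,
		 * then 3 * 100 + 500 = 800 ms, well under the 65535 ms cap,
		 * so both ERTM timeouts are set to 800 ms.
		 */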
3174 
3175 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3176 		rfc->monitor_timeout = rfc->retrans_timeout;
3177 	} else {
3178 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3179 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3180 	}
3181 }
3182 
3183 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3184 {
3185 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3186 	    __l2cap_ews_supported(chan->conn)) {
3187 		/* use extended control field */
3188 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3189 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3190 	} else {
3191 		chan->tx_win = min_t(u16, chan->tx_win,
3192 				     L2CAP_DEFAULT_TX_WINDOW);
3193 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3194 	}
3195 	chan->ack_win = chan->tx_win;
3196 }
3197 
3198 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3199 {
3200 	struct l2cap_conf_req *req = data;
3201 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3202 	void *ptr = req->data;
3203 	void *endptr = data + data_size;
3204 	u16 size;
3205 
3206 	BT_DBG("chan %p", chan);
3207 
3208 	if (chan->num_conf_req || chan->num_conf_rsp)
3209 		goto done;
3210 
3211 	switch (chan->mode) {
3212 	case L2CAP_MODE_STREAMING:
3213 	case L2CAP_MODE_ERTM:
3214 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3215 			break;
3216 
3217 		if (__l2cap_efs_supported(chan->conn))
3218 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3219 
3220 		/* fall through */
3221 	default:
3222 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3223 		break;
3224 	}
3225 
3226 done:
3227 	if (chan->imtu != L2CAP_DEFAULT_MTU)
3228 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3229 
3230 	switch (chan->mode) {
3231 	case L2CAP_MODE_BASIC:
3232 		if (disable_ertm)
3233 			break;
3234 
3235 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3236 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3237 			break;
3238 
3239 		rfc.mode            = L2CAP_MODE_BASIC;
3240 		rfc.txwin_size      = 0;
3241 		rfc.max_transmit    = 0;
3242 		rfc.retrans_timeout = 0;
3243 		rfc.monitor_timeout = 0;
3244 		rfc.max_pdu_size    = 0;
3245 
3246 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3247 				   (unsigned long) &rfc, endptr - ptr);
3248 		break;
3249 
3250 	case L2CAP_MODE_ERTM:
3251 		rfc.mode            = L2CAP_MODE_ERTM;
3252 		rfc.max_transmit    = chan->max_tx;
3253 
3254 		__l2cap_set_ertm_timeouts(chan, &rfc);
3255 
3256 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3257 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3258 			     L2CAP_FCS_SIZE);
3259 		rfc.max_pdu_size = cpu_to_le16(size);
3260 
3261 		l2cap_txwin_setup(chan);
3262 
3263 		rfc.txwin_size = min_t(u16, chan->tx_win,
3264 				       L2CAP_DEFAULT_TX_WINDOW);
3265 
3266 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3267 				   (unsigned long) &rfc, endptr - ptr);
3268 
3269 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3270 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3271 
3272 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3273 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3274 					   chan->tx_win, endptr - ptr);
3275 
3276 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3277 			if (chan->fcs == L2CAP_FCS_NONE ||
3278 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3279 				chan->fcs = L2CAP_FCS_NONE;
3280 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3281 						   chan->fcs, endptr - ptr);
3282 			}
3283 		break;
3284 
3285 	case L2CAP_MODE_STREAMING:
3286 		l2cap_txwin_setup(chan);
3287 		rfc.mode            = L2CAP_MODE_STREAMING;
3288 		rfc.txwin_size      = 0;
3289 		rfc.max_transmit    = 0;
3290 		rfc.retrans_timeout = 0;
3291 		rfc.monitor_timeout = 0;
3292 
3293 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3294 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3295 			     L2CAP_FCS_SIZE);
3296 		rfc.max_pdu_size = cpu_to_le16(size);
3297 
3298 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3299 				   (unsigned long) &rfc, endptr - ptr);
3300 
3301 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3302 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3303 
3304 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3305 			if (chan->fcs == L2CAP_FCS_NONE ||
3306 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3307 				chan->fcs = L2CAP_FCS_NONE;
3308 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3309 						   chan->fcs, endptr - ptr);
3310 			}
3311 		break;
3312 	}
3313 
3314 	req->dcid  = cpu_to_le16(chan->dcid);
3315 	req->flags = cpu_to_le16(0);
3316 
3317 	return ptr - data;
3318 }
3319 
3320 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3321 {
3322 	struct l2cap_conf_rsp *rsp = data;
3323 	void *ptr = rsp->data;
3324 	void *endptr = data + data_size;
3325 	void *req = chan->conf_req;
3326 	int len = chan->conf_len;
3327 	int type, hint, olen;
3328 	unsigned long val;
3329 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3330 	struct l2cap_conf_efs efs;
3331 	u8 remote_efs = 0;
3332 	u16 mtu = L2CAP_DEFAULT_MTU;
3333 	u16 result = L2CAP_CONF_SUCCESS;
3334 	u16 size;
3335 
3336 	BT_DBG("chan %p", chan);
3337 
3338 	while (len >= L2CAP_CONF_OPT_SIZE) {
3339 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3340 		if (len < 0)
3341 			break;
3342 
3343 		hint  = type & L2CAP_CONF_HINT;
3344 		type &= L2CAP_CONF_MASK;
3345 
3346 		switch (type) {
3347 		case L2CAP_CONF_MTU:
3348 			if (olen != 2)
3349 				break;
3350 			mtu = val;
3351 			break;
3352 
3353 		case L2CAP_CONF_FLUSH_TO:
3354 			if (olen != 2)
3355 				break;
3356 			chan->flush_to = val;
3357 			break;
3358 
3359 		case L2CAP_CONF_QOS:
3360 			break;
3361 
3362 		case L2CAP_CONF_RFC:
3363 			if (olen != sizeof(rfc))
3364 				break;
3365 			memcpy(&rfc, (void *) val, olen);
3366 			break;
3367 
3368 		case L2CAP_CONF_FCS:
3369 			if (olen != 1)
3370 				break;
3371 			if (val == L2CAP_FCS_NONE)
3372 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3373 			break;
3374 
3375 		case L2CAP_CONF_EFS:
3376 			if (olen != sizeof(efs))
3377 				break;
3378 			remote_efs = 1;
3379 			memcpy(&efs, (void *) val, olen);
3380 			break;
3381 
3382 		case L2CAP_CONF_EWS:
3383 			if (olen != 2)
3384 				break;
3385 			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3386 				return -ECONNREFUSED;
3387 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3388 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3389 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3390 			chan->remote_tx_win = val;
3391 			break;
3392 
3393 		default:
3394 			if (hint)
3395 				break;
3396 			result = L2CAP_CONF_UNKNOWN;
3397 			*((u8 *) ptr++) = type;
3398 			break;
3399 		}
3400 	}
3401 
3402 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3403 		goto done;
3404 
3405 	switch (chan->mode) {
3406 	case L2CAP_MODE_STREAMING:
3407 	case L2CAP_MODE_ERTM:
3408 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3409 			chan->mode = l2cap_select_mode(rfc.mode,
3410 						       chan->conn->feat_mask);
3411 			break;
3412 		}
3413 
3414 		if (remote_efs) {
3415 			if (__l2cap_efs_supported(chan->conn))
3416 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3417 			else
3418 				return -ECONNREFUSED;
3419 		}
3420 
3421 		if (chan->mode != rfc.mode)
3422 			return -ECONNREFUSED;
3423 
3424 		break;
3425 	}
3426 
3427 done:
3428 	if (chan->mode != rfc.mode) {
3429 		result = L2CAP_CONF_UNACCEPT;
3430 		rfc.mode = chan->mode;
3431 
3432 		if (chan->num_conf_rsp == 1)
3433 			return -ECONNREFUSED;
3434 
3435 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3436 				   (unsigned long) &rfc, endptr - ptr);
3437 	}
3438 
3439 	if (result == L2CAP_CONF_SUCCESS) {
3440 		/* Configure output options and let the other side know
3441 		 * which ones we don't like. */
3442 
3443 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3444 			result = L2CAP_CONF_UNACCEPT;
3445 		else {
3446 			chan->omtu = mtu;
3447 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3448 		}
3449 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3450 
3451 		if (remote_efs) {
3452 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3453 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3454 			    efs.stype != chan->local_stype) {
3455 
3456 				result = L2CAP_CONF_UNACCEPT;
3457 
3458 				if (chan->num_conf_req >= 1)
3459 					return -ECONNREFUSED;
3460 
3461 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3462 						   sizeof(efs),
3463 						   (unsigned long) &efs, endptr - ptr);
3464 			} else {
3465 				/* Send PENDING Conf Rsp */
3466 				result = L2CAP_CONF_PENDING;
3467 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3468 			}
3469 		}
3470 
3471 		switch (rfc.mode) {
3472 		case L2CAP_MODE_BASIC:
3473 			chan->fcs = L2CAP_FCS_NONE;
3474 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3475 			break;
3476 
3477 		case L2CAP_MODE_ERTM:
3478 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3479 				chan->remote_tx_win = rfc.txwin_size;
3480 			else
3481 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3482 
3483 			chan->remote_max_tx = rfc.max_transmit;
3484 
3485 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3486 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3487 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3488 			rfc.max_pdu_size = cpu_to_le16(size);
3489 			chan->remote_mps = size;
3490 
3491 			__l2cap_set_ertm_timeouts(chan, &rfc);
3492 
3493 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3494 
3495 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3496 					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3497 
3498 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3499 				chan->remote_id = efs.id;
3500 				chan->remote_stype = efs.stype;
3501 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3502 				chan->remote_flush_to =
3503 					le32_to_cpu(efs.flush_to);
3504 				chan->remote_acc_lat =
3505 					le32_to_cpu(efs.acc_lat);
3506 				chan->remote_sdu_itime =
3507 					le32_to_cpu(efs.sdu_itime);
3508 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3509 						   sizeof(efs),
3510 						   (unsigned long) &efs, endptr - ptr);
3511 			}
3512 			break;
3513 
3514 		case L2CAP_MODE_STREAMING:
3515 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3516 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3517 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3518 			rfc.max_pdu_size = cpu_to_le16(size);
3519 			chan->remote_mps = size;
3520 
3521 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3522 
3523 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3524 					   (unsigned long) &rfc, endptr - ptr);
3525 
3526 			break;
3527 
3528 		default:
3529 			result = L2CAP_CONF_UNACCEPT;
3530 
3531 			memset(&rfc, 0, sizeof(rfc));
3532 			rfc.mode = chan->mode;
3533 		}
3534 
3535 		if (result == L2CAP_CONF_SUCCESS)
3536 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3537 	}
3538 	rsp->scid   = cpu_to_le16(chan->dcid);
3539 	rsp->result = cpu_to_le16(result);
3540 	rsp->flags  = cpu_to_le16(0);
3541 
3542 	return ptr - data;
3543 }
3544 
3545 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3546 				void *data, size_t size, u16 *result)
3547 {
3548 	struct l2cap_conf_req *req = data;
3549 	void *ptr = req->data;
3550 	void *endptr = data + size;
3551 	int type, olen;
3552 	unsigned long val;
3553 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3554 	struct l2cap_conf_efs efs;
3555 
3556 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3557 
3558 	while (len >= L2CAP_CONF_OPT_SIZE) {
3559 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3560 		if (len < 0)
3561 			break;
3562 
3563 		switch (type) {
3564 		case L2CAP_CONF_MTU:
3565 			if (olen != 2)
3566 				break;
3567 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3568 				*result = L2CAP_CONF_UNACCEPT;
3569 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3570 			} else
3571 				chan->imtu = val;
3572 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3573 					   endptr - ptr);
3574 			break;
3575 
3576 		case L2CAP_CONF_FLUSH_TO:
3577 			if (olen != 2)
3578 				break;
3579 			chan->flush_to = val;
3580 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3581 					   chan->flush_to, endptr - ptr);
3582 			break;
3583 
3584 		case L2CAP_CONF_RFC:
3585 			if (olen != sizeof(rfc))
3586 				break;
3587 			memcpy(&rfc, (void *)val, olen);
3588 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3589 			    rfc.mode != chan->mode)
3590 				return -ECONNREFUSED;
3591 			chan->fcs = 0;
3592 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3593 					   (unsigned long) &rfc, endptr - ptr);
3594 			break;
3595 
3596 		case L2CAP_CONF_EWS:
3597 			if (olen != 2)
3598 				break;
3599 			chan->ack_win = min_t(u16, val, chan->ack_win);
3600 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3601 					   chan->tx_win, endptr - ptr);
3602 			break;
3603 
3604 		case L2CAP_CONF_EFS:
3605 			if (olen != sizeof(efs))
3606 				break;
3607 			memcpy(&efs, (void *)val, olen);
3608 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3609 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3610 			    efs.stype != chan->local_stype)
3611 				return -ECONNREFUSED;
3612 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3613 					   (unsigned long) &efs, endptr - ptr);
3614 			break;
3615 
3616 		case L2CAP_CONF_FCS:
3617 			if (olen != 1)
3618 				break;
3619 			if (*result == L2CAP_CONF_PENDING)
3620 				if (val == L2CAP_FCS_NONE)
3621 					set_bit(CONF_RECV_NO_FCS,
3622 						&chan->conf_state);
3623 			break;
3624 		}
3625 	}
3626 
3627 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3628 		return -ECONNREFUSED;
3629 
3630 	chan->mode = rfc.mode;
3631 
3632 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3633 		switch (rfc.mode) {
3634 		case L2CAP_MODE_ERTM:
3635 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3636 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3637 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3638 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3639 				chan->ack_win = min_t(u16, chan->ack_win,
3640 						      rfc.txwin_size);
3641 
3642 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3643 				chan->local_msdu = le16_to_cpu(efs.msdu);
3644 				chan->local_sdu_itime =
3645 					le32_to_cpu(efs.sdu_itime);
3646 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3647 				chan->local_flush_to =
3648 					le32_to_cpu(efs.flush_to);
3649 			}
3650 			break;
3651 
3652 		case L2CAP_MODE_STREAMING:
3653 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3654 		}
3655 	}
3656 
3657 	req->dcid   = cpu_to_le16(chan->dcid);
3658 	req->flags  = cpu_to_le16(0);
3659 
3660 	return ptr - data;
3661 }
3662 
3663 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3664 				u16 result, u16 flags)
3665 {
3666 	struct l2cap_conf_rsp *rsp = data;
3667 	void *ptr = rsp->data;
3668 
3669 	BT_DBG("chan %p", chan);
3670 
3671 	rsp->scid   = cpu_to_le16(chan->dcid);
3672 	rsp->result = cpu_to_le16(result);
3673 	rsp->flags  = cpu_to_le16(flags);
3674 
3675 	return ptr - data;
3676 }
3677 
3678 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3679 {
3680 	struct l2cap_le_conn_rsp rsp;
3681 	struct l2cap_conn *conn = chan->conn;
3682 
3683 	BT_DBG("chan %p", chan);
3684 
3685 	rsp.dcid    = cpu_to_le16(chan->scid);
3686 	rsp.mtu     = cpu_to_le16(chan->imtu);
3687 	rsp.mps     = cpu_to_le16(chan->mps);
3688 	rsp.credits = cpu_to_le16(chan->rx_credits);
3689 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3690 
3691 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3692 		       &rsp);
3693 }
3694 
3695 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3696 {
3697 	struct l2cap_conn_rsp rsp;
3698 	struct l2cap_conn *conn = chan->conn;
3699 	u8 buf[128];
3700 	u8 rsp_code;
3701 
3702 	rsp.scid   = cpu_to_le16(chan->dcid);
3703 	rsp.dcid   = cpu_to_le16(chan->scid);
3704 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3705 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3706 
3707 	if (chan->hs_hcon)
3708 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3709 	else
3710 		rsp_code = L2CAP_CONN_RSP;
3711 
3712 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3713 
3714 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3715 
3716 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3717 		return;
3718 
3719 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3720 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3721 	chan->num_conf_req++;
3722 }
3723 
3724 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3725 {
3726 	int type, olen;
3727 	unsigned long val;
3728 	/* Use sane default values in case a misbehaving remote device
3729 	 * did not send an RFC or extended window size option.
3730 	 */
3731 	u16 txwin_ext = chan->ack_win;
3732 	struct l2cap_conf_rfc rfc = {
3733 		.mode = chan->mode,
3734 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3735 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3736 		.max_pdu_size = cpu_to_le16(chan->imtu),
3737 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3738 	};
3739 
3740 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3741 
3742 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3743 		return;
3744 
3745 	while (len >= L2CAP_CONF_OPT_SIZE) {
3746 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3747 		if (len < 0)
3748 			break;
3749 
3750 		switch (type) {
3751 		case L2CAP_CONF_RFC:
3752 			if (olen != sizeof(rfc))
3753 				break;
3754 			memcpy(&rfc, (void *)val, olen);
3755 			break;
3756 		case L2CAP_CONF_EWS:
3757 			if (olen != 2)
3758 				break;
3759 			txwin_ext = val;
3760 			break;
3761 		}
3762 	}
3763 
3764 	switch (rfc.mode) {
3765 	case L2CAP_MODE_ERTM:
3766 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3767 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3768 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
3769 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3770 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3771 		else
3772 			chan->ack_win = min_t(u16, chan->ack_win,
3773 					      rfc.txwin_size);
3774 		break;
3775 	case L2CAP_MODE_STREAMING:
3776 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3777 	}
3778 }
3779 
3780 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3781 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3782 				    u8 *data)
3783 {
3784 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3785 
3786 	if (cmd_len < sizeof(*rej))
3787 		return -EPROTO;
3788 
3789 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3790 		return 0;
3791 
3792 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3793 	    cmd->ident == conn->info_ident) {
3794 		cancel_delayed_work(&conn->info_timer);
3795 
3796 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3797 		conn->info_ident = 0;
3798 
3799 		l2cap_conn_start(conn);
3800 	}
3801 
3802 	return 0;
3803 }
3804 
3805 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3806 					struct l2cap_cmd_hdr *cmd,
3807 					u8 *data, u8 rsp_code, u8 amp_id)
3808 {
3809 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3810 	struct l2cap_conn_rsp rsp;
3811 	struct l2cap_chan *chan = NULL, *pchan;
3812 	int result, status = L2CAP_CS_NO_INFO;
3813 
3814 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3815 	__le16 psm = req->psm;
3816 
3817 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3818 
3819 	/* Check if we have a socket listening on this PSM */
3820 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3821 					 &conn->hcon->dst, ACL_LINK);
3822 	if (!pchan) {
3823 		result = L2CAP_CR_BAD_PSM;
3824 		goto sendresp;
3825 	}
3826 
3827 	mutex_lock(&conn->chan_lock);
3828 	l2cap_chan_lock(pchan);
3829 
3830 	/* Check if the ACL is secure enough (if not SDP) */
3831 	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3832 	    !hci_conn_check_link_mode(conn->hcon)) {
3833 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3834 		result = L2CAP_CR_SEC_BLOCK;
3835 		goto response;
3836 	}
3837 
3838 	result = L2CAP_CR_NO_MEM;
3839 
3840 	/* Check for valid dynamic CID range (as per Erratum 3253) */
3841 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
3842 		result = L2CAP_CR_INVALID_SCID;
3843 		goto response;
3844 	}
3845 
3846 	/* Check if we already have a channel with that DCID */
3847 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
3848 		result = L2CAP_CR_SCID_IN_USE;
3849 		goto response;
3850 	}
3851 
3852 	chan = pchan->ops->new_connection(pchan);
3853 	if (!chan)
3854 		goto response;
3855 
3856 	/* For certain devices (e.g. a HID mouse), support for authentication,
3857 	 * pairing and bonding is optional. For such devices, in order to avoid
3858 	 * keeping the ACL alive for too long after L2CAP disconnection, reset
3859 	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3860 	 */
3861 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3862 
3863 	bacpy(&chan->src, &conn->hcon->src);
3864 	bacpy(&chan->dst, &conn->hcon->dst);
3865 	chan->src_type = bdaddr_src_type(conn->hcon);
3866 	chan->dst_type = bdaddr_dst_type(conn->hcon);
3867 	chan->psm  = psm;
3868 	chan->dcid = scid;
3869 	chan->local_amp_id = amp_id;
3870 
3871 	__l2cap_chan_add(conn, chan);
3872 
3873 	dcid = chan->scid;
3874 
3875 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3876 
3877 	chan->ident = cmd->ident;
3878 
3879 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3880 		if (l2cap_chan_check_security(chan, false)) {
3881 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3882 				l2cap_state_change(chan, BT_CONNECT2);
3883 				result = L2CAP_CR_PEND;
3884 				status = L2CAP_CS_AUTHOR_PEND;
3885 				chan->ops->defer(chan);
3886 			} else {
3887 				/* Force pending result for AMP controllers.
3888 				 * The connection will succeed after the
3889 				 * physical link is up.
3890 				 */
3891 				if (amp_id == AMP_ID_BREDR) {
3892 					l2cap_state_change(chan, BT_CONFIG);
3893 					result = L2CAP_CR_SUCCESS;
3894 				} else {
3895 					l2cap_state_change(chan, BT_CONNECT2);
3896 					result = L2CAP_CR_PEND;
3897 				}
3898 				status = L2CAP_CS_NO_INFO;
3899 			}
3900 		} else {
3901 			l2cap_state_change(chan, BT_CONNECT2);
3902 			result = L2CAP_CR_PEND;
3903 			status = L2CAP_CS_AUTHEN_PEND;
3904 		}
3905 	} else {
3906 		l2cap_state_change(chan, BT_CONNECT2);
3907 		result = L2CAP_CR_PEND;
3908 		status = L2CAP_CS_NO_INFO;
3909 	}
3910 
3911 response:
3912 	l2cap_chan_unlock(pchan);
3913 	mutex_unlock(&conn->chan_lock);
3914 	l2cap_chan_put(pchan);
3915 
3916 sendresp:
3917 	rsp.scid   = cpu_to_le16(scid);
3918 	rsp.dcid   = cpu_to_le16(dcid);
3919 	rsp.result = cpu_to_le16(result);
3920 	rsp.status = cpu_to_le16(status);
3921 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3922 
3923 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3924 		struct l2cap_info_req info;
3925 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3926 
3927 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3928 		conn->info_ident = l2cap_get_ident(conn);
3929 
3930 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3931 
3932 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3933 			       sizeof(info), &info);
3934 	}
3935 
3936 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3937 	    result == L2CAP_CR_SUCCESS) {
3938 		u8 buf[128];
3939 		set_bit(CONF_REQ_SENT, &chan->conf_state);
3940 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3941 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3942 		chan->num_conf_req++;
3943 	}
3944 
3945 	return chan;
3946 }
3947 
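/* Handle an incoming L2CAP Connection Request (BR/EDR only). After a
 * basic length check the remote device is flagged as connected towards
 * the management interface (so userspace sees the incoming link), and
 * the actual channel setup is delegated to l2cap_connect(), which sends
 * an L2CAP_CONN_RSP for amp_id 0, i.e. the BR/EDR controller.
 */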
3948 static int l2cap_connect_req(struct l2cap_conn *conn,
3949 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3950 {
3951 	struct hci_dev *hdev = conn->hcon->hdev;
3952 	struct hci_conn *hcon = conn->hcon;
3953 
3954 	if (cmd_len < sizeof(struct l2cap_conn_req))
3955 		return -EPROTO;
3956 
3957 	hci_dev_lock(hdev);
3958 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3959 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3960 		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3961 	hci_dev_unlock(hdev);
3962 
3963 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3964 	return 0;
3965 }
3966 
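/* Handle a Connection Response or Create Channel Response. The channel
 * is looked up by its source CID when the peer supplied one, otherwise
 * by the pending command identifier. On success the channel moves to
 * BT_CONFIG and, unless one was already sent, a first Configure Request
 * is issued; a pending result only sets CONF_CONNECT_PEND, and any other
 * result tears the channel down with ECONNREFUSED.
 */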
3967 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3968 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3969 				    u8 *data)
3970 {
3971 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3972 	u16 scid, dcid, result, status;
3973 	struct l2cap_chan *chan;
3974 	u8 req[128];
3975 	int err;
3976 
3977 	if (cmd_len < sizeof(*rsp))
3978 		return -EPROTO;
3979 
3980 	scid   = __le16_to_cpu(rsp->scid);
3981 	dcid   = __le16_to_cpu(rsp->dcid);
3982 	result = __le16_to_cpu(rsp->result);
3983 	status = __le16_to_cpu(rsp->status);
3984 
3985 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3986 	       dcid, scid, result, status);
3987 
3988 	mutex_lock(&conn->chan_lock);
3989 
3990 	if (scid) {
3991 		chan = __l2cap_get_chan_by_scid(conn, scid);
3992 		if (!chan) {
3993 			err = -EBADSLT;
3994 			goto unlock;
3995 		}
3996 	} else {
3997 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3998 		if (!chan) {
3999 			err = -EBADSLT;
4000 			goto unlock;
4001 		}
4002 	}
4003 
4004 	err = 0;
4005 
4006 	l2cap_chan_lock(chan);
4007 
4008 	switch (result) {
4009 	case L2CAP_CR_SUCCESS:
4010 		l2cap_state_change(chan, BT_CONFIG);
4011 		chan->ident = 0;
4012 		chan->dcid = dcid;
4013 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4014 
4015 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4016 			break;
4017 
4018 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4019 			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
4020 		chan->num_conf_req++;
4021 		break;
4022 
4023 	case L2CAP_CR_PEND:
4024 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4025 		break;
4026 
4027 	default:
4028 		l2cap_chan_del(chan, ECONNREFUSED);
4029 		break;
4030 	}
4031 
4032 	l2cap_chan_unlock(chan);
4033 
4034 unlock:
4035 	mutex_unlock(&conn->chan_lock);
4036 
4037 	return err;
4038 }
4039 
4040 static inline void set_default_fcs(struct l2cap_chan *chan)
4041 {
4042 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4043 	 * sides request it.
4044 	 */
4045 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4046 		chan->fcs = L2CAP_FCS_NONE;
4047 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4048 		chan->fcs = L2CAP_FCS_CRC16;
4049 }
4050 
4051 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4052 				    u8 ident, u16 flags)
4053 {
4054 	struct l2cap_conn *conn = chan->conn;
4055 
4056 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4057 	       flags);
4058 
4059 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4060 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4061 
4062 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4063 		       l2cap_build_conf_rsp(chan, data,
4064 					    L2CAP_CONF_SUCCESS, flags), data);
4065 }
4066 
4067 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4068 				   u16 scid, u16 dcid)
4069 {
4070 	struct l2cap_cmd_rej_cid rej;
4071 
4072 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4073 	rej.scid = __cpu_to_le16(scid);
4074 	rej.dcid = __cpu_to_le16(dcid);
4075 
4076 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4077 }
4078 
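/* Handle a Configure Request. Option data may be split over several
 * requests using the continuation flag, so fragments are accumulated in
 * chan->conf_req until the final one arrives; only then is the complete
 * option set parsed and a full Configure Response built. Once both
 * directions are configured (CONF_INPUT_DONE and CONF_OUTPUT_DONE) the
 * channel is initialized for ERTM/streaming if needed and marked ready.
 */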
4079 static inline int l2cap_config_req(struct l2cap_conn *conn,
4080 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4081 				   u8 *data)
4082 {
4083 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4084 	u16 dcid, flags;
4085 	u8 rsp[64];
4086 	struct l2cap_chan *chan;
4087 	int len, err = 0;
4088 
4089 	if (cmd_len < sizeof(*req))
4090 		return -EPROTO;
4091 
4092 	dcid  = __le16_to_cpu(req->dcid);
4093 	flags = __le16_to_cpu(req->flags);
4094 
4095 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4096 
4097 	chan = l2cap_get_chan_by_scid(conn, dcid);
4098 	if (!chan) {
4099 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4100 		return 0;
4101 	}
4102 
4103 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4104 		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4105 				       chan->dcid);
4106 		goto unlock;
4107 	}
4108 
4109 	/* Reject if config buffer is too small. */
4110 	len = cmd_len - sizeof(*req);
4111 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
4112 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4113 			       l2cap_build_conf_rsp(chan, rsp,
4114 			       L2CAP_CONF_REJECT, flags), rsp);
4115 		goto unlock;
4116 	}
4117 
4118 	/* Store config. */
4119 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
4120 	chan->conf_len += len;
4121 
4122 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4123 		/* Incomplete config. Send empty response. */
4124 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4125 			       l2cap_build_conf_rsp(chan, rsp,
4126 			       L2CAP_CONF_SUCCESS, flags), rsp);
4127 		goto unlock;
4128 	}
4129 
4130 	/* Complete config. */
4131 	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4132 	if (len < 0) {
4133 		l2cap_send_disconn_req(chan, ECONNRESET);
4134 		goto unlock;
4135 	}
4136 
4137 	chan->ident = cmd->ident;
4138 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4139 	chan->num_conf_rsp++;
4140 
4141 	/* Reset config buffer. */
4142 	chan->conf_len = 0;
4143 
4144 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4145 		goto unlock;
4146 
4147 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4148 		set_default_fcs(chan);
4149 
4150 		if (chan->mode == L2CAP_MODE_ERTM ||
4151 		    chan->mode == L2CAP_MODE_STREAMING)
4152 			err = l2cap_ertm_init(chan);
4153 
4154 		if (err < 0)
4155 			l2cap_send_disconn_req(chan, -err);
4156 		else
4157 			l2cap_chan_ready(chan);
4158 
4159 		goto unlock;
4160 	}
4161 
4162 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4163 		u8 buf[64];
4164 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4165 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4166 		chan->num_conf_req++;
4167 	}
4168 
4169 	/* Got Conf Rsp PENDING from remote side and assume we sent
4170 	 * Conf Rsp PENDING in the code above */
4171 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4172 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4173 
4174 		/* check compatibility */
4175 
4176 		/* Send rsp for BR/EDR channel */
4177 		if (!chan->hs_hcon)
4178 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4179 		else
4180 			chan->ident = cmd->ident;
4181 	}
4182 
4183 unlock:
4184 	l2cap_chan_unlock(chan);
4185 	return err;
4186 }
4187 
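/* Handle a Configure Response. A success result clears the remote
 * pending state, a pending result (used with the extended flow spec)
 * may trigger creation of an AMP logical link, and an
 * unacceptable-parameters result causes a new Configure Request to be
 * built from the peer's preferences, up to L2CAP_CONF_MAX_CONF_RSP
 * attempts. Any other result disconnects the channel. When the final
 * (non-continuation) response has been handled and our outgoing
 * configuration is also done, the channel becomes ready.
 */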
4188 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4189 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4190 				   u8 *data)
4191 {
4192 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4193 	u16 scid, flags, result;
4194 	struct l2cap_chan *chan;
4195 	int len = cmd_len - sizeof(*rsp);
4196 	int err = 0;
4197 
4198 	if (cmd_len < sizeof(*rsp))
4199 		return -EPROTO;
4200 
4201 	scid   = __le16_to_cpu(rsp->scid);
4202 	flags  = __le16_to_cpu(rsp->flags);
4203 	result = __le16_to_cpu(rsp->result);
4204 
4205 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4206 	       result, len);
4207 
4208 	chan = l2cap_get_chan_by_scid(conn, scid);
4209 	if (!chan)
4210 		return 0;
4211 
4212 	switch (result) {
4213 	case L2CAP_CONF_SUCCESS:
4214 		l2cap_conf_rfc_get(chan, rsp->data, len);
4215 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4216 		break;
4217 
4218 	case L2CAP_CONF_PENDING:
4219 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4220 
4221 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4222 			char buf[64];
4223 
4224 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4225 						   buf, sizeof(buf), &result);
4226 			if (len < 0) {
4227 				l2cap_send_disconn_req(chan, ECONNRESET);
4228 				goto done;
4229 			}
4230 
4231 			if (!chan->hs_hcon) {
4232 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4233 							0);
4234 			} else {
4235 				if (l2cap_check_efs(chan)) {
4236 					amp_create_logical_link(chan);
4237 					chan->ident = cmd->ident;
4238 				}
4239 			}
4240 		}
4241 		goto done;
4242 
4243 	case L2CAP_CONF_UNACCEPT:
4244 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4245 			char req[64];
4246 
4247 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4248 				l2cap_send_disconn_req(chan, ECONNRESET);
4249 				goto done;
4250 			}
4251 
4252 			/* throw out any old stored conf requests */
4253 			result = L2CAP_CONF_SUCCESS;
4254 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4255 						   req, sizeof(req), &result);
4256 			if (len < 0) {
4257 				l2cap_send_disconn_req(chan, ECONNRESET);
4258 				goto done;
4259 			}
4260 
4261 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4262 				       L2CAP_CONF_REQ, len, req);
4263 			chan->num_conf_req++;
4264 			if (result != L2CAP_CONF_SUCCESS)
4265 				goto done;
4266 			break;
4267 		}
4268 		/* fall through */
4269 
4270 	default:
4271 		l2cap_chan_set_err(chan, ECONNRESET);
4272 
4273 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4274 		l2cap_send_disconn_req(chan, ECONNRESET);
4275 		goto done;
4276 	}
4277 
4278 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4279 		goto done;
4280 
4281 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4282 
4283 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4284 		set_default_fcs(chan);
4285 
4286 		if (chan->mode == L2CAP_MODE_ERTM ||
4287 		    chan->mode == L2CAP_MODE_STREAMING)
4288 			err = l2cap_ertm_init(chan);
4289 
4290 		if (err < 0)
4291 			l2cap_send_disconn_req(chan, -err);
4292 		else
4293 			l2cap_chan_ready(chan);
4294 	}
4295 
4296 done:
4297 	l2cap_chan_unlock(chan);
4298 	return err;
4299 }
4300 
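/* Handle a Disconnection Request: echo the CID pair back in a
 * Disconnection Response, then shut down and delete the channel. A
 * temporary reference (l2cap_chan_hold/put) keeps the channel alive
 * while ops->close() runs after it has been removed from the
 * connection's channel list.
 */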
4301 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4302 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4303 				       u8 *data)
4304 {
4305 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4306 	struct l2cap_disconn_rsp rsp;
4307 	u16 dcid, scid;
4308 	struct l2cap_chan *chan;
4309 
4310 	if (cmd_len != sizeof(*req))
4311 		return -EPROTO;
4312 
4313 	scid = __le16_to_cpu(req->scid);
4314 	dcid = __le16_to_cpu(req->dcid);
4315 
4316 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4317 
4318 	mutex_lock(&conn->chan_lock);
4319 
4320 	chan = __l2cap_get_chan_by_scid(conn, dcid);
4321 	if (!chan) {
4322 		mutex_unlock(&conn->chan_lock);
4323 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4324 		return 0;
4325 	}
4326 
4327 	l2cap_chan_lock(chan);
4328 
4329 	rsp.dcid = cpu_to_le16(chan->scid);
4330 	rsp.scid = cpu_to_le16(chan->dcid);
4331 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4332 
4333 	chan->ops->set_shutdown(chan);
4334 
4335 	l2cap_chan_hold(chan);
4336 	l2cap_chan_del(chan, ECONNRESET);
4337 
4338 	l2cap_chan_unlock(chan);
4339 
4340 	chan->ops->close(chan);
4341 	l2cap_chan_put(chan);
4342 
4343 	mutex_unlock(&conn->chan_lock);
4344 
4345 	return 0;
4346 }
4347 
4348 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4349 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4350 				       u8 *data)
4351 {
4352 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4353 	u16 dcid, scid;
4354 	struct l2cap_chan *chan;
4355 
4356 	if (cmd_len != sizeof(*rsp))
4357 		return -EPROTO;
4358 
4359 	scid = __le16_to_cpu(rsp->scid);
4360 	dcid = __le16_to_cpu(rsp->dcid);
4361 
4362 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4363 
4364 	mutex_lock(&conn->chan_lock);
4365 
4366 	chan = __l2cap_get_chan_by_scid(conn, scid);
4367 	if (!chan) {
4368 		mutex_unlock(&conn->chan_lock);
4369 		return 0;
4370 	}
4371 
4372 	l2cap_chan_lock(chan);
4373 
4374 	l2cap_chan_hold(chan);
4375 	l2cap_chan_del(chan, 0);
4376 
4377 	l2cap_chan_unlock(chan);
4378 
4379 	chan->ops->close(chan);
4380 	l2cap_chan_put(chan);
4381 
4382 	mutex_unlock(&conn->chan_lock);
4383 
4384 	return 0;
4385 }
4386 
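/* Handle an Information Request. Feature-mask queries are answered with
 * the locally supported options (ERTM, streaming and FCS unless ERTM is
 * disabled, plus extended flow spec/window when A2MP is enabled), and
 * fixed-channel queries return the local fixed channel bitmap. Any
 * other type gets a "not supported" response.
 */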
4387 static inline int l2cap_information_req(struct l2cap_conn *conn,
4388 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4389 					u8 *data)
4390 {
4391 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4392 	u16 type;
4393 
4394 	if (cmd_len != sizeof(*req))
4395 		return -EPROTO;
4396 
4397 	type = __le16_to_cpu(req->type);
4398 
4399 	BT_DBG("type 0x%4.4x", type);
4400 
4401 	if (type == L2CAP_IT_FEAT_MASK) {
4402 		u8 buf[8];
4403 		u32 feat_mask = l2cap_feat_mask;
4404 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4405 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4406 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4407 		if (!disable_ertm)
4408 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4409 				| L2CAP_FEAT_FCS;
4410 		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4411 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4412 				| L2CAP_FEAT_EXT_WINDOW;
4413 
4414 		put_unaligned_le32(feat_mask, rsp->data);
4415 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4416 			       buf);
4417 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4418 		u8 buf[12];
4419 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4420 
4421 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4422 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4423 		rsp->data[0] = conn->local_fixed_chan;
4424 		memset(rsp->data + 1, 0, 7);
4425 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4426 			       buf);
4427 	} else {
4428 		struct l2cap_info_rsp rsp;
4429 		rsp.type   = cpu_to_le16(type);
4430 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4431 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4432 			       &rsp);
4433 	}
4434 
4435 	return 0;
4436 }
4437 
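/* Handle an Information Response during connection setup. Responses
 * that do not match the outstanding identifier, or that arrive after
 * the feature exchange already finished, are ignored. The feature mask
 * comes first; if the peer supports fixed channels a follow-up request
 * for them is sent, otherwise (and once that second answer, or any
 * failure, arrives) the exchange is marked done and pending channels
 * are started via l2cap_conn_start().
 */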
4438 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4439 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4440 					u8 *data)
4441 {
4442 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4443 	u16 type, result;
4444 
4445 	if (cmd_len < sizeof(*rsp))
4446 		return -EPROTO;
4447 
4448 	type   = __le16_to_cpu(rsp->type);
4449 	result = __le16_to_cpu(rsp->result);
4450 
4451 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4452 
4453 	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
4454 	if (cmd->ident != conn->info_ident ||
4455 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4456 		return 0;
4457 
4458 	cancel_delayed_work(&conn->info_timer);
4459 
4460 	if (result != L2CAP_IR_SUCCESS) {
4461 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4462 		conn->info_ident = 0;
4463 
4464 		l2cap_conn_start(conn);
4465 
4466 		return 0;
4467 	}
4468 
4469 	switch (type) {
4470 	case L2CAP_IT_FEAT_MASK:
4471 		conn->feat_mask = get_unaligned_le32(rsp->data);
4472 
4473 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4474 			struct l2cap_info_req req;
4475 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4476 
4477 			conn->info_ident = l2cap_get_ident(conn);
4478 
4479 			l2cap_send_cmd(conn, conn->info_ident,
4480 				       L2CAP_INFO_REQ, sizeof(req), &req);
4481 		} else {
4482 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4483 			conn->info_ident = 0;
4484 
4485 			l2cap_conn_start(conn);
4486 		}
4487 		break;
4488 
4489 	case L2CAP_IT_FIXED_CHAN:
4490 		conn->remote_fixed_chan = rsp->data[0];
4491 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4492 		conn->info_ident = 0;
4493 
4494 		l2cap_conn_start(conn);
4495 		break;
4496 	}
4497 
4498 	return 0;
4499 }
4500 
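/* Handle an A2MP Create Channel Request. Controller id 0 is just a
 * normal BR/EDR connect; any other id must name a powered-up AMP
 * controller, and the resulting channel is tied to the AMP physical
 * link (hs_hcon) with FCS disabled and the connection MTU limited to
 * the AMP block MTU. Invalid controller ids are rejected with
 * L2CAP_CR_BAD_AMP.
 */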
4501 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4502 				    struct l2cap_cmd_hdr *cmd,
4503 				    u16 cmd_len, void *data)
4504 {
4505 	struct l2cap_create_chan_req *req = data;
4506 	struct l2cap_create_chan_rsp rsp;
4507 	struct l2cap_chan *chan;
4508 	struct hci_dev *hdev;
4509 	u16 psm, scid;
4510 
4511 	if (cmd_len != sizeof(*req))
4512 		return -EPROTO;
4513 
4514 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4515 		return -EINVAL;
4516 
4517 	psm = le16_to_cpu(req->psm);
4518 	scid = le16_to_cpu(req->scid);
4519 
4520 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4521 
4522 	/* For controller id 0 make a BR/EDR connection */
4523 	if (req->amp_id == AMP_ID_BREDR) {
4524 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4525 			      req->amp_id);
4526 		return 0;
4527 	}
4528 
4529 	/* Validate AMP controller id */
4530 	hdev = hci_dev_get(req->amp_id);
4531 	if (!hdev)
4532 		goto error;
4533 
4534 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4535 		hci_dev_put(hdev);
4536 		goto error;
4537 	}
4538 
4539 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4540 			     req->amp_id);
4541 	if (chan) {
4542 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4543 		struct hci_conn *hs_hcon;
4544 
4545 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4546 						  &conn->hcon->dst);
4547 		if (!hs_hcon) {
4548 			hci_dev_put(hdev);
4549 			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4550 					       chan->dcid);
4551 			return 0;
4552 		}
4553 
4554 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4555 
4556 		mgr->bredr_chan = chan;
4557 		chan->hs_hcon = hs_hcon;
4558 		chan->fcs = L2CAP_FCS_NONE;
4559 		conn->mtu = hdev->block_mtu;
4560 	}
4561 
4562 	hci_dev_put(hdev);
4563 
4564 	return 0;
4565 
4566 error:
4567 	rsp.dcid = 0;
4568 	rsp.scid = cpu_to_le16(scid);
4569 	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4570 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4571 
4572 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4573 		       sizeof(rsp), &rsp);
4574 
4575 	return 0;
4576 }
4577 
4578 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4579 {
4580 	struct l2cap_move_chan_req req;
4581 	u8 ident;
4582 
4583 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4584 
4585 	ident = l2cap_get_ident(chan->conn);
4586 	chan->ident = ident;
4587 
4588 	req.icid = cpu_to_le16(chan->scid);
4589 	req.dest_amp_id = dest_amp_id;
4590 
4591 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4592 		       &req);
4593 
4594 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4595 }
4596 
4597 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4598 {
4599 	struct l2cap_move_chan_rsp rsp;
4600 
4601 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4602 
4603 	rsp.icid = cpu_to_le16(chan->dcid);
4604 	rsp.result = cpu_to_le16(result);
4605 
4606 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4607 		       sizeof(rsp), &rsp);
4608 }
4609 
4610 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4611 {
4612 	struct l2cap_move_chan_cfm cfm;
4613 
4614 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4615 
4616 	chan->ident = l2cap_get_ident(chan->conn);
4617 
4618 	cfm.icid = cpu_to_le16(chan->scid);
4619 	cfm.result = cpu_to_le16(result);
4620 
4621 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4622 		       sizeof(cfm), &cfm);
4623 
4624 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4625 }
4626 
4627 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4628 {
4629 	struct l2cap_move_chan_cfm cfm;
4630 
4631 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4632 
4633 	cfm.icid = cpu_to_le16(icid);
4634 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4635 
4636 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4637 		       sizeof(cfm), &cfm);
4638 }
4639 
4640 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4641 					 u16 icid)
4642 {
4643 	struct l2cap_move_chan_cfm_rsp rsp;
4644 
4645 	BT_DBG("icid 0x%4.4x", icid);
4646 
4647 	rsp.icid = cpu_to_le16(icid);
4648 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4649 }
4650 
4651 static void __release_logical_link(struct l2cap_chan *chan)
4652 {
4653 	chan->hs_hchan = NULL;
4654 	chan->hs_hcon = NULL;
4655 
4656 	/* Placeholder - release the logical link */
4657 }
4658 
4659 static void l2cap_logical_fail(struct l2cap_chan *chan)
4660 {
4661 	/* Logical link setup failed */
4662 	if (chan->state != BT_CONNECTED) {
4663 		/* Create channel failure, disconnect */
4664 		l2cap_send_disconn_req(chan, ECONNRESET);
4665 		return;
4666 	}
4667 
4668 	switch (chan->move_role) {
4669 	case L2CAP_MOVE_ROLE_RESPONDER:
4670 		l2cap_move_done(chan);
4671 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4672 		break;
4673 	case L2CAP_MOVE_ROLE_INITIATOR:
4674 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4675 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4676 			/* Remote has only sent pending or
4677 			 * success responses, clean up
4678 			 */
4679 			l2cap_move_done(chan);
4680 		}
4681 
4682 		/* Other amp move states imply that the move
4683 		 * has already been aborted
4684 		 */
4685 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4686 		break;
4687 	}
4688 }
4689 
4690 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4691 					struct hci_chan *hchan)
4692 {
4693 	struct l2cap_conf_rsp rsp;
4694 
4695 	chan->hs_hchan = hchan;
4696 	chan->hs_hcon->l2cap_data = chan->conn;
4697 
4698 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4699 
4700 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4701 		int err;
4702 
4703 		set_default_fcs(chan);
4704 
4705 		err = l2cap_ertm_init(chan);
4706 		if (err < 0)
4707 			l2cap_send_disconn_req(chan, -err);
4708 		else
4709 			l2cap_chan_ready(chan);
4710 	}
4711 }
4712 
4713 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4714 				      struct hci_chan *hchan)
4715 {
4716 	chan->hs_hcon = hchan->conn;
4717 	chan->hs_hcon->l2cap_data = chan->conn;
4718 
4719 	BT_DBG("move_state %d", chan->move_state);
4720 
4721 	switch (chan->move_state) {
4722 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4723 		/* Move confirm will be sent after a success
4724 		 * response is received
4725 		 */
4726 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4727 		break;
4728 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4729 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4730 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4731 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4732 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4733 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4734 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4735 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4736 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4737 		}
4738 		break;
4739 	default:
4740 		/* Move was not in the expected state, release the logical link */
4741 		__release_logical_link(chan);
4742 
4743 		chan->move_state = L2CAP_MOVE_STABLE;
4744 	}
4745 }
4746 
4747 /* Call with chan locked */
4748 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4749 		       u8 status)
4750 {
4751 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4752 
4753 	if (status) {
4754 		l2cap_logical_fail(chan);
4755 		__release_logical_link(chan);
4756 		return;
4757 	}
4758 
4759 	if (chan->state != BT_CONNECTED) {
4760 		/* Ignore logical link if channel is on BR/EDR */
4761 		if (chan->local_amp_id != AMP_ID_BREDR)
4762 			l2cap_logical_finish_create(chan, hchan);
4763 	} else {
4764 		l2cap_logical_finish_move(chan, hchan);
4765 	}
4766 }
4767 
4768 void l2cap_move_start(struct l2cap_chan *chan)
4769 {
4770 	BT_DBG("chan %p", chan);
4771 
4772 	if (chan->local_amp_id == AMP_ID_BREDR) {
4773 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4774 			return;
4775 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4776 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4777 		/* Placeholder - start physical link setup */
4778 	} else {
4779 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4780 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4781 		chan->move_id = 0;
4782 		l2cap_move_setup(chan);
4783 		l2cap_send_move_chan_req(chan, 0);
4784 	}
4785 }
4786 
4787 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4788 			    u8 local_amp_id, u8 remote_amp_id)
4789 {
4790 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4791 	       local_amp_id, remote_amp_id);
4792 
4793 	chan->fcs = L2CAP_FCS_NONE;
4794 
4795 	/* Outgoing channel on AMP */
4796 	if (chan->state == BT_CONNECT) {
4797 		if (result == L2CAP_CR_SUCCESS) {
4798 			chan->local_amp_id = local_amp_id;
4799 			l2cap_send_create_chan_req(chan, remote_amp_id);
4800 		} else {
4801 			/* Revert to BR/EDR connect */
4802 			l2cap_send_conn_req(chan);
4803 		}
4804 
4805 		return;
4806 	}
4807 
4808 	/* Incoming channel on AMP */
4809 	if (__l2cap_no_conn_pending(chan)) {
4810 		struct l2cap_conn_rsp rsp;
4811 		char buf[128];
4812 		rsp.scid = cpu_to_le16(chan->dcid);
4813 		rsp.dcid = cpu_to_le16(chan->scid);
4814 
4815 		if (result == L2CAP_CR_SUCCESS) {
4816 			/* Send successful response */
4817 			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4818 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4819 		} else {
4820 			/* Send negative response */
4821 			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4822 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4823 		}
4824 
4825 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4826 			       sizeof(rsp), &rsp);
4827 
4828 		if (result == L2CAP_CR_SUCCESS) {
4829 			l2cap_state_change(chan, BT_CONFIG);
4830 			set_bit(CONF_REQ_SENT, &chan->conf_state);
4831 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4832 				       L2CAP_CONF_REQ,
4833 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4834 			chan->num_conf_req++;
4835 		}
4836 	}
4837 }
4838 
4839 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4840 				   u8 remote_amp_id)
4841 {
4842 	l2cap_move_setup(chan);
4843 	chan->move_id = local_amp_id;
4844 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
4845 
4846 	l2cap_send_move_chan_req(chan, remote_amp_id);
4847 }
4848 
4849 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4850 {
4851 	struct hci_chan *hchan = NULL;
4852 
4853 	/* Placeholder - get hci_chan for logical link */
4854 
4855 	if (hchan) {
4856 		if (hchan->state == BT_CONNECTED) {
4857 			/* Logical link is ready to go */
4858 			chan->hs_hcon = hchan->conn;
4859 			chan->hs_hcon->l2cap_data = chan->conn;
4860 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4861 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4862 
4863 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4864 		} else {
4865 			/* Wait for logical link to be ready */
4866 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4867 		}
4868 	} else {
4869 		/* Logical link not available */
4870 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4871 	}
4872 }
4873 
4874 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4875 {
4876 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4877 		u8 rsp_result;
4878 		if (result == -EINVAL)
4879 			rsp_result = L2CAP_MR_BAD_ID;
4880 		else
4881 			rsp_result = L2CAP_MR_NOT_ALLOWED;
4882 
4883 		l2cap_send_move_chan_rsp(chan, rsp_result);
4884 	}
4885 
4886 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
4887 	chan->move_state = L2CAP_MOVE_STABLE;
4888 
4889 	/* Restart data transmission */
4890 	l2cap_ertm_send(chan);
4891 }
4892 
4893 /* Invoke with locked chan */
4894 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4895 {
4896 	u8 local_amp_id = chan->local_amp_id;
4897 	u8 remote_amp_id = chan->remote_amp_id;
4898 
4899 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4900 	       chan, result, local_amp_id, remote_amp_id);
4901 
4902 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4903 		l2cap_chan_unlock(chan);
4904 		return;
4905 	}
4906 
4907 	if (chan->state != BT_CONNECTED) {
4908 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4909 	} else if (result != L2CAP_MR_SUCCESS) {
4910 		l2cap_do_move_cancel(chan, result);
4911 	} else {
4912 		switch (chan->move_role) {
4913 		case L2CAP_MOVE_ROLE_INITIATOR:
4914 			l2cap_do_move_initiate(chan, local_amp_id,
4915 					       remote_amp_id);
4916 			break;
4917 		case L2CAP_MOVE_ROLE_RESPONDER:
4918 			l2cap_do_move_respond(chan, result);
4919 			break;
4920 		default:
4921 			l2cap_do_move_cancel(chan, result);
4922 			break;
4923 		}
4924 	}
4925 }
4926 
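/* Handle a Move Channel Request from an AMP-capable peer. Moves are
 * refused for fixed channels, for channels with a BR/EDR-only policy
 * and for modes other than ERTM/streaming, and a request naming the
 * controller the channel already lives on yields L2CAP_MR_SAME_ID.
 * Colliding moves are resolved by bd_addr: if the local address is
 * larger, the incoming request is rejected with L2CAP_MR_COLLISION and
 * the local move proceeds instead.
 */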
4927 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4928 					 struct l2cap_cmd_hdr *cmd,
4929 					 u16 cmd_len, void *data)
4930 {
4931 	struct l2cap_move_chan_req *req = data;
4932 	struct l2cap_move_chan_rsp rsp;
4933 	struct l2cap_chan *chan;
4934 	u16 icid = 0;
4935 	u16 result = L2CAP_MR_NOT_ALLOWED;
4936 
4937 	if (cmd_len != sizeof(*req))
4938 		return -EPROTO;
4939 
4940 	icid = le16_to_cpu(req->icid);
4941 
4942 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4943 
4944 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4945 		return -EINVAL;
4946 
4947 	chan = l2cap_get_chan_by_dcid(conn, icid);
4948 	if (!chan) {
4949 		rsp.icid = cpu_to_le16(icid);
4950 		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4951 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4952 			       sizeof(rsp), &rsp);
4953 		return 0;
4954 	}
4955 
4956 	chan->ident = cmd->ident;
4957 
4958 	if (chan->scid < L2CAP_CID_DYN_START ||
4959 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4960 	    (chan->mode != L2CAP_MODE_ERTM &&
4961 	     chan->mode != L2CAP_MODE_STREAMING)) {
4962 		result = L2CAP_MR_NOT_ALLOWED;
4963 		goto send_move_response;
4964 	}
4965 
4966 	if (chan->local_amp_id == req->dest_amp_id) {
4967 		result = L2CAP_MR_SAME_ID;
4968 		goto send_move_response;
4969 	}
4970 
4971 	if (req->dest_amp_id != AMP_ID_BREDR) {
4972 		struct hci_dev *hdev;
4973 		hdev = hci_dev_get(req->dest_amp_id);
4974 		if (!hdev || hdev->dev_type != HCI_AMP ||
4975 		    !test_bit(HCI_UP, &hdev->flags)) {
4976 			if (hdev)
4977 				hci_dev_put(hdev);
4978 
4979 			result = L2CAP_MR_BAD_ID;
4980 			goto send_move_response;
4981 		}
4982 		hci_dev_put(hdev);
4983 	}
4984 
4985 	/* Detect a move collision.  The request from the device with the
4986 	 * larger bd_addr takes precedence, so reject the incoming request
4987 	 * with a collision response if our address is larger.
4988 	 */
4989 	if ((__chan_is_moving(chan) ||
4990 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4991 	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4992 		result = L2CAP_MR_COLLISION;
4993 		goto send_move_response;
4994 	}
4995 
4996 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4997 	l2cap_move_setup(chan);
4998 	chan->move_id = req->dest_amp_id;
4999 	icid = chan->dcid;
5000 
5001 	if (req->dest_amp_id == AMP_ID_BREDR) {
5002 		/* Moving to BR/EDR */
5003 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5004 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5005 			result = L2CAP_MR_PEND;
5006 		} else {
5007 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5008 			result = L2CAP_MR_SUCCESS;
5009 		}
5010 	} else {
5011 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5012 		/* Placeholder - uncomment when amp functions are available */
5013 		/*amp_accept_physical(chan, req->dest_amp_id);*/
5014 		result = L2CAP_MR_PEND;
5015 	}
5016 
5017 send_move_response:
5018 	l2cap_send_move_chan_rsp(chan, result);
5019 
5020 	l2cap_chan_unlock(chan);
5021 
5022 	return 0;
5023 }
5024 
5025 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5026 {
5027 	struct l2cap_chan *chan;
5028 	struct hci_chan *hchan = NULL;
5029 
5030 	chan = l2cap_get_chan_by_scid(conn, icid);
5031 	if (!chan) {
5032 		l2cap_send_move_chan_cfm_icid(conn, icid);
5033 		return;
5034 	}
5035 
5036 	__clear_chan_timer(chan);
5037 	if (result == L2CAP_MR_PEND)
5038 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5039 
5040 	switch (chan->move_state) {
5041 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5042 		/* Move confirm will be sent when logical link
5043 		 * is complete.
5044 		 */
5045 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5046 		break;
5047 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5048 		if (result == L2CAP_MR_PEND) {
5049 			break;
5050 		} else if (test_bit(CONN_LOCAL_BUSY,
5051 				    &chan->conn_state)) {
5052 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5053 		} else {
5054 			/* Logical link is up or moving to BR/EDR,
5055 			 * proceed with move
5056 			 */
5057 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5058 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5059 		}
5060 		break;
5061 	case L2CAP_MOVE_WAIT_RSP:
5062 		/* Moving to AMP */
5063 		if (result == L2CAP_MR_SUCCESS) {
5064 			/* Remote is ready, send confirm immediately
5065 			 * after logical link is ready
5066 			 */
5067 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5068 		} else {
5069 			/* Both logical link and move success
5070 			 * are required to confirm
5071 			 */
5072 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5073 		}
5074 
5075 		/* Placeholder - get hci_chan for logical link */
5076 		if (!hchan) {
5077 			/* Logical link not available */
5078 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5079 			break;
5080 		}
5081 
5082 		/* If the logical link is not yet connected, do not
5083 		 * send confirmation.
5084 		 */
5085 		if (hchan->state != BT_CONNECTED)
5086 			break;
5087 
5088 		/* Logical link is already ready to go */
5089 
5090 		chan->hs_hcon = hchan->conn;
5091 		chan->hs_hcon->l2cap_data = chan->conn;
5092 
5093 		if (result == L2CAP_MR_SUCCESS) {
5094 			/* Can confirm now */
5095 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5096 		} else {
5097 			/* Now only need move success
5098 			 * to confirm
5099 			 */
5100 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5101 		}
5102 
5103 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5104 		break;
5105 	default:
5106 		/* Any other amp move state means the move failed. */
5107 		chan->move_id = chan->local_amp_id;
5108 		l2cap_move_done(chan);
5109 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5110 	}
5111 
5112 	l2cap_chan_unlock(chan);
5113 }
5114 
5115 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5116 			    u16 result)
5117 {
5118 	struct l2cap_chan *chan;
5119 
5120 	chan = l2cap_get_chan_by_ident(conn, ident);
5121 	if (!chan) {
5122 		/* Could not locate channel, icid is best guess */
5123 		l2cap_send_move_chan_cfm_icid(conn, icid);
5124 		return;
5125 	}
5126 
5127 	__clear_chan_timer(chan);
5128 
5129 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5130 		if (result == L2CAP_MR_COLLISION) {
5131 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5132 		} else {
5133 			/* Cleanup - cancel move */
5134 			chan->move_id = chan->local_amp_id;
5135 			l2cap_move_done(chan);
5136 		}
5137 	}
5138 
5139 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5140 
5141 	l2cap_chan_unlock(chan);
5142 }
5143 
5144 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5145 				  struct l2cap_cmd_hdr *cmd,
5146 				  u16 cmd_len, void *data)
5147 {
5148 	struct l2cap_move_chan_rsp *rsp = data;
5149 	u16 icid, result;
5150 
5151 	if (cmd_len != sizeof(*rsp))
5152 		return -EPROTO;
5153 
5154 	icid = le16_to_cpu(rsp->icid);
5155 	result = le16_to_cpu(rsp->result);
5156 
5157 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5158 
5159 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5160 		l2cap_move_continue(conn, icid, result);
5161 	else
5162 		l2cap_move_fail(conn, cmd->ident, icid, result);
5163 
5164 	return 0;
5165 }
5166 
5167 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5168 				      struct l2cap_cmd_hdr *cmd,
5169 				      u16 cmd_len, void *data)
5170 {
5171 	struct l2cap_move_chan_cfm *cfm = data;
5172 	struct l2cap_chan *chan;
5173 	u16 icid, result;
5174 
5175 	if (cmd_len != sizeof(*cfm))
5176 		return -EPROTO;
5177 
5178 	icid = le16_to_cpu(cfm->icid);
5179 	result = le16_to_cpu(cfm->result);
5180 
5181 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5182 
5183 	chan = l2cap_get_chan_by_dcid(conn, icid);
5184 	if (!chan) {
5185 		/* Spec requires a response even if the icid was not found */
5186 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5187 		return 0;
5188 	}
5189 
5190 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5191 		if (result == L2CAP_MC_CONFIRMED) {
5192 			chan->local_amp_id = chan->move_id;
5193 			if (chan->local_amp_id == AMP_ID_BREDR)
5194 				__release_logical_link(chan);
5195 		} else {
5196 			chan->move_id = chan->local_amp_id;
5197 		}
5198 
5199 		l2cap_move_done(chan);
5200 	}
5201 
5202 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5203 
5204 	l2cap_chan_unlock(chan);
5205 
5206 	return 0;
5207 }
5208 
5209 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5210 						 struct l2cap_cmd_hdr *cmd,
5211 						 u16 cmd_len, void *data)
5212 {
5213 	struct l2cap_move_chan_cfm_rsp *rsp = data;
5214 	struct l2cap_chan *chan;
5215 	u16 icid;
5216 
5217 	if (cmd_len != sizeof(*rsp))
5218 		return -EPROTO;
5219 
5220 	icid = le16_to_cpu(rsp->icid);
5221 
5222 	BT_DBG("icid 0x%4.4x", icid);
5223 
5224 	chan = l2cap_get_chan_by_scid(conn, icid);
5225 	if (!chan)
5226 		return 0;
5227 
5228 	__clear_chan_timer(chan);
5229 
5230 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5231 		chan->local_amp_id = chan->move_id;
5232 
5233 		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5234 			__release_logical_link(chan);
5235 
5236 		l2cap_move_done(chan);
5237 	}
5238 
5239 	l2cap_chan_unlock(chan);
5240 
5241 	return 0;
5242 }
5243 
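/* Handle an LE Connection Parameter Update Request. Only the master
 * (central) side may act on it: the requested interval, latency and
 * supervision timeout are validated with hci_check_conn_params(), the
 * peer is told whether they were accepted, and accepted values are
 * pushed to the controller and reported to the management interface.
 */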
5244 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5245 					      struct l2cap_cmd_hdr *cmd,
5246 					      u16 cmd_len, u8 *data)
5247 {
5248 	struct hci_conn *hcon = conn->hcon;
5249 	struct l2cap_conn_param_update_req *req;
5250 	struct l2cap_conn_param_update_rsp rsp;
5251 	u16 min, max, latency, to_multiplier;
5252 	int err;
5253 
5254 	if (hcon->role != HCI_ROLE_MASTER)
5255 		return -EINVAL;
5256 
5257 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5258 		return -EPROTO;
5259 
5260 	req = (struct l2cap_conn_param_update_req *) data;
5261 	min		= __le16_to_cpu(req->min);
5262 	max		= __le16_to_cpu(req->max);
5263 	latency		= __le16_to_cpu(req->latency);
5264 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5265 
5266 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5267 	       min, max, latency, to_multiplier);
5268 
5269 	memset(&rsp, 0, sizeof(rsp));
5270 
5271 	err = hci_check_conn_params(min, max, latency, to_multiplier);
5272 	if (err)
5273 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5274 	else
5275 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5276 
5277 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5278 		       sizeof(rsp), &rsp);
5279 
5280 	if (!err) {
5281 		u8 store_hint;
5282 
5283 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5284 						to_multiplier);
5285 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5286 				    store_hint, min, max, latency,
5287 				    to_multiplier);
5288 
5289 	}
5290 
5291 	return 0;
5292 }
5293 
5294 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5295 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5296 				u8 *data)
5297 {
5298 	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5299 	struct hci_conn *hcon = conn->hcon;
5300 	u16 dcid, mtu, mps, credits, result;
5301 	struct l2cap_chan *chan;
5302 	int err, sec_level;
5303 
5304 	if (cmd_len < sizeof(*rsp))
5305 		return -EPROTO;
5306 
5307 	dcid    = __le16_to_cpu(rsp->dcid);
5308 	mtu     = __le16_to_cpu(rsp->mtu);
5309 	mps     = __le16_to_cpu(rsp->mps);
5310 	credits = __le16_to_cpu(rsp->credits);
5311 	result  = __le16_to_cpu(rsp->result);
5312 
5313 	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
5314 					   dcid < L2CAP_CID_DYN_START ||
5315 					   dcid > L2CAP_CID_LE_DYN_END))
5316 		return -EPROTO;
5317 
5318 	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5319 	       dcid, mtu, mps, credits, result);
5320 
5321 	mutex_lock(&conn->chan_lock);
5322 
5323 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5324 	if (!chan) {
5325 		err = -EBADSLT;
5326 		goto unlock;
5327 	}
5328 
5329 	err = 0;
5330 
5331 	l2cap_chan_lock(chan);
5332 
5333 	switch (result) {
5334 	case L2CAP_CR_LE_SUCCESS:
5335 		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5336 			err = -EBADSLT;
5337 			break;
5338 		}
5339 
5340 		chan->ident = 0;
5341 		chan->dcid = dcid;
5342 		chan->omtu = mtu;
5343 		chan->remote_mps = mps;
5344 		chan->tx_credits = credits;
5345 		l2cap_chan_ready(chan);
5346 		break;
5347 
5348 	case L2CAP_CR_LE_AUTHENTICATION:
5349 	case L2CAP_CR_LE_ENCRYPTION:
5350 		/* If we already have MITM protection we can't do
5351 		 * anything.
5352 		 */
5353 		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5354 			l2cap_chan_del(chan, ECONNREFUSED);
5355 			break;
5356 		}
5357 
5358 		sec_level = hcon->sec_level + 1;
5359 		if (chan->sec_level < sec_level)
5360 			chan->sec_level = sec_level;
5361 
5362 		/* We'll need to send a new Connect Request */
5363 		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5364 
5365 		smp_conn_security(hcon, chan->sec_level);
5366 		break;
5367 
5368 	default:
5369 		l2cap_chan_del(chan, ECONNREFUSED);
5370 		break;
5371 	}
5372 
5373 	l2cap_chan_unlock(chan);
5374 
5375 unlock:
5376 	mutex_unlock(&conn->chan_lock);
5377 
5378 	return err;
5379 }
5380 
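/* Dispatch one BR/EDR signaling command to its handler. Note that only
 * the handlers for request-type PDUs propagate errors back to the
 * caller (which then sends a Command Reject); errors from response
 * handlers are deliberately ignored, presumably so that a malformed
 * response is never answered with yet another signaling packet.
 */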
5381 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5382 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5383 				      u8 *data)
5384 {
5385 	int err = 0;
5386 
5387 	switch (cmd->code) {
5388 	case L2CAP_COMMAND_REJ:
5389 		l2cap_command_rej(conn, cmd, cmd_len, data);
5390 		break;
5391 
5392 	case L2CAP_CONN_REQ:
5393 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5394 		break;
5395 
5396 	case L2CAP_CONN_RSP:
5397 	case L2CAP_CREATE_CHAN_RSP:
5398 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5399 		break;
5400 
5401 	case L2CAP_CONF_REQ:
5402 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5403 		break;
5404 
5405 	case L2CAP_CONF_RSP:
5406 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5407 		break;
5408 
5409 	case L2CAP_DISCONN_REQ:
5410 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5411 		break;
5412 
5413 	case L2CAP_DISCONN_RSP:
5414 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5415 		break;
5416 
5417 	case L2CAP_ECHO_REQ:
5418 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5419 		break;
5420 
5421 	case L2CAP_ECHO_RSP:
5422 		break;
5423 
5424 	case L2CAP_INFO_REQ:
5425 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5426 		break;
5427 
5428 	case L2CAP_INFO_RSP:
5429 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5430 		break;
5431 
5432 	case L2CAP_CREATE_CHAN_REQ:
5433 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5434 		break;
5435 
5436 	case L2CAP_MOVE_CHAN_REQ:
5437 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5438 		break;
5439 
5440 	case L2CAP_MOVE_CHAN_RSP:
5441 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5442 		break;
5443 
5444 	case L2CAP_MOVE_CHAN_CFM:
5445 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5446 		break;
5447 
5448 	case L2CAP_MOVE_CHAN_CFM_RSP:
5449 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5450 		break;
5451 
5452 	default:
5453 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5454 		err = -EINVAL;
5455 		break;
5456 	}
5457 
5458 	return err;
5459 }
5460 
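/* Handle an LE Credit Based Connection Request. The minimum MTU and MPS
 * for LE CoC is 23 bytes; smaller values are treated as a protocol
 * error. A listening channel for the PSM must exist, the link must meet
 * its security requirements, and the peer's source CID must be a free
 * LE dynamic CID. The new channel inherits the peer's MTU/MPS/credits
 * and, unless setup is deferred to userspace, is immediately marked
 * ready and answered with an LE Connection Response.
 */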
5461 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5462 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5463 				u8 *data)
5464 {
5465 	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5466 	struct l2cap_le_conn_rsp rsp;
5467 	struct l2cap_chan *chan, *pchan;
5468 	u16 dcid, scid, credits, mtu, mps;
5469 	__le16 psm;
5470 	u8 result;
5471 
5472 	if (cmd_len != sizeof(*req))
5473 		return -EPROTO;
5474 
5475 	scid = __le16_to_cpu(req->scid);
5476 	mtu  = __le16_to_cpu(req->mtu);
5477 	mps  = __le16_to_cpu(req->mps);
5478 	psm  = req->psm;
5479 	dcid = 0;
5480 	credits = 0;
5481 
5482 	if (mtu < 23 || mps < 23)
5483 		return -EPROTO;
5484 
5485 	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5486 	       scid, mtu, mps);
5487 
5488 	/* Check if we have a socket listening on this PSM */
5489 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5490 					 &conn->hcon->dst, LE_LINK);
5491 	if (!pchan) {
5492 		result = L2CAP_CR_LE_BAD_PSM;
5493 		chan = NULL;
5494 		goto response;
5495 	}
5496 
5497 	mutex_lock(&conn->chan_lock);
5498 	l2cap_chan_lock(pchan);
5499 
5500 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5501 				     SMP_ALLOW_STK)) {
5502 		result = L2CAP_CR_LE_AUTHENTICATION;
5503 		chan = NULL;
5504 		goto response_unlock;
5505 	}
5506 
5507 	/* Check for valid dynamic CID range */
5508 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5509 		result = L2CAP_CR_LE_INVALID_SCID;
5510 		chan = NULL;
5511 		goto response_unlock;
5512 	}
5513 
5514 	/* Check if we already have a channel with that dcid */
5515 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
5516 		result = L2CAP_CR_LE_SCID_IN_USE;
5517 		chan = NULL;
5518 		goto response_unlock;
5519 	}
5520 
5521 	chan = pchan->ops->new_connection(pchan);
5522 	if (!chan) {
5523 		result = L2CAP_CR_LE_NO_MEM;
5524 		goto response_unlock;
5525 	}
5526 
5527 	bacpy(&chan->src, &conn->hcon->src);
5528 	bacpy(&chan->dst, &conn->hcon->dst);
5529 	chan->src_type = bdaddr_src_type(conn->hcon);
5530 	chan->dst_type = bdaddr_dst_type(conn->hcon);
5531 	chan->psm  = psm;
5532 	chan->dcid = scid;
5533 	chan->omtu = mtu;
5534 	chan->remote_mps = mps;
5535 	chan->tx_credits = __le16_to_cpu(req->credits);
5536 
5537 	__l2cap_chan_add(conn, chan);
5538 
5539 	l2cap_le_flowctl_init(chan);
5540 
5541 	dcid = chan->scid;
5542 	credits = chan->rx_credits;
5543 
5544 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5545 
5546 	chan->ident = cmd->ident;
5547 
5548 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5549 		l2cap_state_change(chan, BT_CONNECT2);
5550 		/* The following result value is actually not defined
5551 		 * for LE CoC but we use it to let the function know
5552 		 * that it should bail out after doing its cleanup
5553 		 * instead of sending a response.
5554 		 */
5555 		result = L2CAP_CR_PEND;
5556 		chan->ops->defer(chan);
5557 	} else {
5558 		l2cap_chan_ready(chan);
5559 		result = L2CAP_CR_LE_SUCCESS;
5560 	}
5561 
5562 response_unlock:
5563 	l2cap_chan_unlock(pchan);
5564 	mutex_unlock(&conn->chan_lock);
5565 	l2cap_chan_put(pchan);
5566 
5567 	if (result == L2CAP_CR_PEND)
5568 		return 0;
5569 
5570 response:
5571 	if (chan) {
5572 		rsp.mtu = cpu_to_le16(chan->imtu);
5573 		rsp.mps = cpu_to_le16(chan->mps);
5574 	} else {
5575 		rsp.mtu = 0;
5576 		rsp.mps = 0;
5577 	}
5578 
5579 	rsp.dcid    = cpu_to_le16(dcid);
5580 	rsp.credits = cpu_to_le16(credits);
5581 	rsp.result  = cpu_to_le16(result);
5582 
5583 	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5584 
5585 	return 0;
5586 }
5587 
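/* Handle an LE Flow Control Credit packet:
 *
 *	struct l2cap_le_credits {
 *		__le16	cid;
 *		__le16	credits;
 *	} __packed;
 *
 * Credits are additive, but the running total must never exceed
 * LE_FLOWCTL_MAX_CREDITS (65535); a peer that would overflow the
 * counter gets the channel disconnected. Newly granted credits
 * immediately resume any transmission stalled waiting for them.
 */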
5588 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5589 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5590 				   u8 *data)
5591 {
5592 	struct l2cap_le_credits *pkt;
5593 	struct l2cap_chan *chan;
5594 	u16 cid, credits, max_credits;
5595 
5596 	if (cmd_len != sizeof(*pkt))
5597 		return -EPROTO;
5598 
5599 	pkt = (struct l2cap_le_credits *) data;
5600 	cid	= __le16_to_cpu(pkt->cid);
5601 	credits	= __le16_to_cpu(pkt->credits);
5602 
5603 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5604 
5605 	chan = l2cap_get_chan_by_dcid(conn, cid);
5606 	if (!chan)
5607 		return -EBADSLT;
5608 
5609 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5610 	if (credits > max_credits) {
5611 		BT_ERR("LE credits overflow");
5612 		l2cap_send_disconn_req(chan, ECONNRESET);
5613 		l2cap_chan_unlock(chan);
5614 
5615 		/* Return 0 so that we don't trigger an unnecessary
5616 		 * command reject packet.
5617 		 */
5618 		return 0;
5619 	}
5620 
5621 	chan->tx_credits += credits;
5622 
5623 	/* Resume sending */
5624 	l2cap_le_flowctl_send(chan);
5625 
5626 	if (chan->tx_credits)
5627 		chan->ops->resume(chan);
5628 
5629 	l2cap_chan_unlock(chan);
5630 
5631 	return 0;
5632 }
5633 
5634 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5635 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5636 				       u8 *data)
5637 {
5638 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5639 	struct l2cap_chan *chan;
5640 
5641 	if (cmd_len < sizeof(*rej))
5642 		return -EPROTO;
5643 
5644 	mutex_lock(&conn->chan_lock);
5645 
5646 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5647 	if (!chan)
5648 		goto done;
5649 
5650 	l2cap_chan_lock(chan);
5651 	l2cap_chan_del(chan, ECONNREFUSED);
5652 	l2cap_chan_unlock(chan);
5653 
5654 done:
5655 	mutex_unlock(&conn->chan_lock);
5656 	return 0;
5657 }
5658 
5659 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5660 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5661 				   u8 *data)
5662 {
5663 	int err = 0;
5664 
5665 	switch (cmd->code) {
5666 	case L2CAP_COMMAND_REJ:
5667 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
5668 		break;
5669 
5670 	case L2CAP_CONN_PARAM_UPDATE_REQ:
5671 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5672 		break;
5673 
5674 	case L2CAP_CONN_PARAM_UPDATE_RSP:
5675 		break;
5676 
5677 	case L2CAP_LE_CONN_RSP:
5678 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5679 		break;
5680 
5681 	case L2CAP_LE_CONN_REQ:
5682 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5683 		break;
5684 
5685 	case L2CAP_LE_CREDITS:
5686 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
5687 		break;
5688 
5689 	case L2CAP_DISCONN_REQ:
5690 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5691 		break;
5692 
5693 	case L2CAP_DISCONN_RSP:
5694 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5695 		break;
5696 
5697 	default:
5698 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5699 		err = -EINVAL;
5700 		break;
5701 	}
5702 
5703 	return err;
5704 }
5705 
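/* Handle the LE signaling channel (CID 0x0005). In contrast to BR/EDR,
 * an LE signaling PDU carries exactly one command, so the length in the
 * command header must match the remaining data exactly; anything else
 * is treated as a corrupted command and dropped.
 */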
5706 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5707 					struct sk_buff *skb)
5708 {
5709 	struct hci_conn *hcon = conn->hcon;
5710 	struct l2cap_cmd_hdr *cmd;
5711 	u16 len;
5712 	int err;
5713 
5714 	if (hcon->type != LE_LINK)
5715 		goto drop;
5716 
5717 	if (skb->len < L2CAP_CMD_HDR_SIZE)
5718 		goto drop;
5719 
5720 	cmd = (void *) skb->data;
5721 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5722 
5723 	len = le16_to_cpu(cmd->len);
5724 
5725 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5726 
5727 	if (len != skb->len || !cmd->ident) {
5728 		BT_DBG("corrupted command");
5729 		goto drop;
5730 	}
5731 
5732 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5733 	if (err) {
5734 		struct l2cap_cmd_rej_unk rej;
5735 
5736 		BT_ERR("Wrong link type (%d)", err);
5737 
5738 		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5739 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5740 			       sizeof(rej), &rej);
5741 	}
5742 
5743 drop:
5744 	kfree_skb(skb);
5745 }
5746 
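/* Handle the BR/EDR signaling channel (CID 0x0001). Unlike LE, a single
 * C-frame may carry several commands back to back, each prefixed by a
 * 4-byte command header:
 *
 *	struct l2cap_cmd_hdr {
 *		__u8	code;
 *		__u8	ident;
 *		__le16	len;
 *	} __packed;
 *
 * so the buffer is walked command by command. Commands whose handler
 * fails are answered with a "command not understood" reject, and a
 * command that claims more data than remains, or one with a zero
 * identifier, stops processing of the frame.
 */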
5747 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5748 				     struct sk_buff *skb)
5749 {
5750 	struct hci_conn *hcon = conn->hcon;
5751 	u8 *data = skb->data;
5752 	int len = skb->len;
5753 	struct l2cap_cmd_hdr cmd;
5754 	int err;
5755 
5756 	l2cap_raw_recv(conn, skb);
5757 
5758 	if (hcon->type != ACL_LINK)
5759 		goto drop;
5760 
5761 	while (len >= L2CAP_CMD_HDR_SIZE) {
5762 		u16 cmd_len;
5763 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5764 		data += L2CAP_CMD_HDR_SIZE;
5765 		len  -= L2CAP_CMD_HDR_SIZE;
5766 
5767 		cmd_len = le16_to_cpu(cmd.len);
5768 
5769 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5770 		       cmd.ident);
5771 
5772 		if (cmd_len > len || !cmd.ident) {
5773 			BT_DBG("corrupted command");
5774 			break;
5775 		}
5776 
5777 		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5778 		if (err) {
5779 			struct l2cap_cmd_rej_unk rej;
5780 
5781 			BT_ERR("Wrong link type (%d)", err);
5782 
5783 			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5784 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5785 				       sizeof(rej), &rej);
5786 		}
5787 
5788 		data += cmd_len;
5789 		len  -= cmd_len;
5790 	}
5791 
5792 drop:
5793 	kfree_skb(skb);
5794 }
5795 
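/* Verify the frame check sequence of an incoming ERTM or streaming mode
 * frame and strip it from the skb. The CRC-16 covers the L2CAP header
 * and payload, so it is computed from the start of the (enhanced or
 * extended) header up to, but not including, the two FCS octets at the
 * tail. A mismatch is reported as -EBADMSG.
 */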
5796 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
5797 {
5798 	u16 our_fcs, rcv_fcs;
5799 	int hdr_size;
5800 
5801 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5802 		hdr_size = L2CAP_EXT_HDR_SIZE;
5803 	else
5804 		hdr_size = L2CAP_ENH_HDR_SIZE;
5805 
5806 	if (chan->fcs == L2CAP_FCS_CRC16) {
5807 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5808 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5809 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5810 
5811 		if (our_fcs != rcv_fcs)
5812 			return -EBADMSG;
5813 	}
5814 	return 0;
5815 }
5816 
5817 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
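/* Report the local receiver state with the F-bit set: send an RNR
 * S-frame while the local side is busy, flush any pending I-frames,
 * and if none of them ended up carrying the F-bit, follow up with an
 * RR S-frame so the peer's poll is still answered.
 */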
5818 {
5819 	struct l2cap_ctrl control;
5820 
5821 	BT_DBG("chan %p", chan);
5822 
5823 	memset(&control, 0, sizeof(control));
5824 	control.sframe = 1;
5825 	control.final = 1;
5826 	control.reqseq = chan->buffer_seq;
5827 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
5828 
5829 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5830 		control.super = L2CAP_SUPER_RNR;
5831 		l2cap_send_sframe(chan, &control);
5832 	}
5833 
5834 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5835 	    chan->unacked_frames > 0)
5836 		__set_retrans_timer(chan);
5837 
5838 	/* Send pending iframes */
5839 	l2cap_ertm_send(chan);
5840 
5841 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5842 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5843 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
5844 		 * send it now.
5845 		 */
5846 		control.super = L2CAP_SUPER_RR;
5847 		l2cap_send_sframe(chan, &control);
5848 	}
5849 }
5850 
5851 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5852 			    struct sk_buff **last_frag)
5853 {
5854 	/* skb->len reflects data in skb as well as all fragments
5855 	 * skb->data_len reflects only data in fragments
5856 	 */
5857 	if (!skb_has_frag_list(skb))
5858 		skb_shinfo(skb)->frag_list = new_frag;
5859 
5860 	new_frag->next = NULL;
5861 
5862 	(*last_frag)->next = new_frag;
5863 	*last_frag = new_frag;
5864 
5865 	skb->len += new_frag->len;
5866 	skb->data_len += new_frag->len;
5867 	skb->truesize += new_frag->truesize;
5868 }
5869 
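/* Reassemble an SDU from ERTM/streaming I-frames according to the SAR
 * bits: an unsegmented frame is delivered directly, a start fragment
 * carries the total SDU length and opens chan->sdu, continuation and
 * end fragments are appended with append_skb_frag(), and the complete
 * SDU is handed to ops->recv() once its length matches. Any violation
 * of this sequence (unexpected SAR value, oversized or short SDU)
 * drops the partial SDU and returns an error so the caller can react.
 */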
5870 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5871 				struct l2cap_ctrl *control)
5872 {
5873 	int err = -EINVAL;
5874 
5875 	switch (control->sar) {
5876 	case L2CAP_SAR_UNSEGMENTED:
5877 		if (chan->sdu)
5878 			break;
5879 
5880 		err = chan->ops->recv(chan, skb);
5881 		break;
5882 
5883 	case L2CAP_SAR_START:
5884 		if (chan->sdu)
5885 			break;
5886 
5887 		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
5888 			break;
5889 
5890 		chan->sdu_len = get_unaligned_le16(skb->data);
5891 		skb_pull(skb, L2CAP_SDULEN_SIZE);
5892 
5893 		if (chan->sdu_len > chan->imtu) {
5894 			err = -EMSGSIZE;
5895 			break;
5896 		}
5897 
5898 		if (skb->len >= chan->sdu_len)
5899 			break;
5900 
5901 		chan->sdu = skb;
5902 		chan->sdu_last_frag = skb;
5903 
5904 		skb = NULL;
5905 		err = 0;
5906 		break;
5907 
5908 	case L2CAP_SAR_CONTINUE:
5909 		if (!chan->sdu)
5910 			break;
5911 
5912 		append_skb_frag(chan->sdu, skb,
5913 				&chan->sdu_last_frag);
5914 		skb = NULL;
5915 
5916 		if (chan->sdu->len >= chan->sdu_len)
5917 			break;
5918 
5919 		err = 0;
5920 		break;
5921 
5922 	case L2CAP_SAR_END:
5923 		if (!chan->sdu)
5924 			break;
5925 
5926 		append_skb_frag(chan->sdu, skb,
5927 				&chan->sdu_last_frag);
5928 		skb = NULL;
5929 
5930 		if (chan->sdu->len != chan->sdu_len)
5931 			break;
5932 
5933 		err = chan->ops->recv(chan, chan->sdu);
5934 
5935 		if (!err) {
5936 			/* Reassembly complete */
5937 			chan->sdu = NULL;
5938 			chan->sdu_last_frag = NULL;
5939 			chan->sdu_len = 0;
5940 		}
5941 		break;
5942 	}
5943 
5944 	if (err) {
5945 		kfree_skb(skb);
5946 		kfree_skb(chan->sdu);
5947 		chan->sdu = NULL;
5948 		chan->sdu_last_frag = NULL;
5949 		chan->sdu_len = 0;
5950 	}
5951 
5952 	return err;
5953 }
5954 
5955 static int l2cap_resegment(struct l2cap_chan *chan)
5956 {
5957 	/* Placeholder */
5958 	return 0;
5959 }
5960 
5961 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5962 {
5963 	u8 event;
5964 
5965 	if (chan->mode != L2CAP_MODE_ERTM)
5966 		return;
5967 
5968 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5969 	l2cap_tx(chan, NULL, NULL, event);
5970 }
5971 
5972 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5973 {
5974 	int err = 0;
5975 	/* Pass sequential frames to l2cap_reassemble_sdu()
5976 	 * until a gap is encountered.
5977 	 */
5978 
5979 	BT_DBG("chan %p", chan);
5980 
5981 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5982 		struct sk_buff *skb;
5983 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
5984 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
5985 
5986 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5987 
5988 		if (!skb)
5989 			break;
5990 
5991 		skb_unlink(skb, &chan->srej_q);
5992 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5993 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
5994 		if (err)
5995 			break;
5996 	}
5997 
5998 	if (skb_queue_empty(&chan->srej_q)) {
5999 		chan->rx_state = L2CAP_RX_STATE_RECV;
6000 		l2cap_send_ack(chan);
6001 	}
6002 
6003 	return err;
6004 }
6005 
6006 static void l2cap_handle_srej(struct l2cap_chan *chan,
6007 			      struct l2cap_ctrl *control)
6008 {
6009 	struct sk_buff *skb;
6010 
6011 	BT_DBG("chan %p, control %p", chan, control);
6012 
6013 	if (control->reqseq == chan->next_tx_seq) {
6014 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6015 		l2cap_send_disconn_req(chan, ECONNRESET);
6016 		return;
6017 	}
6018 
6019 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6020 
6021 	if (skb == NULL) {
6022 		BT_DBG("Seq %d not available for retransmission",
6023 		       control->reqseq);
6024 		return;
6025 	}
6026 
6027 	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6028 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6029 		l2cap_send_disconn_req(chan, ECONNRESET);
6030 		return;
6031 	}
6032 
6033 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6034 
6035 	if (control->poll) {
6036 		l2cap_pass_to_tx(chan, control);
6037 
6038 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
6039 		l2cap_retransmit(chan, control);
6040 		l2cap_ertm_send(chan);
6041 
6042 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6043 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
6044 			chan->srej_save_reqseq = control->reqseq;
6045 		}
6046 	} else {
6047 		l2cap_pass_to_tx_fbit(chan, control);
6048 
6049 		if (control->final) {
6050 			if (chan->srej_save_reqseq != control->reqseq ||
6051 			    !test_and_clear_bit(CONN_SREJ_ACT,
6052 						&chan->conn_state))
6053 				l2cap_retransmit(chan, control);
6054 		} else {
6055 			l2cap_retransmit(chan, control);
6056 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6057 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
6058 				chan->srej_save_reqseq = control->reqseq;
6059 			}
6060 		}
6061 	}
6062 }
6063 
6064 static void l2cap_handle_rej(struct l2cap_chan *chan,
6065 			     struct l2cap_ctrl *control)
6066 {
6067 	struct sk_buff *skb;
6068 
6069 	BT_DBG("chan %p, control %p", chan, control);
6070 
6071 	if (control->reqseq == chan->next_tx_seq) {
6072 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6073 		l2cap_send_disconn_req(chan, ECONNRESET);
6074 		return;
6075 	}
6076 
6077 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6078 
6079 	if (chan->max_tx && skb &&
6080 	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6081 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6082 		l2cap_send_disconn_req(chan, ECONNRESET);
6083 		return;
6084 	}
6085 
6086 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6087 
6088 	l2cap_pass_to_tx(chan, control);
6089 
6090 	if (control->final) {
6091 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6092 			l2cap_retransmit_all(chan, control);
6093 	} else {
6094 		l2cap_retransmit_all(chan, control);
6095 		l2cap_ertm_send(chan);
6096 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6097 			set_bit(CONN_REJ_ACT, &chan->conn_state);
6098 	}
6099 }
6100 
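/* Classify the TxSeq of a received I-frame relative to the receive
 * window; all comparisons are modulo the sequence space via
 * __seq_offset().  As an illustrative example in the normal RECV
 * state, assume tx_win_max 63, tx_win 32, last_acked_seq 10 and
 * expected_tx_seq 12: txseq 12 is EXPECTED, txseq 11 is a DUPLICATE,
 * txseq 15 is UNEXPECTED (12-14 are missing and will be SREJ'd), and
 * txseq 50 is INVALID_IGNORE because it falls outside the window and
 * tx_win does not exceed half of the sequence space.
 */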
6101 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6102 {
6103 	BT_DBG("chan %p, txseq %d", chan, txseq);
6104 
6105 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6106 	       chan->expected_tx_seq);
6107 
6108 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6109 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6110 		    chan->tx_win) {
6111 			/* See notes below regarding "double poll" and
6112 			 * invalid packets.
6113 			 */
6114 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6115 				BT_DBG("Invalid/Ignore - after SREJ");
6116 				return L2CAP_TXSEQ_INVALID_IGNORE;
6117 			} else {
6118 				BT_DBG("Invalid - in window after SREJ sent");
6119 				return L2CAP_TXSEQ_INVALID;
6120 			}
6121 		}
6122 
6123 		if (chan->srej_list.head == txseq) {
6124 			BT_DBG("Expected SREJ");
6125 			return L2CAP_TXSEQ_EXPECTED_SREJ;
6126 		}
6127 
6128 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6129 			BT_DBG("Duplicate SREJ - txseq already stored");
6130 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
6131 		}
6132 
6133 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6134 			BT_DBG("Unexpected SREJ - not requested");
6135 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6136 		}
6137 	}
6138 
6139 	if (chan->expected_tx_seq == txseq) {
6140 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6141 		    chan->tx_win) {
6142 			BT_DBG("Invalid - txseq outside tx window");
6143 			return L2CAP_TXSEQ_INVALID;
6144 		} else {
6145 			BT_DBG("Expected");
6146 			return L2CAP_TXSEQ_EXPECTED;
6147 		}
6148 	}
6149 
6150 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6151 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6152 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
6153 		return L2CAP_TXSEQ_DUPLICATE;
6154 	}
6155 
6156 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6157 		/* A source of invalid packets is a "double poll" condition,
6158 		 * where delays cause us to send multiple poll packets.  If
6159 		 * the remote stack receives and processes both polls,
6160 		 * sequence numbers can wrap around in such a way that a
6161 		 * resent frame has a sequence number that looks like new data
6162 		 * with a sequence gap.  This would trigger an erroneous SREJ
6163 		 * request.
6164 		 *
6165 		 * Fortunately, this is impossible with a tx window no larger
6166 		 * than half of the sequence-number space, which allows
6167 		 * invalid frames to be safely ignored.
6168 		 *
6169 		 * With a tx window larger than half of the sequence-number
6170 		 * space, the frame is invalid and cannot be ignored.  This
6171 		 * causes a disconnect.
6172 		 */
6173 
6174 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6175 			BT_DBG("Invalid/Ignore - txseq outside tx window");
6176 			return L2CAP_TXSEQ_INVALID_IGNORE;
6177 		} else {
6178 			BT_DBG("Invalid - txseq outside tx window");
6179 			return L2CAP_TXSEQ_INVALID;
6180 		}
6181 	} else {
6182 		BT_DBG("Unexpected - txseq indicates missing frames");
6183 		return L2CAP_TXSEQ_UNEXPECTED;
6184 	}
6185 }
6186 
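/* ERTM receive state machine, RECV state: an expected I-frame is
 * reassembled and acked, a sequence gap queues the frame on srej_q,
 * triggers an SREJ and moves to the SREJ_SENT state, and S-frames
 * (RR, RNR, REJ, SREJ) update the transmit side accordingly.
 */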
6187 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6188 			       struct l2cap_ctrl *control,
6189 			       struct sk_buff *skb, u8 event)
6190 {
6191 	int err = 0;
6192 	bool skb_in_use = false;
6193 
6194 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6195 	       event);
6196 
6197 	switch (event) {
6198 	case L2CAP_EV_RECV_IFRAME:
6199 		switch (l2cap_classify_txseq(chan, control->txseq)) {
6200 		case L2CAP_TXSEQ_EXPECTED:
6201 			l2cap_pass_to_tx(chan, control);
6202 
6203 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6204 				BT_DBG("Busy, discarding expected seq %d",
6205 				       control->txseq);
6206 				break;
6207 			}
6208 
6209 			chan->expected_tx_seq = __next_seq(chan,
6210 							   control->txseq);
6211 
6212 			chan->buffer_seq = chan->expected_tx_seq;
6213 			skb_in_use = true;
6214 
6215 			err = l2cap_reassemble_sdu(chan, skb, control);
6216 			if (err)
6217 				break;
6218 
6219 			if (control->final) {
6220 				if (!test_and_clear_bit(CONN_REJ_ACT,
6221 							&chan->conn_state)) {
6222 					control->final = 0;
6223 					l2cap_retransmit_all(chan, control);
6224 					l2cap_ertm_send(chan);
6225 				}
6226 			}
6227 
6228 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6229 				l2cap_send_ack(chan);
6230 			break;
6231 		case L2CAP_TXSEQ_UNEXPECTED:
6232 			l2cap_pass_to_tx(chan, control);
6233 
6234 			/* Can't issue SREJ frames in the local busy state.
6235 			 * Drop this frame; it will be seen as missing
6236 			 * when local busy is exited.
6237 			 */
6238 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6239 				BT_DBG("Busy, discarding unexpected seq %d",
6240 				       control->txseq);
6241 				break;
6242 			}
6243 
6244 			/* There was a gap in the sequence, so an SREJ
6245 			 * must be sent for each missing frame.  The
6246 			 * current frame is stored for later use.
6247 			 */
6248 			skb_queue_tail(&chan->srej_q, skb);
6249 			skb_in_use = true;
6250 			BT_DBG("Queued %p (queue len %d)", skb,
6251 			       skb_queue_len(&chan->srej_q));
6252 
6253 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6254 			l2cap_seq_list_clear(&chan->srej_list);
6255 			l2cap_send_srej(chan, control->txseq);
6256 
6257 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6258 			break;
6259 		case L2CAP_TXSEQ_DUPLICATE:
6260 			l2cap_pass_to_tx(chan, control);
6261 			break;
6262 		case L2CAP_TXSEQ_INVALID_IGNORE:
6263 			break;
6264 		case L2CAP_TXSEQ_INVALID:
6265 		default:
6266 			l2cap_send_disconn_req(chan, ECONNRESET);
6267 			break;
6268 		}
6269 		break;
6270 	case L2CAP_EV_RECV_RR:
6271 		l2cap_pass_to_tx(chan, control);
6272 		if (control->final) {
6273 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6274 
6275 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6276 			    !__chan_is_moving(chan)) {
6277 				control->final = 0;
6278 				l2cap_retransmit_all(chan, control);
6279 			}
6280 
6281 			l2cap_ertm_send(chan);
6282 		} else if (control->poll) {
6283 			l2cap_send_i_or_rr_or_rnr(chan);
6284 		} else {
6285 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6286 					       &chan->conn_state) &&
6287 			    chan->unacked_frames)
6288 				__set_retrans_timer(chan);
6289 
6290 			l2cap_ertm_send(chan);
6291 		}
6292 		break;
6293 	case L2CAP_EV_RECV_RNR:
6294 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6295 		l2cap_pass_to_tx(chan, control);
6296 		if (control && control->poll) {
6297 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6298 			l2cap_send_rr_or_rnr(chan, 0);
6299 		}
6300 		__clear_retrans_timer(chan);
6301 		l2cap_seq_list_clear(&chan->retrans_list);
6302 		break;
6303 	case L2CAP_EV_RECV_REJ:
6304 		l2cap_handle_rej(chan, control);
6305 		break;
6306 	case L2CAP_EV_RECV_SREJ:
6307 		l2cap_handle_srej(chan, control);
6308 		break;
6309 	default:
6310 		break;
6311 	}
6312 
6313 	if (skb && !skb_in_use) {
6314 		BT_DBG("Freeing %p", skb);
6315 		kfree_skb(skb);
6316 	}
6317 
6318 	return err;
6319 }
6320 
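/* ERTM receive state machine, SREJ_SENT state: incoming I-frames are
 * buffered on srej_q while missing frames are recovered.  Each
 * retransmission matching the head of srej_list lets
 * l2cap_rx_queued_iframes() deliver the buffered run; once srej_q
 * drains, the state returns to RECV.
 */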
6321 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6322 				    struct l2cap_ctrl *control,
6323 				    struct sk_buff *skb, u8 event)
6324 {
6325 	int err = 0;
6326 	u16 txseq = control->txseq;
6327 	bool skb_in_use = false;
6328 
6329 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6330 	       event);
6331 
6332 	switch (event) {
6333 	case L2CAP_EV_RECV_IFRAME:
6334 		switch (l2cap_classify_txseq(chan, txseq)) {
6335 		case L2CAP_TXSEQ_EXPECTED:
6336 			/* Keep frame for reassembly later */
6337 			l2cap_pass_to_tx(chan, control);
6338 			skb_queue_tail(&chan->srej_q, skb);
6339 			skb_in_use = true;
6340 			BT_DBG("Queued %p (queue len %d)", skb,
6341 			       skb_queue_len(&chan->srej_q));
6342 
6343 			chan->expected_tx_seq = __next_seq(chan, txseq);
6344 			break;
6345 		case L2CAP_TXSEQ_EXPECTED_SREJ:
6346 			l2cap_seq_list_pop(&chan->srej_list);
6347 
6348 			l2cap_pass_to_tx(chan, control);
6349 			skb_queue_tail(&chan->srej_q, skb);
6350 			skb_in_use = true;
6351 			BT_DBG("Queued %p (queue len %d)", skb,
6352 			       skb_queue_len(&chan->srej_q));
6353 
6354 			err = l2cap_rx_queued_iframes(chan);
6355 			if (err)
6356 				break;
6357 
6358 			break;
6359 		case L2CAP_TXSEQ_UNEXPECTED:
6360 			/* Got a frame that can't be reassembled yet.
6361 			 * Save it for later, and send SREJs to cover
6362 			 * the missing frames.
6363 			 */
6364 			skb_queue_tail(&chan->srej_q, skb);
6365 			skb_in_use = true;
6366 			BT_DBG("Queued %p (queue len %d)", skb,
6367 			       skb_queue_len(&chan->srej_q));
6368 
6369 			l2cap_pass_to_tx(chan, control);
6370 			l2cap_send_srej(chan, control->txseq);
6371 			break;
6372 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6373 			/* This frame was requested with an SREJ, but
6374 			 * some expected retransmitted frames are
6375 			 * missing.  Request retransmission of missing
6376 			 * SREJ'd frames.
6377 			 */
6378 			skb_queue_tail(&chan->srej_q, skb);
6379 			skb_in_use = true;
6380 			BT_DBG("Queued %p (queue len %d)", skb,
6381 			       skb_queue_len(&chan->srej_q));
6382 
6383 			l2cap_pass_to_tx(chan, control);
6384 			l2cap_send_srej_list(chan, control->txseq);
6385 			break;
6386 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
6387 			/* We've already queued this frame.  Drop this copy. */
6388 			l2cap_pass_to_tx(chan, control);
6389 			break;
6390 		case L2CAP_TXSEQ_DUPLICATE:
6391 			/* Expecting a later sequence number, so this frame
6392 			 * was already received.  Ignore it completely.
6393 			 */
6394 			break;
6395 		case L2CAP_TXSEQ_INVALID_IGNORE:
6396 			break;
6397 		case L2CAP_TXSEQ_INVALID:
6398 		default:
6399 			l2cap_send_disconn_req(chan, ECONNRESET);
6400 			break;
6401 		}
6402 		break;
6403 	case L2CAP_EV_RECV_RR:
6404 		l2cap_pass_to_tx(chan, control);
6405 		if (control->final) {
6406 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6407 
6408 			if (!test_and_clear_bit(CONN_REJ_ACT,
6409 						&chan->conn_state)) {
6410 				control->final = 0;
6411 				l2cap_retransmit_all(chan, control);
6412 			}
6413 
6414 			l2cap_ertm_send(chan);
6415 		} else if (control->poll) {
6416 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6417 					       &chan->conn_state) &&
6418 			    chan->unacked_frames) {
6419 				__set_retrans_timer(chan);
6420 			}
6421 
6422 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6423 			l2cap_send_srej_tail(chan);
6424 		} else {
6425 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6426 					       &chan->conn_state) &&
6427 			    chan->unacked_frames)
6428 				__set_retrans_timer(chan);
6429 
6430 			l2cap_send_ack(chan);
6431 		}
6432 		break;
6433 	case L2CAP_EV_RECV_RNR:
6434 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6435 		l2cap_pass_to_tx(chan, control);
6436 		if (control->poll) {
6437 			l2cap_send_srej_tail(chan);
6438 		} else {
6439 			struct l2cap_ctrl rr_control;
6440 			memset(&rr_control, 0, sizeof(rr_control));
6441 			rr_control.sframe = 1;
6442 			rr_control.super = L2CAP_SUPER_RR;
6443 			rr_control.reqseq = chan->buffer_seq;
6444 			l2cap_send_sframe(chan, &rr_control);
6445 		}
6446 
6447 		break;
6448 	case L2CAP_EV_RECV_REJ:
6449 		l2cap_handle_rej(chan, control);
6450 		break;
6451 	case L2CAP_EV_RECV_SREJ:
6452 		l2cap_handle_srej(chan, control);
6453 		break;
6454 	}
6455 
6456 	if (skb && !skb_in_use) {
6457 		BT_DBG("Freeing %p", skb);
6458 		kfree_skb(skb);
6459 	}
6460 
6461 	return err;
6462 }
6463 
6464 static int l2cap_finish_move(struct l2cap_chan *chan)
6465 {
6466 	BT_DBG("chan %p", chan);
6467 
6468 	chan->rx_state = L2CAP_RX_STATE_RECV;
6469 
6470 	if (chan->hs_hcon)
6471 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6472 	else
6473 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6474 
6475 	return l2cap_resegment(chan);
6476 }
6477 
6478 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6479 				 struct l2cap_ctrl *control,
6480 				 struct sk_buff *skb, u8 event)
6481 {
6482 	int err;
6483 
6484 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6485 	       event);
6486 
6487 	if (!control->poll)
6488 		return -EPROTO;
6489 
6490 	l2cap_process_reqseq(chan, control->reqseq);
6491 
6492 	if (!skb_queue_empty(&chan->tx_q))
6493 		chan->tx_send_head = skb_peek(&chan->tx_q);
6494 	else
6495 		chan->tx_send_head = NULL;
6496 
6497 	/* Rewind next_tx_seq to the point expected
6498 	 * by the receiver.
6499 	 */
6500 	chan->next_tx_seq = control->reqseq;
6501 	chan->unacked_frames = 0;
6502 
6503 	err = l2cap_finish_move(chan);
6504 	if (err)
6505 		return err;
6506 
6507 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
6508 	l2cap_send_i_or_rr_or_rnr(chan);
6509 
6510 	if (event == L2CAP_EV_RECV_IFRAME)
6511 		return -EPROTO;
6512 
6513 	return l2cap_rx_state_recv(chan, control, NULL, event);
6514 }
6515 
6516 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6517 				 struct l2cap_ctrl *control,
6518 				 struct sk_buff *skb, u8 event)
6519 {
6520 	int err;
6521 
6522 	if (!control->final)
6523 		return -EPROTO;
6524 
6525 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6526 
6527 	chan->rx_state = L2CAP_RX_STATE_RECV;
6528 	l2cap_process_reqseq(chan, control->reqseq);
6529 
6530 	if (!skb_queue_empty(&chan->tx_q))
6531 		chan->tx_send_head = skb_peek(&chan->tx_q);
6532 	else
6533 		chan->tx_send_head = NULL;
6534 
6535 	/* Rewind next_tx_seq to the point expected
6536 	 * by the receiver.
6537 	 */
6538 	chan->next_tx_seq = control->reqseq;
6539 	chan->unacked_frames = 0;
6540 
6541 	if (chan->hs_hcon)
6542 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6543 	else
6544 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6545 
6546 	err = l2cap_resegment(chan);
6547 
6548 	if (!err)
6549 		err = l2cap_rx_state_recv(chan, control, skb, event);
6550 
6551 	return err;
6552 }
6553 
6554 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6555 {
6556 	/* Make sure reqseq is for a packet that has been sent but not acked */
6557 	u16 unacked;
6558 
6559 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6560 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6561 }
6562 
6563 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6564 		    struct sk_buff *skb, u8 event)
6565 {
6566 	int err = 0;
6567 
6568 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6569 	       control, skb, event, chan->rx_state);
6570 
6571 	if (__valid_reqseq(chan, control->reqseq)) {
6572 		switch (chan->rx_state) {
6573 		case L2CAP_RX_STATE_RECV:
6574 			err = l2cap_rx_state_recv(chan, control, skb, event);
6575 			break;
6576 		case L2CAP_RX_STATE_SREJ_SENT:
6577 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6578 						       event);
6579 			break;
6580 		case L2CAP_RX_STATE_WAIT_P:
6581 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6582 			break;
6583 		case L2CAP_RX_STATE_WAIT_F:
6584 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6585 			break;
6586 		default:
6587 			/* shut it down */
6588 			break;
6589 		}
6590 	} else {
6591 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
6592 		       control->reqseq, chan->next_tx_seq,
6593 		       chan->expected_ack_seq);
6594 		l2cap_send_disconn_req(chan, ECONNRESET);
6595 	}
6596 
6597 	return err;
6598 }
6599 
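/* Streaming mode receive: there is no retransmission, so only a frame
 * with the expected TxSeq is reassembled.  Anything else discards the
 * partially assembled SDU and the frame itself, and the expected
 * sequence number simply resynchronizes to the received TxSeq.
 */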
6600 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6601 			   struct sk_buff *skb)
6602 {
6603 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6604 	       chan->rx_state);
6605 
6606 	if (l2cap_classify_txseq(chan, control->txseq) ==
6607 	    L2CAP_TXSEQ_EXPECTED) {
6608 		l2cap_pass_to_tx(chan, control);
6609 
6610 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6611 		       __next_seq(chan, chan->buffer_seq));
6612 
6613 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6614 
6615 		l2cap_reassemble_sdu(chan, skb, control);
6616 	} else {
6617 		if (chan->sdu) {
6618 			kfree_skb(chan->sdu);
6619 			chan->sdu = NULL;
6620 		}
6621 		chan->sdu_last_frag = NULL;
6622 		chan->sdu_len = 0;
6623 
6624 		if (skb) {
6625 			BT_DBG("Freeing %p", skb);
6626 			kfree_skb(skb);
6627 		}
6628 	}
6629 
6630 	chan->last_acked_seq = control->txseq;
6631 	chan->expected_tx_seq = __next_seq(chan, control->txseq);
6632 
6633 	return 0;
6634 }
6635 
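/* Per-PDU entry point for ERTM and streaming channels: unpack the
 * control field, verify the FCS, check the payload against the
 * negotiated MPS, then feed I-frames to the rx state machine (or the
 * streaming path) and map S-frames to their RR/REJ/RNR/SREJ events.
 * Oversized payloads and trailing bytes in S-frames trigger a
 * disconnect; corrupted or otherwise invalid frames are dropped.
 */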
6636 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6637 {
6638 	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
6639 	u16 len;
6640 	u8 event;
6641 
6642 	__unpack_control(chan, skb);
6643 
6644 	len = skb->len;
6645 
6646 	/*
6647 	 * We can just drop the corrupted I-frame here.
6648 	 * Receiver will miss it and start proper recovery
6649 	 * The receiver will treat it as missing, start the normal
6650 	 * recovery procedure and request a retransmission.
6651 	if (l2cap_check_fcs(chan, skb))
6652 		goto drop;
6653 
6654 	if (!control->sframe && control->sar == L2CAP_SAR_START)
6655 		len -= L2CAP_SDULEN_SIZE;
6656 
6657 	if (chan->fcs == L2CAP_FCS_CRC16)
6658 		len -= L2CAP_FCS_SIZE;
6659 
6660 	if (len > chan->mps) {
6661 		l2cap_send_disconn_req(chan, ECONNRESET);
6662 		goto drop;
6663 	}
6664 
6665 	if ((chan->mode == L2CAP_MODE_ERTM ||
6666 	     chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb))
6667 		goto drop;
6668 
6669 	if (!control->sframe) {
6670 		int err;
6671 
6672 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6673 		       control->sar, control->reqseq, control->final,
6674 		       control->txseq);
6675 
6676 		/* Validate F-bit - F=0 always valid, F=1 only
6677 		 * valid in TX WAIT_F
6678 		 */
6679 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6680 			goto drop;
6681 
6682 		if (chan->mode != L2CAP_MODE_STREAMING) {
6683 			event = L2CAP_EV_RECV_IFRAME;
6684 			err = l2cap_rx(chan, control, skb, event);
6685 		} else {
6686 			err = l2cap_stream_rx(chan, control, skb);
6687 		}
6688 
6689 		if (err)
6690 			l2cap_send_disconn_req(chan, ECONNRESET);
6691 	} else {
6692 		const u8 rx_func_to_event[4] = {
6693 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6694 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6695 		};
6696 
6697 		/* Only I-frames are expected in streaming mode */
6698 		if (chan->mode == L2CAP_MODE_STREAMING)
6699 			goto drop;
6700 
6701 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6702 		       control->reqseq, control->final, control->poll,
6703 		       control->super);
6704 
6705 		if (len != 0) {
6706 			BT_ERR("Trailing bytes: %d in sframe", len);
6707 			l2cap_send_disconn_req(chan, ECONNRESET);
6708 			goto drop;
6709 		}
6710 
6711 		/* Validate F and P bits */
6712 		if (control->final && (control->poll ||
6713 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6714 			goto drop;
6715 
6716 		event = rx_func_to_event[control->super];
6717 		if (l2cap_rx(chan, control, skb, event))
6718 			l2cap_send_disconn_req(chan, ECONNRESET);
6719 	}
6720 
6721 	return 0;
6722 
6723 drop:
6724 	kfree_skb(skb);
6725 	return 0;
6726 }
6727 
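/* Return credits to the remote side of an LE flow control channel.
 * The target credit count is (imtu / mps) + 1, roughly enough for one
 * full SDU; whatever has been consumed below that level is handed
 * back.  For example, with imtu 512 and mps 230 the target is 3
 * credits, so once rx_credits has dropped to 1 a packet returning 2
 * credits is sent.
 */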
6728 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6729 {
6730 	struct l2cap_conn *conn = chan->conn;
6731 	struct l2cap_le_credits pkt;
6732 	u16 return_credits;
6733 
6734 	return_credits = ((chan->imtu / chan->mps) + 1) - chan->rx_credits;
6735 
6736 	if (!return_credits)
6737 		return;
6738 
6739 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6740 
6741 	chan->rx_credits += return_credits;
6742 
6743 	pkt.cid     = cpu_to_le16(chan->scid);
6744 	pkt.credits = cpu_to_le16(return_credits);
6745 
6746 	chan->ident = l2cap_get_ident(conn);
6747 
6748 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6749 }
6750 
6751 static int l2cap_le_recv(struct l2cap_chan *chan, struct sk_buff *skb)
6752 {
6753 	int err;
6754 
6755 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
6756 
6757 	/* Wait for recv to confirm reception before updating the credits */
6758 	err = chan->ops->recv(chan, skb);
6759 
6760 	/* Update credits whenever an SDU is received */
6761 	l2cap_chan_le_send_credits(chan);
6762 
6763 	return err;
6764 }
6765 
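/* LE credit-based flow control receive path.  Every incoming PDU
 * consumes one credit; the first PDU of an SDU starts with a 2-byte
 * SDU length, and subsequent PDUs are appended until that length is
 * reached.  Credits are replenished once the SDU has been passed up,
 * or immediately if the remote has run out of them.
 */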
6766 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6767 {
6768 	int err;
6769 
6770 	if (!chan->rx_credits) {
6771 		BT_ERR("No credits to receive LE L2CAP data");
6772 		l2cap_send_disconn_req(chan, ECONNRESET);
6773 		return -ENOBUFS;
6774 	}
6775 
6776 	if (chan->imtu < skb->len) {
6777 		BT_ERR("Too big LE L2CAP PDU");
6778 		return -ENOBUFS;
6779 	}
6780 
6781 	chan->rx_credits--;
6782 	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6783 
6784 	/* Update if the remote had run out of credits; this should only happen
6785 	 * if the remote is not using the entire MPS.
6786 	 */
6787 	if (!chan->rx_credits)
6788 		l2cap_chan_le_send_credits(chan);
6789 
6790 	err = 0;
6791 
6792 	if (!chan->sdu) {
6793 		u16 sdu_len;
6794 
6795 		sdu_len = get_unaligned_le16(skb->data);
6796 		skb_pull(skb, L2CAP_SDULEN_SIZE);
6797 
6798 		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6799 		       sdu_len, skb->len, chan->imtu);
6800 
6801 		if (sdu_len > chan->imtu) {
6802 			BT_ERR("Too big LE L2CAP SDU length received");
6803 			err = -EMSGSIZE;
6804 			goto failed;
6805 		}
6806 
6807 		if (skb->len > sdu_len) {
6808 			BT_ERR("Too much LE L2CAP data received");
6809 			err = -EINVAL;
6810 			goto failed;
6811 		}
6812 
6813 		if (skb->len == sdu_len)
6814 			return l2cap_le_recv(chan, skb);
6815 
6816 		chan->sdu = skb;
6817 		chan->sdu_len = sdu_len;
6818 		chan->sdu_last_frag = skb;
6819 
6820 		/* Detect if remote is not able to use the selected MPS */
6821 		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
6822 			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
6823 
6824 			/* Adjust the number of credits */
6825 			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
6826 			chan->mps = mps_len;
6827 			l2cap_chan_le_send_credits(chan);
6828 		}
6829 
6830 		return 0;
6831 	}
6832 
6833 	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6834 	       chan->sdu->len, skb->len, chan->sdu_len);
6835 
6836 	if (chan->sdu->len + skb->len > chan->sdu_len) {
6837 		BT_ERR("Too much LE L2CAP data received");
6838 		err = -EINVAL;
6839 		goto failed;
6840 	}
6841 
6842 	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6843 	skb = NULL;
6844 
6845 	if (chan->sdu->len == chan->sdu_len) {
6846 		err = l2cap_le_recv(chan, chan->sdu);
6847 		if (!err) {
6848 			chan->sdu = NULL;
6849 			chan->sdu_last_frag = NULL;
6850 			chan->sdu_len = 0;
6851 		}
6852 	}
6853 
6854 failed:
6855 	if (err) {
6856 		kfree_skb(skb);
6857 		kfree_skb(chan->sdu);
6858 		chan->sdu = NULL;
6859 		chan->sdu_last_frag = NULL;
6860 		chan->sdu_len = 0;
6861 	}
6862 
6863 	/* We can't return an error here since we took care of the skb
6864 	 * freeing internally. An error return would cause the caller to
6865 	 * do a double-free of the skb.
6866 	 */
6867 	return 0;
6868 }
6869 
6870 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6871 			       struct sk_buff *skb)
6872 {
6873 	struct l2cap_chan *chan;
6874 
6875 	chan = l2cap_get_chan_by_scid(conn, cid);
6876 	if (!chan) {
6877 		if (cid == L2CAP_CID_A2MP) {
6878 			chan = a2mp_channel_create(conn, skb);
6879 			if (!chan) {
6880 				kfree_skb(skb);
6881 				return;
6882 			}
6883 
6884 			l2cap_chan_lock(chan);
6885 		} else {
6886 			BT_DBG("unknown cid 0x%4.4x", cid);
6887 			/* Drop packet and return */
6888 			kfree_skb(skb);
6889 			return;
6890 		}
6891 	}
6892 
6893 	BT_DBG("chan %p, len %d", chan, skb->len);
6894 
6895 	/* If we receive data on a fixed channel before the info req/rsp
6896 	 * procedure is done, simply assume that the channel is supported
6897 	 * and mark it as ready.
6898 	 */
6899 	if (chan->chan_type == L2CAP_CHAN_FIXED)
6900 		l2cap_chan_ready(chan);
6901 
6902 	if (chan->state != BT_CONNECTED)
6903 		goto drop;
6904 
6905 	switch (chan->mode) {
6906 	case L2CAP_MODE_LE_FLOWCTL:
6907 		if (l2cap_le_data_rcv(chan, skb) < 0)
6908 			goto drop;
6909 
6910 		goto done;
6911 
6912 	case L2CAP_MODE_BASIC:
6913 		/* If the socket receive buffer overflows we drop data here,
6914 		 * which is *bad* because L2CAP has to be reliable.
6915 		 * But we don't have any other choice: basic mode L2CAP
6916 		 * doesn't provide a flow control mechanism. */
6917 
6918 		if (chan->imtu < skb->len) {
6919 			BT_ERR("Dropping L2CAP data: receive buffer overflow");
6920 			goto drop;
6921 		}
6922 
6923 		if (!chan->ops->recv(chan, skb))
6924 			goto done;
6925 		break;
6926 
6927 	case L2CAP_MODE_ERTM:
6928 	case L2CAP_MODE_STREAMING:
6929 		l2cap_data_rcv(chan, skb);
6930 		goto done;
6931 
6932 	default:
6933 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6934 		break;
6935 	}
6936 
6937 drop:
6938 	kfree_skb(skb);
6939 
6940 done:
6941 	l2cap_chan_unlock(chan);
6942 }
6943 
6944 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6945 				  struct sk_buff *skb)
6946 {
6947 	struct hci_conn *hcon = conn->hcon;
6948 	struct l2cap_chan *chan;
6949 
6950 	if (hcon->type != ACL_LINK)
6951 		goto free_skb;
6952 
6953 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6954 					ACL_LINK);
6955 	if (!chan)
6956 		goto free_skb;
6957 
6958 	BT_DBG("chan %p, len %d", chan, skb->len);
6959 
6960 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6961 		goto drop;
6962 
6963 	if (chan->imtu < skb->len)
6964 		goto drop;
6965 
6966 	/* Store remote BD_ADDR and PSM for msg_name */
6967 	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
6968 	bt_cb(skb)->l2cap.psm = psm;
6969 
6970 	if (!chan->ops->recv(chan, skb)) {
6971 		l2cap_chan_put(chan);
6972 		return;
6973 	}
6974 
6975 drop:
6976 	l2cap_chan_put(chan);
6977 free_skb:
6978 	kfree_skb(skb);
6979 }
6980 
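/* Demultiplex one complete L2CAP frame.  The 4-byte basic header
 * (16-bit length followed by 16-bit CID) selects the handler:
 * the signalling CIDs, the connectionless CID (whose payload starts
 * with a 2-byte PSM) or an ordinary data channel looked up by CID.
 */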
6981 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6982 {
6983 	struct l2cap_hdr *lh = (void *) skb->data;
6984 	struct hci_conn *hcon = conn->hcon;
6985 	u16 cid, len;
6986 	__le16 psm;
6987 
6988 	if (hcon->state != BT_CONNECTED) {
6989 		BT_DBG("queueing pending rx skb");
6990 		skb_queue_tail(&conn->pending_rx, skb);
6991 		return;
6992 	}
6993 
6994 	skb_pull(skb, L2CAP_HDR_SIZE);
6995 	cid = __le16_to_cpu(lh->cid);
6996 	len = __le16_to_cpu(lh->len);
6997 
6998 	if (len != skb->len) {
6999 		kfree_skb(skb);
7000 		return;
7001 	}
7002 
7003 	/* Since we can't actively block incoming LE connections we must
7004 	 * at least ensure that we ignore incoming data from them.
7005 	 */
7006 	if (hcon->type == LE_LINK &&
7007 	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
7008 				   bdaddr_dst_type(hcon))) {
7009 		kfree_skb(skb);
7010 		return;
7011 	}
7012 
7013 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
7014 
7015 	switch (cid) {
7016 	case L2CAP_CID_SIGNALING:
7017 		l2cap_sig_channel(conn, skb);
7018 		break;
7019 
7020 	case L2CAP_CID_CONN_LESS:
7021 		psm = get_unaligned((__le16 *) skb->data);
7022 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
7023 		l2cap_conless_channel(conn, psm, skb);
7024 		break;
7025 
7026 	case L2CAP_CID_LE_SIGNALING:
7027 		l2cap_le_sig_channel(conn, skb);
7028 		break;
7029 
7030 	default:
7031 		l2cap_data_channel(conn, cid, skb);
7032 		break;
7033 	}
7034 }
7035 
7036 static void process_pending_rx(struct work_struct *work)
7037 {
7038 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7039 					       pending_rx_work);
7040 	struct sk_buff *skb;
7041 
7042 	BT_DBG("");
7043 
7044 	while ((skb = skb_dequeue(&conn->pending_rx)))
7045 		l2cap_recv_frame(conn, skb);
7046 }
7047 
7048 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7049 {
7050 	struct l2cap_conn *conn = hcon->l2cap_data;
7051 	struct hci_chan *hchan;
7052 
7053 	if (conn)
7054 		return conn;
7055 
7056 	hchan = hci_chan_create(hcon);
7057 	if (!hchan)
7058 		return NULL;
7059 
7060 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
7061 	if (!conn) {
7062 		hci_chan_del(hchan);
7063 		return NULL;
7064 	}
7065 
7066 	kref_init(&conn->ref);
7067 	hcon->l2cap_data = conn;
7068 	conn->hcon = hci_conn_get(hcon);
7069 	conn->hchan = hchan;
7070 
7071 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7072 
7073 	switch (hcon->type) {
7074 	case LE_LINK:
7075 		if (hcon->hdev->le_mtu) {
7076 			conn->mtu = hcon->hdev->le_mtu;
7077 			break;
7078 		}
7079 		/* fall through */
7080 	default:
7081 		conn->mtu = hcon->hdev->acl_mtu;
7082 		break;
7083 	}
7084 
7085 	conn->feat_mask = 0;
7086 
7087 	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7088 
7089 	if (hcon->type == ACL_LINK &&
7090 	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7091 		conn->local_fixed_chan |= L2CAP_FC_A2MP;
7092 
7093 	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7094 	    (bredr_sc_enabled(hcon->hdev) ||
7095 	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7096 		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7097 
7098 	mutex_init(&conn->ident_lock);
7099 	mutex_init(&conn->chan_lock);
7100 
7101 	INIT_LIST_HEAD(&conn->chan_l);
7102 	INIT_LIST_HEAD(&conn->users);
7103 
7104 	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7105 
7106 	skb_queue_head_init(&conn->pending_rx);
7107 	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7108 	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7109 
7110 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7111 
7112 	return conn;
7113 }
7114 
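/* A PSM of zero is never valid.  LE channels accept any PSM up to
 * 0x00ff; for BR/EDR the check below means that, for example, 0x0001
 * and 0x1003 are acceptable while 0x0002 and 0x0101 are rejected.
 */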
7115 static bool is_valid_psm(u16 psm, u8 dst_type) {
7116 	if (!psm)
7117 		return false;
7118 
7119 	if (bdaddr_type_is_le(dst_type))
7120 		return (psm <= 0x00ff);
7121 
7122 	/* PSM must be odd and lsb of upper byte must be 0 */
7123 	return ((psm & 0x0101) == 0x0001);
7124 }
7125 
7126 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7127 		       bdaddr_t *dst, u8 dst_type)
7128 {
7129 	struct l2cap_conn *conn;
7130 	struct hci_conn *hcon;
7131 	struct hci_dev *hdev;
7132 	int err;
7133 
7134 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7135 	       dst_type, __le16_to_cpu(psm));
7136 
7137 	hdev = hci_get_route(dst, &chan->src, chan->src_type);
7138 	if (!hdev)
7139 		return -EHOSTUNREACH;
7140 
7141 	hci_dev_lock(hdev);
7142 
7143 	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7144 	    chan->chan_type != L2CAP_CHAN_RAW) {
7145 		err = -EINVAL;
7146 		goto done;
7147 	}
7148 
7149 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7150 		err = -EINVAL;
7151 		goto done;
7152 	}
7153 
7154 	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7155 		err = -EINVAL;
7156 		goto done;
7157 	}
7158 
7159 	switch (chan->mode) {
7160 	case L2CAP_MODE_BASIC:
7161 		break;
7162 	case L2CAP_MODE_LE_FLOWCTL:
7163 		break;
7164 	case L2CAP_MODE_ERTM:
7165 	case L2CAP_MODE_STREAMING:
7166 		if (!disable_ertm)
7167 			break;
7168 		/* fall through */
7169 	default:
7170 		err = -EOPNOTSUPP;
7171 		goto done;
7172 	}
7173 
7174 	switch (chan->state) {
7175 	case BT_CONNECT:
7176 	case BT_CONNECT2:
7177 	case BT_CONFIG:
7178 		/* Already connecting */
7179 		err = 0;
7180 		goto done;
7181 
7182 	case BT_CONNECTED:
7183 		/* Already connected */
7184 		err = -EISCONN;
7185 		goto done;
7186 
7187 	case BT_OPEN:
7188 	case BT_BOUND:
7189 		/* Can connect */
7190 		break;
7191 
7192 	default:
7193 		err = -EBADFD;
7194 		goto done;
7195 	}
7196 
7197 	/* Set destination address and psm */
7198 	bacpy(&chan->dst, dst);
7199 	chan->dst_type = dst_type;
7200 
7201 	chan->psm = psm;
7202 	chan->dcid = cid;
7203 
7204 	if (bdaddr_type_is_le(dst_type)) {
7205 		/* Convert from L2CAP channel address type to HCI address type
7206 		 */
7207 		if (dst_type == BDADDR_LE_PUBLIC)
7208 			dst_type = ADDR_LE_DEV_PUBLIC;
7209 		else
7210 			dst_type = ADDR_LE_DEV_RANDOM;
7211 
7212 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7213 			hcon = hci_connect_le(hdev, dst, dst_type,
7214 					      chan->sec_level,
7215 					      HCI_LE_CONN_TIMEOUT,
7216 					      HCI_ROLE_SLAVE, NULL);
7217 		else
7218 			hcon = hci_connect_le_scan(hdev, dst, dst_type,
7219 						   chan->sec_level,
7220 						   HCI_LE_CONN_TIMEOUT);
7221 
7222 	} else {
7223 		u8 auth_type = l2cap_get_auth_type(chan);
7224 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7225 	}
7226 
7227 	if (IS_ERR(hcon)) {
7228 		err = PTR_ERR(hcon);
7229 		goto done;
7230 	}
7231 
7232 	conn = l2cap_conn_add(hcon);
7233 	if (!conn) {
7234 		hci_conn_drop(hcon);
7235 		err = -ENOMEM;
7236 		goto done;
7237 	}
7238 
7239 	mutex_lock(&conn->chan_lock);
7240 	l2cap_chan_lock(chan);
7241 
7242 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7243 		hci_conn_drop(hcon);
7244 		err = -EBUSY;
7245 		goto chan_unlock;
7246 	}
7247 
7248 	/* Update source addr of the socket */
7249 	bacpy(&chan->src, &hcon->src);
7250 	chan->src_type = bdaddr_src_type(hcon);
7251 
7252 	__l2cap_chan_add(conn, chan);
7253 
7254 	/* l2cap_chan_add takes its own ref so we can drop this one */
7255 	hci_conn_drop(hcon);
7256 
7257 	l2cap_state_change(chan, BT_CONNECT);
7258 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7259 
7260 	/* Release chan->sport so that it can be reused by other
7261 	 * sockets (as it's only used for listening sockets).
7262 	 */
7263 	write_lock(&chan_list_lock);
7264 	chan->sport = 0;
7265 	write_unlock(&chan_list_lock);
7266 
7267 	if (hcon->state == BT_CONNECTED) {
7268 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7269 			__clear_chan_timer(chan);
7270 			if (l2cap_chan_check_security(chan, true))
7271 				l2cap_state_change(chan, BT_CONNECTED);
7272 		} else
7273 			l2cap_do_start(chan);
7274 	}
7275 
7276 	err = 0;
7277 
7278 chan_unlock:
7279 	l2cap_chan_unlock(chan);
7280 	mutex_unlock(&conn->chan_lock);
7281 done:
7282 	hci_dev_unlock(hdev);
7283 	hci_dev_put(hdev);
7284 	return err;
7285 }
7286 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7287 
7288 /* ---- L2CAP interface with lower layer (HCI) ---- */
7289 
7290 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7291 {
7292 	int exact = 0, lm1 = 0, lm2 = 0;
7293 	struct l2cap_chan *c;
7294 
7295 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7296 
7297 	/* Find listening sockets and check their link_mode */
7298 	read_lock(&chan_list_lock);
7299 	list_for_each_entry(c, &chan_list, global_l) {
7300 		if (c->state != BT_LISTEN)
7301 			continue;
7302 
7303 		if (!bacmp(&c->src, &hdev->bdaddr)) {
7304 			lm1 |= HCI_LM_ACCEPT;
7305 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7306 				lm1 |= HCI_LM_MASTER;
7307 			exact++;
7308 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
7309 			lm2 |= HCI_LM_ACCEPT;
7310 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7311 				lm2 |= HCI_LM_MASTER;
7312 		}
7313 	}
7314 	read_unlock(&chan_list_lock);
7315 
7316 	return exact ? lm1 : lm2;
7317 }
7318 
7319 /* Find the next fixed channel in BT_LISTEN state, continuing iteration
7320  * from an existing channel in the list or from the beginning of the
7321  * global list (by passing NULL as first parameter).
7322  */
7323 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7324 						  struct hci_conn *hcon)
7325 {
7326 	u8 src_type = bdaddr_src_type(hcon);
7327 
7328 	read_lock(&chan_list_lock);
7329 
7330 	if (c)
7331 		c = list_next_entry(c, global_l);
7332 	else
7333 		c = list_entry(chan_list.next, typeof(*c), global_l);
7334 
7335 	list_for_each_entry_from(c, &chan_list, global_l) {
7336 		if (c->chan_type != L2CAP_CHAN_FIXED)
7337 			continue;
7338 		if (c->state != BT_LISTEN)
7339 			continue;
7340 		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7341 			continue;
7342 		if (src_type != c->src_type)
7343 			continue;
7344 
7345 		l2cap_chan_hold(c);
7346 		read_unlock(&chan_list_lock);
7347 		return c;
7348 	}
7349 
7350 	read_unlock(&chan_list_lock);
7351 
7352 	return NULL;
7353 }
7354 
7355 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7356 {
7357 	struct hci_dev *hdev = hcon->hdev;
7358 	struct l2cap_conn *conn;
7359 	struct l2cap_chan *pchan;
7360 	u8 dst_type;
7361 
7362 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7363 		return;
7364 
7365 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7366 
7367 	if (status) {
7368 		l2cap_conn_del(hcon, bt_to_errno(status));
7369 		return;
7370 	}
7371 
7372 	conn = l2cap_conn_add(hcon);
7373 	if (!conn)
7374 		return;
7375 
7376 	dst_type = bdaddr_dst_type(hcon);
7377 
7378 	/* If device is blocked, do not create channels for it */
7379 	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7380 		return;
7381 
7382 	/* Find fixed channels and notify them of the new connection. We
7383 	 * use multiple individual lookups, continuing each time where
7384 	 * we left off, because the list lock would prevent calling the
7385 	 * potentially sleeping l2cap_chan_lock() function.
7386 	 */
7387 	pchan = l2cap_global_fixed_chan(NULL, hcon);
7388 	while (pchan) {
7389 		struct l2cap_chan *chan, *next;
7390 
7391 		/* Client fixed channels should override server ones */
7392 		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7393 			goto next;
7394 
7395 		l2cap_chan_lock(pchan);
7396 		chan = pchan->ops->new_connection(pchan);
7397 		if (chan) {
7398 			bacpy(&chan->src, &hcon->src);
7399 			bacpy(&chan->dst, &hcon->dst);
7400 			chan->src_type = bdaddr_src_type(hcon);
7401 			chan->dst_type = dst_type;
7402 
7403 			__l2cap_chan_add(conn, chan);
7404 		}
7405 
7406 		l2cap_chan_unlock(pchan);
7407 next:
7408 		next = l2cap_global_fixed_chan(pchan, hcon);
7409 		l2cap_chan_put(pchan);
7410 		pchan = next;
7411 	}
7412 
7413 	l2cap_conn_ready(conn);
7414 }
7415 
7416 int l2cap_disconn_ind(struct hci_conn *hcon)
7417 {
7418 	struct l2cap_conn *conn = hcon->l2cap_data;
7419 
7420 	BT_DBG("hcon %p", hcon);
7421 
7422 	if (!conn)
7423 		return HCI_ERROR_REMOTE_USER_TERM;
7424 	return conn->disc_reason;
7425 }
7426 
7427 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7428 {
7429 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7430 		return;
7431 
7432 	BT_DBG("hcon %p reason %d", hcon, reason);
7433 
7434 	l2cap_conn_del(hcon, bt_to_errno(reason));
7435 }
7436 
7437 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7438 {
7439 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7440 		return;
7441 
7442 	if (encrypt == 0x00) {
7443 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
7444 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7445 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
7446 			   chan->sec_level == BT_SECURITY_FIPS)
7447 			l2cap_chan_close(chan, ECONNREFUSED);
7448 	} else {
7449 		if (chan->sec_level == BT_SECURITY_MEDIUM)
7450 			__clear_chan_timer(chan);
7451 	}
7452 }
7453 
7454 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7455 {
7456 	struct l2cap_conn *conn = hcon->l2cap_data;
7457 	struct l2cap_chan *chan;
7458 
7459 	if (!conn)
7460 		return;
7461 
7462 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7463 
7464 	mutex_lock(&conn->chan_lock);
7465 
7466 	list_for_each_entry(chan, &conn->chan_l, list) {
7467 		l2cap_chan_lock(chan);
7468 
7469 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7470 		       state_to_string(chan->state));
7471 
7472 		if (chan->scid == L2CAP_CID_A2MP) {
7473 			l2cap_chan_unlock(chan);
7474 			continue;
7475 		}
7476 
7477 		if (!status && encrypt)
7478 			chan->sec_level = hcon->sec_level;
7479 
7480 		if (!__l2cap_no_conn_pending(chan)) {
7481 			l2cap_chan_unlock(chan);
7482 			continue;
7483 		}
7484 
7485 		if (!status && (chan->state == BT_CONNECTED ||
7486 				chan->state == BT_CONFIG)) {
7487 			chan->ops->resume(chan);
7488 			l2cap_check_encryption(chan, encrypt);
7489 			l2cap_chan_unlock(chan);
7490 			continue;
7491 		}
7492 
7493 		if (chan->state == BT_CONNECT) {
7494 			if (!status)
7495 				l2cap_start_connection(chan);
7496 			else
7497 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7498 		} else if (chan->state == BT_CONNECT2 &&
7499 			   chan->mode != L2CAP_MODE_LE_FLOWCTL) {
7500 			struct l2cap_conn_rsp rsp;
7501 			__u16 res, stat;
7502 
7503 			if (!status) {
7504 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7505 					res = L2CAP_CR_PEND;
7506 					stat = L2CAP_CS_AUTHOR_PEND;
7507 					chan->ops->defer(chan);
7508 				} else {
7509 					l2cap_state_change(chan, BT_CONFIG);
7510 					res = L2CAP_CR_SUCCESS;
7511 					stat = L2CAP_CS_NO_INFO;
7512 				}
7513 			} else {
7514 				l2cap_state_change(chan, BT_DISCONN);
7515 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7516 				res = L2CAP_CR_SEC_BLOCK;
7517 				stat = L2CAP_CS_NO_INFO;
7518 			}
7519 
7520 			rsp.scid   = cpu_to_le16(chan->dcid);
7521 			rsp.dcid   = cpu_to_le16(chan->scid);
7522 			rsp.result = cpu_to_le16(res);
7523 			rsp.status = cpu_to_le16(stat);
7524 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
7525 				       sizeof(rsp), &rsp);
7526 
7527 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7528 			    res == L2CAP_CR_SUCCESS) {
7529 				char buf[128];
7530 				set_bit(CONF_REQ_SENT, &chan->conf_state);
7531 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
7532 					       L2CAP_CONF_REQ,
7533 					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
7534 					       buf);
7535 				chan->num_conf_req++;
7536 			}
7537 		}
7538 
7539 		l2cap_chan_unlock(chan);
7540 	}
7541 
7542 	mutex_unlock(&conn->chan_lock);
7543 }
7544 
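/* Reassemble L2CAP frames from incoming ACL data.  A start fragment
 * must contain at least the basic header, which gives the total frame
 * length; if the fragment is already complete it is processed
 * directly, otherwise a full-sized skb is allocated and continuation
 * fragments are copied in until rx_len reaches zero.
 */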
7545 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7546 {
7547 	struct l2cap_conn *conn = hcon->l2cap_data;
7548 	struct l2cap_hdr *hdr;
7549 	int len;
7550 
7551 	/* For an AMP controller do not create an l2cap conn */
7552 	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
7553 		goto drop;
7554 
7555 	if (!conn)
7556 		conn = l2cap_conn_add(hcon);
7557 
7558 	if (!conn)
7559 		goto drop;
7560 
7561 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7562 
7563 	switch (flags) {
7564 	case ACL_START:
7565 	case ACL_START_NO_FLUSH:
7566 	case ACL_COMPLETE:
7567 		if (conn->rx_len) {
7568 			BT_ERR("Unexpected start frame (len %d)", skb->len);
7569 			kfree_skb(conn->rx_skb);
7570 			conn->rx_skb = NULL;
7571 			conn->rx_len = 0;
7572 			l2cap_conn_unreliable(conn, ECOMM);
7573 		}
7574 
7575 		/* A start fragment always begins with the Basic L2CAP header */
7576 		if (skb->len < L2CAP_HDR_SIZE) {
7577 			BT_ERR("Frame is too short (len %d)", skb->len);
7578 			l2cap_conn_unreliable(conn, ECOMM);
7579 			goto drop;
7580 		}
7581 
7582 		hdr = (struct l2cap_hdr *) skb->data;
7583 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7584 
7585 		if (len == skb->len) {
7586 			/* Complete frame received */
7587 			l2cap_recv_frame(conn, skb);
7588 			return;
7589 		}
7590 
7591 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7592 
7593 		if (skb->len > len) {
7594 			BT_ERR("Frame is too long (len %d, expected len %d)",
7595 			       skb->len, len);
7596 			l2cap_conn_unreliable(conn, ECOMM);
7597 			goto drop;
7598 		}
7599 
7600 		/* Allocate skb for the complete frame (with header) */
7601 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7602 		if (!conn->rx_skb)
7603 			goto drop;
7604 
7605 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7606 					  skb->len);
7607 		conn->rx_len = len - skb->len;
7608 		break;
7609 
7610 	case ACL_CONT:
7611 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7612 
7613 		if (!conn->rx_len) {
7614 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7615 			l2cap_conn_unreliable(conn, ECOMM);
7616 			goto drop;
7617 		}
7618 
7619 		if (skb->len > conn->rx_len) {
7620 			BT_ERR("Fragment is too long (len %d, expected %d)",
7621 			       skb->len, conn->rx_len);
7622 			kfree_skb(conn->rx_skb);
7623 			conn->rx_skb = NULL;
7624 			conn->rx_len = 0;
7625 			l2cap_conn_unreliable(conn, ECOMM);
7626 			goto drop;
7627 		}
7628 
7629 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7630 					  skb->len);
7631 		conn->rx_len -= skb->len;
7632 
7633 		if (!conn->rx_len) {
7634 			/* Complete frame received. l2cap_recv_frame
7635 			 * takes ownership of the skb, so set the connection's
7636 			 * rx_skb pointer to NULL first.
7637 			 */
7638 			struct sk_buff *rx_skb = conn->rx_skb;
7639 			conn->rx_skb = NULL;
7640 			l2cap_recv_frame(conn, rx_skb);
7641 		}
7642 		break;
7643 	}
7644 
7645 drop:
7646 	kfree_skb(skb);
7647 }
7648 
7649 static struct hci_cb l2cap_cb = {
7650 	.name		= "L2CAP",
7651 	.connect_cfm	= l2cap_connect_cfm,
7652 	.disconn_cfm	= l2cap_disconn_cfm,
7653 	.security_cfm	= l2cap_security_cfm,
7654 };
7655 
7656 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7657 {
7658 	struct l2cap_chan *c;
7659 
7660 	read_lock(&chan_list_lock);
7661 
7662 	list_for_each_entry(c, &chan_list, global_l) {
7663 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7664 			   &c->src, c->src_type, &c->dst, c->dst_type,
7665 			   c->state, __le16_to_cpu(c->psm),
7666 			   c->scid, c->dcid, c->imtu, c->omtu,
7667 			   c->sec_level, c->mode);
7668 	}
7669 
7670 	read_unlock(&chan_list_lock);
7671 
7672 	return 0;
7673 }
7674 
7675 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
7676 
7677 static struct dentry *l2cap_debugfs;
7678 
7679 int __init l2cap_init(void)
7680 {
7681 	int err;
7682 
7683 	err = l2cap_init_sockets();
7684 	if (err < 0)
7685 		return err;
7686 
7687 	hci_register_cb(&l2cap_cb);
7688 
7689 	if (IS_ERR_OR_NULL(bt_debugfs))
7690 		return 0;
7691 
7692 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7693 					    NULL, &l2cap_debugfs_fops);
7694 
7695 	return 0;
7696 }
7697 
7698 void l2cap_exit(void)
7699 {
7700 	debugfs_remove(l2cap_debugfs);
7701 	hci_unregister_cb(&l2cap_cb);
7702 	l2cap_cleanup_sockets();
7703 }
7704 
7705 module_param(disable_ertm, bool, 0644);
7706 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
7707