xref: /openbmc/linux/net/bluetooth/l2cap_core.c (revision 96ac6d43)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
50 
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
53 
54 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
55 				       u8 code, u8 ident, u16 dlen, void *data);
56 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
57 			   void *data);
58 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
59 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
60 
61 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
62 		     struct sk_buff_head *skbs, u8 event);
63 
64 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
65 {
66 	if (link_type == LE_LINK) {
67 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
68 			return BDADDR_LE_PUBLIC;
69 		else
70 			return BDADDR_LE_RANDOM;
71 	}
72 
73 	return BDADDR_BREDR;
74 }
75 
/* Socket-facing (BDADDR_*) type of our own address on @hcon. */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
80 
/* Socket-facing (BDADDR_*) type of the peer's address on @hcon. */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
85 
86 /* ---- L2CAP channels ---- */
87 
88 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
89 						   u16 cid)
90 {
91 	struct l2cap_chan *c;
92 
93 	list_for_each_entry(c, &conn->chan_l, list) {
94 		if (c->dcid == cid)
95 			return c;
96 	}
97 	return NULL;
98 }
99 
100 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
101 						   u16 cid)
102 {
103 	struct l2cap_chan *c;
104 
105 	list_for_each_entry(c, &conn->chan_l, list) {
106 		if (c->scid == cid)
107 			return c;
108 	}
109 	return NULL;
110 }
111 
/* Find channel with given SCID.
 * Returns the channel locked (l2cap_chan_lock held), or NULL.
 *
 * NOTE(review): the channel is returned locked but with no extra
 * reference taken under chan_lock; confirm callers cannot race with
 * l2cap_chan_put() freeing it (later kernels add l2cap_chan_hold()
 * here).
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	/* chan_lock keeps the list stable while we search and lock c */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
127 
/* Find channel with given DCID.
 * Returns the channel locked (l2cap_chan_lock held), or NULL.
 *
 * NOTE(review): as with l2cap_get_chan_by_scid(), no reference is taken
 * before chan_lock is dropped — verify lifetime against callers.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	/* chan_lock keeps the list stable while we search and lock c */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
144 
145 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
146 						    u8 ident)
147 {
148 	struct l2cap_chan *c;
149 
150 	list_for_each_entry(c, &conn->chan_l, list) {
151 		if (c->ident == ident)
152 			return c;
153 	}
154 	return NULL;
155 }
156 
/* Find channel awaiting a response with signalling identifier @ident.
 * Returns the channel locked (l2cap_chan_lock held), or NULL.
 */
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	/* chan_lock keeps the list stable while we search and lock c */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
170 
171 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
172 {
173 	struct l2cap_chan *c;
174 
175 	list_for_each_entry(c, &chan_list, global_l) {
176 		if (c->sport == psm && !bacmp(&c->src, src))
177 			return c;
178 	}
179 	return NULL;
180 }
181 
/* Bind @chan to PSM @psm for source address @src, or auto-allocate a
 * free dynamic PSM when @psm is 0.
 *
 * Returns 0 on success, -EADDRINUSE when the requested PSM is already
 * bound on @src, or -EINVAL when the dynamic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	/* chan_list_lock serializes lookup and bind so two binds cannot
	 * race to the same PSM.
	 */
	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			/* BR/EDR dynamic PSMs step by 2 so the low
			 * octet stays odd (valid BR/EDR PSMs).
			 */
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			/* LE dynamic PSMs are a contiguous range */
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
225 
226 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
227 {
228 	write_lock(&chan_list_lock);
229 
230 	/* Override the defaults (which are for conn-oriented) */
231 	chan->omtu = L2CAP_DEFAULT_MTU;
232 	chan->chan_type = L2CAP_CHAN_FIXED;
233 
234 	chan->scid = scid;
235 
236 	write_unlock(&chan_list_lock);
237 
238 	return 0;
239 }
240 
241 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
242 {
243 	u16 cid, dyn_end;
244 
245 	if (conn->hcon->type == LE_LINK)
246 		dyn_end = L2CAP_CID_LE_DYN_END;
247 	else
248 		dyn_end = L2CAP_CID_DYN_END;
249 
250 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
251 		if (!__l2cap_get_chan_by_scid(conn, cid))
252 			return cid;
253 	}
254 
255 	return 0;
256 }
257 
/* Move @chan to @state and notify the channel's owner via the
 * state_change callback (with err == 0).
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
266 
/* Move @chan to @state and report @err to the owner in one callback.
 * Unlike l2cap_state_change() this variant does not log the transition.
 */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
273 
/* Report @err to the channel's owner without changing chan->state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
278 
/* (Re)arm the ERTM retransmission timer, but only when no monitor timer
 * is pending (the two are mutually exclusive — see
 * __set_monitor_timer()) and a retransmission timeout was negotiated.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
287 
/* Start the ERTM monitor timer. The retransmission timer is stopped
 * first since only one of the two may run at a time.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
296 
297 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
298 					       u16 seq)
299 {
300 	struct sk_buff *skb;
301 
302 	skb_queue_walk(head, skb) {
303 		if (bt_cb(skb)->l2cap.txseq == seq)
304 			return skb;
305 	}
306 
307 	return NULL;
308 }
309 
310 /* ---- L2CAP sequence number lists ---- */
311 
312 /* For ERTM, ordered lists of sequence numbers must be tracked for
313  * SREJ requests that are received and for frames that are to be
314  * retransmitted. These seq_list functions implement a singly-linked
315  * list in an array, where membership in the list can also be checked
316  * in constant time. Items can also be added to the tail of the list
317  * and removed from the head in constant time, without further memory
318  * allocs or frees.
319  */
320 
321 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
322 {
323 	size_t alloc_size, i;
324 
325 	/* Allocated size is a power of 2 to map sequence numbers
326 	 * (which may be up to 14 bits) in to a smaller array that is
327 	 * sized for the negotiated ERTM transmit windows.
328 	 */
329 	alloc_size = roundup_pow_of_two(size);
330 
331 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
332 	if (!seq_list->list)
333 		return -ENOMEM;
334 
335 	seq_list->mask = alloc_size - 1;
336 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
337 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
338 	for (i = 0; i < alloc_size; i++)
339 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
340 
341 	return 0;
342 }
343 
/* Release the backing array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
348 
/* Constant-time membership test: a slot that is not
 * L2CAP_SEQ_LIST_CLEAR means @seq is currently on the list.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
355 
/* Remove and return the sequence number at the head of the list.
 * Caller must ensure the list is non-empty. The vacated slot is reset;
 * popping the last element (its slot held the TAIL marker) leaves the
 * list empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* The head slot links to the next element (or holds TAIL) */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		/* Popped the last element; mark the list empty */
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
371 
372 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
373 {
374 	u16 i;
375 
376 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
377 		return;
378 
379 	for (i = 0; i <= seq_list->mask; i++)
380 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
381 
382 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
383 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
384 }
385 
/* Append @seq to the tail of the list in constant time. A sequence
 * number already on the list is left in place (no duplicates).
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Slot in use means @seq is already queued */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	/* Empty list: the new element is also the head */
	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
403 
/* Delayed-work handler for chan_timer: the channel waited too long in
 * its current state. Derives an error reason from the state and closes
 * the channel. Takes conn->chan_lock then the channel lock; ops->close
 * runs only after the channel lock is dropped.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Timeouts while connected/configuring, or while connecting
	 * above SDP security, read as a refused connection; everything
	 * else is a plain timeout.
	 */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Drop the reference presumably taken when the timer was armed
	 * — see l2cap_set_timer()
	 */
	l2cap_chan_put(chan);
}
433 
/* Allocate and initialize a new channel in BT_OPEN state, add it to the
 * global channel list, and return it with one reference held.
 * Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	/* NOTE(review): GFP_ATOMIC looks stricter than necessary here —
	 * confirm whether any caller runs in atomic context.
	 */
	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	/* Initial reference, dropped via l2cap_chan_put() */
	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
465 
/* kref release callback for l2cap_chan_put(): unlink the channel from
 * the global list and free it.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
478 
/* Take a reference on channel @c. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
485 
/* Drop a reference on channel @c; the last put frees it via
 * l2cap_chan_destroy().
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
493 
494 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
495 {
496 	chan->fcs  = L2CAP_FCS_CRC16;
497 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
498 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
499 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
500 	chan->remote_max_tx = chan->max_tx;
501 	chan->remote_tx_win = chan->tx_win;
502 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
503 	chan->sec_level = BT_SECURITY_LOW;
504 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
505 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
506 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
507 	chan->conf_state = 0;
508 
509 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
510 }
511 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
512 
/* Reset LE credit-based flow-control state for a fresh channel.
 * @tx_credits: initial credits granted by the peer.
 * Assumes chan->mps ends up non-zero (imtu and conn->mtu within spec
 * minimums) — TODO confirm against callers.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	/* Drop any partially reassembled SDU state */
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	/* Give enough credits for a full packet */
	chan->rx_credits = (chan->imtu / chan->mps) + 1;

	skb_queue_head_init(&chan->tx_q);
}
526 
/* Attach @chan to @conn: pick CIDs/MTU according to the channel type,
 * install EFS defaults, take a channel reference (and usually an
 * hci_conn hold), and link the channel into conn->chan_l. Caller holds
 * conn->chan_lock — see l2cap_chan_add().
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* Default disconnect reason until something better is known */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Extended Flow Specification defaults (best-effort service) */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference dropped in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
578 
/* Like __l2cap_chan_add() but takes conn->chan_lock itself. */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
585 
/* Detach @chan from its connection and tear down its state.
 * @err is reported to the owner via ops->teardown. Drops the channel
 * reference and the hci_conn hold taken in __l2cap_chan_add() (the
 * latter only for channel types that took it), disconnects any AMP
 * logical link, and purges mode-specific queues/timers — unless
 * configuration never completed, in which case those were never set up.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	/* Nothing below was set up before configuration completed */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch(chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
656 
/* Work item: the remote's identity address changed on the underlying
 * hci_conn; refresh every channel's cached destination address/type.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_update_work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		/* Each channel is updated under its own lock */
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
675 
676 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
677 {
678 	struct l2cap_conn *conn = chan->conn;
679 	struct l2cap_le_conn_rsp rsp;
680 	u16 result;
681 
682 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
683 		result = L2CAP_CR_LE_AUTHORIZATION;
684 	else
685 		result = L2CAP_CR_LE_BAD_PSM;
686 
687 	l2cap_state_change(chan, BT_DISCONN);
688 
689 	rsp.dcid    = cpu_to_le16(chan->scid);
690 	rsp.mtu     = cpu_to_le16(chan->imtu);
691 	rsp.mps     = cpu_to_le16(chan->mps);
692 	rsp.credits = cpu_to_le16(chan->rx_credits);
693 	rsp.result  = cpu_to_le16(result);
694 
695 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
696 		       &rsp);
697 }
698 
699 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
700 {
701 	struct l2cap_conn *conn = chan->conn;
702 	struct l2cap_conn_rsp rsp;
703 	u16 result;
704 
705 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
706 		result = L2CAP_CR_SEC_BLOCK;
707 	else
708 		result = L2CAP_CR_BAD_PSM;
709 
710 	l2cap_state_change(chan, BT_DISCONN);
711 
712 	rsp.scid   = cpu_to_le16(chan->dcid);
713 	rsp.dcid   = cpu_to_le16(chan->scid);
714 	rsp.result = cpu_to_le16(result);
715 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
716 
717 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
718 }
719 
/* Close @chan according to its current state, reporting @reason to its
 * owner. Connected conn-oriented channels send a Disconnect Request and
 * arm a timer for the response; channels still being set up
 * (BT_CONNECT2) first reject the incoming request; everything else is
 * torn down or deleted directly. Caller holds the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Wait for the Disconnect Response; the timer
			 * bounds how long.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Reject the still-pending connect request */
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
762 
/* Map channel type, PSM and security level onto the HCI authentication
 * requirement used when securing the ACL link.
 * Side effect: SDP (conn-oriented) and 3DSP (connectionless) PSMs at
 * BT_SECURITY_LOW are upgraded to BT_SECURITY_SDP.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
813 
/* Service level security */
/* Ensure link security matches chan->sec_level. LE links delegate to
 * SMP; BR/EDR links derive an HCI auth requirement from the channel and
 * go through hci_conn_security(). Return semantics follow the
 * underlying helper (smp_conn_security()/hci_conn_security()).
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
828 
829 static u8 l2cap_get_ident(struct l2cap_conn *conn)
830 {
831 	u8 id;
832 
833 	/* Get next available identificator.
834 	 *    1 - 128 are used by kernel.
835 	 *  129 - 199 are reserved.
836 	 *  200 - 254 are used by utilities like l2ping, etc.
837 	 */
838 
839 	mutex_lock(&conn->ident_lock);
840 
841 	if (++conn->tx_ident > 128)
842 		conn->tx_ident = 1;
843 
844 	id = conn->tx_ident;
845 
846 	mutex_unlock(&conn->ident_lock);
847 
848 	return id;
849 }
850 
/* Build and transmit one signalling command on @conn's signalling
 * channel. The command is silently dropped when the skb cannot be
 * built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Signalling traffic always forces the radio active and gets
	 * top priority.
	 */
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
875 
876 static bool __chan_is_moving(struct l2cap_chan *chan)
877 {
878 	return chan->move_state != L2CAP_MOVE_STABLE &&
879 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
880 }
881 
/* Hand one PDU to the HCI layer for @chan. When a high-speed (AMP)
 * link is in use and the channel is not mid-move, the frame goes to the
 * AMP controller instead — or is dropped if the AMP hci_chan is gone.
 * Flush semantics are chosen from the link type and channel flags.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			/* AMP channel vanished; frame cannot be sent */
			kfree_skb(skb);

		return;
	}

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
913 
914 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
915 {
916 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
917 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
918 
919 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
920 		/* S-Frame */
921 		control->sframe = 1;
922 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
923 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
924 
925 		control->sar = 0;
926 		control->txseq = 0;
927 	} else {
928 		/* I-Frame */
929 		control->sframe = 0;
930 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
931 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
932 
933 		control->poll = 0;
934 		control->super = 0;
935 	}
936 }
937 
938 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
939 {
940 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
941 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
942 
943 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
944 		/* S-Frame */
945 		control->sframe = 1;
946 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
947 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
948 
949 		control->sar = 0;
950 		control->txseq = 0;
951 	} else {
952 		/* I-Frame */
953 		control->sframe = 0;
954 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
955 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
956 
957 		control->poll = 0;
958 		control->super = 0;
959 	}
960 }
961 
/* Strip the (enhanced or extended, per channel flags) control field
 * from the front of @skb and decode it into the skb's L2CAP control
 * block.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
975 
976 static u32 __pack_extended_control(struct l2cap_ctrl *control)
977 {
978 	u32 packed;
979 
980 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
981 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
982 
983 	if (control->sframe) {
984 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
985 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
986 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
987 	} else {
988 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
989 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
990 	}
991 
992 	return packed;
993 }
994 
995 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
996 {
997 	u16 packed;
998 
999 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1000 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1001 
1002 	if (control->sframe) {
1003 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1004 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1005 		packed |= L2CAP_CTRL_FRAME_TYPE;
1006 	} else {
1007 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1008 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1009 	}
1010 
1011 	return packed;
1012 }
1013 
/* Encode @control into the control-field slot of @skb, which must
 * already contain the L2CAP basic header at its front.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
1026 
1027 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1028 {
1029 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1030 		return L2CAP_EXT_HDR_SIZE;
1031 	else
1032 		return L2CAP_ENH_HDR_SIZE;
1033 }
1034 
/* Allocate and fill a complete S-frame PDU carrying the already-packed
 * raw @control field, appending an FCS when CRC16 is negotiated.
 * Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	/* Control field width depends on the negotiated control mode */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything written so far */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1067 
/* Finalize and transmit the supervisory frame described by @control:
 * piggyback a pending F-bit, maintain the RNR-sent flag, and record the
 * acknowledged sequence for non-SREJ frames. No-op for I-frame
 * controls or while an AMP channel move is in progress.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A queued F-bit rides on any non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		/* reqseq acknowledges everything below it, so the
		 * pending-ack timer can be stopped.
		 */
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1108 
1109 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1110 {
1111 	struct l2cap_ctrl control;
1112 
1113 	BT_DBG("chan %p, poll %d", chan, poll);
1114 
1115 	memset(&control, 0, sizeof(control));
1116 	control.sframe = 1;
1117 	control.poll = poll;
1118 
1119 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1120 		control.super = L2CAP_SUPER_RNR;
1121 	else
1122 		control.super = L2CAP_SUPER_RR;
1123 
1124 	control.reqseq = chan->buffer_seq;
1125 	l2cap_send_sframe(chan, &control);
1126 }
1127 
1128 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1129 {
1130 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1131 		return true;
1132 
1133 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1134 }
1135 
/* Decide whether @chan may be placed on an AMP controller: both ends of
 * the connection must advertise A2MP support, at least one powered
 * non-BR/EDR (AMP) controller must exist locally, and the channel
 * policy must prefer AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return false;

	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
		return false;

	/* Scan the global device list for any AMP controller that is up */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1163 
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	/* NOTE(review): validation is not implemented; every extended
	 * flow specification is currently accepted as-is.
	 */
	return true;
}
1169 
1170 void l2cap_send_conn_req(struct l2cap_chan *chan)
1171 {
1172 	struct l2cap_conn *conn = chan->conn;
1173 	struct l2cap_conn_req req;
1174 
1175 	req.scid = cpu_to_le16(chan->scid);
1176 	req.psm  = chan->psm;
1177 
1178 	chan->ident = l2cap_get_ident(conn);
1179 
1180 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1181 
1182 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1183 }
1184 
1185 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1186 {
1187 	struct l2cap_create_chan_req req;
1188 	req.scid = cpu_to_le16(chan->scid);
1189 	req.psm  = chan->psm;
1190 	req.amp_id = amp_id;
1191 
1192 	chan->ident = l2cap_get_ident(chan->conn);
1193 
1194 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1195 		       sizeof(req), &req);
1196 }
1197 
/* Prepare an ERTM channel for a move to another controller: stop all
 * timers, reset retransmission state and park the TX/RX state machines
 * until the move completes.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	/* Only ERTM channels carry per-frame state that needs resetting */
	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Frames at the head of tx_q have been sent at least once; reset
	 * their retry count to 1 so they may be retransmitted on the new
	 * link.  Stop at the first frame that was never transmitted.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->l2cap.retries)
			bt_cb(skb)->l2cap.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmissions until the move has finished */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1232 
1233 static void l2cap_move_done(struct l2cap_chan *chan)
1234 {
1235 	u8 move_role = chan->move_role;
1236 	BT_DBG("chan %p", chan);
1237 
1238 	chan->move_state = L2CAP_MOVE_STABLE;
1239 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1240 
1241 	if (chan->mode != L2CAP_MODE_ERTM)
1242 		return;
1243 
1244 	switch (move_role) {
1245 	case L2CAP_MOVE_ROLE_INITIATOR:
1246 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1247 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1248 		break;
1249 	case L2CAP_MOVE_ROLE_RESPONDER:
1250 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1251 		break;
1252 	}
1253 }
1254 
/* Transition @chan to BT_CONNECTED and notify its owner.  Safe to call
 * more than once; subsequent calls are no-ops.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	/* An LE flow-control channel without TX credits cannot send yet */
	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1275 
/* Send an LE Credit Based Connection Request for @chan.  Only ever sent
 * once per channel; repeated calls are no-ops.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Must run before req is filled in: this initializes the rx
	 * credit/MPS state advertised below.
	 */
	l2cap_le_flowctl_init(chan, 0);

	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1297 
1298 static void l2cap_le_start(struct l2cap_chan *chan)
1299 {
1300 	struct l2cap_conn *conn = chan->conn;
1301 
1302 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1303 		return;
1304 
1305 	if (!chan->psm) {
1306 		l2cap_chan_ready(chan);
1307 		return;
1308 	}
1309 
1310 	if (chan->state == BT_CONNECT)
1311 		l2cap_le_connect(chan);
1312 }
1313 
1314 static void l2cap_start_connection(struct l2cap_chan *chan)
1315 {
1316 	if (__amp_capable(chan)) {
1317 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1318 		a2mp_discover_amp(chan);
1319 	} else if (chan->conn->hcon->type == LE_LINK) {
1320 		l2cap_le_start(chan);
1321 	} else {
1322 		l2cap_send_conn_req(chan);
1323 	}
1324 }
1325 
/* Start the information request procedure (feature mask query) on
 * @conn, unless it was already started.  Arms a timeout so the
 * connection makes progress even if the peer never answers.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1343 
/* Try to advance @chan towards connection.  On ACL links the feature
 * mask exchange must complete first; the connect continues from
 * l2cap_conn_start() / l2cap_info_timeout() once it has.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* Kick off the feature mask query if not yet sent */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	/* Wait for the query to finish before connecting */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (l2cap_chan_check_security(chan, true) &&
	    __l2cap_no_conn_pending(chan))
		l2cap_start_connection(chan);
}
1365 
1366 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1367 {
1368 	u32 local_feat_mask = l2cap_feat_mask;
1369 	if (!disable_ertm)
1370 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1371 
1372 	switch (mode) {
1373 	case L2CAP_MODE_ERTM:
1374 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1375 	case L2CAP_MODE_STREAMING:
1376 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1377 	default:
1378 		return 0x00;
1379 	}
1380 }
1381 
/* Send a Disconnect Request for @chan and move it to BT_DISCONN with
 * @err as the pending error.  A2MP channels have no signalling
 * disconnect; they just change state.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* Stop ERTM timers before tearing the channel down */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1408 
1409 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn once the information exchange has
 * finished: start outgoing connects that are now allowed, and answer
 * incoming connects that were parked in BT_CONNECT2.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		/* Connectionless channels need no setup procedure */
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close right away when the required mode is not
			 * supported by the remote and this device cannot
			 * fall back to another mode.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let the channel owner decide
					 * whether to accept.
					 */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				/* Security procedure still in progress */
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Configuration starts only after a success
			 * response and only once per channel.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1486 
/* Handle an LE link becoming ready: start any pending security work
 * and, if needed, bring the connection parameters into range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE slave connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1519 
/* Called when the underlying HCI link is fully established: kick every
 * channel forward according to link type, then release any frames that
 * were queued while the connection was still coming up.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* On ACL links start the feature mask exchange immediately */
	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels have their own setup handling */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless channels are ready as soon as the
			 * feature exchange is done.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process frames received before the connection was ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1560 
/* Notify sockets that we cannot guarantee reliability anymore */
1562 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1563 {
1564 	struct l2cap_chan *chan;
1565 
1566 	BT_DBG("conn %p", conn);
1567 
1568 	mutex_lock(&conn->chan_lock);
1569 
1570 	list_for_each_entry(chan, &conn->chan_l, list) {
1571 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1572 			l2cap_chan_set_err(chan, err);
1573 	}
1574 
1575 	mutex_unlock(&conn->chan_lock);
1576 }
1577 
/* Information request timed out: treat the exchange as done (with no
 * remote features learned) so pending channels can still progress.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1588 
1589 /*
1590  * l2cap_user
1591  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1592  * callback is called during registration. The ->remove callback is called
1593  * during unregistration.
1594  * An l2cap_user object can either be explicitly unregistered or when the
1595  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1596  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1597  * External modules must own a reference to the l2cap_conn object if they intend
1598  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1599  * any time if they don't.
1600  */
1601 
/* Register @user on @conn and invoke its ->probe callback.
 * Returns 0 on success, -EINVAL if @user is already registered
 * somewhere, -ENODEV if the connection is already torn down, or the
 * error returned by ->probe.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-empty list node means this user is already registered */
	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1639 
1640 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1641 {
1642 	struct hci_dev *hdev = conn->hcon->hdev;
1643 
1644 	hci_dev_lock(hdev);
1645 
1646 	if (list_empty(&user->list))
1647 		goto out_unlock;
1648 
1649 	list_del_init(&user->list);
1650 	user->remove(conn, user);
1651 
1652 out_unlock:
1653 	hci_dev_unlock(hdev);
1654 }
1655 EXPORT_SYMBOL(l2cap_unregister_user);
1656 
/* Remove and notify every registered l2cap_user on @conn.  Popping the
 * head each iteration keeps the walk safe even if a ->remove callback
 * mutates the list.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del_init(&user->list);
		user->remove(conn, user);
	}
}
1667 
/* Tear down the L2CAP state attached to @hcon: cancel pending work,
 * notify users, close every channel with @err and drop the connection
 * reference.  Safe to call when no l2cap_data is attached.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so it survives until ops->close ran */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* Detach from the hci_conn; conn may be freed by this put */
	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1723 
/* kref release callback: drop the hci_conn reference held by the
 * l2cap_conn and free it.  Invoked from l2cap_conn_put().
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1731 
/* Take a reference on @conn; returns @conn for call chaining. */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1738 
/* Drop a reference on @conn; frees it via l2cap_conn_free() when the
 * last reference goes away.
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1744 
1745 /* ---- Socket interface ---- */
1746 
/* Find socket with psm and source / destination bdaddr.
 * Returns closest match: an exact src/dst match wins, otherwise the
 * last candidate with wildcard (BDADDR_ANY) addresses is used.
 * The returned channel carries a reference; caller must drop it with
 * l2cap_chan_put().
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* The channel's source address type must match the link
		 * type it would be used on.
		 */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				l2cap_chan_hold(c);
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		l2cap_chan_hold(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1798 
/* ERTM monitor timer expired: feed the TX state machine a monitor
 * timeout event.  The timer held a channel reference; it is dropped on
 * every exit path.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel already detached from its connection; nothing to do */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1819 
/* ERTM retransmission timer expired: feed the TX state machine a
 * retransmission timeout event.  The timer held a channel reference;
 * it is dropped on every exit path.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel already detached from its connection; nothing to do */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1839 
/* Transmit all queued I-frames in streaming mode: number each frame,
 * pack the control field, append the FCS if enabled and send.  There is
 * no acknowledgement or retransmission in this mode.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming frames acknowledge nothing */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1878 
/* Transmit as many pending I-frames as the ERTM transmit window and
 * channel state allow.  Each frame is kept in tx_q for possible
 * retransmission; only a clone is handed to the lower layer.
 * Returns the number of frames sent, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Peer signalled RNR; hold back until it is ready again */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Attach an owed F bit to the first frame sent */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Piggy-back an acknowledgement on each I-frame */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head to the next unsent frame */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1948 
/* Retransmit every sequence number queued on chan->retrans_list.
 * Each frame's control field and FCS are rewritten in place (on a
 * private copy if the original skb is shared).  Exceeding max_tx
 * retries tears the channel down.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	/* Peer signalled RNR; keep the retrans_list for later */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		/* Retry limit exhausted: give up on the channel */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the acknowledgement and F bit on the copy */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2029 
/* Retransmit the single frame identified by control->reqseq. */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2038 
/* Queue every unacknowledged frame starting at control->reqseq for
 * retransmission and resend them, unless the peer is busy (RNR).
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll obliges us to set the F bit in the next frame sent */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip forward to reqseq (or the first unsent frame) */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything already sent from there onwards */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2072 
/* Acknowledge received I-frames.  Sends RNR immediately when locally
 * busy; otherwise tries to piggy-back the ack on outgoing I-frames,
 * sends an explicit RR once ~3/4 of the ack window is pending, and
 * defers via the ack timer when fewer frames are outstanding.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Otherwise let the ack timer batch the acknowledgement */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2122 
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the skb's linear area, the remainder into MTU-sized
 * fragments chained on frag_list.  Returns bytes copied or a negative
 * error; on error the caller owns (and frees) @skb and its fragments.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link the fragment before copying so the caller can free
		 * the whole chain via @skb on a copy error.
		 */
		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
				   &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Account the fragment in the head skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2166 
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the PSM and the payload from @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Linear part is capped by the HCI MTU; the rest is fragmented */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2198 
/* Build a basic mode (B-frame) PDU: plain L2CAP header plus the payload
 * from @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	/* Linear part is capped by the HCI MTU; the rest is fragmented */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2228 
/* Build an ERTM/streaming I-frame PDU: L2CAP header, placeholder
 * control field, optional SDU length (for the first segment of a
 * segmented SDU, @sdulen != 0) and the payload.  The control field and
 * FCS are filled in at transmit time.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	/* Linear part is capped by the HCI MTU; the rest is fragmented */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2282 
/* Split an outgoing SDU from @msg into ERTM/streaming I-frame PDUs and
 * collect them on @seg_queue.  On any PDU build failure the queue is
 * purged and a negative errno is returned; on success returns 0.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;	/* total SDU length; sent only in the START PDU */
	size_t pdu_len;	/* payload capacity of each PDU */
	u8 sar;		/* SAR bits for the next PDU */

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in a single PDU: no SDU length field */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the START PDU carries the SDU length field */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			/* The remainder fits in one final PDU */
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2349 
2350 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2351 						   struct msghdr *msg,
2352 						   size_t len, u16 sdulen)
2353 {
2354 	struct l2cap_conn *conn = chan->conn;
2355 	struct sk_buff *skb;
2356 	int err, count, hlen;
2357 	struct l2cap_hdr *lh;
2358 
2359 	BT_DBG("chan %p len %zu", chan, len);
2360 
2361 	if (!conn)
2362 		return ERR_PTR(-ENOTCONN);
2363 
2364 	hlen = L2CAP_HDR_SIZE;
2365 
2366 	if (sdulen)
2367 		hlen += L2CAP_SDULEN_SIZE;
2368 
2369 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2370 
2371 	skb = chan->ops->alloc_skb(chan, hlen, count,
2372 				   msg->msg_flags & MSG_DONTWAIT);
2373 	if (IS_ERR(skb))
2374 		return skb;
2375 
2376 	/* Create L2CAP header */
2377 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2378 	lh->cid = cpu_to_le16(chan->dcid);
2379 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2380 
2381 	if (sdulen)
2382 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2383 
2384 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2385 	if (unlikely(err < 0)) {
2386 		kfree_skb(skb);
2387 		return ERR_PTR(err);
2388 	}
2389 
2390 	return skb;
2391 }
2392 
/* Split an outgoing SDU from @msg into LE flow-control PDUs on
 * @seg_queue.  The first PDU carries the 2-byte SDU length field, so
 * later PDUs can carry L2CAP_SDULEN_SIZE more payload than the first.
 * Returns 0 on success or a negative errno (queue purged).
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	/* First PDU: reserve room for the SDU length field */
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			/* Drop everything built so far */
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Only the first PDU carries the SDU length;
			 * later PDUs get that space back for payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2428 
2429 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2430 {
2431 	int sent = 0;
2432 
2433 	BT_DBG("chan %p", chan);
2434 
2435 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2436 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2437 		chan->tx_credits--;
2438 		sent++;
2439 	}
2440 
2441 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2442 	       skb_queue_len(&chan->tx_q));
2443 }
2444 
/* Send an SDU of @len bytes from @msg on @chan using the channel's
 * mode-specific path.  Returns the number of bytes accepted or a
 * negative errno.  The caller holds the channel lock; it may be
 * dropped and reacquired while skbs are allocated (see the state
 * rechecks below).
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have closed while segmenting (lock can be
		 * dropped for skb allocation); the purge is harmless if
		 * segmentation already failed and emptied the queue.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: ask the socket layer to stop feeding us */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2571 
2572 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2573 {
2574 	struct l2cap_ctrl control;
2575 	u16 seq;
2576 
2577 	BT_DBG("chan %p, txseq %u", chan, txseq);
2578 
2579 	memset(&control, 0, sizeof(control));
2580 	control.sframe = 1;
2581 	control.super = L2CAP_SUPER_SREJ;
2582 
2583 	for (seq = chan->expected_tx_seq; seq != txseq;
2584 	     seq = __next_seq(chan, seq)) {
2585 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2586 			control.reqseq = seq;
2587 			l2cap_send_sframe(chan, &control);
2588 			l2cap_seq_list_append(&chan->srej_list, seq);
2589 		}
2590 	}
2591 
2592 	chan->expected_tx_seq = __next_seq(chan, txseq);
2593 }
2594 
/* Send an SREJ S-frame for the last (tail) sequence number on the
 * srej_list; does nothing when the list is empty.
 */
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	/* Empty list: nothing outstanding to request */
	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
		return;

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
}
2610 
2611 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2612 {
2613 	struct l2cap_ctrl control;
2614 	u16 initial_head;
2615 	u16 seq;
2616 
2617 	BT_DBG("chan %p, txseq %u", chan, txseq);
2618 
2619 	memset(&control, 0, sizeof(control));
2620 	control.sframe = 1;
2621 	control.super = L2CAP_SUPER_SREJ;
2622 
2623 	/* Capture initial list head to allow only one pass through the list. */
2624 	initial_head = chan->srej_list.head;
2625 
2626 	do {
2627 		seq = l2cap_seq_list_pop(&chan->srej_list);
2628 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2629 			break;
2630 
2631 		control.reqseq = seq;
2632 		l2cap_send_sframe(chan, &control);
2633 		l2cap_seq_list_append(&chan->srej_list, seq);
2634 	} while (chan->srej_list.head != initial_head);
2635 }
2636 
/* Process an acknowledgment up to (but not including) @reqseq: free
 * every acked frame from the tx queue and stop the retransmission
 * timer once no frames remain unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this reqseq was already processed */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		/* The frame may no longer be queued for this seq */
		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	/* Everything acked: retransmission timer no longer needed */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2668 
/* Abandon the SREJ_SENT receive state: drop all out-of-order frames
 * buffered so far, rewind expected_tx_seq to the last in-order point
 * (buffer_seq) and return the receiver to the plain RECV state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2678 
/* Handle a tx state machine event while in the normal XMIT state:
 * data is queued and transmitted immediately, received ack info
 * releases frames, and poll events move the channel into WAIT_F.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* New outbound PDUs: queue them and start transmitting */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* We told the peer we were busy (RNR); poll it
			 * with RR and wait for the final response.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		/* Release frames acknowledged by the received reqseq */
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Poll the peer (P=1) and await its final response */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer for its state */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2750 
2751 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2752 				  struct l2cap_ctrl *control,
2753 				  struct sk_buff_head *skbs, u8 event)
2754 {
2755 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2756 	       event);
2757 
2758 	switch (event) {
2759 	case L2CAP_EV_DATA_REQUEST:
2760 		if (chan->tx_send_head == NULL)
2761 			chan->tx_send_head = skb_peek(skbs);
2762 		/* Queue data, but don't send. */
2763 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2764 		break;
2765 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2766 		BT_DBG("Enter LOCAL_BUSY");
2767 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2768 
2769 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2770 			/* The SREJ_SENT state must be aborted if we are to
2771 			 * enter the LOCAL_BUSY state.
2772 			 */
2773 			l2cap_abort_rx_srej_sent(chan);
2774 		}
2775 
2776 		l2cap_send_ack(chan);
2777 
2778 		break;
2779 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2780 		BT_DBG("Exit LOCAL_BUSY");
2781 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2782 
2783 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2784 			struct l2cap_ctrl local_control;
2785 			memset(&local_control, 0, sizeof(local_control));
2786 			local_control.sframe = 1;
2787 			local_control.super = L2CAP_SUPER_RR;
2788 			local_control.poll = 1;
2789 			local_control.reqseq = chan->buffer_seq;
2790 			l2cap_send_sframe(chan, &local_control);
2791 
2792 			chan->retry_count = 1;
2793 			__set_monitor_timer(chan);
2794 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2795 		}
2796 		break;
2797 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2798 		l2cap_process_reqseq(chan, control->reqseq);
2799 
2800 		/* Fall through */
2801 
2802 	case L2CAP_EV_RECV_FBIT:
2803 		if (control && control->final) {
2804 			__clear_monitor_timer(chan);
2805 			if (chan->unacked_frames > 0)
2806 				__set_retrans_timer(chan);
2807 			chan->retry_count = 0;
2808 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2809 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2810 		}
2811 		break;
2812 	case L2CAP_EV_EXPLICIT_POLL:
2813 		/* Ignore */
2814 		break;
2815 	case L2CAP_EV_MONITOR_TO:
2816 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2817 			l2cap_send_rr_or_rnr(chan, 1);
2818 			__set_monitor_timer(chan);
2819 			chan->retry_count++;
2820 		} else {
2821 			l2cap_send_disconn_req(chan, ECONNABORTED);
2822 		}
2823 		break;
2824 	default:
2825 		break;
2826 	}
2827 }
2828 
2829 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2830 		     struct sk_buff_head *skbs, u8 event)
2831 {
2832 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2833 	       chan, control, skbs, event, chan->tx_state);
2834 
2835 	switch (chan->tx_state) {
2836 	case L2CAP_TX_STATE_XMIT:
2837 		l2cap_tx_state_xmit(chan, control, skbs, event);
2838 		break;
2839 	case L2CAP_TX_STATE_WAIT_F:
2840 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2841 		break;
2842 	default:
2843 		/* Ignore event */
2844 		break;
2845 	}
2846 }
2847 
/* Feed the reqseq and F-bit of a received frame into the tx state
 * machine so acknowledged frames are released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2854 
/* Feed only the F-bit of a received frame into the tx state machine
 * (no reqseq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2861 
2862 /* Copy frame to all raw sockets on that connection */
2863 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2864 {
2865 	struct sk_buff *nskb;
2866 	struct l2cap_chan *chan;
2867 
2868 	BT_DBG("conn %p", conn);
2869 
2870 	mutex_lock(&conn->chan_lock);
2871 
2872 	list_for_each_entry(chan, &conn->chan_l, list) {
2873 		if (chan->chan_type != L2CAP_CHAN_RAW)
2874 			continue;
2875 
2876 		/* Don't send frame to the channel it came from */
2877 		if (bt_cb(skb)->l2cap.chan == chan)
2878 			continue;
2879 
2880 		nskb = skb_clone(skb, GFP_KERNEL);
2881 		if (!nskb)
2882 			continue;
2883 		if (chan->ops->recv(chan, nskb))
2884 			kfree_skb(nskb);
2885 	}
2886 
2887 	mutex_unlock(&conn->chan_lock);
2888 }
2889 
2890 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: basic L2CAP header on the (LE or
 * BR/EDR) signalling CID, command header (code/ident/len), then @dlen
 * bytes of @data.  Data beyond the connection MTU is carried in
 * continuation fragments linked on frag_list.  Returns NULL on
 * allocation failure or when the MTU cannot even hold the headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* Not even the L2CAP and command headers would fit */
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	/* Total frame length; first fragment is capped by the MTU */
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Payload bytes that still fit in the first fragment */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	/* Bytes of payload left for continuation fragments */
	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb together with any fragments built so far */
	kfree_skb(skb);
	return NULL;
}
2956 
2957 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2958 				     unsigned long *val)
2959 {
2960 	struct l2cap_conf_opt *opt = *ptr;
2961 	int len;
2962 
2963 	len = L2CAP_CONF_OPT_SIZE + opt->len;
2964 	*ptr += len;
2965 
2966 	*type = opt->type;
2967 	*olen = opt->len;
2968 
2969 	switch (opt->len) {
2970 	case 1:
2971 		*val = *((u8 *) opt->val);
2972 		break;
2973 
2974 	case 2:
2975 		*val = get_unaligned_le16(opt->val);
2976 		break;
2977 
2978 	case 4:
2979 		*val = get_unaligned_le32(opt->val);
2980 		break;
2981 
2982 	default:
2983 		*val = (unsigned long) opt->val;
2984 		break;
2985 	}
2986 
2987 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2988 	return len;
2989 }
2990 
2991 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
2992 {
2993 	struct l2cap_conf_opt *opt = *ptr;
2994 
2995 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2996 
2997 	if (size < L2CAP_CONF_OPT_SIZE + len)
2998 		return;
2999 
3000 	opt->type = type;
3001 	opt->len  = len;
3002 
3003 	switch (len) {
3004 	case 1:
3005 		*((u8 *) opt->val)  = val;
3006 		break;
3007 
3008 	case 2:
3009 		put_unaligned_le16(val, opt->val);
3010 		break;
3011 
3012 	case 4:
3013 		put_unaligned_le32(val, opt->val);
3014 		break;
3015 
3016 	default:
3017 		memcpy(opt->val, (void *) val, len);
3018 		break;
3019 	}
3020 
3021 	*ptr += L2CAP_CONF_OPT_SIZE + len;
3022 }
3023 
3024 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3025 {
3026 	struct l2cap_conf_efs efs;
3027 
3028 	switch (chan->mode) {
3029 	case L2CAP_MODE_ERTM:
3030 		efs.id		= chan->local_id;
3031 		efs.stype	= chan->local_stype;
3032 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3033 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3034 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3035 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3036 		break;
3037 
3038 	case L2CAP_MODE_STREAMING:
3039 		efs.id		= 1;
3040 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3041 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3042 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3043 		efs.acc_lat	= 0;
3044 		efs.flush_to	= 0;
3045 		break;
3046 
3047 	default:
3048 		return;
3049 	}
3050 
3051 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3052 			   (unsigned long) &efs, size);
3053 }
3054 
/* Deferred-ack work item: if any received frames are still unacked
 * (buffer_seq has moved past last_acked_seq) when the ack timer fires,
 * acknowledge them with an RR/RNR S-frame.  Drops the channel
 * reference held for this delayed work.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Number of in-order frames received but not yet acknowledged */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3074 
/* Reset all ERTM/streaming sequencing state for a freshly configured
 * channel.  For ERTM this also arms the delayed-work timers and
 * allocates the SREJ and retransmission sequence lists.  Returns 0 on
 * success or a negative errno from the sequence list allocation.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	/* Reset sequence numbers and SDU reassembly state */
	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Default AMP channel-move state: stay on BR/EDR */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming mode needs no timers or sequence lists */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Undo the first allocation on failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3119 
3120 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3121 {
3122 	switch (mode) {
3123 	case L2CAP_MODE_STREAMING:
3124 	case L2CAP_MODE_ERTM:
3125 		if (l2cap_mode_supported(mode, remote_feat_mask))
3126 			return mode;
3127 		/* fall through */
3128 	default:
3129 		return L2CAP_MODE_BASIC;
3130 	}
3131 }
3132 
3133 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3134 {
3135 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3136 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3137 }
3138 
3139 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3140 {
3141 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3142 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3143 }
3144 
3145 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3146 				      struct l2cap_conf_rfc *rfc)
3147 {
3148 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3149 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3150 
3151 		/* Class 1 devices have must have ERTM timeouts
3152 		 * exceeding the Link Supervision Timeout.  The
3153 		 * default Link Supervision Timeout for AMP
3154 		 * controllers is 10 seconds.
3155 		 *
3156 		 * Class 1 devices use 0xffffffff for their
3157 		 * best-effort flush timeout, so the clamping logic
3158 		 * will result in a timeout that meets the above
3159 		 * requirement.  ERTM timeouts are 16-bit values, so
3160 		 * the maximum timeout is 65.535 seconds.
3161 		 */
3162 
3163 		/* Convert timeout to milliseconds and round */
3164 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3165 
3166 		/* This is the recommended formula for class 2 devices
3167 		 * that start ERTM timers when packets are sent to the
3168 		 * controller.
3169 		 */
3170 		ertm_to = 3 * ertm_to + 500;
3171 
3172 		if (ertm_to > 0xffff)
3173 			ertm_to = 0xffff;
3174 
3175 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3176 		rfc->monitor_timeout = rfc->retrans_timeout;
3177 	} else {
3178 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3179 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3180 	}
3181 }
3182 
3183 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3184 {
3185 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3186 	    __l2cap_ews_supported(chan->conn)) {
3187 		/* use extended control field */
3188 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3189 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3190 	} else {
3191 		chan->tx_win = min_t(u16, chan->tx_win,
3192 				     L2CAP_DEFAULT_TX_WINDOW);
3193 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3194 	}
3195 	chan->ack_win = chan->tx_win;
3196 }
3197 
/* Build an outgoing Configure Request for @chan into @data (capacity
 * @data_size).  On the first exchange the channel mode may first be
 * downgraded to whatever the remote's feature mask supports.  Returns
 * the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* STATE2 devices keep their requested mode as-is */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise a non-default receive MTU */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only send an explicit basic-mode RFC option when the
		 * remote supports other modes at all
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Cap the PDU size so a full frame (with worst-case
		 * header and FCS overhead) fits in the connection MTU
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* Windows above the legacy maximum go in an EWS option */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3319 
/* Parse the peer's accumulated Configure Request options (stored in
 * chan->conf_req / chan->conf_len by l2cap_config_req) and build our
 * Configure Response payload into @data (at most @data_size bytes).
 *
 * Returns the number of response bytes written, or -ECONNREFUSED when
 * the request is fundamentally incompatible and the channel must be
 * disconnected.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	/* Hard upper bound for every option we append; l2cap_add_conf_opt
	 * takes the remaining room so the response cannot overflow @data.
	 */
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	/* Defaults used when the peer omitted the corresponding option */
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: decode each option the peer sent.  Options with an
	 * unexpected length are skipped (break) rather than rejected.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint  = type & L2CAP_CONF_HINT;	/* hint options may be ignored */
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS option is accepted but not acted upon */
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			/* Remember that the peer offered to drop the FCS */
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended window size requires A2MP support */
			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
				return -ECONNREFUSED;
			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;
			/* Echo back the unknown, non-hint option type */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode selection happens only on the first request/response round */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			/* Not a state-2 device: we may fall back based on
			 * the peer's requested mode and the link features.
			 */
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State-2 device: our mode is fixed; refuse a mismatch */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		/* Propose our own mode back to the peer */
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Give up after one failed negotiation round */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			/* The traffic types must match unless either side is
			 * "no traffic"; otherwise reject the EFS option.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* With extended control the window came from the EWS
			 * option above; otherwise take it from the RFC option.
			 */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's PDU size to what fits in our ACL
			 * MTU after the extended header, SDU length and FCS.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				/* Accept the peer's EFS parameters verbatim */
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3544 
/* Parse a Configure Response (@rsp/@len) from the peer and build a new
 * Configure Request into @data (at most @size bytes), adjusting the
 * channel parameters the peer pushed back on.  On success the final
 * negotiated values are applied to @chan.  *@result may be downgraded
 * (e.g. to L2CAP_CONF_UNACCEPT) based on the response contents.
 *
 * Returns the number of request bytes written, or -ECONNREFUSED when
 * the response is incompatible with our configuration.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	/* Bound for every option written; prevents overflowing @data */
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			/* Never accept an MTU below the spec minimum */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* A state-2 device cannot change its mode */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Shrink our ack window to what the peer allows */
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Traffic types must be compatible (see request
			 * parsing) or the channel is refused.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			/* Only honored while the response is still PENDING */
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Basic mode is non-negotiable: the peer may not move us off it */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			/* Without extended control the RFC window caps our
			 * ack window (EWS handled it in the loop above).
			 */
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3662 
3663 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3664 				u16 result, u16 flags)
3665 {
3666 	struct l2cap_conf_rsp *rsp = data;
3667 	void *ptr = rsp->data;
3668 
3669 	BT_DBG("chan %p", chan);
3670 
3671 	rsp->scid   = cpu_to_le16(chan->dcid);
3672 	rsp->result = cpu_to_le16(result);
3673 	rsp->flags  = cpu_to_le16(flags);
3674 
3675 	return ptr - data;
3676 }
3677 
3678 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3679 {
3680 	struct l2cap_le_conn_rsp rsp;
3681 	struct l2cap_conn *conn = chan->conn;
3682 
3683 	BT_DBG("chan %p", chan);
3684 
3685 	rsp.dcid    = cpu_to_le16(chan->scid);
3686 	rsp.mtu     = cpu_to_le16(chan->imtu);
3687 	rsp.mps     = cpu_to_le16(chan->mps);
3688 	rsp.credits = cpu_to_le16(chan->rx_credits);
3689 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3690 
3691 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3692 		       &rsp);
3693 }
3694 
3695 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3696 {
3697 	struct l2cap_conn_rsp rsp;
3698 	struct l2cap_conn *conn = chan->conn;
3699 	u8 buf[128];
3700 	u8 rsp_code;
3701 
3702 	rsp.scid   = cpu_to_le16(chan->dcid);
3703 	rsp.dcid   = cpu_to_le16(chan->scid);
3704 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3705 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3706 
3707 	if (chan->hs_hcon)
3708 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3709 	else
3710 		rsp_code = L2CAP_CONN_RSP;
3711 
3712 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3713 
3714 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3715 
3716 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3717 		return;
3718 
3719 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3720 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3721 	chan->num_conf_req++;
3722 }
3723 
/* Extract the final RFC (and optional extended window size) options from
 * a successful Configure Response in @rsp/@len and apply the negotiated
 * values to @chan.  Only ERTM and streaming channels are affected.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			/* Skip options with a malformed length */
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control the EWS option carries the window;
		 * otherwise it comes from the RFC option.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
3779 
3780 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3781 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3782 				    u8 *data)
3783 {
3784 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3785 
3786 	if (cmd_len < sizeof(*rej))
3787 		return -EPROTO;
3788 
3789 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3790 		return 0;
3791 
3792 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3793 	    cmd->ident == conn->info_ident) {
3794 		cancel_delayed_work(&conn->info_timer);
3795 
3796 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3797 		conn->info_ident = 0;
3798 
3799 		l2cap_conn_start(conn);
3800 	}
3801 
3802 	return 0;
3803 }
3804 
/* Handle an incoming Connection Request (or AMP Create Channel Request,
 * depending on @rsp_code/@amp_id).  Looks up a listening channel for the
 * requested PSM, runs security and source-CID validity checks, creates
 * the new child channel and always sends a response.
 *
 * Returns the new channel, or NULL when the request was rejected.
 * Locking: takes conn->chan_lock and the parent channel lock for the
 * channel-creation section; the response is sent after both are dropped.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Populate the child channel from the underlying ACL link */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	/* Our CID for this channel, allocated by __l2cap_chan_add */
	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			/* Socket may want to defer the accept decision */
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security upgrade in progress */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange not finished yet: answer PENDING */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Start the feature mask exchange if we answered PEND/NO_INFO
	 * because it has not been done yet for this connection.
	 */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, kick off configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3947 
3948 static int l2cap_connect_req(struct l2cap_conn *conn,
3949 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3950 {
3951 	struct hci_dev *hdev = conn->hcon->hdev;
3952 	struct hci_conn *hcon = conn->hcon;
3953 
3954 	if (cmd_len < sizeof(struct l2cap_conn_req))
3955 		return -EPROTO;
3956 
3957 	hci_dev_lock(hdev);
3958 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3959 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3960 		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3961 	hci_dev_unlock(hdev);
3962 
3963 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3964 	return 0;
3965 }
3966 
3967 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3968 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3969 				    u8 *data)
3970 {
3971 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3972 	u16 scid, dcid, result, status;
3973 	struct l2cap_chan *chan;
3974 	u8 req[128];
3975 	int err;
3976 
3977 	if (cmd_len < sizeof(*rsp))
3978 		return -EPROTO;
3979 
3980 	scid   = __le16_to_cpu(rsp->scid);
3981 	dcid   = __le16_to_cpu(rsp->dcid);
3982 	result = __le16_to_cpu(rsp->result);
3983 	status = __le16_to_cpu(rsp->status);
3984 
3985 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3986 	       dcid, scid, result, status);
3987 
3988 	mutex_lock(&conn->chan_lock);
3989 
3990 	if (scid) {
3991 		chan = __l2cap_get_chan_by_scid(conn, scid);
3992 		if (!chan) {
3993 			err = -EBADSLT;
3994 			goto unlock;
3995 		}
3996 	} else {
3997 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3998 		if (!chan) {
3999 			err = -EBADSLT;
4000 			goto unlock;
4001 		}
4002 	}
4003 
4004 	err = 0;
4005 
4006 	l2cap_chan_lock(chan);
4007 
4008 	switch (result) {
4009 	case L2CAP_CR_SUCCESS:
4010 		l2cap_state_change(chan, BT_CONFIG);
4011 		chan->ident = 0;
4012 		chan->dcid = dcid;
4013 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4014 
4015 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4016 			break;
4017 
4018 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4019 			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
4020 		chan->num_conf_req++;
4021 		break;
4022 
4023 	case L2CAP_CR_PEND:
4024 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4025 		break;
4026 
4027 	default:
4028 		l2cap_chan_del(chan, ECONNREFUSED);
4029 		break;
4030 	}
4031 
4032 	l2cap_chan_unlock(chan);
4033 
4034 unlock:
4035 	mutex_unlock(&conn->chan_lock);
4036 
4037 	return err;
4038 }
4039 
4040 static inline void set_default_fcs(struct l2cap_chan *chan)
4041 {
4042 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4043 	 * sides request it.
4044 	 */
4045 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4046 		chan->fcs = L2CAP_FCS_NONE;
4047 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4048 		chan->fcs = L2CAP_FCS_CRC16;
4049 }
4050 
4051 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4052 				    u8 ident, u16 flags)
4053 {
4054 	struct l2cap_conn *conn = chan->conn;
4055 
4056 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4057 	       flags);
4058 
4059 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4060 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4061 
4062 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4063 		       l2cap_build_conf_rsp(chan, data,
4064 					    L2CAP_CONF_SUCCESS, flags), data);
4065 }
4066 
4067 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4068 				   u16 scid, u16 dcid)
4069 {
4070 	struct l2cap_cmd_rej_cid rej;
4071 
4072 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4073 	rej.scid = __cpu_to_le16(scid);
4074 	rej.dcid = __cpu_to_le16(dcid);
4075 
4076 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4077 }
4078 
/* Handle an incoming Configure Request.  Option data may arrive split
 * across several requests (continuation flag); fragments accumulate in
 * chan->conf_req until the final one arrives, at which point the whole
 * request is parsed and answered, and — if both directions are done —
 * the channel is brought up.
 *
 * Note: l2cap_get_chan_by_scid() returns the channel locked; the lock
 * is released at the "unlock" label.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only valid while connecting/configuring */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		/* Request was incompatible; tear the channel down */
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finalize and go ready */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own Configure Request has not gone out yet; send it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4187 
/* Handle an incoming Configure Response.  SUCCESS applies the final
 * parameters; PENDING may trigger a new negotiation round; UNACCEPT
 * retries with adjusted options up to L2CAP_CONF_MAX_CONF_RSP times;
 * anything else disconnects the channel.
 *
 * Note: l2cap_get_chan_by_scid() returns the channel locked; the lock
 * is released at the "done" label.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);	/* length of the option data */
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Apply the final negotiated RFC/EWS parameters */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* BR/EDR answers immediately; an AMP channel first
			 * needs its logical link brought up.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			/* Ensure the rejected options fit in our retry buf */
			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through */

	default:
		/* Retry budget exhausted or hard rejection: give up */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More response fragments are coming; wait for the last one */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finalize and go ready */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4300 
/* Handle an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response and tear the channel down.
 *
 * Teardown ordering matters: a reference is taken (l2cap_chan_hold)
 * before l2cap_chan_del() so the channel stays valid for ops->close()
 * after its lock is dropped; the final reference is released last.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4347 
/* Handle the peer's Disconnection Response to our earlier request and
 * finish tearing down the channel.
 *
 * Same teardown ordering as l2cap_disconnect_req(): hold a reference
 * across l2cap_chan_del()/unlock so ops->close() runs on a valid
 * channel, then drop the reference.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		/* Channel already gone; nothing to do */
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4386 
4387 static inline int l2cap_information_req(struct l2cap_conn *conn,
4388 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4389 					u8 *data)
4390 {
4391 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4392 	u16 type;
4393 
4394 	if (cmd_len != sizeof(*req))
4395 		return -EPROTO;
4396 
4397 	type = __le16_to_cpu(req->type);
4398 
4399 	BT_DBG("type 0x%4.4x", type);
4400 
4401 	if (type == L2CAP_IT_FEAT_MASK) {
4402 		u8 buf[8];
4403 		u32 feat_mask = l2cap_feat_mask;
4404 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4405 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4406 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4407 		if (!disable_ertm)
4408 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4409 				| L2CAP_FEAT_FCS;
4410 		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4411 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4412 				| L2CAP_FEAT_EXT_WINDOW;
4413 
4414 		put_unaligned_le32(feat_mask, rsp->data);
4415 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4416 			       buf);
4417 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4418 		u8 buf[12];
4419 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4420 
4421 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4422 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4423 		rsp->data[0] = conn->local_fixed_chan;
4424 		memset(rsp->data + 1, 0, 7);
4425 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4426 			       buf);
4427 	} else {
4428 		struct l2cap_info_rsp rsp;
4429 		rsp.type   = cpu_to_le16(type);
4430 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4431 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4432 			       &rsp);
4433 	}
4434 
4435 	return 0;
4436 }
4437 
/* Handle an incoming L2CAP Information Response on a BR/EDR link.
 *
 * Only the response matching the outstanding request identifier is
 * accepted; stale or duplicate responses are ignored.  A successful
 * feature-mask response may trigger a follow-up fixed-channels query
 * before pending channels are started via l2cap_conn_start().
 *
 * Returns 0, or -EPROTO if the packet is shorter than the fixed header.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer rejected the query; proceed without feature info */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer advertises fixed channels: query the
			 * bitmap before starting channel setup.
			 */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		/* Only the first octet (fixed channels 0-7) is consumed */
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4500 
/* Handle an incoming L2CAP Create Channel Request (AMP).
 *
 * Controller id 0 (AMP_ID_BREDR) is handled as a plain BR/EDR connect;
 * any other id must name a powered-up AMP controller or the request is
 * rejected with L2CAP_CR_BAD_AMP.  On success the new channel is bound
 * to the AMP manager and the connection MTU is adjusted to the AMP
 * controller's block MTU.
 *
 * Returns 0 (a protocol-level reject is sent instead of an error),
 * -EPROTO on a malformed packet, or -EINVAL when A2MP is not enabled
 * locally.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The high-speed link to the same peer must already exist */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* AMP links carry their own CRC; no L2CAP FCS needed */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4577 
4578 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4579 {
4580 	struct l2cap_move_chan_req req;
4581 	u8 ident;
4582 
4583 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4584 
4585 	ident = l2cap_get_ident(chan->conn);
4586 	chan->ident = ident;
4587 
4588 	req.icid = cpu_to_le16(chan->scid);
4589 	req.dest_amp_id = dest_amp_id;
4590 
4591 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4592 		       &req);
4593 
4594 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4595 }
4596 
4597 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4598 {
4599 	struct l2cap_move_chan_rsp rsp;
4600 
4601 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4602 
4603 	rsp.icid = cpu_to_le16(chan->dcid);
4604 	rsp.result = cpu_to_le16(result);
4605 
4606 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4607 		       sizeof(rsp), &rsp);
4608 }
4609 
4610 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4611 {
4612 	struct l2cap_move_chan_cfm cfm;
4613 
4614 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4615 
4616 	chan->ident = l2cap_get_ident(chan->conn);
4617 
4618 	cfm.icid = cpu_to_le16(chan->scid);
4619 	cfm.result = cpu_to_le16(result);
4620 
4621 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4622 		       sizeof(cfm), &cfm);
4623 
4624 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4625 }
4626 
4627 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4628 {
4629 	struct l2cap_move_chan_cfm cfm;
4630 
4631 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4632 
4633 	cfm.icid = cpu_to_le16(icid);
4634 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4635 
4636 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4637 		       sizeof(cfm), &cfm);
4638 }
4639 
4640 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4641 					 u16 icid)
4642 {
4643 	struct l2cap_move_chan_cfm_rsp rsp;
4644 
4645 	BT_DBG("icid 0x%4.4x", icid);
4646 
4647 	rsp.icid = cpu_to_le16(icid);
4648 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4649 }
4650 
/* Drop the channel's references to its high-speed logical link.
 * Actual teardown of the link itself is not implemented yet.
 */
static void __release_logical_link(struct l2cap_chan *chan)
{
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
}
4658 
/* React to a failed logical link setup.
 *
 * For a channel still being created the channel is disconnected; for an
 * established channel the pending move is aborted according to the
 * local move role.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4689 
/* Complete channel creation once the AMP logical link is up: attach the
 * hci_chan, acknowledge the EFS configuration, and if configuration has
 * already finished on our side, initialize ERTM and mark the channel
 * ready (or disconnect on ERTM init failure).
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4712 
/* Advance the move state machine after the logical link came up during
 * a channel move.  Which move signal is sent next depends on whether we
 * are still waiting for the peer's response, are locally busy, and on
 * our role in the move.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4746 
/* Logical link confirmation callback; call with chan locked.
 *
 * A non-zero status means the link setup failed and the in-progress
 * create or move is aborted.  On success the link completes either a
 * channel creation (channel not yet BT_CONNECTED) or a channel move.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4767 
4768 void l2cap_move_start(struct l2cap_chan *chan)
4769 {
4770 	BT_DBG("chan %p", chan);
4771 
4772 	if (chan->local_amp_id == AMP_ID_BREDR) {
4773 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4774 			return;
4775 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4776 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4777 		/* Placeholder - start physical link setup */
4778 	} else {
4779 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4780 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4781 		chan->move_id = 0;
4782 		l2cap_move_setup(chan);
4783 		l2cap_send_move_chan_req(chan, 0);
4784 	}
4785 }
4786 
/* Continue channel creation after the physical link result is known.
 *
 * Outgoing channels (BT_CONNECT) either proceed with a Create Channel
 * Request on the AMP or fall back to a plain BR/EDR connect.  Incoming
 * channels answer the peer's Create Channel Request and, on success,
 * enter configuration by sending the first Configure Request.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* AMP links provide their own CRC; no L2CAP FCS needed */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Accepted: move straight into configuration */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}
	}
}
4838 
/* As move initiator, prepare the channel and send the Move Channel
 * Request towards @remote_amp_id, then wait for the peer's response.
 */
static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
				   u8 remote_amp_id)
{
	l2cap_move_setup(chan);
	chan->move_id = local_amp_id;
	chan->move_state = L2CAP_MOVE_WAIT_RSP;

	l2cap_send_move_chan_req(chan, remote_amp_id);
}
4848 
4849 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4850 {
4851 	struct hci_chan *hchan = NULL;
4852 
4853 	/* Placeholder - get hci_chan for logical link */
4854 
4855 	if (hchan) {
4856 		if (hchan->state == BT_CONNECTED) {
4857 			/* Logical link is ready to go */
4858 			chan->hs_hcon = hchan->conn;
4859 			chan->hs_hcon->l2cap_data = chan->conn;
4860 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4861 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4862 
4863 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4864 		} else {
4865 			/* Wait for logical link to be ready */
4866 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4867 		}
4868 	} else {
4869 		/* Logical link not available */
4870 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4871 	}
4872 }
4873 
4874 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4875 {
4876 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4877 		u8 rsp_result;
4878 		if (result == -EINVAL)
4879 			rsp_result = L2CAP_MR_BAD_ID;
4880 		else
4881 			rsp_result = L2CAP_MR_NOT_ALLOWED;
4882 
4883 		l2cap_send_move_chan_rsp(chan, rsp_result);
4884 	}
4885 
4886 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
4887 	chan->move_state = L2CAP_MOVE_STABLE;
4888 
4889 	/* Restart data transmission */
4890 	l2cap_ertm_send(chan);
4891 }
4892 
/* Physical link confirmation; invoke with locked chan.
 *
 * NOTE: when the channel is already disconnecting/closed this function
 * unlocks the channel itself before returning early, so callers must
 * not unlock again on that path.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Channel is still being created */
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4926 
/* Handle an incoming Move Channel Request.
 *
 * Validates that the channel exists, may be moved (dynamic CID, ERTM or
 * streaming mode, policy allows it), that the destination controller is
 * valid, and resolves move collisions by bd_addr comparison.  A Move
 * Channel Response with the computed result is always sent.
 *
 * l2cap_get_chan_by_dcid() returns the channel locked; every exit path
 * below the lookup unlocks it.
 *
 * Returns 0, -EPROTO on a malformed packet, or -EINVAL when A2MP is not
 * enabled locally.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* No such channel: reply directly, nothing to unlock */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision.  Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
5024 
/* Advance a move on receiving a SUCCESS or PEND Move Channel Response.
 *
 * l2cap_get_chan_by_scid() returns the channel locked; it is unlocked
 * before returning.  If the channel cannot be found, an UNCONFIRMED
 * confirm is sent for the bare icid as required by the spec.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending result re-arms the extended response timer */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5114 
/* Handle a failing Move Channel Response.
 *
 * A COLLISION result flips an initiator into the responder role (the
 * peer won the collision); any other failure cancels the move.  An
 * UNCONFIRMED confirm is always sent to close the transaction.
 *
 * l2cap_get_chan_by_ident() returns the channel locked; it is unlocked
 * before returning.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5143 
5144 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5145 				  struct l2cap_cmd_hdr *cmd,
5146 				  u16 cmd_len, void *data)
5147 {
5148 	struct l2cap_move_chan_rsp *rsp = data;
5149 	u16 icid, result;
5150 
5151 	if (cmd_len != sizeof(*rsp))
5152 		return -EPROTO;
5153 
5154 	icid = le16_to_cpu(rsp->icid);
5155 	result = le16_to_cpu(rsp->result);
5156 
5157 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5158 
5159 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5160 		l2cap_move_continue(conn, icid, result);
5161 	else
5162 		l2cap_move_fail(conn, cmd->ident, icid, result);
5163 
5164 	return 0;
5165 }
5166 
/* Handle an incoming Move Channel Confirm.
 *
 * Commits (or reverts) the controller switch when the channel was
 * waiting for this confirm, and always answers with a Confirm Response
 * as the spec requires, even for an unknown icid.
 *
 * l2cap_get_chan_by_dcid() returns the channel locked; it is unlocked
 * before returning.  Returns 0, or -EPROTO on a malformed packet.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			chan->local_amp_id = chan->move_id;
			/* Back on BR/EDR: the AMP link is no longer used */
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5208 
/* Handle an incoming Move Channel Confirm Response, completing the move
 * on the initiator side if it was waiting for this acknowledgment.
 *
 * l2cap_get_chan_by_scid() returns the channel locked; it is unlocked
 * before returning.  Returns 0, or -EPROTO on a malformed packet.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		/* Back on BR/EDR: release the now-unused AMP link */
		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5243 
/* Handle an LE Connection Parameter Update Request.
 *
 * Only valid when the local device is the LE central (master); the
 * peripheral proposes new parameters which are validated and, if
 * accepted, applied to the link and reported to the management
 * interface.  The response is sent before the link is updated.
 *
 * Returns 0, -EINVAL when not master, or -EPROTO on a malformed packet.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
5293 
/* Handle an LE Credit Based Connection Response.
 *
 * On success the channel is bound to the remote CID and flow-control
 * parameters and marked ready.  Security failures may trigger an SMP
 * security upgrade followed by a retried connect; any other result
 * deletes the channel.
 *
 * Locking: takes conn->chan_lock, then the channel lock (in that
 * order).  Returns 0, -EPROTO on malformed/invalid packets, or
 * -EBADSLT when no matching channel (or a duplicate dcid) exists.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* 23 is the spec minimum for LE CoC MTU/MPS; the dcid must be in
	 * the LE dynamic range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Reject a dcid that is already in use by another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5380 
/* Dispatch a single BR/EDR signaling command to its handler.
 *
 * Echo requests are answered inline by reflecting the payload.  An
 * unknown opcode yields -EINVAL, which causes the caller to send a
 * Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo back the request payload unchanged */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5460 
/* Handle an LE Credit Based Connection Request.
 *
 * Finds a listening channel for the requested PSM, validates security
 * and the proposed scid, and either accepts (possibly deferring to the
 * socket layer) or rejects the connection.  Except for the deferred
 * case, an LE Connect Response is always sent.
 *
 * Locking: takes conn->chan_lock then the parent channel lock.  The
 * parent channel reference taken by l2cap_global_chan_by_psm() is
 * dropped on the unlock path.
 *
 * Returns 0, or -EPROTO on a malformed or out-of-spec request.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 is the spec minimum for LE CoC MTU/MPS */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5586 
/* Handle an LE Flow Control Credit packet: add the indicated credits
 * to the channel's TX budget and resume transmission if it had stalled
 * waiting for credits.  Returns 0 when handled, -EPROTO for a
 * malformed PDU and -EBADSLT for an unknown CID (both of which make
 * the caller send a command reject).
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid	= __le16_to_cpu(pkt->cid);
	credits	= __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	/* Returns the channel locked; every exit below unlocks it */
	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* The credit total must never exceed LE_FLOWCTL_MAX_CREDITS
	 * (65535); a peer pushing past that is misbehaving, so the
	 * channel is disconnected.
	 */
	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		return 0;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

	l2cap_chan_unlock(chan);

	return 0;
}
5632 
5633 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5634 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5635 				       u8 *data)
5636 {
5637 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5638 	struct l2cap_chan *chan;
5639 
5640 	if (cmd_len < sizeof(*rej))
5641 		return -EPROTO;
5642 
5643 	mutex_lock(&conn->chan_lock);
5644 
5645 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5646 	if (!chan)
5647 		goto done;
5648 
5649 	l2cap_chan_lock(chan);
5650 	l2cap_chan_del(chan, ECONNREFUSED);
5651 	l2cap_chan_unlock(chan);
5652 
5653 done:
5654 	mutex_unlock(&conn->chan_lock);
5655 	return 0;
5656 }
5657 
/* Dispatch a single LE signaling command to its handler.
 *
 * Only the request-type commands propagate handler errors back to the
 * caller (which then sends a command reject); errors from the
 * response/reject handlers are ignored so that no reject is generated
 * for them.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		/* Nothing to do with the response */
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5704 
/* Receive handler for the LE signaling channel.
 *
 * An LE signaling PDU carries exactly one command; its length field
 * must match the remaining payload.  Malformed PDUs (or PDUs arriving
 * on a non-LE link) are silently dropped; a handler error triggers a
 * Command Reject (Not Understood).  The skb is always consumed.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): message text is misleading -- err here is
		 * a handler/dispatch failure, not a link type problem.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5745 
/* Receive handler for the BR/EDR signaling channel.
 *
 * A single ACL frame may pack several signaling commands; each one is
 * parsed and dispatched in turn.  A corrupted header aborts parsing of
 * the remainder of the frame, while a handler error triggers a Command
 * Reject for that command only.  The skb is always consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw (SOCK_RAW) listeners a copy before processing */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			/* NOTE(review): message text is misleading -- err
			 * here is a handler failure, not a link type issue.
			 */
			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5794 
5795 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
5796 {
5797 	u16 our_fcs, rcv_fcs;
5798 	int hdr_size;
5799 
5800 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5801 		hdr_size = L2CAP_EXT_HDR_SIZE;
5802 	else
5803 		hdr_size = L2CAP_ENH_HDR_SIZE;
5804 
5805 	if (chan->fcs == L2CAP_FCS_CRC16) {
5806 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5807 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5808 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5809 
5810 		if (our_fcs != rcv_fcs)
5811 			return -EBADMSG;
5812 	}
5813 	return 0;
5814 }
5815 
/* Respond to a poll (P-bit) in ERTM by making sure exactly one frame
 * carrying the F-bit goes out: RNR when locally busy, otherwise any
 * pending I-frames, and as a last resort an RR s-frame if nothing sent
 * above consumed the F-bit (CONN_SEND_FBIT).
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just left its busy state: restart the retransmission
	 * timer if frames are still waiting to be acknowledged.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5849 
5850 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5851 			    struct sk_buff **last_frag)
5852 {
5853 	/* skb->len reflects data in skb as well as all fragments
5854 	 * skb->data_len reflects only data in fragments
5855 	 */
5856 	if (!skb_has_frag_list(skb))
5857 		skb_shinfo(skb)->frag_list = new_frag;
5858 
5859 	new_frag->next = NULL;
5860 
5861 	(*last_frag)->next = new_frag;
5862 	*last_frag = new_frag;
5863 
5864 	skb->len += new_frag->len;
5865 	skb->data_len += new_frag->len;
5866 	skb->truesize += new_frag->truesize;
5867 }
5868 
/* Reassemble a received SDU from I-frames according to the frame's SAR
 * (segmentation and reassembly) field.  A complete SDU is handed to
 * chan->ops->recv(), which takes ownership of the skb.  Returns 0 on
 * success or while further segments are expected; on error both the
 * frame and any partially assembled SDU are freed.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	/* Any SAR sequence violation falls through with err == -EINVAL */
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* Unsegmented frame must not arrive mid-reassembly */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* Start frame must not arrive mid-reassembly either */
		if (chan->sdu)
			break;

		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start segment must be smaller than the announced
		 * SDU length, otherwise it is a protocol violation.
		 */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by chan->sdu; keep it off the error
		 * path's kfree below.
		 */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Continuation may not complete (or overrun) the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* End frame must make the SDU exactly the announced size */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5953 
/* Re-segment queued outgoing data after a channel move changed the
 * usable MTU (see l2cap_finish_move()).  Not implemented yet; always
 * reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5959 
5960 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5961 {
5962 	u8 event;
5963 
5964 	if (chan->mode != L2CAP_MODE_ERTM)
5965 		return;
5966 
5967 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5968 	l2cap_tx(chan, NULL, NULL, event);
5969 }
5970 
/* Drain the SREJ queue: feed consecutively numbered frames into SDU
 * reassembly until a sequence gap, a reassembly error, or a local busy
 * condition stops us.  Once the queue is empty the channel returns to
 * the RECV state and the delivered frames are acknowledged.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
6004 
/* Handle a received SREJ s-frame: selectively retransmit the single
 * I-frame the peer asked for.  Disconnects on an invalid reqseq or
 * when that frame has exhausted its retransmission limit.  The
 * CONN_SREJ_ACT bit with srej_save_reqseq guards against sending the
 * same retransmission twice when a poll and its final response both
 * refer to it.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq asks for a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means an unlimited number of retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if it was already done in
			 * response to the poll for this same reqseq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6062 
/* Handle a received REJ s-frame: retransmit all unacknowledged
 * I-frames starting at reqseq.  Disconnects on an invalid reqseq or a
 * frame past its retransmission limit.  CONN_REJ_ACT prevents a
 * duplicate bulk retransmission when the matching final (F-bit)
 * response arrives later.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq rejects a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means an unlimited number of retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Skip retransmission if it was already triggered by
		 * the poll this final frame answers.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6099 
/* Classify the txseq of a received I-frame relative to the receive
 * window and, while in the SREJ_SENT state, relative to the list of
 * outstanding SREJ requests.  The returned L2CAP_TXSEQ_* value drives
 * the RX state machines' handling of the frame: deliver, queue,
 * ignore, or disconnect.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6185 
/* ERTM RX state machine, RECV (normal operation) state.
 *
 * In-sequence I-frames are delivered to SDU reassembly; a sequence gap
 * sends SREJ requests and switches to the SREJ_SENT state.  S-frames
 * acknowledge TX data and drive retransmission.  Unless a frame was
 * queued or consumed (skb_in_use), the skb is freed before returning.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already delivered; only the piggybacked ack
			 * information is still useful.
			 */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		/* NOTE(review): control cannot be NULL on this path; the
		 * extra NULL check looks redundant.
		 */
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6319 
/* ERTM RX state machine, SREJ_SENT state (recovering missing frames).
 *
 * Received I-frames are parked in srej_q until the requested
 * retransmissions fill the sequence gaps; an expected SREJ'd frame
 * also triggers draining of the queue back into SDU reassembly.
 * Unless a frame was queued (skb_in_use), the skb is freed before
 * returning.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* Head of the SREJ list arrived; it is satisfied */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6462 
6463 static int l2cap_finish_move(struct l2cap_chan *chan)
6464 {
6465 	BT_DBG("chan %p", chan);
6466 
6467 	chan->rx_state = L2CAP_RX_STATE_RECV;
6468 
6469 	if (chan->hs_hcon)
6470 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6471 	else
6472 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6473 
6474 	return l2cap_resegment(chan);
6475 }
6476 
/* ERTM RX state machine, WAIT_P state: after a channel move we expect
 * the peer's poll (P-bit) s-frame.  On receipt, rewind the TX side to
 * the peer's reqseq, finish the move (switching to the new link's
 * MTU), answer with an F-bit frame, and process the event as if in the
 * RECV state.  Anything without the P-bit is a protocol error.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames never carry the P-bit, so reaching this point with
	 * one is a protocol violation.
	 */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6514 
/* ERTM RX state machine, WAIT_F state: we sent a poll after a channel
 * move and expect the matching final (F-bit) frame.  On receipt,
 * rewind the TX side to the peer's reqseq, adopt the current link's
 * MTU, resegment, and process the frame in the RECV state.  Anything
 * without the F-bit is a protocol error.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Adopt the MTU of the link that now carries the channel */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6552 
6553 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6554 {
6555 	/* Make sure reqseq is for a packet that has been sent but not acked */
6556 	u16 unacked;
6557 
6558 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6559 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6560 }
6561 
/* Top-level ERTM RX entry point: validate the frame's reqseq (it must
 * acknowledge only frames actually in flight) and hand the event to
 * the handler for the channel's current RX state.  An invalid reqseq
 * is a protocol violation and disconnects the channel.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
						       event);
			break;
		case L2CAP_RX_STATE_WAIT_P:
			err = l2cap_rx_state_wait_p(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_WAIT_F:
			err = l2cap_rx_state_wait_f(chan, control, skb, event);
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return err;
}
6598 
/* Streaming mode RX: there is no retransmission, so only a frame with
 * the expected txseq is fed to SDU reassembly; any other frame flushes
 * a partially assembled SDU and is dropped.  The sequence tracking is
 * then resynchronized to continue right after the received frame.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out of sequence: discard any partial SDU and the frame */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize to continue right after this frame */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return 0;
}
6634 
/* Entry point for ERTM and streaming mode PDUs on a connected channel:
 * unpacks and validates the control field and FCS, then dispatches
 * I-frames to the rx state machine (or the streaming receiver) and
 * S-frames to the rx state machine as RR/REJ/RNR/SREJ events.
 * Consumes skb on all paths (directly or via the called receivers).
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* The SDU length field of a start fragment is not payload */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* Payload may never exceed the negotiated MPS */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	/* Give an attached socket filter a chance to reject the frame */
	if ((chan->mode == L2CAP_MODE_ERTM ||
	     chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb))
		goto drop;

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Indexed by control->super (2 bits), hence 4 entries */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6726 
/* Send an LE Flow Control Credit packet topping the remote device back
 * up to the target credit level for this channel.  No-op when the
 * remote already holds at least that many credits.
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits;

	/* Target level is enough credits for one maximum-size SDU:
	 * imtu / mps fragments, plus one for the rounding remainder.
	 */
	return_credits = ((chan->imtu / chan->mps) + 1) - chan->rx_credits;

	if (!return_credits)
		return;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	/* Account locally before the packet goes out */
	chan->rx_credits += return_credits;

	pkt.cid     = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
6749 
6750 static int l2cap_le_recv(struct l2cap_chan *chan, struct sk_buff *skb)
6751 {
6752 	int err;
6753 
6754 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
6755 
6756 	/* Wait recv to confirm reception before updating the credits */
6757 	err = chan->ops->recv(chan, skb);
6758 
6759 	/* Update credits whenever an SDU is received */
6760 	l2cap_chan_le_send_credits(chan);
6761 
6762 	return err;
6763 }
6764 
/* Receive one LE credit-based PDU: consumes a credit, then either
 * starts a new SDU (first PDU carries a 16-bit SDU length prefix),
 * appends a continuation fragment, or delivers the completed SDU via
 * l2cap_le_recv().  Always consumes skb; errors are reported to the
 * peer (disconnect) rather than returned, except the credit/size
 * checks at the top.
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU starts with the total SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unfragmented SDU: deliver immediately */
		if (skb->len == sdu_len)
			return l2cap_le_recv(chan, skb);

		/* Park the partial SDU until continuation PDUs arrive */
		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* skb is absorbed into chan->sdu; NULL it so the error path below
	 * does not free it twice.
	 */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_le_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6868 
/* Route an incoming data frame to the channel identified by @cid and
 * feed it to the mode-specific receiver.  Consumes skb on all paths.
 *
 * NOTE(review): the unconditional l2cap_chan_unlock() at 'done' implies
 * l2cap_get_chan_by_scid() returns the channel locked (the A2MP branch
 * locks explicitly) — confirm against that helper's definition.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procdure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* l2cap_le_data_rcv() frees the skb internally on error */
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6942 
/* Deliver a connectionless (G-frame) payload to the global channel
 * bound to @psm, if any.  Only valid on ACL links.  Consumes skb unless
 * the recv callback accepted it; always drops the channel reference
 * taken by l2cap_global_chan_by_psm().
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	if (!chan->ops->recv(chan, skb)) {
		/* recv took ownership of skb; just release our chan ref */
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6979 
/* Demultiplex one complete L2CAP frame by CID.  Frames arriving before
 * the HCI connection is fully up are queued and replayed later by
 * process_pending_rx().  Ownership of skb passes to the per-CID
 * handler (or the pending queue).
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the remaining payload exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
7034 
7035 static void process_pending_rx(struct work_struct *work)
7036 {
7037 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7038 					       pending_rx_work);
7039 	struct sk_buff *skb;
7040 
7041 	BT_DBG("");
7042 
7043 	while ((skb = skb_dequeue(&conn->pending_rx)))
7044 		l2cap_recv_frame(conn, skb);
7045 }
7046 
/* Get or create the l2cap_conn attached to @hcon.  Allocates an HCI
 * channel and the connection structure, initializes its locks, lists
 * and work items, and takes a reference on @hcon.  Returns NULL on
 * allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up by an earlier caller */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* Pick the outgoing MTU from the controller; LE falls back to the
	 * ACL MTU when no dedicated LE buffer size was reported.
	 */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	if (hcon->type == ACL_LINK &&
	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7113 
7114 static bool is_valid_psm(u16 psm, u8 dst_type) {
7115 	if (!psm)
7116 		return false;
7117 
7118 	if (bdaddr_type_is_le(dst_type))
7119 		return (psm <= 0x00ff);
7120 
7121 	/* PSM must be odd and lsb of upper byte must be 0 */
7122 	return ((psm & 0x0101) == 0x0001);
7123 }
7124 
/* Initiate an outgoing L2CAP connection on @chan to @dst/@dst_type,
 * addressed either by @psm (connection-oriented) or fixed @cid.
 * Validates the PSM/CID/mode/state combination, creates or reuses the
 * HCI connection, binds the channel to it and starts the connect
 * procedure.  Returns 0 on success or if connecting is already in
 * progress, negative errno otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* RAW channels may connect without either identifier */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* ERTM/streaming only allowed unless disabled by module
		 * parameter
		 */
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we cannot initiate scanning, so connect
		 * directly; otherwise go through the connect-by-scan path.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type,
					      chan->sec_level,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE, NULL);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level,
						   HCI_LE_CONN_TIMEOUT);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Refuse a fixed CID that is already in use on this connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
7285 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7286 
7287 /* ---- L2CAP interface with lower layer (HCI) ---- */
7288 
7289 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7290 {
7291 	int exact = 0, lm1 = 0, lm2 = 0;
7292 	struct l2cap_chan *c;
7293 
7294 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7295 
7296 	/* Find listening sockets and check their link_mode */
7297 	read_lock(&chan_list_lock);
7298 	list_for_each_entry(c, &chan_list, global_l) {
7299 		if (c->state != BT_LISTEN)
7300 			continue;
7301 
7302 		if (!bacmp(&c->src, &hdev->bdaddr)) {
7303 			lm1 |= HCI_LM_ACCEPT;
7304 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7305 				lm1 |= HCI_LM_MASTER;
7306 			exact++;
7307 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
7308 			lm2 |= HCI_LM_ACCEPT;
7309 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7310 				lm2 |= HCI_LM_MASTER;
7311 		}
7312 	}
7313 	read_unlock(&chan_list_lock);
7314 
7315 	return exact ? lm1 : lm2;
7316 }
7317 
7318 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7319  * from an existing channel in the list or from the beginning of the
7320  * global list (by passing NULL as first parameter).
7321  */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume after the given channel, or start from the list head */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Source must match the link's local address or wildcard */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* Take a reference before dropping the list lock; the
		 * caller releases it with l2cap_chan_put().
		 */
		l2cap_chan_hold(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7353 
/* HCI callback invoked when an ACL or LE connection attempt completes.
 * On failure the L2CAP connection is torn down; on success the
 * l2cap_conn is created and every listening fixed channel is offered
 * the new link.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Fetch the successor before dropping our hold on pchan */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7414 
7415 int l2cap_disconn_ind(struct hci_conn *hcon)
7416 {
7417 	struct l2cap_conn *conn = hcon->l2cap_data;
7418 
7419 	BT_DBG("hcon %p", hcon);
7420 
7421 	if (!conn)
7422 		return HCI_ERROR_REMOTE_USER_TERM;
7423 	return conn->disc_reason;
7424 }
7425 
7426 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7427 {
7428 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7429 		return;
7430 
7431 	BT_DBG("hcon %p reason %d", hcon, reason);
7432 
7433 	l2cap_conn_del(hcon, bt_to_errno(reason));
7434 }
7435 
7436 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7437 {
7438 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7439 		return;
7440 
7441 	if (encrypt == 0x00) {
7442 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
7443 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7444 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
7445 			   chan->sec_level == BT_SECURITY_FIPS)
7446 			l2cap_chan_close(chan, ECONNREFUSED);
7447 	} else {
7448 		if (chan->sec_level == BT_SECURITY_MEDIUM)
7449 			__clear_chan_timer(chan);
7450 	}
7451 }
7452 
/* HCI callback delivered when an authentication/encryption procedure
 * completes.  Walks every channel on the connection and advances its
 * state machine: resumes connected channels, (re)starts connecting
 * ones, and answers deferred incoming connection requests.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP channels are not subject to link security */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   chan->mode != L2CAP_MODE_LE_FLOWCTL) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Keep the peer waiting until the
					 * owner accepts or rejects
					 */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Success with no prior config exchange: kick off
			 * the configuration request now
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
7543 
/* Receive one ACL data fragment from the HCI layer and reassemble it
 * into a complete L2CAP frame, which is then handed to
 * l2cap_recv_frame().  Start fragments carry the basic L2CAP header
 * (whose length field sizes the reassembly buffer); continuation
 * fragments are appended until the expected length is reached.
 * Consumes skb on all paths.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while reassembly is in progress means
		 * the previous frame was truncated — discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
}
7647 
/* Callbacks registered with the HCI core (see l2cap_init()) so L2CAP
 * is notified of connection, disconnection and security events.
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
7654 
7655 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7656 {
7657 	struct l2cap_chan *c;
7658 
7659 	read_lock(&chan_list_lock);
7660 
7661 	list_for_each_entry(c, &chan_list, global_l) {
7662 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7663 			   &c->src, c->src_type, &c->dst, c->dst_type,
7664 			   c->state, __le16_to_cpu(c->psm),
7665 			   c->scid, c->dcid, c->imtu, c->omtu,
7666 			   c->sec_level, c->mode);
7667 	}
7668 
7669 	read_unlock(&chan_list_lock);
7670 
7671 	return 0;
7672 }
7673 
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* debugfs entry created in l2cap_init(), removed in l2cap_exit() */
static struct dentry *l2cap_debugfs;
7677 
7678 int __init l2cap_init(void)
7679 {
7680 	int err;
7681 
7682 	err = l2cap_init_sockets();
7683 	if (err < 0)
7684 		return err;
7685 
7686 	hci_register_cb(&l2cap_cb);
7687 
7688 	if (IS_ERR_OR_NULL(bt_debugfs))
7689 		return 0;
7690 
7691 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7692 					    NULL, &l2cap_debugfs_fops);
7693 
7694 	return 0;
7695 }
7696 
/* Module teardown: undo l2cap_init() in reverse order */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7703 
/* Expose disable_ertm as a writable (0644) module parameter */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
7706