xref: /openbmc/linux/net/bluetooth/l2cap_core.c (revision b68fc09be48edbc47de1a0f3d42ef8adf6c0ac55)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
50 
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
53 
54 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
55 				       u8 code, u8 ident, u16 dlen, void *data);
56 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
57 			   void *data);
58 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
59 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
60 
61 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
62 		     struct sk_buff_head *skbs, u8 event);
63 
64 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
65 {
66 	if (link_type == LE_LINK) {
67 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
68 			return BDADDR_LE_PUBLIC;
69 		else
70 			return BDADDR_LE_RANDOM;
71 	}
72 
73 	return BDADDR_BREDR;
74 }
75 
/* Socket-level address type of the local (source) side of @hcon */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
80 
/* Socket-level address type of the remote (destination) side of @hcon */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
85 
86 /* ---- L2CAP channels ---- */
87 
/* Look up a channel on @conn by its destination CID (the remote side's
 * channel endpoint). Caller must hold conn->chan_lock. Returns NULL if
 * no channel matches.
 */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}
99 
/* Look up a channel on @conn by its source CID (our local channel
 * endpoint). Caller must hold conn->chan_lock. Returns NULL if no
 * channel matches.
 */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}
111 
/* Find channel with given SCID.
 * Returns a locked channel.
 *
 * NOTE(review): no reference is taken on the returned channel, only the
 * channel lock; the caller must ensure the channel cannot be freed
 * while it is using it — confirm against each call site.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
127 
/* Find channel with given DCID.
 * Returns a locked channel.
 *
 * NOTE(review): as with l2cap_get_chan_by_scid(), no reference is taken
 * on the returned channel — verify callers keep it alive.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
144 
/* Look up a channel on @conn by the signalling command identifier of an
 * in-flight request. Caller must hold conn->chan_lock. Returns NULL if
 * no channel matches.
 */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
	}
	return NULL;
}
156 
/* Find channel with given signalling identifier.
 * Returns a locked channel (no reference taken — see the SCID variant).
 */
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
170 
/* Find a channel in the global list bound to source PSM @psm and source
 * address @src. Caller must hold chan_list_lock (read or write).
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}
	return NULL;
}
181 
/* Bind @chan to PSM @psm on source address @src.
 *
 * If @psm is non-zero it is used directly (fails with -EADDRINUSE when
 * another channel already owns it on the same source address). If @psm
 * is zero, a free dynamic PSM is auto-allocated: odd values stepped by
 * 2 in the BR/EDR dynamic range, or by 1 in the LE dynamic range.
 * Returns 0 on success, -EINVAL when the dynamic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		/* Range/stride depend on the transport the socket is
		 * bound to (BR/EDR PSMs must be odd, hence incr = 2).
		 */
		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
225 
/* Turn @chan into a fixed channel with source CID @scid.
 * Always succeeds (returns 0); the int return exists for API symmetry
 * with l2cap_add_psm().
 */
int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
240 
241 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
242 {
243 	u16 cid, dyn_end;
244 
245 	if (conn->hcon->type == LE_LINK)
246 		dyn_end = L2CAP_CID_LE_DYN_END;
247 	else
248 		dyn_end = L2CAP_CID_DYN_END;
249 
250 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
251 		if (!__l2cap_get_chan_by_scid(conn, cid))
252 			return cid;
253 	}
254 
255 	return 0;
256 }
257 
/* Move @chan to @state and notify the channel owner via its
 * state_change callback (with err == 0). Caller must hold the channel
 * lock.
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
266 
/* Like l2cap_state_change() but also reports @err to the owner in the
 * same callback.
 */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
273 
/* Report @err to the channel owner without changing the channel state */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
278 
/* (Re)arm the ERTM retransmission timer, but only if the monitor timer
 * is not already pending — while the monitor timer runs it supersedes
 * retransmission — and a retransmission timeout has been configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
287 
/* Arm the ERTM monitor timer, cancelling any pending retransmission
 * timer first (the two are mutually exclusive).
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
296 
/* Linear scan of @head for the skb whose ERTM TxSeq equals @seq.
 * Returns NULL when no queued frame carries that sequence number.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->l2cap.txseq == seq)
			return skb;
	}

	return NULL;
}
309 
310 /* ---- L2CAP sequence number lists ---- */
311 
312 /* For ERTM, ordered lists of sequence numbers must be tracked for
313  * SREJ requests that are received and for frames that are to be
314  * retransmitted. These seq_list functions implement a singly-linked
315  * list in an array, where membership in the list can also be checked
316  * in constant time. Items can also be added to the tail of the list
317  * and removed from the head in constant time, without further memory
318  * allocs or frees.
319  */
320 
321 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
322 {
323 	size_t alloc_size, i;
324 
325 	/* Allocated size is a power of 2 to map sequence numbers
326 	 * (which may be up to 14 bits) in to a smaller array that is
327 	 * sized for the negotiated ERTM transmit windows.
328 	 */
329 	alloc_size = roundup_pow_of_two(size);
330 
331 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
332 	if (!seq_list->list)
333 		return -ENOMEM;
334 
335 	seq_list->mask = alloc_size - 1;
336 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
337 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
338 	for (i = 0; i < alloc_size; i++)
339 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
340 
341 	return 0;
342 }
343 
/* Release the backing array of a sequence list (kfree(NULL) is safe) */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
348 
/* O(1) membership test: a slot holds L2CAP_SEQ_LIST_CLEAR only when the
 * corresponding sequence number is not on the list.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
355 
/* Remove and return the sequence number at the head of the list.
 * When the popped entry was the last one (its slot held
 * L2CAP_SEQ_LIST_TAIL), the list is reset to the empty state.
 *
 * NOTE(review): there is no empty-list guard here — popping an empty
 * list would index with L2CAP_SEQ_LIST_CLEAR; callers are presumably
 * required to check first. Verify at call sites.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
371 
372 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
373 {
374 	u16 i;
375 
376 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
377 		return;
378 
379 	for (i = 0; i <= seq_list->mask; i++)
380 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
381 
382 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
383 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
384 }
385 
/* Append @seq to the tail of the list in O(1).
 * Duplicates are silently ignored (the slot is already occupied).
 * The tail slot always holds the L2CAP_SEQ_LIST_TAIL sentinel; the
 * previous tail slot is relinked to point at the new entry.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
403 
/* Delayed-work handler for the channel timer (set via
 * __set_chan_timer()). Closes the channel with an error that depends on
 * the state it timed out in, then drops the reference the timer held.
 *
 * NOTE(review): chan->conn is read here without the channel lock and is
 * cleared in l2cap_chan_del(); confirm the timer is always cancelled
 * before the channel is detached, otherwise conn could be stale/NULL.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* A timeout while connected/configuring, or while connecting
	 * above SDP security level, reads as a refused connection;
	 * anything else is a plain timeout.
	 */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Drop the reference taken when the timer was armed */
	l2cap_chan_put(chan);
}
433 
/* Allocate and initialize a new channel in BT_OPEN state, link it into
 * the global channel list and give the caller the initial kref.
 * Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
465 
/* kref release callback: unlink the channel from the global list and
 * free it. Runs when the last l2cap_chan_put() drops the refcount to
 * zero.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
478 
/* Take an additional reference on @c */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
485 
/* Drop a reference on @c; frees the channel via l2cap_chan_destroy()
 * when this was the last one.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
493 
/* Reset the negotiable channel parameters (FCS, ERTM windows and
 * timeouts, security level, flush timeout) to their spec defaults and
 * clear all configuration state. Called before (re)configuring a
 * channel.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
	chan->conf_state = 0;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
512 
/* Initialize LE credit-based flow control state for @chan: reset SDU
 * reassembly, derive the MPS from the link MTU, and grant the peer
 * enough credits to send one full SDU.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = 0;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	/* Give enough credits for a full packet */
	chan->rx_credits = (chan->imtu / chan->mps) + 1;

	skb_queue_head_init(&chan->tx_q);
}
526 
/* Attach @chan to @conn: allocate/assign CIDs according to the channel
 * type, set the EFS defaults, take a channel reference and (for most
 * channel types) a reference on the underlying HCI connection, then add
 * the channel to the connection's list. Caller must hold
 * conn->chan_lock.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* Default disconnect reason until something more specific is known */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default extended-flow-spec (EFS) parameters */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
578 
/* Locked wrapper around __l2cap_chan_add() */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
585 
586 void l2cap_chan_del(struct l2cap_chan *chan, int err)
587 {
588 	struct l2cap_conn *conn = chan->conn;
589 
590 	__clear_chan_timer(chan);
591 
592 	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
593 	       state_to_string(chan->state));
594 
595 	chan->ops->teardown(chan, err);
596 
597 	if (conn) {
598 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
599 		/* Delete from channel list */
600 		list_del(&chan->list);
601 
602 		l2cap_chan_put(chan);
603 
604 		chan->conn = NULL;
605 
606 		/* Reference was only held for non-fixed channels or
607 		 * fixed channels that explicitly requested it using the
608 		 * FLAG_HOLD_HCI_CONN flag.
609 		 */
610 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
611 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
612 			hci_conn_drop(conn->hcon);
613 
614 		if (mgr && mgr->bredr_chan == chan)
615 			mgr->bredr_chan = NULL;
616 	}
617 
618 	if (chan->hs_hchan) {
619 		struct hci_chan *hs_hchan = chan->hs_hchan;
620 
621 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
622 		amp_disconnect_logical_link(hs_hchan);
623 	}
624 
625 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
626 		return;
627 
628 	switch(chan->mode) {
629 	case L2CAP_MODE_BASIC:
630 		break;
631 
632 	case L2CAP_MODE_LE_FLOWCTL:
633 		skb_queue_purge(&chan->tx_q);
634 		break;
635 
636 	case L2CAP_MODE_ERTM:
637 		__clear_retrans_timer(chan);
638 		__clear_monitor_timer(chan);
639 		__clear_ack_timer(chan);
640 
641 		skb_queue_purge(&chan->srej_q);
642 
643 		l2cap_seq_list_free(&chan->srej_list);
644 		l2cap_seq_list_free(&chan->retrans_list);
645 
646 		/* fall through */
647 
648 	case L2CAP_MODE_STREAMING:
649 		skb_queue_purge(&chan->tx_q);
650 		break;
651 	}
652 
653 	return;
654 }
655 EXPORT_SYMBOL_GPL(l2cap_chan_del);
656 
/* Work handler: propagate the HCI connection's (possibly resolved)
 * destination identity address and type to every channel on the
 * connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_update_work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
675 
/* Send a negative LE connection response for a pending incoming LE
 * channel and move it to BT_DISCONN. The result depends on whether the
 * rejection came from deferred-setup authorization or an unknown PSM.
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_AUTHORIZATION;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid    = cpu_to_le16(chan->scid);
	rsp.mtu     = cpu_to_le16(chan->imtu);
	rsp.mps     = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
698 
/* BR/EDR counterpart of l2cap_chan_le_connect_reject(): send a negative
 * connection response and move the channel to BT_DISCONN.
 */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
719 
/* Close @chan with @reason, choosing the shutdown path by current
 * state: established connection-oriented channels send a disconnect
 * request (with a timer to force teardown), half-open incoming channels
 * are rejected first, everything else is deleted directly. Caller must
 * hold the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming request still pending: reject it before delete */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
762 
/* Map the channel type and requested security level onto an HCI
 * authentication requirement. SDP (and the 3DSP connectionless PSM) is
 * special-cased: security LOW is bumped to the SDP level and bonding is
 * never requested for it. May modify chan->sec_level as a side effect.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		/* Raw sockets are used for dedicated bonding only */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
813 
/* Service level security.
 * Request the security level the channel needs: SMP pairing on LE
 * links, HCI authentication/encryption on BR/EDR. Return value follows
 * smp_conn_security()/hci_conn_security() semantics.
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
828 
829 static u8 l2cap_get_ident(struct l2cap_conn *conn)
830 {
831 	u8 id;
832 
833 	/* Get next available identificator.
834 	 *    1 - 128 are used by kernel.
835 	 *  129 - 199 are reserved.
836 	 *  200 - 254 are used by utilities like l2ping, etc.
837 	 */
838 
839 	mutex_lock(&conn->ident_lock);
840 
841 	if (++conn->tx_ident > 128)
842 		conn->tx_ident = 1;
843 
844 	id = conn->tx_ident;
845 
846 	mutex_unlock(&conn->ident_lock);
847 
848 	return id;
849 }
850 
/* Build and transmit a signalling command on @conn's HCI channel.
 * Silently drops the command if the skb cannot be allocated. Commands
 * are sent at maximum priority and force the link out of sniff mode.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
875 
876 static bool __chan_is_moving(struct l2cap_chan *chan)
877 {
878 	return chan->move_state != L2CAP_MOVE_STABLE &&
879 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
880 }
881 
/* Transmit @skb on @chan's data path. A channel that has completed a
 * move to an AMP controller sends on the high-speed HCI channel (or
 * drops the frame if that channel is gone); otherwise the frame goes
 * out on the BR/EDR or LE ACL link with the appropriate flush flags.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
913 
/* Decode a 16-bit ERTM enhanced control field into @control.
 * Fields that do not exist for the decoded frame type (S- vs I-frame)
 * are zeroed so the struct is fully defined either way.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
937 
/* Decode a 32-bit ERTM extended control field into @control.
 * Same layout rules as __unpack_enhanced_control(), with the wider
 * extended-window bit positions.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
961 
/* Decode the control field at the front of @skb into the skb's control
 * block and pull it off the data, choosing the extended or enhanced
 * format based on the channel's FLAG_EXT_CTRL flag.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
975 
976 static u32 __pack_extended_control(struct l2cap_ctrl *control)
977 {
978 	u32 packed;
979 
980 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
981 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
982 
983 	if (control->sframe) {
984 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
985 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
986 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
987 	} else {
988 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
989 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
990 	}
991 
992 	return packed;
993 }
994 
995 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
996 {
997 	u16 packed;
998 
999 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1000 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1001 
1002 	if (control->sframe) {
1003 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1004 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1005 		packed |= L2CAP_CTRL_FRAME_TYPE;
1006 	} else {
1007 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1008 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1009 	}
1010 
1011 	return packed;
1012 }
1013 
/* Serialize @control into the control-field slot of @skb (right after
 * the basic L2CAP header), using the format selected by FLAG_EXT_CTRL.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
1026 
1027 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1028 {
1029 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1030 		return L2CAP_EXT_HDR_SIZE;
1031 	else
1032 		return L2CAP_ENH_HDR_SIZE;
1033 }
1034 
/* Build a complete ERTM S-frame PDU for @chan carrying the pre-packed
 * @control field: basic header, control field, and (when negotiated) a
 * trailing CRC16 FCS over header+control. Returns ERR_PTR(-ENOMEM) on
 * allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	/* S-frames jump the TX queue ahead of data */
	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1067 
/* Build and send an ERTM supervisory frame described by @control,
 * updating the connection-state bookkeeping that goes with it:
 * piggy-back a pending F-bit, track RR/RNR busy signalling, and record
 * the acked sequence (cancelling the ack timer) for non-SREJ frames.
 * Frames are suppressed while an AMP channel move is in progress.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is sent on the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1108 
1109 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1110 {
1111 	struct l2cap_ctrl control;
1112 
1113 	BT_DBG("chan %p, poll %d", chan, poll);
1114 
1115 	memset(&control, 0, sizeof(control));
1116 	control.sframe = 1;
1117 	control.poll = poll;
1118 
1119 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1120 		control.super = L2CAP_SUPER_RNR;
1121 	else
1122 		control.super = L2CAP_SUPER_RR;
1123 
1124 	control.reqseq = chan->buffer_seq;
1125 	l2cap_send_sframe(chan, &control);
1126 }
1127 
1128 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1129 {
1130 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1131 		return true;
1132 
1133 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1134 }
1135 
1136 static bool __amp_capable(struct l2cap_chan *chan)
1137 {
1138 	struct l2cap_conn *conn = chan->conn;
1139 	struct hci_dev *hdev;
1140 	bool amp_available = false;
1141 
1142 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1143 		return false;
1144 
1145 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1146 		return false;
1147 
1148 	read_lock(&hci_dev_list_lock);
1149 	list_for_each_entry(hdev, &hci_dev_list, list) {
1150 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1151 		    test_bit(HCI_UP, &hdev->flags)) {
1152 			amp_available = true;
1153 			break;
1154 		}
1155 	}
1156 	read_unlock(&hci_dev_list_lock);
1157 
1158 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1159 		return amp_available;
1160 
1161 	return false;
1162 }
1163 
/* Placeholder: Extended Flow Specification parameters are currently
 * accepted unconditionally; no validation is implemented yet.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1169 
1170 void l2cap_send_conn_req(struct l2cap_chan *chan)
1171 {
1172 	struct l2cap_conn *conn = chan->conn;
1173 	struct l2cap_conn_req req;
1174 
1175 	req.scid = cpu_to_le16(chan->scid);
1176 	req.psm  = chan->psm;
1177 
1178 	chan->ident = l2cap_get_ident(conn);
1179 
1180 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1181 
1182 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1183 }
1184 
1185 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1186 {
1187 	struct l2cap_create_chan_req req;
1188 	req.scid = cpu_to_le16(chan->scid);
1189 	req.psm  = chan->psm;
1190 	req.amp_id = amp_id;
1191 
1192 	chan->ident = l2cap_get_ident(chan->conn);
1193 
1194 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1195 		       sizeof(req), &req);
1196 }
1197 
/* Prepare an ERTM channel for an AMP channel move: quiesce timers,
 * reset transmit bookkeeping, and park the RX/TX state machines until
 * the move completes.  No-op for non-ERTM channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Reset the retry count of frames that have already been sent at
	 * least once; stop at the first never-transmitted frame.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->l2cap.retries)
			bt_cb(skb)->l2cap.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Treat the remote as busy so nothing is transmitted mid-move */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1232 
1233 static void l2cap_move_done(struct l2cap_chan *chan)
1234 {
1235 	u8 move_role = chan->move_role;
1236 	BT_DBG("chan %p", chan);
1237 
1238 	chan->move_state = L2CAP_MOVE_STABLE;
1239 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1240 
1241 	if (chan->mode != L2CAP_MODE_ERTM)
1242 		return;
1243 
1244 	switch (move_role) {
1245 	case L2CAP_MOVE_ROLE_INITIATOR:
1246 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1247 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1248 		break;
1249 	case L2CAP_MOVE_ROLE_RESPONDER:
1250 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1251 		break;
1252 	}
1253 }
1254 
/* Transition a channel to BT_CONNECTED and notify its owner via
 * ops->ready().  Safe to call more than once.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	/* LE flow control: with no TX credits the channel starts out
	 * suspended until the peer grants credits.
	 */
	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1275 
/* Send an LE Credit Based Connection Request for this channel.
 * Guaranteed to transmit at most once per channel via
 * FLAG_LE_CONN_REQ_SENT.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Initialize credit-based flow control state before the local
	 * MPS/credit values are advertised to the peer.
	 */
	l2cap_le_flowctl_init(chan);

	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1297 
1298 static void l2cap_le_start(struct l2cap_chan *chan)
1299 {
1300 	struct l2cap_conn *conn = chan->conn;
1301 
1302 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1303 		return;
1304 
1305 	if (!chan->psm) {
1306 		l2cap_chan_ready(chan);
1307 		return;
1308 	}
1309 
1310 	if (chan->state == BT_CONNECT)
1311 		l2cap_le_connect(chan);
1312 }
1313 
1314 static void l2cap_start_connection(struct l2cap_chan *chan)
1315 {
1316 	if (__amp_capable(chan)) {
1317 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1318 		a2mp_discover_amp(chan);
1319 	} else if (chan->conn->hcon->type == LE_LINK) {
1320 		l2cap_le_start(chan);
1321 	} else {
1322 		l2cap_send_conn_req(chan);
1323 	}
1324 }
1325 
/* Start the L2CAP Information Request (feature mask) exchange for this
 * connection.  Only the first call has any effect.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	/* Bound the wait for a response; the timeout handler marks the
	 * exchange done so channel setup is not blocked forever.
	 */
	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1343 
/* Drive outgoing channel setup as far as the connection state allows.
 * On BR/EDR the information exchange must complete first; setup then
 * resumes from l2cap_conn_start() / the info timeout handler.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	/* LE links have their own, simpler start path */
	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	/* Proceed only when security is satisfied and no connect
	 * request is already outstanding.
	 */
	if (l2cap_chan_check_security(chan, true) &&
	    __l2cap_no_conn_pending(chan))
		l2cap_start_connection(chan);
}
1365 
1366 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1367 {
1368 	u32 local_feat_mask = l2cap_feat_mask;
1369 	if (!disable_ertm)
1370 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1371 
1372 	switch (mode) {
1373 	case L2CAP_MODE_ERTM:
1374 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1375 	case L2CAP_MODE_STREAMING:
1376 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1377 	default:
1378 		return 0x00;
1379 	}
1380 }
1381 
/* Initiate channel disconnection: stop ERTM timers, send an L2CAP
 * Disconnection Request (except for A2MP channels, which have no
 * on-air disconnect), and move the channel to BT_DISCONN with @err.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* No more retransmissions once teardown has been decided */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	/* A2MP channels are torn down via A2MP, not a disconnect req */
	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1408 
1409 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and push its setup forward, typically
 * after the information exchange has completed.  Outgoing channels in
 * BT_CONNECT get their connection request sent; incoming channels in
 * BT_CONNECT2 get their pending connection response answered.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe variant: l2cap_chan_close() below can unlink the
	 * channel from conn->chan_l while we iterate.
	 */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		/* Non connection-oriented channels are ready right away */
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close channels that require a mode the peer
			 * does not support and may not fall back.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			/* Answer the pending incoming connection request,
			 * deferring to userspace authorization or pending
			 * authentication when necessary.
			 */
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Send the first configure request only once and
			 * only after a successful connection response.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1486 
/* Per-connection work done when an LE link becomes ready: kick off
 * pairing for outgoing connections and, as slave, request a connection
 * parameter update when the current interval is out of range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE slave connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1519 
/* Called when the underlying link is fully established: start the info
 * exchange (BR/EDR), advance every channel already attached to the
 * connection, and release any RX frames queued while waiting.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels are driven by their own state machine */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed channels wait for the info exchange */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process frames that arrived before the conn became ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1560 
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* Only channels that insisted on reliable delivery get the error */
	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1577 
/* Info request timed out with no response: mark the exchange as done
 * anyway so channel setup can proceed without the peer's feature mask.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1588 
1589 /*
1590  * l2cap_user
1591  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1592  * callback is called during registration. The ->remove callback is called
1593  * during unregistration.
1594  * An l2cap_user object can either be explicitly unregistered or when the
1595  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1596  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1597  * External modules must own a reference to the l2cap_conn object if they intend
1598  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1599  * any time if they don't.
1600  */
1601 
/* Register an external user on @conn.  Returns 0 on success, -EINVAL
 * if the user is already registered, -ENODEV if the connection is
 * already being torn down, or the error from the user's ->probe().
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* A non-empty list node means this user is already registered */
	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1639 
1640 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1641 {
1642 	struct hci_dev *hdev = conn->hcon->hdev;
1643 
1644 	hci_dev_lock(hdev);
1645 
1646 	if (list_empty(&user->list))
1647 		goto out_unlock;
1648 
1649 	list_del_init(&user->list);
1650 	user->remove(conn, user);
1651 
1652 out_unlock:
1653 	hci_dev_unlock(hdev);
1654 }
1655 EXPORT_SYMBOL(l2cap_unregister_user);
1656 
/* Drain the user list on connection teardown, calling each ->remove()
 * after its entry is unlinked (list_del_init keeps the node reusable).
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del_init(&user->list);
		user->remove(conn, user);
	}
}
1667 
/* Tear down the l2cap_conn attached to @hcon: cancel pending work,
 * notify users, delete every channel with error @err, and drop the
 * connection's reference.  Caller must hold hcon/hdev locks (see the
 * comment in l2cap_register_user()).
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold an extra reference so the channel survives until
		 * ops->close() has run, which is called unlocked.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* Stop the info timer if the exchange is still outstanding */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1723 
/* kref release callback: drop our reference to the underlying hci_conn
 * and free the l2cap_conn itself.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1731 
/* Take an additional reference on @conn; paired with l2cap_conn_put() */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1738 
/* Drop a reference on @conn; frees it when the last reference goes */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1744 
1745 /* ---- Socket interface ---- */
1746 
/* Find socket with psm and source / destination bdaddr.
 * Returns closest match.
 *
 * An exact src/dst match wins; otherwise the best wildcard
 * (BDADDR_ANY) match is returned.  The returned channel has an extra
 * reference taken (l2cap_chan_hold); the caller must put it.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* The channel's address type must match the link type */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				l2cap_chan_hold(c);
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		l2cap_chan_hold(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1798 
/* ERTM monitor timer expired: feed the monitor-timeout event into the
 * TX state machine.  Drops the reference the timer held on the channel.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* The channel may have been disconnected while this work was
	 * queued; nothing to do beyond releasing the reference.
	 */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1819 
/* ERTM retransmission timer expired: feed the retransmission-timeout
 * event into the TX state machine.  Drops the timer's channel ref.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel may already be disconnected; just release the ref */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1839 
/* Streaming mode transmit: append @skbs to the TX queue and send every
 * queued frame immediately (no acknowledgements or retransmissions in
 * this mode), stamping each with the next TX sequence number.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Hold off while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		/* Append the frame check sequence when negotiated */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1878 
/* ERTM transmit: send queued I-frames while the remote TX window has
 * room, piggybacking acknowledgements (reqseq) and a pending F-bit.
 * Returns the number of frames sent, 0 when blocked, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Remote receiver reported busy: wait for it to clear */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	/* Hold off while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggyback a pending F-bit on this I-frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame also acknowledges up to buffer_seq */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance the send pointer; the original skb stays on
		 * tx_q for possible retransmission.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1948 
/* Retransmit every sequence number queued on chan->retrans_list,
 * rewriting each frame's control field (fresh reqseq/F-bit) and FCS
 * in place.  Gives up and tears the channel down when a frame exceeds
 * max_tx retransmissions.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	/* Remote receiver reported busy: wait for it to clear */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	/* Hold off while an AMP channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		/* Work on a local copy of the stored control block */
		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the acknowledgement and pending F-bit */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2029 
/* Retransmit the single frame the peer asked for (control->reqseq),
 * e.g. in response to an SREJ.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2038 
/* Retransmit every unacked frame starting at control->reqseq, e.g. in
 * response to a REJ or a poll.  A P-bit in @control requests that the
 * next outgoing frame carry the F-bit.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Rebuild the retransmission list from scratch */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame at or after reqseq ... */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* ... and queue everything already sent from there on */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2072 
/* Acknowledge received I-frames: send RNR when locally busy, otherwise
 * try to piggyback the ack on outgoing I-frames and send an explicit
 * RR only when the ack backlog reaches 3/4 of the window.  Otherwise
 * (re)arm the delayed-ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Anything still unacked will be covered by the timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2122 
/* Copy @len bytes of user data from @msg into @skb, placing the first
 * @count bytes in the skb's linear area and the remainder in MTU-sized
 * fragments chained on frag_list.  Returns bytes copied or -EFAULT /
 * an allocation error.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
				   &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Account fragment bytes in the parent skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2166 
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the PSM, then the user payload from @msg.  Returns the skb or an
 * ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* First fragment carries the header; the rest goes in frag_list */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2198 
2199 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2200 					      struct msghdr *msg, size_t len)
2201 {
2202 	struct l2cap_conn *conn = chan->conn;
2203 	struct sk_buff *skb;
2204 	int err, count;
2205 	struct l2cap_hdr *lh;
2206 
2207 	BT_DBG("chan %p len %zu", chan, len);
2208 
2209 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2210 
2211 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2212 				   msg->msg_flags & MSG_DONTWAIT);
2213 	if (IS_ERR(skb))
2214 		return skb;
2215 
2216 	/* Create L2CAP header */
2217 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2218 	lh->cid = cpu_to_le16(chan->dcid);
2219 	lh->len = cpu_to_le16(len);
2220 
2221 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2222 	if (unlikely(err < 0)) {
2223 		kfree_skb(skb);
2224 		return ERR_PTR(err);
2225 	}
2226 	return skb;
2227 }
2228 
/* Build an ERTM/Streaming I-frame PDU: L2CAP header, zeroed control
 * field (filled in at transmit time), optional SDU length for the
 * first segment of a segmented SDU, then the payload.  Room for the
 * FCS is accounted for but the FCS itself is appended on transmit.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on enhanced vs extended control field */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2282 
/* Split an outgoing SDU into one or more I-frame PDUs queued on
 * @seg_queue.  PDU size is limited by the HCI MTU (so each PDU fits a
 * single HCI fragment), by the BR/EDR payload limit when no AMP link
 * is in use, and by the remote side's MPS.  SAR bits mark each PDU as
 * unsegmented, start, continuation or end.  Returns 0 on success or a
 * negative errno (the queue is purged on failure).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU; no SDU length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the first (start) PDU carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2349 
/* Build a single LE flow-control (K-frame) PDU: Basic L2CAP header,
 * an optional SDU length field on the first segment of an SDU, then
 * up to @len bytes copied from the caller's msghdr.  Returns the skb
 * on success or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	/* Only the first segment of an SDU carries the SDU length */
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2392 
/* Split an outgoing SDU into LE flow-control (K-frame) PDUs queued on
 * @seg_queue.  Only the first PDU carries the 2-byte SDU length, so
 * every following PDU may carry L2CAP_SDULEN_SIZE more payload bytes.
 * Returns 0 on success or a negative errno (the queue is purged on
 * failure).
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	/* First PDU reserves room for the SDU length field */
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Remaining PDUs have no SDU length field, so the
			 * usable payload per PDU grows accordingly.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2428 
2429 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2430 {
2431 	int sent = 0;
2432 
2433 	BT_DBG("chan %p", chan);
2434 
2435 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2436 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2437 		chan->tx_credits--;
2438 		sent++;
2439 	}
2440 
2441 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2442 	       skb_queue_len(&chan->tx_q));
2443 }
2444 
/**
 * l2cap_chan_send - queue user data for transmission on a channel
 * @chan: channel to transmit on (locked by the caller)
 * @msg:  user data to copy from
 * @len:  number of bytes to send
 *
 * Builds mode-specific PDUs (connectionless, basic, LE flow control,
 * ERTM or streaming) and hands them to the transmit path.  Returns the
 * number of bytes queued or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Recheck the state after segmenting: the channel lock may
		 * have been dropped while allocating skbs (see the comment
		 * in the connectionless case above).  Purging is safe even
		 * if segmentation already failed with an empty queue.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: tell the upper layer to stop sending */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2571 
/* Request retransmission of every missing frame between the next
 * expected txseq and the txseq just received: send an SREJ S-frame for
 * each sequence number not already buffered in srej_q, and remember
 * each request in srej_list for matching the retransmissions later.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	/* The frame after txseq is the next one expected in order */
	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2594 
2595 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2596 {
2597 	struct l2cap_ctrl control;
2598 
2599 	BT_DBG("chan %p", chan);
2600 
2601 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2602 		return;
2603 
2604 	memset(&control, 0, sizeof(control));
2605 	control.sframe = 1;
2606 	control.super = L2CAP_SUPER_SREJ;
2607 	control.reqseq = chan->srej_list.tail;
2608 	l2cap_send_sframe(chan, &control);
2609 }
2610 
/* Re-send an SREJ for every sequence number still outstanding in the
 * SREJ list, stopping at @txseq.  Entries are popped and re-appended,
 * so capturing the initial head guarantees exactly one pass over the
 * list even though it is being rotated.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* Done when txseq itself is reached or the list is empty */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2636 
/* Process an incoming acknowledgement (reqseq): free every transmitted
 * frame up to, but not including, @reqseq from the tx queue, and stop
 * the retransmission timer once no frames remain unacknowledged.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or nothing new acknowledged */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2668 
2669 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2670 {
2671 	BT_DBG("chan %p", chan);
2672 
2673 	chan->expected_tx_seq = chan->buffer_seq;
2674 	l2cap_seq_list_clear(&chan->srej_list);
2675 	skb_queue_purge(&chan->srej_q);
2676 	chan->rx_state = L2CAP_RX_STATE_RECV;
2677 }
2678 
/* Transmit state machine, XMIT state: the channel is free to send new
 * I-frames.  Poll-type events (explicit poll, retransmission timeout)
 * transition to WAIT_F while the poll response is outstanding.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* An RNR was sent while busy; poll the remote with
			 * an RR so both sides resynchronise.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2750 
2751 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2752 				  struct l2cap_ctrl *control,
2753 				  struct sk_buff_head *skbs, u8 event)
2754 {
2755 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2756 	       event);
2757 
2758 	switch (event) {
2759 	case L2CAP_EV_DATA_REQUEST:
2760 		if (chan->tx_send_head == NULL)
2761 			chan->tx_send_head = skb_peek(skbs);
2762 		/* Queue data, but don't send. */
2763 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2764 		break;
2765 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2766 		BT_DBG("Enter LOCAL_BUSY");
2767 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2768 
2769 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2770 			/* The SREJ_SENT state must be aborted if we are to
2771 			 * enter the LOCAL_BUSY state.
2772 			 */
2773 			l2cap_abort_rx_srej_sent(chan);
2774 		}
2775 
2776 		l2cap_send_ack(chan);
2777 
2778 		break;
2779 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2780 		BT_DBG("Exit LOCAL_BUSY");
2781 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2782 
2783 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2784 			struct l2cap_ctrl local_control;
2785 			memset(&local_control, 0, sizeof(local_control));
2786 			local_control.sframe = 1;
2787 			local_control.super = L2CAP_SUPER_RR;
2788 			local_control.poll = 1;
2789 			local_control.reqseq = chan->buffer_seq;
2790 			l2cap_send_sframe(chan, &local_control);
2791 
2792 			chan->retry_count = 1;
2793 			__set_monitor_timer(chan);
2794 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2795 		}
2796 		break;
2797 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2798 		l2cap_process_reqseq(chan, control->reqseq);
2799 
2800 		/* Fall through */
2801 
2802 	case L2CAP_EV_RECV_FBIT:
2803 		if (control && control->final) {
2804 			__clear_monitor_timer(chan);
2805 			if (chan->unacked_frames > 0)
2806 				__set_retrans_timer(chan);
2807 			chan->retry_count = 0;
2808 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2809 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2810 		}
2811 		break;
2812 	case L2CAP_EV_EXPLICIT_POLL:
2813 		/* Ignore */
2814 		break;
2815 	case L2CAP_EV_MONITOR_TO:
2816 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2817 			l2cap_send_rr_or_rnr(chan, 1);
2818 			__set_monitor_timer(chan);
2819 			chan->retry_count++;
2820 		} else {
2821 			l2cap_send_disconn_req(chan, ECONNABORTED);
2822 		}
2823 		break;
2824 	default:
2825 		break;
2826 	}
2827 }
2828 
2829 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2830 		     struct sk_buff_head *skbs, u8 event)
2831 {
2832 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2833 	       chan, control, skbs, event, chan->tx_state);
2834 
2835 	switch (chan->tx_state) {
2836 	case L2CAP_TX_STATE_XMIT:
2837 		l2cap_tx_state_xmit(chan, control, skbs, event);
2838 		break;
2839 	case L2CAP_TX_STATE_WAIT_F:
2840 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2841 		break;
2842 	default:
2843 		/* Ignore event */
2844 		break;
2845 	}
2846 }
2847 
/* Feed a received reqseq (and F-bit) into the transmit state machine */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2854 
/* Feed only a received F-bit into the transmit state machine */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2861 
2862 /* Copy frame to all raw sockets on that connection */
2863 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2864 {
2865 	struct sk_buff *nskb;
2866 	struct l2cap_chan *chan;
2867 
2868 	BT_DBG("conn %p", conn);
2869 
2870 	mutex_lock(&conn->chan_lock);
2871 
2872 	list_for_each_entry(chan, &conn->chan_l, list) {
2873 		if (chan->chan_type != L2CAP_CHAN_RAW)
2874 			continue;
2875 
2876 		/* Don't send frame to the channel it came from */
2877 		if (bt_cb(skb)->l2cap.chan == chan)
2878 			continue;
2879 
2880 		nskb = skb_clone(skb, GFP_KERNEL);
2881 		if (!nskb)
2882 			continue;
2883 		if (chan->ops->recv(chan, nskb))
2884 			kfree_skb(nskb);
2885 	}
2886 
2887 	mutex_unlock(&conn->chan_lock);
2888 }
2889 
2890 /* ---- L2CAP signalling commands ---- */
/* Allocate and build an L2CAP signalling PDU carrying one command
 * (@code, @ident, @dlen bytes of @data).  Payload beyond the HCI MTU
 * is chained as continuation fragments on frag_list.  Returns the skb
 * or NULL on allocation failure (or if the MTU cannot even hold the
 * headers).
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling commands use the fixed signalling channel */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Copy as much payload as fits the first skb */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
2956 
/* Decode one configuration option at *ptr, advancing *ptr past it.
 * The option type and length are returned through @type/@olen;
 * 1/2/4-byte option values are returned by value in @val, any other
 * length as a pointer to the raw option data.  Returns the total
 * number of bytes consumed.
 *
 * NOTE(review): opt->len comes straight from the peer; callers must
 * make sure the remaining buffer really covers opt->len bytes before
 * dereferencing *val as a pointer — confirm at each call site.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer to the data */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2990 
/* Append one configuration option (type/len/value) at *ptr, advancing
 * *ptr past it.  1/2/4-byte values are stored little-endian; any other
 * length is copied from the buffer @val points at.  Silently does
 * nothing when the option would not fit in the remaining @size bytes.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Bounds check against the caller's remaining buffer space */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: @val is a pointer to the data */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3023 
3024 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3025 {
3026 	struct l2cap_conf_efs efs;
3027 
3028 	switch (chan->mode) {
3029 	case L2CAP_MODE_ERTM:
3030 		efs.id		= chan->local_id;
3031 		efs.stype	= chan->local_stype;
3032 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3033 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3034 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3035 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3036 		break;
3037 
3038 	case L2CAP_MODE_STREAMING:
3039 		efs.id		= 1;
3040 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3041 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3042 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3043 		efs.acc_lat	= 0;
3044 		efs.flush_to	= 0;
3045 		break;
3046 
3047 	default:
3048 		return;
3049 	}
3050 
3051 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3052 			   (unsigned long) &efs, size);
3053 }
3054 
/* Delayed work fired when received I-frames still await an explicit
 * acknowledgement: if any frames remain unacked, send an RR/RNR
 * S-frame.  Drops the channel reference held for this work item.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Frames received since the last acknowledgement we sent */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3074 
/* Reset per-channel transmit/receive bookkeeping when a channel comes
 * up.  For ERTM mode, additionally initialise the timers, the SREJ
 * receive queue and the sequence lists.  Returns 0 on success or a
 * negative errno if a sequence list cannot be allocated.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP channel-move state starts out stable on BR/EDR */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Only ERTM needs the state machine and timers below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Don't leak the first list on partial failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3119 
3120 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3121 {
3122 	switch (mode) {
3123 	case L2CAP_MODE_STREAMING:
3124 	case L2CAP_MODE_ERTM:
3125 		if (l2cap_mode_supported(mode, remote_feat_mask))
3126 			return mode;
3127 		/* fall through */
3128 	default:
3129 		return L2CAP_MODE_BASIC;
3130 	}
3131 }
3132 
3133 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3134 {
3135 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3136 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3137 }
3138 
3139 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3140 {
3141 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3142 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3143 }
3144 
/* Select ERTM retransmission/monitor timeouts: derived from the AMP
 * controller's best-effort flush timeout when the channel runs over an
 * AMP link, otherwise the spec defaults.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range of the RFC option fields */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3182 
3183 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3184 {
3185 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3186 	    __l2cap_ews_supported(chan->conn)) {
3187 		/* use extended control field */
3188 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3189 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3190 	} else {
3191 		chan->tx_win = min_t(u16, chan->tx_win,
3192 				     L2CAP_DEFAULT_TX_WINDOW);
3193 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3194 	}
3195 	chan->ack_win = chan->tx_win;
3196 }
3197 
/* Build the option list of an outgoing Configure Request for @chan
 * into @data (at most @data_size bytes) and return the number of bytes
 * used.  On the first request this may also (re)select the channel
 * mode based on the remote feature mask.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only before any config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* MTU option is only needed when deviating from the default */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only advertise Basic via RFC when the peer knows ERTM
		 * or Streaming at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* MPS must leave room for the largest ERTM overhead */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* Extended windows advertise the full window via EWS */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3319 
3320 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3321 {
3322 	struct l2cap_conf_rsp *rsp = data;
3323 	void *ptr = rsp->data;
3324 	void *endptr = data + data_size;
3325 	void *req = chan->conf_req;
3326 	int len = chan->conf_len;
3327 	int type, hint, olen;
3328 	unsigned long val;
3329 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3330 	struct l2cap_conf_efs efs;
3331 	u8 remote_efs = 0;
3332 	u16 mtu = L2CAP_DEFAULT_MTU;
3333 	u16 result = L2CAP_CONF_SUCCESS;
3334 	u16 size;
3335 
3336 	BT_DBG("chan %p", chan);
3337 
3338 	while (len >= L2CAP_CONF_OPT_SIZE) {
3339 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3340 
3341 		hint  = type & L2CAP_CONF_HINT;
3342 		type &= L2CAP_CONF_MASK;
3343 
3344 		switch (type) {
3345 		case L2CAP_CONF_MTU:
3346 			mtu = val;
3347 			break;
3348 
3349 		case L2CAP_CONF_FLUSH_TO:
3350 			chan->flush_to = val;
3351 			break;
3352 
3353 		case L2CAP_CONF_QOS:
3354 			break;
3355 
3356 		case L2CAP_CONF_RFC:
3357 			if (olen == sizeof(rfc))
3358 				memcpy(&rfc, (void *) val, olen);
3359 			break;
3360 
3361 		case L2CAP_CONF_FCS:
3362 			if (val == L2CAP_FCS_NONE)
3363 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3364 			break;
3365 
3366 		case L2CAP_CONF_EFS:
3367 			if (olen == sizeof(efs)) {
3368 				remote_efs = 1;
3369 				memcpy(&efs, (void *) val, olen);
3370 			}
3371 			break;
3372 
3373 		case L2CAP_CONF_EWS:
3374 			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3375 				return -ECONNREFUSED;
3376 
3377 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3378 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3379 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3380 			chan->remote_tx_win = val;
3381 			break;
3382 
3383 		default:
3384 			if (hint)
3385 				break;
3386 
3387 			result = L2CAP_CONF_UNKNOWN;
3388 			*((u8 *) ptr++) = type;
3389 			break;
3390 		}
3391 	}
3392 
3393 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3394 		goto done;
3395 
3396 	switch (chan->mode) {
3397 	case L2CAP_MODE_STREAMING:
3398 	case L2CAP_MODE_ERTM:
3399 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3400 			chan->mode = l2cap_select_mode(rfc.mode,
3401 						       chan->conn->feat_mask);
3402 			break;
3403 		}
3404 
3405 		if (remote_efs) {
3406 			if (__l2cap_efs_supported(chan->conn))
3407 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3408 			else
3409 				return -ECONNREFUSED;
3410 		}
3411 
3412 		if (chan->mode != rfc.mode)
3413 			return -ECONNREFUSED;
3414 
3415 		break;
3416 	}
3417 
3418 done:
3419 	if (chan->mode != rfc.mode) {
3420 		result = L2CAP_CONF_UNACCEPT;
3421 		rfc.mode = chan->mode;
3422 
3423 		if (chan->num_conf_rsp == 1)
3424 			return -ECONNREFUSED;
3425 
3426 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3427 				   (unsigned long) &rfc, endptr - ptr);
3428 	}
3429 
3430 	if (result == L2CAP_CONF_SUCCESS) {
3431 		/* Configure output options and let the other side know
3432 		 * which ones we don't like. */
3433 
3434 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3435 			result = L2CAP_CONF_UNACCEPT;
3436 		else {
3437 			chan->omtu = mtu;
3438 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3439 		}
3440 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3441 
3442 		if (remote_efs) {
3443 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3444 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3445 			    efs.stype != chan->local_stype) {
3446 
3447 				result = L2CAP_CONF_UNACCEPT;
3448 
3449 				if (chan->num_conf_req >= 1)
3450 					return -ECONNREFUSED;
3451 
3452 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3453 						   sizeof(efs),
3454 						   (unsigned long) &efs, endptr - ptr);
3455 			} else {
3456 				/* Send PENDING Conf Rsp */
3457 				result = L2CAP_CONF_PENDING;
3458 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3459 			}
3460 		}
3461 
3462 		switch (rfc.mode) {
3463 		case L2CAP_MODE_BASIC:
3464 			chan->fcs = L2CAP_FCS_NONE;
3465 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3466 			break;
3467 
3468 		case L2CAP_MODE_ERTM:
3469 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3470 				chan->remote_tx_win = rfc.txwin_size;
3471 			else
3472 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3473 
3474 			chan->remote_max_tx = rfc.max_transmit;
3475 
3476 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3477 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3478 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3479 			rfc.max_pdu_size = cpu_to_le16(size);
3480 			chan->remote_mps = size;
3481 
3482 			__l2cap_set_ertm_timeouts(chan, &rfc);
3483 
3484 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3485 
3486 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3487 					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3488 
3489 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3490 				chan->remote_id = efs.id;
3491 				chan->remote_stype = efs.stype;
3492 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3493 				chan->remote_flush_to =
3494 					le32_to_cpu(efs.flush_to);
3495 				chan->remote_acc_lat =
3496 					le32_to_cpu(efs.acc_lat);
3497 				chan->remote_sdu_itime =
3498 					le32_to_cpu(efs.sdu_itime);
3499 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3500 						   sizeof(efs),
3501 						   (unsigned long) &efs, endptr - ptr);
3502 			}
3503 			break;
3504 
3505 		case L2CAP_MODE_STREAMING:
3506 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3507 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3508 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3509 			rfc.max_pdu_size = cpu_to_le16(size);
3510 			chan->remote_mps = size;
3511 
3512 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3513 
3514 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3515 					   (unsigned long) &rfc, endptr - ptr);
3516 
3517 			break;
3518 
3519 		default:
3520 			result = L2CAP_CONF_UNACCEPT;
3521 
3522 			memset(&rfc, 0, sizeof(rfc));
3523 			rfc.mode = chan->mode;
3524 		}
3525 
3526 		if (result == L2CAP_CONF_SUCCESS)
3527 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3528 	}
3529 	rsp->scid   = cpu_to_le16(chan->dcid);
3530 	rsp->result = cpu_to_le16(result);
3531 	rsp->flags  = cpu_to_le16(0);
3532 
3533 	return ptr - data;
3534 }
3535 
3536 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3537 				void *data, size_t size, u16 *result)
3538 {
3539 	struct l2cap_conf_req *req = data;
3540 	void *ptr = req->data;
3541 	void *endptr = data + size;
3542 	int type, olen;
3543 	unsigned long val;
3544 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3545 	struct l2cap_conf_efs efs;
3546 
3547 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3548 
3549 	while (len >= L2CAP_CONF_OPT_SIZE) {
3550 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3551 
3552 		switch (type) {
3553 		case L2CAP_CONF_MTU:
3554 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3555 				*result = L2CAP_CONF_UNACCEPT;
3556 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3557 			} else
3558 				chan->imtu = val;
3559 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3560 			break;
3561 
3562 		case L2CAP_CONF_FLUSH_TO:
3563 			chan->flush_to = val;
3564 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3565 					   2, chan->flush_to, endptr - ptr);
3566 			break;
3567 
3568 		case L2CAP_CONF_RFC:
3569 			if (olen == sizeof(rfc))
3570 				memcpy(&rfc, (void *)val, olen);
3571 
3572 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3573 			    rfc.mode != chan->mode)
3574 				return -ECONNREFUSED;
3575 
3576 			chan->fcs = 0;
3577 
3578 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3579 					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3580 			break;
3581 
3582 		case L2CAP_CONF_EWS:
3583 			chan->ack_win = min_t(u16, val, chan->ack_win);
3584 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3585 					   chan->tx_win, endptr - ptr);
3586 			break;
3587 
3588 		case L2CAP_CONF_EFS:
3589 			if (olen == sizeof(efs)) {
3590 				memcpy(&efs, (void *)val, olen);
3591 
3592 				if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3593 				    efs.stype != L2CAP_SERV_NOTRAFIC &&
3594 				    efs.stype != chan->local_stype)
3595 					return -ECONNREFUSED;
3596 
3597 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3598 						   (unsigned long) &efs, endptr - ptr);
3599 			}
3600 			break;
3601 
3602 		case L2CAP_CONF_FCS:
3603 			if (*result == L2CAP_CONF_PENDING)
3604 				if (val == L2CAP_FCS_NONE)
3605 					set_bit(CONF_RECV_NO_FCS,
3606 						&chan->conf_state);
3607 			break;
3608 		}
3609 	}
3610 
3611 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3612 		return -ECONNREFUSED;
3613 
3614 	chan->mode = rfc.mode;
3615 
3616 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3617 		switch (rfc.mode) {
3618 		case L2CAP_MODE_ERTM:
3619 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3620 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3621 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3622 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3623 				chan->ack_win = min_t(u16, chan->ack_win,
3624 						      rfc.txwin_size);
3625 
3626 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3627 				chan->local_msdu = le16_to_cpu(efs.msdu);
3628 				chan->local_sdu_itime =
3629 					le32_to_cpu(efs.sdu_itime);
3630 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3631 				chan->local_flush_to =
3632 					le32_to_cpu(efs.flush_to);
3633 			}
3634 			break;
3635 
3636 		case L2CAP_MODE_STREAMING:
3637 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3638 		}
3639 	}
3640 
3641 	req->dcid   = cpu_to_le16(chan->dcid);
3642 	req->flags  = cpu_to_le16(0);
3643 
3644 	return ptr - data;
3645 }
3646 
3647 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3648 				u16 result, u16 flags)
3649 {
3650 	struct l2cap_conf_rsp *rsp = data;
3651 	void *ptr = rsp->data;
3652 
3653 	BT_DBG("chan %p", chan);
3654 
3655 	rsp->scid   = cpu_to_le16(chan->dcid);
3656 	rsp->result = cpu_to_le16(result);
3657 	rsp->flags  = cpu_to_le16(flags);
3658 
3659 	return ptr - data;
3660 }
3661 
3662 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3663 {
3664 	struct l2cap_le_conn_rsp rsp;
3665 	struct l2cap_conn *conn = chan->conn;
3666 
3667 	BT_DBG("chan %p", chan);
3668 
3669 	rsp.dcid    = cpu_to_le16(chan->scid);
3670 	rsp.mtu     = cpu_to_le16(chan->imtu);
3671 	rsp.mps     = cpu_to_le16(chan->mps);
3672 	rsp.credits = cpu_to_le16(chan->rx_credits);
3673 	rsp.result  = cpu_to_le16(L2CAP_CR_SUCCESS);
3674 
3675 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3676 		       &rsp);
3677 }
3678 
3679 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3680 {
3681 	struct l2cap_conn_rsp rsp;
3682 	struct l2cap_conn *conn = chan->conn;
3683 	u8 buf[128];
3684 	u8 rsp_code;
3685 
3686 	rsp.scid   = cpu_to_le16(chan->dcid);
3687 	rsp.dcid   = cpu_to_le16(chan->scid);
3688 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3689 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3690 
3691 	if (chan->hs_hcon)
3692 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3693 	else
3694 		rsp_code = L2CAP_CONN_RSP;
3695 
3696 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3697 
3698 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3699 
3700 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3701 		return;
3702 
3703 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3704 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3705 	chan->num_conf_req++;
3706 }
3707 
3708 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3709 {
3710 	int type, olen;
3711 	unsigned long val;
3712 	/* Use sane default values in case a misbehaving remote device
3713 	 * did not send an RFC or extended window size option.
3714 	 */
3715 	u16 txwin_ext = chan->ack_win;
3716 	struct l2cap_conf_rfc rfc = {
3717 		.mode = chan->mode,
3718 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3719 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3720 		.max_pdu_size = cpu_to_le16(chan->imtu),
3721 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3722 	};
3723 
3724 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3725 
3726 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3727 		return;
3728 
3729 	while (len >= L2CAP_CONF_OPT_SIZE) {
3730 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3731 
3732 		switch (type) {
3733 		case L2CAP_CONF_RFC:
3734 			if (olen == sizeof(rfc))
3735 				memcpy(&rfc, (void *)val, olen);
3736 			break;
3737 		case L2CAP_CONF_EWS:
3738 			txwin_ext = val;
3739 			break;
3740 		}
3741 	}
3742 
3743 	switch (rfc.mode) {
3744 	case L2CAP_MODE_ERTM:
3745 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3746 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3747 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
3748 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3749 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3750 		else
3751 			chan->ack_win = min_t(u16, chan->ack_win,
3752 					      rfc.txwin_size);
3753 		break;
3754 	case L2CAP_MODE_STREAMING:
3755 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3756 	}
3757 }
3758 
3759 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3760 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3761 				    u8 *data)
3762 {
3763 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3764 
3765 	if (cmd_len < sizeof(*rej))
3766 		return -EPROTO;
3767 
3768 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3769 		return 0;
3770 
3771 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3772 	    cmd->ident == conn->info_ident) {
3773 		cancel_delayed_work(&conn->info_timer);
3774 
3775 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3776 		conn->info_ident = 0;
3777 
3778 		l2cap_conn_start(conn);
3779 	}
3780 
3781 	return 0;
3782 }
3783 
/* Handle an incoming Connection Request (or AMP Create Channel Request
 * when called with amp_id != 0) and allocate a new channel on @conn.
 *
 * Responds with @rsp_code carrying one of:
 *   L2CAP_CR_BAD_PSM   - no listener on the requested PSM
 *   L2CAP_CR_SEC_BLOCK - link security insufficient for a non-SDP PSM
 *   L2CAP_CR_NO_MEM    - duplicate remote CID or channel alloc failure
 *   L2CAP_CR_SUCCESS / L2CAP_CR_PEND - accepted (possibly pending)
 *
 * Returns the new channel, or NULL if none was created.
 * Locking: takes conn->chan_lock and the parent (listening) channel
 * lock; both are dropped before the response is sent.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	/* Peer's source CID becomes our destination CID */
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Userspace must authorize the connection */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange not finished yet */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Kick off the feature-mask info exchange if it hasn't started */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3918 
3919 static int l2cap_connect_req(struct l2cap_conn *conn,
3920 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3921 {
3922 	struct hci_dev *hdev = conn->hcon->hdev;
3923 	struct hci_conn *hcon = conn->hcon;
3924 
3925 	if (cmd_len < sizeof(struct l2cap_conn_req))
3926 		return -EPROTO;
3927 
3928 	hci_dev_lock(hdev);
3929 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3930 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3931 		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3932 	hci_dev_unlock(hdev);
3933 
3934 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3935 	return 0;
3936 }
3937 
/* Handle a Connection Response / Create Channel Response for a channel
 * we initiated.  On success, records the peer's CID and sends our first
 * Configure Request; on a pending result, just marks the channel; on
 * any other result, tears the channel down.
 *
 * Locking: conn->chan_lock is held across the channel lookup and use.
 * NOTE(review): the looked-up chan is used without an explicit hold;
 * this relies on chan_lock preventing concurrent removal — confirm
 * against l2cap_chan_del() callers.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* A pending response may not carry our scid yet; fall back to
	 * matching on the command identifier we sent.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the initial Configure Request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result (bad PSM, security block, no
		 * resources) means the connection was refused.
		 */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4010 
4011 static inline void set_default_fcs(struct l2cap_chan *chan)
4012 {
4013 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4014 	 * sides request it.
4015 	 */
4016 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4017 		chan->fcs = L2CAP_FCS_NONE;
4018 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4019 		chan->fcs = L2CAP_FCS_CRC16;
4020 }
4021 
4022 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4023 				    u8 ident, u16 flags)
4024 {
4025 	struct l2cap_conn *conn = chan->conn;
4026 
4027 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4028 	       flags);
4029 
4030 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4031 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4032 
4033 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4034 		       l2cap_build_conf_rsp(chan, data,
4035 					    L2CAP_CONF_SUCCESS, flags), data);
4036 }
4037 
4038 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4039 				   u16 scid, u16 dcid)
4040 {
4041 	struct l2cap_cmd_rej_cid rej;
4042 
4043 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4044 	rej.scid = __cpu_to_le16(scid);
4045 	rej.dcid = __cpu_to_le16(dcid);
4046 
4047 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4048 }
4049 
/* Handle an incoming Configure Request.
 *
 * Fragmented requests (L2CAP_CONF_FLAG_CONTINUATION) are accumulated in
 * chan->conf_req until the final fragment arrives, then parsed as one
 * unit.  When both input and output configuration are done the channel
 * transitions to ready; otherwise we send our own Configure Request if
 * we haven't yet.
 *
 * Returns 0, or a negative error from l2cap_ertm_init().
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal while connecting/configuring */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		/* Incompatible configuration: drop the channel */
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: bring the channel up */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			/* AMP: defer the response until the logical
			 * link is established
			 */
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4158 
/* Handle an incoming Configure Response to a request we sent.
 *
 * SUCCESS applies the negotiated RFC parameters; PENDING may trigger an
 * EFS response or AMP logical-link creation; UNACCEPT retries with a
 * revised request up to L2CAP_CONF_MAX_CONF_RSP times; anything else
 * disconnects the channel.
 *
 * Returns 0, or a negative error from l2cap_ertm_init().
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				/* AMP: bring up the logical link first;
				 * the response is sent once it is ready
				 */
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Retry limit exceeded: treat like a fatal result.
		 * fall through
		 */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: bring the channel up */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4270 
/* Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response, then tear the channel down.
 *
 * The hold/del/unlock/close ordering is deliberate: the extra reference
 * keeps the channel alive while ops->close() runs outside the channel
 * lock (close may sleep / take socket locks), and l2cap_chan_del()
 * happens under both conn->chan_lock and the channel lock.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4317 
/* Handle an incoming Disconnection Response to a request we sent and
 * finish tearing down the channel.
 *
 * Same hold/del/unlock/close ordering as l2cap_disconnect_req(): the
 * extra reference keeps the channel alive for ops->close() after the
 * channel lock is released.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	l2cap_chan_hold(chan);
	/* err 0: this is a clean, locally-initiated disconnect */
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4356 
/* Handle an incoming Information Request and reply with the requested
 * data: the local feature mask, the fixed-channel bitmap, or NOTSUPP
 * for any other type.  The response buffers are laid out as the wire
 * format: l2cap_info_rsp header followed by the payload bytes.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4-byte rsp header + 4-byte feature mask */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 4-byte rsp header + 8-byte fixed channel bitmap */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4407 
/* Handle an incoming Information Response to our own Information
 * Request.  After the feature mask arrives, a follow-up fixed-channel
 * request is issued if the peer supports fixed channels; once the
 * exchange is complete (or fails), pending channels are started via
 * l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer couldn't answer: give up and start channels with
		 * default (empty) feature assumptions.
		 */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Chain a fixed-channel query before declaring
			 * the exchange done.
			 */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4470 
/* Handle an A2MP Create Channel Request.
 *
 * For controller id AMP_ID_BREDR this degenerates to a normal BR/EDR
 * connect handled by l2cap_connect().  For an AMP controller id, the
 * id must refer to an existing, powered-up HCI_AMP device and an
 * AMP_LINK hci_conn to the same peer must already exist; otherwise the
 * request is answered with L2CAP_CR_BAD_AMP (or the CID is rejected if
 * only the AMP link is missing).  On success the new channel is wired
 * to the high-speed link and FCS is disabled for it.
 *
 * Returns 0, -EPROTO on a malformed request, or -EINVAL when A2MP is
 * not enabled locally.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			/* No AMP link to the peer: reject with the CIDs so
			 * the remote can tear down its side.
			 */
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* FCS is not used on AMP links. */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4547 
4548 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4549 {
4550 	struct l2cap_move_chan_req req;
4551 	u8 ident;
4552 
4553 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4554 
4555 	ident = l2cap_get_ident(chan->conn);
4556 	chan->ident = ident;
4557 
4558 	req.icid = cpu_to_le16(chan->scid);
4559 	req.dest_amp_id = dest_amp_id;
4560 
4561 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4562 		       &req);
4563 
4564 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4565 }
4566 
4567 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4568 {
4569 	struct l2cap_move_chan_rsp rsp;
4570 
4571 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4572 
4573 	rsp.icid = cpu_to_le16(chan->dcid);
4574 	rsp.result = cpu_to_le16(result);
4575 
4576 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4577 		       sizeof(rsp), &rsp);
4578 }
4579 
4580 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4581 {
4582 	struct l2cap_move_chan_cfm cfm;
4583 
4584 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4585 
4586 	chan->ident = l2cap_get_ident(chan->conn);
4587 
4588 	cfm.icid = cpu_to_le16(chan->scid);
4589 	cfm.result = cpu_to_le16(result);
4590 
4591 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4592 		       sizeof(cfm), &cfm);
4593 
4594 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4595 }
4596 
4597 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4598 {
4599 	struct l2cap_move_chan_cfm cfm;
4600 
4601 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4602 
4603 	cfm.icid = cpu_to_le16(icid);
4604 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4605 
4606 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4607 		       sizeof(cfm), &cfm);
4608 }
4609 
4610 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4611 					 u16 icid)
4612 {
4613 	struct l2cap_move_chan_cfm_rsp rsp;
4614 
4615 	BT_DBG("icid 0x%4.4x", icid);
4616 
4617 	rsp.icid = cpu_to_le16(icid);
4618 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4619 }
4620 
4621 static void __release_logical_link(struct l2cap_chan *chan)
4622 {
4623 	chan->hs_hchan = NULL;
4624 	chan->hs_hcon = NULL;
4625 
4626 	/* Placeholder - release the logical link */
4627 }
4628 
/* Unwind channel state after a failed logical link setup.
 *
 * If the channel never reached BT_CONNECTED the failure belongs to
 * channel creation and the channel is disconnected.  Otherwise a move
 * was in flight: the responder declines the move, while the initiator
 * cleans up (only if the remote had answered pending/success) and sends
 * an unconfirmed Move Channel Confirm.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4659 
/* Complete channel creation once the AMP logical link came up.
 *
 * Attaches @hchan to the channel, sends the deferred EFS configure
 * response, and — if both configuration directions are done — finishes
 * ERTM initialization and marks the channel ready (or disconnects it if
 * ERTM init fails).
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* Config response was deferred until the logical link existed. */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4682 
/* Advance the channel-move state machine once the logical link is up.
 *
 * Depending on the current move_state either waits for the peer's
 * success response, defers while locally busy, or sends the role's
 * confirm/response immediately.  Any unexpected state releases the
 * logical link and declares the move stable.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4716 
/* Logical link confirmation callback.  Call with chan locked.
 *
 * On failure, unwinds via l2cap_logical_fail() and drops the link
 * references.  On success, finishes either channel creation (when the
 * channel is not yet connected and actually lives on an AMP controller)
 * or a channel move.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4737 
4738 void l2cap_move_start(struct l2cap_chan *chan)
4739 {
4740 	BT_DBG("chan %p", chan);
4741 
4742 	if (chan->local_amp_id == AMP_ID_BREDR) {
4743 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4744 			return;
4745 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4746 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4747 		/* Placeholder - start physical link setup */
4748 	} else {
4749 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4750 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4751 		chan->move_id = 0;
4752 		l2cap_move_setup(chan);
4753 		l2cap_send_move_chan_req(chan, 0);
4754 	}
4755 }
4756 
/* Continue channel creation after physical link setup on an AMP.
 *
 * Outgoing channels (BT_CONNECT) either proceed with a Create Channel
 * Request on the AMP or fall back to a plain BR/EDR Connect Request.
 * Incoming channels answer the pending Create Channel Request with
 * success or L2CAP_CR_NO_MEM, and on success immediately move to
 * BT_CONFIG and send the first Configure Request.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* FCS is not used on AMP links. */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Kick off configuration right away. */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}
	}
}
4808 
4809 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4810 				   u8 remote_amp_id)
4811 {
4812 	l2cap_move_setup(chan);
4813 	chan->move_id = local_amp_id;
4814 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
4815 
4816 	l2cap_send_move_chan_req(chan, remote_amp_id);
4817 }
4818 
/* As move responder: answer once physical link setup has completed.
 *
 * NOTE(review): the logical-link lookup is still a placeholder, so
 * hchan is always NULL here and only the "not allowed" path can run
 * until the placeholder is implemented.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4843 
4844 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4845 {
4846 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4847 		u8 rsp_result;
4848 		if (result == -EINVAL)
4849 			rsp_result = L2CAP_MR_BAD_ID;
4850 		else
4851 			rsp_result = L2CAP_MR_NOT_ALLOWED;
4852 
4853 		l2cap_send_move_chan_rsp(chan, rsp_result);
4854 	}
4855 
4856 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
4857 	chan->move_state = L2CAP_MOVE_STABLE;
4858 
4859 	/* Restart data transmission */
4860 	l2cap_ertm_send(chan);
4861 }
4862 
/* Physical link confirmation callback.  Invoke with locked chan.
 *
 * Dispatches to channel creation, move initiation/response, or move
 * cancellation depending on channel state, @result and move_role.
 *
 * NOTE(review): the DISCONN/CLOSED early-return path unlocks the
 * channel while every other path leaves it locked for the caller —
 * verify this asymmetry matches what callers expect.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4896 
/* Handle an incoming Move Channel Request.
 *
 * Validates the icid, channel mode, policy and destination controller,
 * detects move collisions (the side with the larger bd_addr wins), and
 * either starts acting as move responder or rejects the request.  The
 * channel returned by l2cap_get_chan_by_dcid() is unlocked before
 * returning.
 *
 * Returns 0, -EPROTO on a malformed request, or -EINVAL when A2MP is
 * not enabled locally.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* No such channel: reject by echoing the icid back. */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Only dynamic ERTM/streaming channels may be moved. */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	if (req->dest_amp_id != AMP_ID_BREDR) {
		/* Destination must be an existing, powered-up AMP. */
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision.  Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4994 
/* Continue an initiator-side move after a success/pending Move Channel
 * Response for @icid.
 *
 * If no channel matches, an "unconfirmed" confirm is sent with the icid
 * alone.  Otherwise the guard timer is refreshed (ERTX on pending) and
 * the move_state machine advances; unexpected states abort the move.
 * The channel from l2cap_get_chan_by_scid() is unlocked before return.
 *
 * NOTE(review): the logical-link lookup is still a placeholder, so in
 * the L2CAP_MOVE_WAIT_RSP case hchan is always NULL and the move is
 * currently always unconfirmed there.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5084 
/* Handle a failed Move Channel Response.
 *
 * Locates the channel by the request ident (falling back to the icid
 * for the confirm if not found).  A collision result demotes this side
 * to move responder; any other failure cancels the move.  In all cases
 * an unconfirmed Move Channel Confirm is sent, and the channel from
 * l2cap_get_chan_by_ident() is unlocked before return.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5113 
5114 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5115 				  struct l2cap_cmd_hdr *cmd,
5116 				  u16 cmd_len, void *data)
5117 {
5118 	struct l2cap_move_chan_rsp *rsp = data;
5119 	u16 icid, result;
5120 
5121 	if (cmd_len != sizeof(*rsp))
5122 		return -EPROTO;
5123 
5124 	icid = le16_to_cpu(rsp->icid);
5125 	result = le16_to_cpu(rsp->result);
5126 
5127 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5128 
5129 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5130 		l2cap_move_continue(conn, icid, result);
5131 	else
5132 		l2cap_move_fail(conn, cmd->ident, icid, result);
5133 
5134 	return 0;
5135 }
5136 
/* Handle an incoming Move Channel Confirm.
 *
 * A confirm response is always sent, even when the icid is unknown.  If
 * the channel was waiting for the confirm, the move is finalized: on
 * L2CAP_MC_CONFIRMED the channel adopts the new controller id (and
 * drops the logical link when back on BR/EDR), otherwise the move is
 * rolled back.  The channel from l2cap_get_chan_by_dcid() is unlocked
 * before return.
 *
 * Returns 0, or -EPROTO on a malformed confirm.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Unconfirmed: stay on the current controller. */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5178 
/* Handle an incoming Move Channel Confirm Response.
 *
 * If the channel was waiting for this acknowledgement, the move is
 * finalized: the channel adopts the destination controller id, releases
 * the logical link when it ends up back on BR/EDR, and returns to the
 * stable state.  The channel from l2cap_get_chan_by_scid() is unlocked
 * before return.
 *
 * Returns 0, or -EPROTO on a malformed response.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5213 
5214 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5215 					      struct l2cap_cmd_hdr *cmd,
5216 					      u16 cmd_len, u8 *data)
5217 {
5218 	struct hci_conn *hcon = conn->hcon;
5219 	struct l2cap_conn_param_update_req *req;
5220 	struct l2cap_conn_param_update_rsp rsp;
5221 	u16 min, max, latency, to_multiplier;
5222 	int err;
5223 
5224 	if (hcon->role != HCI_ROLE_MASTER)
5225 		return -EINVAL;
5226 
5227 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5228 		return -EPROTO;
5229 
5230 	req = (struct l2cap_conn_param_update_req *) data;
5231 	min		= __le16_to_cpu(req->min);
5232 	max		= __le16_to_cpu(req->max);
5233 	latency		= __le16_to_cpu(req->latency);
5234 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5235 
5236 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5237 	       min, max, latency, to_multiplier);
5238 
5239 	memset(&rsp, 0, sizeof(rsp));
5240 
5241 	err = hci_check_conn_params(min, max, latency, to_multiplier);
5242 	if (err)
5243 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5244 	else
5245 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5246 
5247 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5248 		       sizeof(rsp), &rsp);
5249 
5250 	if (!err) {
5251 		u8 store_hint;
5252 
5253 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5254 						to_multiplier);
5255 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5256 				    store_hint, min, max, latency,
5257 				    to_multiplier);
5258 
5259 	}
5260 
5261 	return 0;
5262 }
5263 
/* Handle an LE Credit Based Connection Response.
 *
 * Looks up the pending channel by ident under conn->chan_lock.  On
 * success the remote's dcid/MTU/MPS/credits are adopted and the channel
 * becomes ready (after verifying the dcid is not already in use).  On
 * an authentication/encryption failure the security level is raised and
 * pairing is retried, unless MITM protection is already in place.  Any
 * other result deletes the channel.
 *
 * Returns 0, -EPROTO on a malformed/implausible response, or -EBADSLT
 * when the ident or dcid cannot be matched.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* 23 is the minimum LE data size; dcid must be in the LE
	 * dynamic CID range.
	 */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Refuse a dcid that is already bound to another channel. */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_AUTHENTICATION:
	case L2CAP_CR_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5350 
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Echo requests are answered inline by reflecting the payload; echo
 * responses are ignored.  Unknown opcodes return -EINVAL so the caller
 * can send a Command Reject.  Handlers whose return value is ignored
 * here have their errors deliberately swallowed (responses never
 * trigger a reject).
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo back the request payload unchanged. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5430 
/* Handle an LE Credit Based Connection Request.
 *
 * Validates MTU/MPS minimums and the source CID range, finds a
 * listening channel for the PSM with sufficient security, checks the
 * remote CID is not already in use, and spawns a new channel bound to
 * it.  With FLAG_DEFER_SETUP the response is left to userspace:
 * L2CAP_CR_PEND is used purely as an internal marker to skip sending a
 * response here (see comment below).  Otherwise an LE Connection
 * Response reporting the outcome is always sent.
 *
 * Returns 0, or -EPROTO on a malformed or implausible request.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 is the minimum LE data size. */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	/* Bind the new channel to the peer and adopt its parameters. */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan);

	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5557 
/* Handle an LE Flow Control Credit packet: the peer grants us extra
 * TX credits on the channel identified by @cid (our dcid).
 *
 * Returns 0 on success (including the overflow case, which is handled
 * locally by disconnecting the channel), -EPROTO on a malformed
 * packet, or -EBADSLT if no channel matches @cid.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid	= __le16_to_cpu(pkt->cid);
	credits	= __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	/* On success the channel is returned locked (we unlock it on
	 * every exit path below).
	 */
	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* A peer that pushes tx_credits past LE_FLOWCTL_MAX_CREDITS is
	 * misbehaving, so tear the channel down instead of wrapping.
	 */
	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		return 0;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	/* Unblock the owning socket if we can transmit again */
	if (chan->tx_credits)
		chan->ops->resume(chan);

	l2cap_chan_unlock(chan);

	return 0;
}
5603 
/* Handle an incoming LE Command Reject.
 *
 * The peer rejected a request we sent; find the channel still waiting
 * on a response with this ident (if any) and tear it down with
 * ECONNREFUSED.  Returns -EPROTO if the packet is too short to hold a
 * reject payload, otherwise 0 so we never answer a reject with
 * another reject.
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	/* chan_lock protects the ident lookup against concurrent
	 * channel add/remove on this connection.
	 */
	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		goto done;

	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNREFUSED);
	l2cap_chan_unlock(chan);

done:
	mutex_unlock(&conn->chan_lock);
	return 0;
}
5628 
5629 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5630 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5631 				   u8 *data)
5632 {
5633 	int err = 0;
5634 
5635 	switch (cmd->code) {
5636 	case L2CAP_COMMAND_REJ:
5637 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
5638 		break;
5639 
5640 	case L2CAP_CONN_PARAM_UPDATE_REQ:
5641 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5642 		break;
5643 
5644 	case L2CAP_CONN_PARAM_UPDATE_RSP:
5645 		break;
5646 
5647 	case L2CAP_LE_CONN_RSP:
5648 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5649 		break;
5650 
5651 	case L2CAP_LE_CONN_REQ:
5652 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5653 		break;
5654 
5655 	case L2CAP_LE_CREDITS:
5656 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
5657 		break;
5658 
5659 	case L2CAP_DISCONN_REQ:
5660 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5661 		break;
5662 
5663 	case L2CAP_DISCONN_RSP:
5664 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5665 		break;
5666 
5667 	default:
5668 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5669 		err = -EINVAL;
5670 		break;
5671 	}
5672 
5673 	return err;
5674 }
5675 
/* Receive handler for the LE signaling channel.
 *
 * Unlike BR/EDR, an LE signaling PDU carries exactly one command, so
 * the advertised command length must exactly match the remaining skb
 * length.  Frames on the wrong link type or malformed frames are
 * silently dropped; a handler error triggers a Command Reject (Not
 * Understood) back to the peer.  Consumes @skb in all cases.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* An ident of 0 is reserved and never valid on the wire */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): the message is misleading -- err comes
		 * from the command handler above, not from a link-type
		 * check.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5716 
/* Receive handler for the BR/EDR signaling channel.
 *
 * A single C-frame may concatenate several commands, so walk the skb
 * and dispatch each command in turn.  A corrupted header (length past
 * the end of the frame, or the reserved ident 0) aborts the walk.
 * Each failing handler gets an individual Command Reject response.
 * Consumes @skb in all cases.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw listeners a copy of the frame before processing */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			/* NOTE(review): misleading message -- err comes
			 * from the command handler, not a link-type check.
			 */
			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance to the next concatenated command */
		data += cmd_len;
		len  -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5765 
5766 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
5767 {
5768 	u16 our_fcs, rcv_fcs;
5769 	int hdr_size;
5770 
5771 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5772 		hdr_size = L2CAP_EXT_HDR_SIZE;
5773 	else
5774 		hdr_size = L2CAP_ENH_HDR_SIZE;
5775 
5776 	if (chan->fcs == L2CAP_FCS_CRC16) {
5777 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5778 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5779 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5780 
5781 		if (our_fcs != rcv_fcs)
5782 			return -EBADMSG;
5783 	}
5784 	return 0;
5785 }
5786 
/* Acknowledge the current receive state with the F-bit set.
 *
 * If we are locally busy an RNR is sent immediately; otherwise the
 * F-bit is left pending (CONN_SEND_FBIT) so it can piggyback on an
 * outgoing I-frame.  If no frame carried it by the end, an explicit
 * RR is sent so the peer's poll is always answered.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just came out of busy: restart the retransmission
	 * timer if frames are still awaiting acknowledgement.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5820 
/* Append @new_frag to @skb's fragment list and update the aggregate
 * length and truesize accounting of the head skb.
 *
 * @last_frag tracks the current tail of the chain (the head skb
 * itself while no fragments exist yet) and is advanced to the newly
 * appended fragment.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5839 
/* Feed one I-frame payload into SDU reassembly.
 *
 * Unsegmented SDUs are delivered to the channel immediately.
 * START/CONTINUE/END frames are accumulated into chan->sdu (a
 * frag-listed skb) until the SDU length announced in the START frame
 * is reached.  On success ownership of @skb is taken; on any error
 * both @skb (if still held) and the partial SDU are freed and the
 * reassembly state is reset.
 *
 * Returns 0 on success, -EINVAL on SAR sequencing/length violations,
 * -EMSGSIZE if the announced SDU exceeds our MTU, or an error from
 * chan->ops->recv().
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* An unsegmented frame while reassembly is in progress
		 * is a protocol error.
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* A second START without finishing the previous SDU is
		 * a protocol error.
		 */
		if (chan->sdu)
			break;

		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		/* Total SDU length is prepended to the first segment */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A START segment must not already hold the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* Ownership transferred to chan->sdu */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A CONTINUE segment must leave room for the END */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Reassembled length must exactly match the announced
		 * SDU length.
		 */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* skb may have been NULLed above; kfree_skb(NULL) is a
		 * no-op, so this frees only what we still own.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5924 
/* Re-segment queued outbound data after the link MTU changes when a
 * channel move completes.  Not implemented yet, so this is currently
 * a successful no-op.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5930 
5931 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5932 {
5933 	u8 event;
5934 
5935 	if (chan->mode != L2CAP_MODE_ERTM)
5936 		return;
5937 
5938 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5939 	l2cap_tx(chan, NULL, NULL, event);
5940 }
5941 
/* Deliver in-sequence frames held in the SREJ queue.
 *
 * Pulls frames from chan->srej_q in buffer_seq order and reassembles
 * them until a gap (a still-missing frame) is encountered or local
 * busy is asserted.  Once the queue fully drains, the channel returns
 * to the normal RECV state and the delivered frames are acked.
 *
 * Returns 0 or the first reassembly error.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap: the next in-order frame hasn't arrived yet */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5975 
/* Handle a received SREJ S-frame (selective retransmission request).
 *
 * The peer asks for retransmission of the single frame whose txseq
 * equals control->reqseq.  The request is validated (the frame must
 * be outstanding and under the retry limit) before retransmitting.
 * The P/F bits are honored: a poll (P=1) is answered with the F-bit
 * set, and a final (F=1) that repeats an already-handled SREJ
 * (CONN_SREJ_ACT) is not retransmitted again.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would request a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx of 0 means unlimited retries */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* Poll must be answered with the F-bit set */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip retransmission if this final SREJ repeats
			 * one we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6033 
/* Handle a received REJ S-frame (go-back-N retransmission request).
 *
 * The peer rejects everything from control->reqseq onward.  After
 * validating reqseq and the retry limit, all unacked frames starting
 * there are retransmitted.  A REJ with F=1 answers our poll and is
 * ignored if the same rejection was already acted upon
 * (CONN_REJ_ACT).
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would reject a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx of 0 means unlimited retries */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Skip retransmission if this final REJ repeats one we
		 * already acted on.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6070 
/* Classify the txseq of a received I-frame against the current
 * receive window.
 *
 * Returns one of the L2CAP_TXSEQ_* values: the expected in-order
 * frame, a duplicate, an unexpected frame that opens a sequence gap,
 * one of the SREJ-recovery cases (expected/duplicate/unexpected
 * SREJ), or an invalid frame that must either be silently ignored or
 * cause a disconnect, depending on the tx window size (see the
 * "double poll" discussion below).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	/* Extra cases apply while SREJ recovery is in progress */
	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq earlier than expected_tx_seq means we've seen it */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6156 
/* ERTM RX state machine handler for the default RECV state.
 *
 * Processes incoming I-frames (in-order delivery, duplicate drop, or
 * transition to SREJ_SENT when a sequence gap is detected) and
 * S-frames (RR, RNR, REJ, SREJ).  Takes ownership of @skb only when
 * it is queued or handed to reassembly (skb_in_use); otherwise the
 * skb is freed before returning.  @skb may be NULL for S-frame
 * events.
 *
 * Returns 0 or a reassembly error.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* When locally busy, in-order frames are dropped
			 * and recovered later via retransmission.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				/* F=1 answers our poll: retransmit all
				 * unacked frames unless the matching REJ
				 * was already handled.
				 */
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Don't retransmit during an AMP channel move */
			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6290 
/* ERTM RX state machine handler for the SREJ_SENT state (selective
 * reject recovery in progress).
 *
 * All in-window I-frames are buffered in chan->srej_q until the
 * missing frames arrive; requested retransmissions pop entries off
 * chan->srej_list and may release buffered frames for delivery via
 * l2cap_rx_queued_iframes().  S-frames are handled much as in the
 * RECV state, except that polls are answered with the tail of the
 * outstanding SREJ list.  Takes ownership of @skb only when it is
 * queued (skb_in_use); otherwise frees it.
 *
 * Returns 0 or a reassembly error.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The retransmission we asked for first arrived;
			 * remove it from the pending SREJ list.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			/* Deliver whatever is now contiguous */
			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* F=1 answers our poll: retransmit all unacked
			 * frames unless the matching REJ was already
			 * handled.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by re-requesting the newest
			 * outstanding SREJ with the F-bit set.
			 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6433 
6434 static int l2cap_finish_move(struct l2cap_chan *chan)
6435 {
6436 	BT_DBG("chan %p", chan);
6437 
6438 	chan->rx_state = L2CAP_RX_STATE_RECV;
6439 
6440 	if (chan->hs_hcon)
6441 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6442 	else
6443 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6444 
6445 	return l2cap_resegment(chan);
6446 }
6447 
/* RX handler for the WAIT_P state (waiting for the peer's poll while
 * completing a channel move).
 *
 * Only a P=1 frame is acceptable here.  It resynchronizes the TX side
 * to the peer's reqseq, finishes the move (new MTU, re-segmentation),
 * answers with an F=1 response, and then processes the remaining
 * event in the RECV state.  I-frames are not expected to carry the
 * poll and are rejected with -EPROTO.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6485 
/* RX handler for the WAIT_F state (waiting for the peer's F=1 reply
 * to our poll during a channel move).
 *
 * Only a final (F=1) frame is acceptable here.  It resynchronizes the
 * TX side to the peer's reqseq, adopts the MTU of the link the
 * channel now runs over, re-segments pending data, and then handles
 * the frame in the normal RECV state.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Use the block MTU of the high-speed link if one is in use */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6523 
6524 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6525 {
6526 	/* Make sure reqseq is for a packet that has been sent but not acked */
6527 	u16 unacked;
6528 
6529 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6530 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6531 }
6532 
6533 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6534 		    struct sk_buff *skb, u8 event)
6535 {
6536 	int err = 0;
6537 
6538 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6539 	       control, skb, event, chan->rx_state);
6540 
6541 	if (__valid_reqseq(chan, control->reqseq)) {
6542 		switch (chan->rx_state) {
6543 		case L2CAP_RX_STATE_RECV:
6544 			err = l2cap_rx_state_recv(chan, control, skb, event);
6545 			break;
6546 		case L2CAP_RX_STATE_SREJ_SENT:
6547 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6548 						       event);
6549 			break;
6550 		case L2CAP_RX_STATE_WAIT_P:
6551 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6552 			break;
6553 		case L2CAP_RX_STATE_WAIT_F:
6554 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6555 			break;
6556 		default:
6557 			/* shut it down */
6558 			break;
6559 		}
6560 	} else {
6561 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6562 		       control->reqseq, chan->next_tx_seq,
6563 		       chan->expected_ack_seq);
6564 		l2cap_send_disconn_req(chan, ECONNRESET);
6565 	}
6566 
6567 	return err;
6568 }
6569 
/* Receive one I-frame in streaming mode.
 *
 * Streaming mode has no retransmission: only the exactly-expected
 * txseq is fed into reassembly.  Anything else discards both the
 * frame and any partially assembled SDU, and the expected sequence is
 * resynchronized to just past the received txseq.  Always returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out-of-sequence frame: drop it along with any
		 * partially reassembled SDU.
		 */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resync: treat everything up to this txseq as seen */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return 0;
}
6605 
/* Entry point for data frames on an ERTM or streaming mode channel.
 *
 * Verifies the FCS, validates the payload length against the MPS and
 * the S-frame/I-frame field constraints, then feeds the frame into
 * either the ERTM RX state machine or the streaming-mode receiver.
 * Invalid frames are dropped; protocol violations additionally
 * disconnect the channel.  Consumes @skb.  Always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Account for the fields that are not payload when checking
	 * the frame against the MPS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	/* chan->data is presumably the owning socket here; sk_filter
	 * drops frames the attached socket filter rejects -- TODO
	 * confirm against the channel ops implementation.
	 */
	if ((chan->mode == L2CAP_MODE_ERTM ||
	     chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb))
		goto drop;

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the 2-bit S field onto the RX event codes */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6697 
/* Return LE flow-control credits to the remote sender.
 *
 * The target credit level is enough for the remote to send one
 * full-imtu SDU: (imtu / mps) + 1 PDUs.  Only the difference to the
 * currently outstanding rx_credits is granted; if we are already at
 * (or above) the target nothing is sent.
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits;

	return_credits = ((chan->imtu / chan->mps) + 1) - chan->rx_credits;

	if (!return_credits)
		return;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	/* Account for the credits before they are announced */
	chan->rx_credits += return_credits;

	pkt.cid     = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
6720 
6721 static int l2cap_le_recv(struct l2cap_chan *chan, struct sk_buff *skb)
6722 {
6723 	int err;
6724 
6725 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
6726 
6727 	/* Wait recv to confirm reception before updating the credits */
6728 	err = chan->ops->recv(chan, skb);
6729 
6730 	/* Update credits whenever an SDU is received */
6731 	l2cap_chan_le_send_credits(chan);
6732 
6733 	return err;
6734 }
6735 
/* Receive path for LE flow-control (credit based) channels.
 *
 * Every incoming PDU consumes one credit.  The first PDU of an SDU
 * carries a 2-byte SDU length prefix; subsequent PDUs are appended to
 * chan->sdu until sdu_len bytes are collected, at which point the
 * complete SDU is delivered via l2cap_le_recv().
 *
 * skb ownership: a negative return value means the caller still owns
 * the skb (no credits left, or PDU larger than the MTU) and must free
 * it.  On every other path the skb is consumed here and 0 is returned,
 * even if an internal error occurred (see the comment at the end).
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	/* This PDU consumes one of the credits we handed out */
	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		/* First PDU of a new SDU: starts with the SDU length */
		u16 sdu_len;

		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unfragmented SDU: deliver immediately */
		if (skb->len == sdu_len)
			return l2cap_le_recv(chan, skb);

		/* Start reassembly; further fragments are appended below */
		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* skb now belongs to the partial SDU; clear it so the failure
	 * path below does not free it a second time.
	 */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		/* SDU complete: deliver and reset reassembly state */
		err = l2cap_le_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6839 
/* Dispatch an incoming PDU addressed to a channel identified by CID.
 *
 * The channel returned by l2cap_get_chan_by_scid() is returned locked
 * (note the single l2cap_chan_unlock() at done:); the A2MP branch locks
 * the freshly created channel explicitly to match.  The skb is always
 * consumed: either by the per-mode receive path or by kfree_skb().
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procdure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Negative return means the skb was not consumed */
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* Non-zero return means recv did not take the skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Always consumes the skb, even on internal errors */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6913 
/* Handle a frame received on the connectionless (PSM-addressed)
 * channel.  Only ACL links carry connectionless traffic.
 *
 * l2cap_global_chan_by_psm() returns a held channel reference, hence
 * the l2cap_chan_put() on every exit path that found a channel.  The
 * skb is either handed to the channel owner (recv returning 0) or
 * freed here.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6950 
/* Dispatch one complete L2CAP frame (Basic header + payload) by CID.
 *
 * Frames arriving before the HCI connection reaches BT_CONNECTED are
 * queued on conn->pending_rx and replayed later by
 * process_pending_rx().  The skb is consumed on every path: handed to
 * one of the per-CID handlers, queued, or freed.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	/* lh still points at the header data after the pull */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload length exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry a PSM before the payload */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
7005 
7006 static void process_pending_rx(struct work_struct *work)
7007 {
7008 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7009 					       pending_rx_work);
7010 	struct sk_buff *skb;
7011 
7012 	BT_DBG("");
7013 
7014 	while ((skb = skb_dequeue(&conn->pending_rx)))
7015 		l2cap_recv_frame(conn, skb);
7016 }
7017 
/* Get or create the l2cap_conn for an HCI connection.
 *
 * If hcon already has L2CAP data attached it is returned as-is.
 * Otherwise a new l2cap_conn is allocated and fully initialized (MTU
 * from the controller, fixed-channel mask, locks, lists and work
 * items).  Returns NULL on allocation failure.  The returned conn
 * holds a reference on hcon (hci_conn_get).
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* Pick the MTU appropriate for the link type */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* A2MP is only advertised when high speed is enabled */
	if (hcon->type == ACL_LINK &&
	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7084 
7085 static bool is_valid_psm(u16 psm, u8 dst_type) {
7086 	if (!psm)
7087 		return false;
7088 
7089 	if (bdaddr_type_is_le(dst_type))
7090 		return (psm <= 0x00ff);
7091 
7092 	/* PSM must be odd and lsb of upper byte must be 0 */
7093 	return ((psm & 0x0101) == 0x0001);
7094 }
7095 
/* Initiate an outgoing L2CAP connection on @chan.
 *
 * @psm/@cid identify the remote endpoint (PSM for connection-oriented
 * channels, CID for fixed channels), @dst/@dst_type the remote device.
 * Validates the PSM/CID against the channel type and mode, resolves
 * the HCI route, creates (or reuses) the HCI and L2CAP connections,
 * attaches the channel and kicks off the connect procedure.
 *
 * Returns 0 on success or if a connect is already in progress,
 * negative errno otherwise.  Locking: hdev lock, then conn->chan_lock,
 * then the channel lock.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Raw channels may connect without a valid PSM or CID */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* ERTM and streaming are only allowed unless disabled via the
	 * disable_ertm module parameter.
	 */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we cannot scan, so connect directly;
		 * otherwise go through the passive-scan connect path.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type,
					      chan->sec_level,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE, NULL);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level,
						   HCI_LE_CONN_TIMEOUT);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Refuse to reuse a destination CID already taken on this conn */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, start (or finish) immediately */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7257 
7258 /* ---- L2CAP interface with lower layer (HCI) ---- */
7259 
7260 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7261 {
7262 	int exact = 0, lm1 = 0, lm2 = 0;
7263 	struct l2cap_chan *c;
7264 
7265 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7266 
7267 	/* Find listening sockets and check their link_mode */
7268 	read_lock(&chan_list_lock);
7269 	list_for_each_entry(c, &chan_list, global_l) {
7270 		if (c->state != BT_LISTEN)
7271 			continue;
7272 
7273 		if (!bacmp(&c->src, &hdev->bdaddr)) {
7274 			lm1 |= HCI_LM_ACCEPT;
7275 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7276 				lm1 |= HCI_LM_MASTER;
7277 			exact++;
7278 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
7279 			lm2 |= HCI_LM_ACCEPT;
7280 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7281 				lm2 |= HCI_LM_MASTER;
7282 		}
7283 	}
7284 	read_unlock(&chan_list_lock);
7285 
7286 	return exact ? lm1 : lm2;
7287 }
7288 
7289 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7290  * from an existing channel in the list or from the beginning of the
7291  * global list (by passing NULL as first parameter).
7292  */
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * The returned channel is held (l2cap_chan_hold); the caller must drop
 * the reference with l2cap_chan_put().  The list lock is only held for
 * the duration of the scan, which is why iteration is restarted from a
 * remembered element rather than done in one pass.
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume after the given channel, or start from the head */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Must be bound to this controller's address or to any */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* Take a reference before dropping the list lock */
		l2cap_chan_hold(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7324 
/* HCI callback: an ACL or LE connection attempt has completed.
 *
 * On failure the L2CAP connection (if any) is torn down.  On success
 * the l2cap_conn is created and every listening fixed channel gets the
 * chance to spawn a new channel on this connection, unless a client
 * channel with the same CID already exists.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Look up the successor before dropping our reference */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7385 
7386 int l2cap_disconn_ind(struct hci_conn *hcon)
7387 {
7388 	struct l2cap_conn *conn = hcon->l2cap_data;
7389 
7390 	BT_DBG("hcon %p", hcon);
7391 
7392 	if (!conn)
7393 		return HCI_ERROR_REMOTE_USER_TERM;
7394 	return conn->disc_reason;
7395 }
7396 
7397 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7398 {
7399 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7400 		return;
7401 
7402 	BT_DBG("hcon %p reason %d", hcon, reason);
7403 
7404 	l2cap_conn_del(hcon, bt_to_errno(reason));
7405 }
7406 
7407 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7408 {
7409 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7410 		return;
7411 
7412 	if (encrypt == 0x00) {
7413 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
7414 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7415 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
7416 			   chan->sec_level == BT_SECURITY_FIPS)
7417 			l2cap_chan_close(chan, ECONNREFUSED);
7418 	} else {
7419 		if (chan->sec_level == BT_SECURITY_MEDIUM)
7420 			__clear_chan_timer(chan);
7421 	}
7422 }
7423 
/* HCI callback: a security (authentication/encryption) procedure on
 * the link finished with @status; @encrypt reflects the new
 * encryption state.
 *
 * Walks every channel on the connection and advances its state
 * machine: resumes connected/configuring channels, (re)starts pending
 * connects, and answers deferred incoming connect requests with the
 * appropriate connect response.  Runs with conn->chan_lock held and
 * takes each channel lock in turn.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* The A2MP channel is not subject to link security */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Skip channels still waiting for a connect response */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   chan->mode != L2CAP_MODE_LE_FLOWCTL) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Leave the decision to userspace */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Successful accept: kick off configuration */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
7514 
/* Receive ACL data from HCI and reassemble it into complete L2CAP
 * frames.
 *
 * A start fragment (ACL_START*) carries the Basic L2CAP header, from
 * which the total frame length is known; continuation fragments
 * (ACL_CONT) are copied into conn->rx_skb until rx_len reaches zero,
 * at which point the complete frame is dispatched via
 * l2cap_recv_frame().  Malformed fragment sequences mark the
 * connection unreliable and are dropped.  The incoming skb is always
 * consumed: either handed off whole or freed after copying.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start fragment while reassembly is in progress
		 * means the previous frame was truncated — discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
}
7618 
/* Callbacks registered with the HCI core (see l2cap_init) so L2CAP is
 * notified about connection, disconnection and security events.
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
7625 
7626 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7627 {
7628 	struct l2cap_chan *c;
7629 
7630 	read_lock(&chan_list_lock);
7631 
7632 	list_for_each_entry(c, &chan_list, global_l) {
7633 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7634 			   &c->src, c->src_type, &c->dst, c->dst_type,
7635 			   c->state, __le16_to_cpu(c->psm),
7636 			   c->scid, c->dcid, c->imtu, c->omtu,
7637 			   c->sec_level, c->mode);
7638 	}
7639 
7640 	read_unlock(&chan_list_lock);
7641 
7642 	return 0;
7643 }
7644 
/* debugfs open: bind the seq_file single-show helper to our printer */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7649 
/* File operations for the "l2cap" debugfs entry (read-only seq_file) */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs entry, kept for removal in l2cap_exit() */
static struct dentry *l2cap_debugfs;
7658 
/* Initialize the L2CAP layer: socket interface first, then the HCI
 * callbacks, and finally the optional debugfs entry.  Returns 0 on
 * success or the error from l2cap_init_sockets().
 */
int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	hci_register_cb(&l2cap_cb);

	/* debugfs is optional; skip it when bt_debugfs is unavailable */
	if (IS_ERR_OR_NULL(bt_debugfs))
		return 0;

	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
					    NULL, &l2cap_debugfs_fops);

	return 0;
}
7677 
/* Tear down the L2CAP layer in the reverse order of l2cap_init() */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7684 
7685 module_param(disable_ertm, bool, 0644);
7686 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
7687