xref: /openbmc/linux/net/bluetooth/l2cap_core.c (revision b6bec26c)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41 #include <net/bluetooth/amp.h>
42 
43 bool disable_ertm;
44 
45 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
46 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
47 
48 static LIST_HEAD(chan_list);
49 static DEFINE_RWLOCK(chan_list_lock);
50 
51 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
52 				       u8 code, u8 ident, u16 dlen, void *data);
53 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
54 			   void *data);
55 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
56 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
57 
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 		     struct sk_buff_head *skbs, u8 event);
60 
61 /* ---- L2CAP channels ---- */
62 
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
64 						   u16 cid)
65 {
66 	struct l2cap_chan *c;
67 
68 	list_for_each_entry(c, &conn->chan_l, list) {
69 		if (c->dcid == cid)
70 			return c;
71 	}
72 	return NULL;
73 }
74 
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
76 						   u16 cid)
77 {
78 	struct l2cap_chan *c;
79 
80 	list_for_each_entry(c, &conn->chan_l, list) {
81 		if (c->scid == cid)
82 			return c;
83 	}
84 	return NULL;
85 }
86 
87 /* Find channel with given SCID.
88  * Returns locked channel. */
89 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
90 						 u16 cid)
91 {
92 	struct l2cap_chan *c;
93 
94 	mutex_lock(&conn->chan_lock);
95 	c = __l2cap_get_chan_by_scid(conn, cid);
96 	if (c)
97 		l2cap_chan_lock(c);
98 	mutex_unlock(&conn->chan_lock);
99 
100 	return c;
101 }
102 
103 /* Find channel with given DCID.
104  * Returns locked channel.
105  */
106 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
107 						 u16 cid)
108 {
109 	struct l2cap_chan *c;
110 
111 	mutex_lock(&conn->chan_lock);
112 	c = __l2cap_get_chan_by_dcid(conn, cid);
113 	if (c)
114 		l2cap_chan_lock(c);
115 	mutex_unlock(&conn->chan_lock);
116 
117 	return c;
118 }
119 
120 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
121 						    u8 ident)
122 {
123 	struct l2cap_chan *c;
124 
125 	list_for_each_entry(c, &conn->chan_l, list) {
126 		if (c->ident == ident)
127 			return c;
128 	}
129 	return NULL;
130 }
131 
132 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
133 						  u8 ident)
134 {
135 	struct l2cap_chan *c;
136 
137 	mutex_lock(&conn->chan_lock);
138 	c = __l2cap_get_chan_by_ident(conn, ident);
139 	if (c)
140 		l2cap_chan_lock(c);
141 	mutex_unlock(&conn->chan_lock);
142 
143 	return c;
144 }
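
/*
 * Usage sketch for the locked lookup helpers above (illustrative, based on
 * a hypothetical caller): the channel is returned with its lock held, so
 * the caller must drop it when done.
 *
 *	struct l2cap_chan *chan = l2cap_get_chan_by_scid(conn, scid);
 *
 *	if (!chan)
 *		return;
 *
 *	... use chan while it is locked ...
 *
 *	l2cap_chan_unlock(chan);
 */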
145 
146 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
147 {
148 	struct l2cap_chan *c;
149 
150 	list_for_each_entry(c, &chan_list, global_l) {
151 		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
152 			return c;
153 	}
154 	return NULL;
155 }
156 
157 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
158 {
159 	int err;
160 
161 	write_lock(&chan_list_lock);
162 
163 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
164 		err = -EADDRINUSE;
165 		goto done;
166 	}
167 
168 	if (psm) {
169 		chan->psm = psm;
170 		chan->sport = psm;
171 		err = 0;
172 	} else {
173 		u16 p;
174 
175 		err = -EINVAL;
176 		for (p = 0x1001; p < 0x1100; p += 2)
177 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
178 				chan->psm   = cpu_to_le16(p);
179 				chan->sport = cpu_to_le16(p);
180 				err = 0;
181 				break;
182 			}
183 	}
184 
185 done:
186 	write_unlock(&chan_list_lock);
187 	return err;
188 }
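
/*
 * Note on dynamic PSM allocation (illustrative values): if psm is 0,
 * l2cap_add_psm() picks the first free odd PSM in the dynamic range
 * 0x1001-0x10ff for the given source address, e.g.:
 *
 *	l2cap_add_psm(chan1, src, 0);	might assign 0x1001
 *	l2cap_add_psm(chan2, src, 0);	might assign 0x1003
 *
 * An explicit PSM that is already bound on the same source address fails
 * with -EADDRINUSE.
 */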
189 
190 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
191 {
192 	write_lock(&chan_list_lock);
193 
194 	chan->scid = scid;
195 
196 	write_unlock(&chan_list_lock);
197 
198 	return 0;
199 }
200 
201 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
202 {
203 	u16 cid = L2CAP_CID_DYN_START;
204 
205 	for (; cid < L2CAP_CID_DYN_END; cid++) {
206 		if (!__l2cap_get_chan_by_scid(conn, cid))
207 			return cid;
208 	}
209 
210 	return 0;
211 }
212 
213 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
214 {
215 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
216 	       state_to_string(state));
217 
218 	chan->state = state;
219 	chan->ops->state_change(chan, state);
220 }
221 
222 static void l2cap_state_change(struct l2cap_chan *chan, int state)
223 {
224 	struct sock *sk = chan->sk;
225 
226 	lock_sock(sk);
227 	__l2cap_state_change(chan, state);
228 	release_sock(sk);
229 }
230 
231 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
232 {
233 	struct sock *sk = chan->sk;
234 
235 	sk->sk_err = err;
236 }
237 
238 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
239 {
240 	struct sock *sk = chan->sk;
241 
242 	lock_sock(sk);
243 	__l2cap_chan_set_err(chan, err);
244 	release_sock(sk);
245 }
246 
247 static void __set_retrans_timer(struct l2cap_chan *chan)
248 {
249 	if (!delayed_work_pending(&chan->monitor_timer) &&
250 	    chan->retrans_timeout) {
251 		l2cap_set_timer(chan, &chan->retrans_timer,
252 				msecs_to_jiffies(chan->retrans_timeout));
253 	}
254 }
255 
256 static void __set_monitor_timer(struct l2cap_chan *chan)
257 {
258 	__clear_retrans_timer(chan);
259 	if (chan->monitor_timeout) {
260 		l2cap_set_timer(chan, &chan->monitor_timer,
261 				msecs_to_jiffies(chan->monitor_timeout));
262 	}
263 }
264 
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
266 					       u16 seq)
267 {
268 	struct sk_buff *skb;
269 
270 	skb_queue_walk(head, skb) {
271 		if (bt_cb(skb)->control.txseq == seq)
272 			return skb;
273 	}
274 
275 	return NULL;
276 }
277 
278 /* ---- L2CAP sequence number lists ---- */
279 
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281  * SREJ requests that are received and for frames that are to be
282  * retransmitted. These seq_list functions implement a singly-linked
283  * list in an array, where membership in the list can also be checked
284  * in constant time. Items can also be added to the tail of the list
285  * and removed from the head in constant time, without further memory
286  * allocs or frees.
287  */
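
/*
 * Minimal usage sketch of the seq_list helpers below (illustrative only):
 *
 *	struct l2cap_seq_list list;
 *
 *	l2cap_seq_list_init(&list, chan->tx_win);
 *	l2cap_seq_list_append(&list, 5);	constant-time tail append
 *	l2cap_seq_list_append(&list, 9);
 *	l2cap_seq_list_contains(&list, 9);	constant-time membership test
 *	l2cap_seq_list_pop(&list);		constant time, returns 5
 *	l2cap_seq_list_free(&list);
 *
 * The backing array is rounded up to a power of two, so a sequence number
 * is indexed with "seq & mask" instead of a modulo operation.
 */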
288 
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
290 {
291 	size_t alloc_size, i;
292 
293 	/* Allocated size is a power of 2 to map sequence numbers
294 	 * (which may be up to 14 bits) into a smaller array that is
295 	 * sized for the negotiated ERTM transmit windows.
296 	 */
297 	alloc_size = roundup_pow_of_two(size);
298 
299 	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
300 	if (!seq_list->list)
301 		return -ENOMEM;
302 
303 	seq_list->mask = alloc_size - 1;
304 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 	for (i = 0; i < alloc_size; i++)
307 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
308 
309 	return 0;
310 }
311 
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
313 {
314 	kfree(seq_list->list);
315 }
316 
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
318 					   u16 seq)
319 {
320 	/* Constant-time check for list membership */
321 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
322 }
323 
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
325 {
326 	u16 mask = seq_list->mask;
327 
328 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 		/* In case someone tries to pop the head of an empty list */
330 		return L2CAP_SEQ_LIST_CLEAR;
331 	} else if (seq_list->head == seq) {
332 		/* Head can be removed in constant time */
333 		seq_list->head = seq_list->list[seq & mask];
334 		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
335 
336 		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
339 		}
340 	} else {
341 		/* Walk the list to find the sequence number */
342 		u16 prev = seq_list->head;
343 		while (seq_list->list[prev & mask] != seq) {
344 			prev = seq_list->list[prev & mask];
345 			if (prev == L2CAP_SEQ_LIST_TAIL)
346 				return L2CAP_SEQ_LIST_CLEAR;
347 		}
348 
349 		/* Unlink the number from the list and clear it */
350 		seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 		if (seq_list->tail == seq)
353 			seq_list->tail = prev;
354 	}
355 	return seq;
356 }
357 
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
359 {
360 	/* Remove the head in constant time */
361 	return l2cap_seq_list_remove(seq_list, seq_list->head);
362 }
363 
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
365 {
366 	u16 i;
367 
368 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
369 		return;
370 
371 	for (i = 0; i <= seq_list->mask; i++)
372 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
373 
374 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
376 }
377 
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
379 {
380 	u16 mask = seq_list->mask;
381 
382 	/* All appends happen in constant time */
383 
384 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
385 		return;
386 
387 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 		seq_list->head = seq;
389 	else
390 		seq_list->list[seq_list->tail & mask] = seq;
391 
392 	seq_list->tail = seq;
393 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
394 }
395 
396 static void l2cap_chan_timeout(struct work_struct *work)
397 {
398 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
399 					       chan_timer.work);
400 	struct l2cap_conn *conn = chan->conn;
401 	int reason;
402 
403 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
404 
405 	mutex_lock(&conn->chan_lock);
406 	l2cap_chan_lock(chan);
407 
408 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
409 		reason = ECONNREFUSED;
410 	else if (chan->state == BT_CONNECT &&
411 		 chan->sec_level != BT_SECURITY_SDP)
412 		reason = ECONNREFUSED;
413 	else
414 		reason = ETIMEDOUT;
415 
416 	l2cap_chan_close(chan, reason);
417 
418 	l2cap_chan_unlock(chan);
419 
420 	chan->ops->close(chan);
421 	mutex_unlock(&conn->chan_lock);
422 
423 	l2cap_chan_put(chan);
424 }
425 
426 struct l2cap_chan *l2cap_chan_create(void)
427 {
428 	struct l2cap_chan *chan;
429 
430 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
431 	if (!chan)
432 		return NULL;
433 
434 	mutex_init(&chan->lock);
435 
436 	write_lock(&chan_list_lock);
437 	list_add(&chan->global_l, &chan_list);
438 	write_unlock(&chan_list_lock);
439 
440 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
441 
442 	chan->state = BT_OPEN;
443 
444 	kref_init(&chan->kref);
445 
446 	/* This flag is cleared in l2cap_chan_ready() */
447 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
448 
449 	BT_DBG("chan %p", chan);
450 
451 	return chan;
452 }
453 
454 static void l2cap_chan_destroy(struct kref *kref)
455 {
456 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
457 
458 	BT_DBG("chan %p", chan);
459 
460 	write_lock(&chan_list_lock);
461 	list_del(&chan->global_l);
462 	write_unlock(&chan_list_lock);
463 
464 	kfree(chan);
465 }
466 
467 void l2cap_chan_hold(struct l2cap_chan *c)
468 {
469 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
470 
471 	kref_get(&c->kref);
472 }
473 
474 void l2cap_chan_put(struct l2cap_chan *c)
475 {
476 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
477 
478 	kref_put(&c->kref, l2cap_chan_destroy);
479 }
480 
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
482 {
483 	chan->fcs  = L2CAP_FCS_CRC16;
484 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 	chan->sec_level = BT_SECURITY_LOW;
489 
490 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
491 }
492 
493 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
494 {
495 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
496 	       __le16_to_cpu(chan->psm), chan->dcid);
497 
498 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
499 
500 	chan->conn = conn;
501 
502 	switch (chan->chan_type) {
503 	case L2CAP_CHAN_CONN_ORIENTED:
504 		if (conn->hcon->type == LE_LINK) {
505 			/* LE connection */
506 			chan->omtu = L2CAP_DEFAULT_MTU;
507 			chan->scid = L2CAP_CID_LE_DATA;
508 			chan->dcid = L2CAP_CID_LE_DATA;
509 		} else {
510 			/* Alloc CID for connection-oriented socket */
511 			chan->scid = l2cap_alloc_cid(conn);
512 			chan->omtu = L2CAP_DEFAULT_MTU;
513 		}
514 		break;
515 
516 	case L2CAP_CHAN_CONN_LESS:
517 		/* Connectionless socket */
518 		chan->scid = L2CAP_CID_CONN_LESS;
519 		chan->dcid = L2CAP_CID_CONN_LESS;
520 		chan->omtu = L2CAP_DEFAULT_MTU;
521 		break;
522 
523 	case L2CAP_CHAN_CONN_FIX_A2MP:
524 		chan->scid = L2CAP_CID_A2MP;
525 		chan->dcid = L2CAP_CID_A2MP;
526 		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
527 		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
528 		break;
529 
530 	default:
531 		/* Raw socket can send/recv signalling messages only */
532 		chan->scid = L2CAP_CID_SIGNALING;
533 		chan->dcid = L2CAP_CID_SIGNALING;
534 		chan->omtu = L2CAP_DEFAULT_MTU;
535 	}
536 
537 	chan->local_id		= L2CAP_BESTEFFORT_ID;
538 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
539 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
540 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
541 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
542 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
543 
544 	l2cap_chan_hold(chan);
545 
546 	list_add(&chan->list, &conn->chan_l);
547 }
548 
549 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
550 {
551 	mutex_lock(&conn->chan_lock);
552 	__l2cap_chan_add(conn, chan);
553 	mutex_unlock(&conn->chan_lock);
554 }
555 
556 void l2cap_chan_del(struct l2cap_chan *chan, int err)
557 {
558 	struct l2cap_conn *conn = chan->conn;
559 
560 	__clear_chan_timer(chan);
561 
562 	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
563 
564 	if (conn) {
565 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
566 		/* Delete from channel list */
567 		list_del(&chan->list);
568 
569 		l2cap_chan_put(chan);
570 
571 		chan->conn = NULL;
572 
573 		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
574 			hci_conn_put(conn->hcon);
575 
576 		if (mgr && mgr->bredr_chan == chan)
577 			mgr->bredr_chan = NULL;
578 	}
579 
580 	if (chan->hs_hchan) {
581 		struct hci_chan *hs_hchan = chan->hs_hchan;
582 
583 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
584 		amp_disconnect_logical_link(hs_hchan);
585 	}
586 
587 	chan->ops->teardown(chan, err);
588 
589 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
590 		return;
591 
592 	switch (chan->mode) {
593 	case L2CAP_MODE_BASIC:
594 		break;
595 
596 	case L2CAP_MODE_ERTM:
597 		__clear_retrans_timer(chan);
598 		__clear_monitor_timer(chan);
599 		__clear_ack_timer(chan);
600 
601 		skb_queue_purge(&chan->srej_q);
602 
603 		l2cap_seq_list_free(&chan->srej_list);
604 		l2cap_seq_list_free(&chan->retrans_list);
605 
606 		/* fall through */
607 
608 	case L2CAP_MODE_STREAMING:
609 		skb_queue_purge(&chan->tx_q);
610 		break;
611 	}
612 
613 	return;
614 }
615 
616 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
617 {
618 	struct l2cap_conn *conn = chan->conn;
619 	struct sock *sk = chan->sk;
620 
621 	BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
622 	       sk);
623 
624 	switch (chan->state) {
625 	case BT_LISTEN:
626 		chan->ops->teardown(chan, 0);
627 		break;
628 
629 	case BT_CONNECTED:
630 	case BT_CONFIG:
631 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
632 		    conn->hcon->type == ACL_LINK) {
633 			__set_chan_timer(chan, sk->sk_sndtimeo);
634 			l2cap_send_disconn_req(chan, reason);
635 		} else
636 			l2cap_chan_del(chan, reason);
637 		break;
638 
639 	case BT_CONNECT2:
640 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
641 		    conn->hcon->type == ACL_LINK) {
642 			struct l2cap_conn_rsp rsp;
643 			__u16 result;
644 
645 			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
646 				result = L2CAP_CR_SEC_BLOCK;
647 			else
648 				result = L2CAP_CR_BAD_PSM;
649 			l2cap_state_change(chan, BT_DISCONN);
650 
651 			rsp.scid   = cpu_to_le16(chan->dcid);
652 			rsp.dcid   = cpu_to_le16(chan->scid);
653 			rsp.result = cpu_to_le16(result);
654 			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
655 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
656 				       sizeof(rsp), &rsp);
657 		}
658 
659 		l2cap_chan_del(chan, reason);
660 		break;
661 
662 	case BT_CONNECT:
663 	case BT_DISCONN:
664 		l2cap_chan_del(chan, reason);
665 		break;
666 
667 	default:
668 		chan->ops->teardown(chan, 0);
669 		break;
670 	}
671 }
672 
673 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
674 {
675 	if (chan->chan_type == L2CAP_CHAN_RAW) {
676 		switch (chan->sec_level) {
677 		case BT_SECURITY_HIGH:
678 			return HCI_AT_DEDICATED_BONDING_MITM;
679 		case BT_SECURITY_MEDIUM:
680 			return HCI_AT_DEDICATED_BONDING;
681 		default:
682 			return HCI_AT_NO_BONDING;
683 		}
684 	} else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
685 		if (chan->sec_level == BT_SECURITY_LOW)
686 			chan->sec_level = BT_SECURITY_SDP;
687 
688 		if (chan->sec_level == BT_SECURITY_HIGH)
689 			return HCI_AT_NO_BONDING_MITM;
690 		else
691 			return HCI_AT_NO_BONDING;
692 	} else {
693 		switch (chan->sec_level) {
694 		case BT_SECURITY_HIGH:
695 			return HCI_AT_GENERAL_BONDING_MITM;
696 		case BT_SECURITY_MEDIUM:
697 			return HCI_AT_GENERAL_BONDING;
698 		default:
699 			return HCI_AT_NO_BONDING;
700 		}
701 	}
702 }
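
/*
 * Summary of the auth type mapping above (informational):
 *
 *	channel			sec_level	HCI auth type
 *	-----------------------------------------------------------------
 *	raw			HIGH		DEDICATED_BONDING_MITM
 *	raw			MEDIUM		DEDICATED_BONDING
 *	raw			other		NO_BONDING
 *	SDP PSM			HIGH		NO_BONDING_MITM
 *	SDP PSM			other		NO_BONDING (LOW is raised to SDP)
 *	anything else		HIGH		GENERAL_BONDING_MITM
 *	anything else		MEDIUM		GENERAL_BONDING
 *	anything else		other		NO_BONDING
 */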
703 
704 /* Service level security */
705 int l2cap_chan_check_security(struct l2cap_chan *chan)
706 {
707 	struct l2cap_conn *conn = chan->conn;
708 	__u8 auth_type;
709 
710 	auth_type = l2cap_get_auth_type(chan);
711 
712 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
713 }
714 
715 static u8 l2cap_get_ident(struct l2cap_conn *conn)
716 {
717 	u8 id;
718 
719 	/* Get next available identifier.
720 	 *    1 - 128 are used by kernel.
721 	 *  129 - 199 are reserved.
722 	 *  200 - 254 are used by utilities like l2ping, etc.
723 	 */
724 
725 	spin_lock(&conn->lock);
726 
727 	if (++conn->tx_ident > 128)
728 		conn->tx_ident = 1;
729 
730 	id = conn->tx_ident;
731 
732 	spin_unlock(&conn->lock);
733 
734 	return id;
735 }
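
/*
 * Example (illustrative): tx_ident cycles through 1..128, so successive
 * calls on the same connection return 1, 2, ..., 128 and then wrap back
 * to 1, keeping kernel-generated identifiers inside the 1-128 range
 * documented above.
 */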
736 
737 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
738 			   void *data)
739 {
740 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
741 	u8 flags;
742 
743 	BT_DBG("code 0x%2.2x", code);
744 
745 	if (!skb)
746 		return;
747 
748 	if (lmp_no_flush_capable(conn->hcon->hdev))
749 		flags = ACL_START_NO_FLUSH;
750 	else
751 		flags = ACL_START;
752 
753 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
754 	skb->priority = HCI_PRIO_MAX;
755 
756 	hci_send_acl(conn->hchan, skb, flags);
757 }
758 
759 static bool __chan_is_moving(struct l2cap_chan *chan)
760 {
761 	return chan->move_state != L2CAP_MOVE_STABLE &&
762 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
763 }
764 
765 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
766 {
767 	struct hci_conn *hcon = chan->conn->hcon;
768 	u16 flags;
769 
770 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
771 	       skb->priority);
772 
773 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
774 		if (chan->hs_hchan)
775 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
776 		else
777 			kfree_skb(skb);
778 
779 		return;
780 	}
781 
782 	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
783 	    lmp_no_flush_capable(hcon->hdev))
784 		flags = ACL_START_NO_FLUSH;
785 	else
786 		flags = ACL_START;
787 
788 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
789 	hci_send_acl(chan->conn->hchan, skb, flags);
790 }
791 
792 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
793 {
794 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
795 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
796 
797 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
798 		/* S-Frame */
799 		control->sframe = 1;
800 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
801 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
802 
803 		control->sar = 0;
804 		control->txseq = 0;
805 	} else {
806 		/* I-Frame */
807 		control->sframe = 0;
808 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
809 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
810 
811 		control->poll = 0;
812 		control->super = 0;
813 	}
814 }
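
/*
 * Worked example (assuming the standard enhanced control field layout:
 * bit 0 frame type, bits 1-6 TxSeq, bits 2-3 S, bit 4 P, bit 7 F,
 * bits 8-13 ReqSeq, bits 14-15 SAR):
 *
 *	__unpack_enhanced_control(0x0511, &control);
 *
 * decodes as an S-frame with super = RR (0), poll = 1, final = 0 and
 * reqseq = 5; sar and txseq are forced to 0 because S-frames carry no
 * payload sequence information.
 */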
815 
816 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
817 {
818 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
819 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
820 
821 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
822 		/* S-Frame */
823 		control->sframe = 1;
824 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
825 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
826 
827 		control->sar = 0;
828 		control->txseq = 0;
829 	} else {
830 		/* I-Frame */
831 		control->sframe = 0;
832 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
833 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
834 
835 		control->poll = 0;
836 		control->super = 0;
837 	}
838 }
839 
840 static inline void __unpack_control(struct l2cap_chan *chan,
841 				    struct sk_buff *skb)
842 {
843 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
844 		__unpack_extended_control(get_unaligned_le32(skb->data),
845 					  &bt_cb(skb)->control);
846 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
847 	} else {
848 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
849 					  &bt_cb(skb)->control);
850 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
851 	}
852 }
853 
854 static u32 __pack_extended_control(struct l2cap_ctrl *control)
855 {
856 	u32 packed;
857 
858 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
859 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
860 
861 	if (control->sframe) {
862 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
863 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
864 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
865 	} else {
866 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
867 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
868 	}
869 
870 	return packed;
871 }
872 
873 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
874 {
875 	u16 packed;
876 
877 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
878 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
879 
880 	if (control->sframe) {
881 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
882 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
883 		packed |= L2CAP_CTRL_FRAME_TYPE;
884 	} else {
885 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
886 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
887 	}
888 
889 	return packed;
890 }
891 
892 static inline void __pack_control(struct l2cap_chan *chan,
893 				  struct l2cap_ctrl *control,
894 				  struct sk_buff *skb)
895 {
896 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
897 		put_unaligned_le32(__pack_extended_control(control),
898 				   skb->data + L2CAP_HDR_SIZE);
899 	} else {
900 		put_unaligned_le16(__pack_enhanced_control(control),
901 				   skb->data + L2CAP_HDR_SIZE);
902 	}
903 }
904 
905 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
906 {
907 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
908 		return L2CAP_EXT_HDR_SIZE;
909 	else
910 		return L2CAP_ENH_HDR_SIZE;
911 }
912 
913 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
914 					       u32 control)
915 {
916 	struct sk_buff *skb;
917 	struct l2cap_hdr *lh;
918 	int hlen = __ertm_hdr_size(chan);
919 
920 	if (chan->fcs == L2CAP_FCS_CRC16)
921 		hlen += L2CAP_FCS_SIZE;
922 
923 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
924 
925 	if (!skb)
926 		return ERR_PTR(-ENOMEM);
927 
928 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
929 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
930 	lh->cid = cpu_to_le16(chan->dcid);
931 
932 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
933 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
934 	else
935 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
936 
937 	if (chan->fcs == L2CAP_FCS_CRC16) {
938 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
939 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
940 	}
941 
942 	skb->priority = HCI_PRIO_MAX;
943 	return skb;
944 }
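
/*
 * Size arithmetic sketch (assuming the usual sizes: 4 byte basic header,
 * 2 byte enhanced / 4 byte extended control field, 2 byte FCS):
 *
 *	enhanced control + CRC16:	hlen = 4 + 2 + 2 = 8,  lh->len = 4
 *	extended control + CRC16:	hlen = 4 + 4 + 2 = 10, lh->len = 6
 *	enhanced control, no FCS:	hlen = 4 + 2     = 6,  lh->len = 2
 *
 * i.e. lh->len covers the control field and optional FCS but not the
 * basic L2CAP header itself.
 */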
945 
946 static void l2cap_send_sframe(struct l2cap_chan *chan,
947 			      struct l2cap_ctrl *control)
948 {
949 	struct sk_buff *skb;
950 	u32 control_field;
951 
952 	BT_DBG("chan %p, control %p", chan, control);
953 
954 	if (!control->sframe)
955 		return;
956 
957 	if (__chan_is_moving(chan))
958 		return;
959 
960 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
961 	    !control->poll)
962 		control->final = 1;
963 
964 	if (control->super == L2CAP_SUPER_RR)
965 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
966 	else if (control->super == L2CAP_SUPER_RNR)
967 		set_bit(CONN_RNR_SENT, &chan->conn_state);
968 
969 	if (control->super != L2CAP_SUPER_SREJ) {
970 		chan->last_acked_seq = control->reqseq;
971 		__clear_ack_timer(chan);
972 	}
973 
974 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
975 	       control->final, control->poll, control->super);
976 
977 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
978 		control_field = __pack_extended_control(control);
979 	else
980 		control_field = __pack_enhanced_control(control);
981 
982 	skb = l2cap_create_sframe_pdu(chan, control_field);
983 	if (!IS_ERR(skb))
984 		l2cap_do_send(chan, skb);
985 }
986 
987 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
988 {
989 	struct l2cap_ctrl control;
990 
991 	BT_DBG("chan %p, poll %d", chan, poll);
992 
993 	memset(&control, 0, sizeof(control));
994 	control.sframe = 1;
995 	control.poll = poll;
996 
997 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
998 		control.super = L2CAP_SUPER_RNR;
999 	else
1000 		control.super = L2CAP_SUPER_RR;
1001 
1002 	control.reqseq = chan->buffer_seq;
1003 	l2cap_send_sframe(chan, &control);
1004 }
1005 
1006 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1007 {
1008 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1009 }
1010 
1011 static bool __amp_capable(struct l2cap_chan *chan)
1012 {
1013 	struct l2cap_conn *conn = chan->conn;
1014 
1015 	if (enable_hs &&
1016 	    hci_amp_capable() &&
1017 	    chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
1018 	    conn->fixed_chan_mask & L2CAP_FC_A2MP)
1019 		return true;
1020 	else
1021 		return false;
1022 }
1023 
1024 static bool l2cap_check_efs(struct l2cap_chan *chan)
1025 {
1026 	/* Check EFS parameters */
1027 	return true;
1028 }
1029 
1030 void l2cap_send_conn_req(struct l2cap_chan *chan)
1031 {
1032 	struct l2cap_conn *conn = chan->conn;
1033 	struct l2cap_conn_req req;
1034 
1035 	req.scid = cpu_to_le16(chan->scid);
1036 	req.psm  = chan->psm;
1037 
1038 	chan->ident = l2cap_get_ident(conn);
1039 
1040 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1041 
1042 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1043 }
1044 
1045 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1046 {
1047 	struct l2cap_create_chan_req req;
1048 	req.scid = cpu_to_le16(chan->scid);
1049 	req.psm  = chan->psm;
1050 	req.amp_id = amp_id;
1051 
1052 	chan->ident = l2cap_get_ident(chan->conn);
1053 
1054 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1055 		       sizeof(req), &req);
1056 }
1057 
1058 static void l2cap_move_setup(struct l2cap_chan *chan)
1059 {
1060 	struct sk_buff *skb;
1061 
1062 	BT_DBG("chan %p", chan);
1063 
1064 	if (chan->mode != L2CAP_MODE_ERTM)
1065 		return;
1066 
1067 	__clear_retrans_timer(chan);
1068 	__clear_monitor_timer(chan);
1069 	__clear_ack_timer(chan);
1070 
1071 	chan->retry_count = 0;
1072 	skb_queue_walk(&chan->tx_q, skb) {
1073 		if (bt_cb(skb)->control.retries)
1074 			bt_cb(skb)->control.retries = 1;
1075 		else
1076 			break;
1077 	}
1078 
1079 	chan->expected_tx_seq = chan->buffer_seq;
1080 
1081 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1082 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1083 	l2cap_seq_list_clear(&chan->retrans_list);
1084 	l2cap_seq_list_clear(&chan->srej_list);
1085 	skb_queue_purge(&chan->srej_q);
1086 
1087 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1088 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1089 
1090 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1091 }
1092 
1093 static void l2cap_move_done(struct l2cap_chan *chan)
1094 {
1095 	u8 move_role = chan->move_role;
1096 	BT_DBG("chan %p", chan);
1097 
1098 	chan->move_state = L2CAP_MOVE_STABLE;
1099 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1100 
1101 	if (chan->mode != L2CAP_MODE_ERTM)
1102 		return;
1103 
1104 	switch (move_role) {
1105 	case L2CAP_MOVE_ROLE_INITIATOR:
1106 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1107 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1108 		break;
1109 	case L2CAP_MOVE_ROLE_RESPONDER:
1110 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1111 		break;
1112 	}
1113 }
1114 
1115 static void l2cap_chan_ready(struct l2cap_chan *chan)
1116 {
1117 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1118 	chan->conf_state = 0;
1119 	__clear_chan_timer(chan);
1120 
1121 	chan->state = BT_CONNECTED;
1122 
1123 	chan->ops->ready(chan);
1124 }
1125 
1126 static void l2cap_start_connection(struct l2cap_chan *chan)
1127 {
1128 	if (__amp_capable(chan)) {
1129 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1130 		a2mp_discover_amp(chan);
1131 	} else {
1132 		l2cap_send_conn_req(chan);
1133 	}
1134 }
1135 
1136 static void l2cap_do_start(struct l2cap_chan *chan)
1137 {
1138 	struct l2cap_conn *conn = chan->conn;
1139 
1140 	if (conn->hcon->type == LE_LINK) {
1141 		l2cap_chan_ready(chan);
1142 		return;
1143 	}
1144 
1145 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1146 		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1147 			return;
1148 
1149 		if (l2cap_chan_check_security(chan) &&
1150 		    __l2cap_no_conn_pending(chan)) {
1151 			l2cap_start_connection(chan);
1152 		}
1153 	} else {
1154 		struct l2cap_info_req req;
1155 		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1156 
1157 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1158 		conn->info_ident = l2cap_get_ident(conn);
1159 
1160 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1161 
1162 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1163 			       sizeof(req), &req);
1164 	}
1165 }
1166 
1167 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1168 {
1169 	u32 local_feat_mask = l2cap_feat_mask;
1170 	if (!disable_ertm)
1171 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1172 
1173 	switch (mode) {
1174 	case L2CAP_MODE_ERTM:
1175 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1176 	case L2CAP_MODE_STREAMING:
1177 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1178 	default:
1179 		return 0x00;
1180 	}
1181 }
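
/*
 * Example (illustrative): ERTM or streaming mode is only usable when both
 * the remote feature mask and the local one advertise it, e.g.:
 *
 *	if (!l2cap_mode_supported(L2CAP_MODE_ERTM, conn->feat_mask))
 *		... fall back to basic mode or close the channel ...
 *
 * With disable_ertm set, local_feat_mask never gains L2CAP_FEAT_ERTM or
 * L2CAP_FEAT_STREAMING, so this returns 0 for both modes regardless of
 * what the remote supports.
 */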
1182 
1183 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1184 {
1185 	struct sock *sk = chan->sk;
1186 	struct l2cap_conn *conn = chan->conn;
1187 	struct l2cap_disconn_req req;
1188 
1189 	if (!conn)
1190 		return;
1191 
1192 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1193 		__clear_retrans_timer(chan);
1194 		__clear_monitor_timer(chan);
1195 		__clear_ack_timer(chan);
1196 	}
1197 
1198 	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1199 		l2cap_state_change(chan, BT_DISCONN);
1200 		return;
1201 	}
1202 
1203 	req.dcid = cpu_to_le16(chan->dcid);
1204 	req.scid = cpu_to_le16(chan->scid);
1205 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1206 		       sizeof(req), &req);
1207 
1208 	lock_sock(sk);
1209 	__l2cap_state_change(chan, BT_DISCONN);
1210 	__l2cap_chan_set_err(chan, err);
1211 	release_sock(sk);
1212 }
1213 
1214 /* ---- L2CAP connections ---- */
1215 static void l2cap_conn_start(struct l2cap_conn *conn)
1216 {
1217 	struct l2cap_chan *chan, *tmp;
1218 
1219 	BT_DBG("conn %p", conn);
1220 
1221 	mutex_lock(&conn->chan_lock);
1222 
1223 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1224 		struct sock *sk = chan->sk;
1225 
1226 		l2cap_chan_lock(chan);
1227 
1228 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1229 			l2cap_chan_unlock(chan);
1230 			continue;
1231 		}
1232 
1233 		if (chan->state == BT_CONNECT) {
1234 			if (!l2cap_chan_check_security(chan) ||
1235 			    !__l2cap_no_conn_pending(chan)) {
1236 				l2cap_chan_unlock(chan);
1237 				continue;
1238 			}
1239 
1240 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1241 			    && test_bit(CONF_STATE2_DEVICE,
1242 					&chan->conf_state)) {
1243 				l2cap_chan_close(chan, ECONNRESET);
1244 				l2cap_chan_unlock(chan);
1245 				continue;
1246 			}
1247 
1248 			l2cap_start_connection(chan);
1249 
1250 		} else if (chan->state == BT_CONNECT2) {
1251 			struct l2cap_conn_rsp rsp;
1252 			char buf[128];
1253 			rsp.scid = cpu_to_le16(chan->dcid);
1254 			rsp.dcid = cpu_to_le16(chan->scid);
1255 
1256 			if (l2cap_chan_check_security(chan)) {
1257 				lock_sock(sk);
1258 				if (test_bit(BT_SK_DEFER_SETUP,
1259 					     &bt_sk(sk)->flags)) {
1260 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1261 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1262 					chan->ops->defer(chan);
1263 
1264 				} else {
1265 					__l2cap_state_change(chan, BT_CONFIG);
1266 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1267 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1268 				}
1269 				release_sock(sk);
1270 			} else {
1271 				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1272 				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1273 			}
1274 
1275 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1276 				       sizeof(rsp), &rsp);
1277 
1278 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1279 			    rsp.result != L2CAP_CR_SUCCESS) {
1280 				l2cap_chan_unlock(chan);
1281 				continue;
1282 			}
1283 
1284 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1285 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1286 				       l2cap_build_conf_req(chan, buf), buf);
1287 			chan->num_conf_req++;
1288 		}
1289 
1290 		l2cap_chan_unlock(chan);
1291 	}
1292 
1293 	mutex_unlock(&conn->chan_lock);
1294 }
1295 
1296 /* Find socket with cid and source/destination bdaddr.
1297  * Returns closest match, locked.
1298  */
1299 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1300 						    bdaddr_t *src,
1301 						    bdaddr_t *dst)
1302 {
1303 	struct l2cap_chan *c, *c1 = NULL;
1304 
1305 	read_lock(&chan_list_lock);
1306 
1307 	list_for_each_entry(c, &chan_list, global_l) {
1308 		struct sock *sk = c->sk;
1309 
1310 		if (state && c->state != state)
1311 			continue;
1312 
1313 		if (c->scid == cid) {
1314 			int src_match, dst_match;
1315 			int src_any, dst_any;
1316 
1317 			/* Exact match. */
1318 			src_match = !bacmp(&bt_sk(sk)->src, src);
1319 			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1320 			if (src_match && dst_match) {
1321 				read_unlock(&chan_list_lock);
1322 				return c;
1323 			}
1324 
1325 			/* Closest match */
1326 			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1327 			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1328 			if ((src_match && dst_any) || (src_any && dst_match) ||
1329 			    (src_any && dst_any))
1330 				c1 = c;
1331 		}
1332 	}
1333 
1334 	read_unlock(&chan_list_lock);
1335 
1336 	return c1;
1337 }
1338 
1339 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1340 {
1341 	struct sock *parent, *sk;
1342 	struct l2cap_chan *chan, *pchan;
1343 
1344 	BT_DBG("");
1345 
1346 	/* Check if we have a socket listening on this cid */
1347 	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1348 					  conn->src, conn->dst);
1349 	if (!pchan)
1350 		return;
1351 
1352 	parent = pchan->sk;
1353 
1354 	lock_sock(parent);
1355 
1356 	chan = pchan->ops->new_connection(pchan);
1357 	if (!chan)
1358 		goto clean;
1359 
1360 	sk = chan->sk;
1361 
1362 	hci_conn_hold(conn->hcon);
1363 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
1364 
1365 	bacpy(&bt_sk(sk)->src, conn->src);
1366 	bacpy(&bt_sk(sk)->dst, conn->dst);
1367 
1368 	l2cap_chan_add(conn, chan);
1369 
1370 	l2cap_chan_ready(chan);
1371 
1372 clean:
1373 	release_sock(parent);
1374 }
1375 
1376 static void l2cap_conn_ready(struct l2cap_conn *conn)
1377 {
1378 	struct l2cap_chan *chan;
1379 	struct hci_conn *hcon = conn->hcon;
1380 
1381 	BT_DBG("conn %p", conn);
1382 
1383 	if (!hcon->out && hcon->type == LE_LINK)
1384 		l2cap_le_conn_ready(conn);
1385 
1386 	if (hcon->out && hcon->type == LE_LINK)
1387 		smp_conn_security(hcon, hcon->pending_sec_level);
1388 
1389 	mutex_lock(&conn->chan_lock);
1390 
1391 	list_for_each_entry(chan, &conn->chan_l, list) {
1392 
1393 		l2cap_chan_lock(chan);
1394 
1395 		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1396 			l2cap_chan_unlock(chan);
1397 			continue;
1398 		}
1399 
1400 		if (hcon->type == LE_LINK) {
1401 			if (smp_conn_security(hcon, chan->sec_level))
1402 				l2cap_chan_ready(chan);
1403 
1404 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1405 			struct sock *sk = chan->sk;
1406 			__clear_chan_timer(chan);
1407 			lock_sock(sk);
1408 			__l2cap_state_change(chan, BT_CONNECTED);
1409 			sk->sk_state_change(sk);
1410 			release_sock(sk);
1411 
1412 		} else if (chan->state == BT_CONNECT)
1413 			l2cap_do_start(chan);
1414 
1415 		l2cap_chan_unlock(chan);
1416 	}
1417 
1418 	mutex_unlock(&conn->chan_lock);
1419 }
1420 
1421 /* Notify sockets that we cannot guarantee reliability anymore */
1422 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1423 {
1424 	struct l2cap_chan *chan;
1425 
1426 	BT_DBG("conn %p", conn);
1427 
1428 	mutex_lock(&conn->chan_lock);
1429 
1430 	list_for_each_entry(chan, &conn->chan_l, list) {
1431 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1432 			l2cap_chan_set_err(chan, err);
1433 	}
1434 
1435 	mutex_unlock(&conn->chan_lock);
1436 }
1437 
1438 static void l2cap_info_timeout(struct work_struct *work)
1439 {
1440 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1441 					       info_timer.work);
1442 
1443 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1444 	conn->info_ident = 0;
1445 
1446 	l2cap_conn_start(conn);
1447 }
1448 
1449 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1450 {
1451 	struct l2cap_conn *conn = hcon->l2cap_data;
1452 	struct l2cap_chan *chan, *l;
1453 
1454 	if (!conn)
1455 		return;
1456 
1457 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1458 
1459 	kfree_skb(conn->rx_skb);
1460 
1461 	mutex_lock(&conn->chan_lock);
1462 
1463 	/* Kill channels */
1464 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1465 		l2cap_chan_hold(chan);
1466 		l2cap_chan_lock(chan);
1467 
1468 		l2cap_chan_del(chan, err);
1469 
1470 		l2cap_chan_unlock(chan);
1471 
1472 		chan->ops->close(chan);
1473 		l2cap_chan_put(chan);
1474 	}
1475 
1476 	mutex_unlock(&conn->chan_lock);
1477 
1478 	hci_chan_del(conn->hchan);
1479 
1480 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1481 		cancel_delayed_work_sync(&conn->info_timer);
1482 
1483 	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1484 		cancel_delayed_work_sync(&conn->security_timer);
1485 		smp_chan_destroy(conn);
1486 	}
1487 
1488 	hcon->l2cap_data = NULL;
1489 	kfree(conn);
1490 }
1491 
1492 static void security_timeout(struct work_struct *work)
1493 {
1494 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1495 					       security_timer.work);
1496 
1497 	BT_DBG("conn %p", conn);
1498 
1499 	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1500 		smp_chan_destroy(conn);
1501 		l2cap_conn_del(conn->hcon, ETIMEDOUT);
1502 	}
1503 }
1504 
1505 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1506 {
1507 	struct l2cap_conn *conn = hcon->l2cap_data;
1508 	struct hci_chan *hchan;
1509 
1510 	if (conn || status)
1511 		return conn;
1512 
1513 	hchan = hci_chan_create(hcon);
1514 	if (!hchan)
1515 		return NULL;
1516 
1517 	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
1518 	if (!conn) {
1519 		hci_chan_del(hchan);
1520 		return NULL;
1521 	}
1522 
1523 	hcon->l2cap_data = conn;
1524 	conn->hcon = hcon;
1525 	conn->hchan = hchan;
1526 
1527 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1528 
1529 	switch (hcon->type) {
1530 	case AMP_LINK:
1531 		conn->mtu = hcon->hdev->block_mtu;
1532 		break;
1533 
1534 	case LE_LINK:
1535 		if (hcon->hdev->le_mtu) {
1536 			conn->mtu = hcon->hdev->le_mtu;
1537 			break;
1538 		}
1539 		/* fall through */
1540 
1541 	default:
1542 		conn->mtu = hcon->hdev->acl_mtu;
1543 		break;
1544 	}
1545 
1546 	conn->src = &hcon->hdev->bdaddr;
1547 	conn->dst = &hcon->dst;
1548 
1549 	conn->feat_mask = 0;
1550 
1551 	spin_lock_init(&conn->lock);
1552 	mutex_init(&conn->chan_lock);
1553 
1554 	INIT_LIST_HEAD(&conn->chan_l);
1555 
1556 	if (hcon->type == LE_LINK)
1557 		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1558 	else
1559 		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1560 
1561 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1562 
1563 	return conn;
1564 }
1565 
1566 /* ---- Socket interface ---- */
1567 
1568 /* Find socket with psm and source / destination bdaddr.
1569  * Returns closest match.
1570  */
1571 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1572 						   bdaddr_t *src,
1573 						   bdaddr_t *dst)
1574 {
1575 	struct l2cap_chan *c, *c1 = NULL;
1576 
1577 	read_lock(&chan_list_lock);
1578 
1579 	list_for_each_entry(c, &chan_list, global_l) {
1580 		struct sock *sk = c->sk;
1581 
1582 		if (state && c->state != state)
1583 			continue;
1584 
1585 		if (c->psm == psm) {
1586 			int src_match, dst_match;
1587 			int src_any, dst_any;
1588 
1589 			/* Exact match. */
1590 			src_match = !bacmp(&bt_sk(sk)->src, src);
1591 			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1592 			if (src_match && dst_match) {
1593 				read_unlock(&chan_list_lock);
1594 				return c;
1595 			}
1596 
1597 			/* Closest match */
1598 			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1599 			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1600 			if ((src_match && dst_any) || (src_any && dst_match) ||
1601 			    (src_any && dst_any))
1602 				c1 = c;
1603 		}
1604 	}
1605 
1606 	read_unlock(&chan_list_lock);
1607 
1608 	return c1;
1609 }
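
/*
 * Matching rules used above (informational): a channel whose source and
 * destination addresses both match exactly is returned immediately;
 * otherwise a channel where the non-matching side is BDADDR_ANY is kept
 * as the closest match.  For example, a listener bound to BDADDR_ANY and
 * the requested PSM still matches an incoming connection from any remote
 * device, even though no exact source/destination match exists.
 */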
1610 
1611 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1612 		       bdaddr_t *dst, u8 dst_type)
1613 {
1614 	struct sock *sk = chan->sk;
1615 	bdaddr_t *src = &bt_sk(sk)->src;
1616 	struct l2cap_conn *conn;
1617 	struct hci_conn *hcon;
1618 	struct hci_dev *hdev;
1619 	__u8 auth_type;
1620 	int err;
1621 
1622 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
1623 	       dst_type, __le16_to_cpu(psm));
1624 
1625 	hdev = hci_get_route(dst, src);
1626 	if (!hdev)
1627 		return -EHOSTUNREACH;
1628 
1629 	hci_dev_lock(hdev);
1630 
1631 	l2cap_chan_lock(chan);
1632 
1633 	/* PSM must be odd and lsb of upper byte must be 0 */
1634 	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1635 	    chan->chan_type != L2CAP_CHAN_RAW) {
1636 		err = -EINVAL;
1637 		goto done;
1638 	}
1639 
1640 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1641 		err = -EINVAL;
1642 		goto done;
1643 	}
1644 
1645 	switch (chan->mode) {
1646 	case L2CAP_MODE_BASIC:
1647 		break;
1648 	case L2CAP_MODE_ERTM:
1649 	case L2CAP_MODE_STREAMING:
1650 		if (!disable_ertm)
1651 			break;
1652 		/* fall through */
1653 	default:
1654 		err = -ENOTSUPP;
1655 		goto done;
1656 	}
1657 
1658 	switch (chan->state) {
1659 	case BT_CONNECT:
1660 	case BT_CONNECT2:
1661 	case BT_CONFIG:
1662 		/* Already connecting */
1663 		err = 0;
1664 		goto done;
1665 
1666 	case BT_CONNECTED:
1667 		/* Already connected */
1668 		err = -EISCONN;
1669 		goto done;
1670 
1671 	case BT_OPEN:
1672 	case BT_BOUND:
1673 		/* Can connect */
1674 		break;
1675 
1676 	default:
1677 		err = -EBADFD;
1678 		goto done;
1679 	}
1680 
1681 	/* Set destination address and psm */
1682 	lock_sock(sk);
1683 	bacpy(&bt_sk(sk)->dst, dst);
1684 	release_sock(sk);
1685 
1686 	chan->psm = psm;
1687 	chan->dcid = cid;
1688 
1689 	auth_type = l2cap_get_auth_type(chan);
1690 
1691 	if (chan->dcid == L2CAP_CID_LE_DATA)
1692 		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1693 				   chan->sec_level, auth_type);
1694 	else
1695 		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1696 				   chan->sec_level, auth_type);
1697 
1698 	if (IS_ERR(hcon)) {
1699 		err = PTR_ERR(hcon);
1700 		goto done;
1701 	}
1702 
1703 	conn = l2cap_conn_add(hcon, 0);
1704 	if (!conn) {
1705 		hci_conn_put(hcon);
1706 		err = -ENOMEM;
1707 		goto done;
1708 	}
1709 
1710 	if (hcon->type == LE_LINK) {
1711 		err = 0;
1712 
1713 		if (!list_empty(&conn->chan_l)) {
1714 			err = -EBUSY;
1715 			hci_conn_put(hcon);
1716 		}
1717 
1718 		if (err)
1719 			goto done;
1720 	}
1721 
1722 	/* Update source addr of the socket */
1723 	bacpy(src, conn->src);
1724 
1725 	l2cap_chan_unlock(chan);
1726 	l2cap_chan_add(conn, chan);
1727 	l2cap_chan_lock(chan);
1728 
1729 	l2cap_state_change(chan, BT_CONNECT);
1730 	__set_chan_timer(chan, sk->sk_sndtimeo);
1731 
1732 	if (hcon->state == BT_CONNECTED) {
1733 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1734 			__clear_chan_timer(chan);
1735 			if (l2cap_chan_check_security(chan))
1736 				l2cap_state_change(chan, BT_CONNECTED);
1737 		} else
1738 			l2cap_do_start(chan);
1739 	}
1740 
1741 	err = 0;
1742 
1743 done:
1744 	l2cap_chan_unlock(chan);
1745 	hci_dev_unlock(hdev);
1746 	hci_dev_put(hdev);
1747 	return err;
1748 }
1749 
1750 int __l2cap_wait_ack(struct sock *sk)
1751 {
1752 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1753 	DECLARE_WAITQUEUE(wait, current);
1754 	int err = 0;
1755 	int timeo = HZ/5;
1756 
1757 	add_wait_queue(sk_sleep(sk), &wait);
1758 	set_current_state(TASK_INTERRUPTIBLE);
1759 	while (chan->unacked_frames > 0 && chan->conn) {
1760 		if (!timeo)
1761 			timeo = HZ/5;
1762 
1763 		if (signal_pending(current)) {
1764 			err = sock_intr_errno(timeo);
1765 			break;
1766 		}
1767 
1768 		release_sock(sk);
1769 		timeo = schedule_timeout(timeo);
1770 		lock_sock(sk);
1771 		set_current_state(TASK_INTERRUPTIBLE);
1772 
1773 		err = sock_error(sk);
1774 		if (err)
1775 			break;
1776 	}
1777 	set_current_state(TASK_RUNNING);
1778 	remove_wait_queue(sk_sleep(sk), &wait);
1779 	return err;
1780 }
1781 
1782 static void l2cap_monitor_timeout(struct work_struct *work)
1783 {
1784 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1785 					       monitor_timer.work);
1786 
1787 	BT_DBG("chan %p", chan);
1788 
1789 	l2cap_chan_lock(chan);
1790 
1791 	if (!chan->conn) {
1792 		l2cap_chan_unlock(chan);
1793 		l2cap_chan_put(chan);
1794 		return;
1795 	}
1796 
1797 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1798 
1799 	l2cap_chan_unlock(chan);
1800 	l2cap_chan_put(chan);
1801 }
1802 
1803 static void l2cap_retrans_timeout(struct work_struct *work)
1804 {
1805 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1806 					       retrans_timer.work);
1807 
1808 	BT_DBG("chan %p", chan);
1809 
1810 	l2cap_chan_lock(chan);
1811 
1812 	if (!chan->conn) {
1813 		l2cap_chan_unlock(chan);
1814 		l2cap_chan_put(chan);
1815 		return;
1816 	}
1817 
1818 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1819 	l2cap_chan_unlock(chan);
1820 	l2cap_chan_put(chan);
1821 }
1822 
1823 static void l2cap_streaming_send(struct l2cap_chan *chan,
1824 				 struct sk_buff_head *skbs)
1825 {
1826 	struct sk_buff *skb;
1827 	struct l2cap_ctrl *control;
1828 
1829 	BT_DBG("chan %p, skbs %p", chan, skbs);
1830 
1831 	if (__chan_is_moving(chan))
1832 		return;
1833 
1834 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
1835 
1836 	while (!skb_queue_empty(&chan->tx_q)) {
1837 
1838 		skb = skb_dequeue(&chan->tx_q);
1839 
1840 		bt_cb(skb)->control.retries = 1;
1841 		control = &bt_cb(skb)->control;
1842 
1843 		control->reqseq = 0;
1844 		control->txseq = chan->next_tx_seq;
1845 
1846 		__pack_control(chan, control, skb);
1847 
1848 		if (chan->fcs == L2CAP_FCS_CRC16) {
1849 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1850 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1851 		}
1852 
1853 		l2cap_do_send(chan, skb);
1854 
1855 		BT_DBG("Sent txseq %u", control->txseq);
1856 
1857 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1858 		chan->frames_sent++;
1859 	}
1860 }
1861 
1862 static int l2cap_ertm_send(struct l2cap_chan *chan)
1863 {
1864 	struct sk_buff *skb, *tx_skb;
1865 	struct l2cap_ctrl *control;
1866 	int sent = 0;
1867 
1868 	BT_DBG("chan %p", chan);
1869 
1870 	if (chan->state != BT_CONNECTED)
1871 		return -ENOTCONN;
1872 
1873 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1874 		return 0;
1875 
1876 	if (__chan_is_moving(chan))
1877 		return 0;
1878 
1879 	while (chan->tx_send_head &&
1880 	       chan->unacked_frames < chan->remote_tx_win &&
1881 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
1882 
1883 		skb = chan->tx_send_head;
1884 
1885 		bt_cb(skb)->control.retries = 1;
1886 		control = &bt_cb(skb)->control;
1887 
1888 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1889 			control->final = 1;
1890 
1891 		control->reqseq = chan->buffer_seq;
1892 		chan->last_acked_seq = chan->buffer_seq;
1893 		control->txseq = chan->next_tx_seq;
1894 
1895 		__pack_control(chan, control, skb);
1896 
1897 		if (chan->fcs == L2CAP_FCS_CRC16) {
1898 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1899 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1900 		}
1901 
1902 		/* Clone after data has been modified. Data is assumed to be
1903 		 * read-only (for locking purposes) on cloned sk_buffs.
1904 		 */
1905 		tx_skb = skb_clone(skb, GFP_KERNEL);
1906 
1907 		if (!tx_skb)
1908 			break;
1909 
1910 		__set_retrans_timer(chan);
1911 
1912 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1913 		chan->unacked_frames++;
1914 		chan->frames_sent++;
1915 		sent++;
1916 
1917 		if (skb_queue_is_last(&chan->tx_q, skb))
1918 			chan->tx_send_head = NULL;
1919 		else
1920 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1921 
1922 		l2cap_do_send(chan, tx_skb);
1923 		BT_DBG("Sent txseq %u", control->txseq);
1924 	}
1925 
1926 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1927 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
1928 
1929 	return sent;
1930 }
1931 
1932 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1933 {
1934 	struct l2cap_ctrl control;
1935 	struct sk_buff *skb;
1936 	struct sk_buff *tx_skb;
1937 	u16 seq;
1938 
1939 	BT_DBG("chan %p", chan);
1940 
1941 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1942 		return;
1943 
1944 	if (__chan_is_moving(chan))
1945 		return;
1946 
1947 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1948 		seq = l2cap_seq_list_pop(&chan->retrans_list);
1949 
1950 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1951 		if (!skb) {
1952 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
1953 			       seq);
1954 			continue;
1955 		}
1956 
1957 		bt_cb(skb)->control.retries++;
1958 		control = bt_cb(skb)->control;
1959 
1960 		if (chan->max_tx != 0 &&
1961 		    bt_cb(skb)->control.retries > chan->max_tx) {
1962 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1963 			l2cap_send_disconn_req(chan, ECONNRESET);
1964 			l2cap_seq_list_clear(&chan->retrans_list);
1965 			break;
1966 		}
1967 
1968 		control.reqseq = chan->buffer_seq;
1969 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1970 			control.final = 1;
1971 		else
1972 			control.final = 0;
1973 
1974 		if (skb_cloned(skb)) {
1975 			/* Cloned sk_buffs are read-only, so we need a
1976 			 * writeable copy
1977 			 */
1978 			tx_skb = skb_copy(skb, GFP_KERNEL);
1979 		} else {
1980 			tx_skb = skb_clone(skb, GFP_KERNEL);
1981 		}
1982 
1983 		if (!tx_skb) {
1984 			l2cap_seq_list_clear(&chan->retrans_list);
1985 			break;
1986 		}
1987 
1988 		/* Update skb contents */
1989 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1990 			put_unaligned_le32(__pack_extended_control(&control),
1991 					   tx_skb->data + L2CAP_HDR_SIZE);
1992 		} else {
1993 			put_unaligned_le16(__pack_enhanced_control(&control),
1994 					   tx_skb->data + L2CAP_HDR_SIZE);
1995 		}
1996 
1997 		if (chan->fcs == L2CAP_FCS_CRC16) {
1998 			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1999 			put_unaligned_le16(fcs, skb_put(tx_skb,
2000 							L2CAP_FCS_SIZE));
2001 		}
2002 
2003 		l2cap_do_send(chan, tx_skb);
2004 
2005 		BT_DBG("Resent txseq %d", control.txseq);
2006 
2007 		chan->last_acked_seq = chan->buffer_seq;
2008 	}
2009 }
2010 
2011 static void l2cap_retransmit(struct l2cap_chan *chan,
2012 			     struct l2cap_ctrl *control)
2013 {
2014 	BT_DBG("chan %p, control %p", chan, control);
2015 
2016 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2017 	l2cap_ertm_resend(chan);
2018 }
2019 
2020 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2021 				 struct l2cap_ctrl *control)
2022 {
2023 	struct sk_buff *skb;
2024 
2025 	BT_DBG("chan %p, control %p", chan, control);
2026 
2027 	if (control->poll)
2028 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2029 
2030 	l2cap_seq_list_clear(&chan->retrans_list);
2031 
2032 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2033 		return;
2034 
2035 	if (chan->unacked_frames) {
2036 		skb_queue_walk(&chan->tx_q, skb) {
2037 			if (bt_cb(skb)->control.txseq == control->reqseq ||
2038 			    skb == chan->tx_send_head)
2039 				break;
2040 		}
2041 
2042 		skb_queue_walk_from(&chan->tx_q, skb) {
2043 			if (skb == chan->tx_send_head)
2044 				break;
2045 
2046 			l2cap_seq_list_append(&chan->retrans_list,
2047 					      bt_cb(skb)->control.txseq);
2048 		}
2049 
2050 		l2cap_ertm_resend(chan);
2051 	}
2052 }
2053 
2054 static void l2cap_send_ack(struct l2cap_chan *chan)
2055 {
2056 	struct l2cap_ctrl control;
2057 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2058 					 chan->last_acked_seq);
2059 	int threshold;
2060 
2061 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2062 	       chan, chan->last_acked_seq, chan->buffer_seq);
2063 
2064 	memset(&control, 0, sizeof(control));
2065 	control.sframe = 1;
2066 
2067 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2068 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2069 		__clear_ack_timer(chan);
2070 		control.super = L2CAP_SUPER_RNR;
2071 		control.reqseq = chan->buffer_seq;
2072 		l2cap_send_sframe(chan, &control);
2073 	} else {
2074 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2075 			l2cap_ertm_send(chan);
2076 			/* If any i-frames were sent, they included an ack */
2077 			if (chan->buffer_seq == chan->last_acked_seq)
2078 				frames_to_ack = 0;
2079 		}
2080 
2081 		/* Ack now if the window is 3/4ths full.
2082 		 * Calculate without mul or div
2083 		 */
2084 		threshold = chan->ack_win;
2085 		threshold += threshold << 1;
2086 		threshold >>= 2;
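		/* e.g. ack_win = 8: threshold = (8 + (8 << 1)) >> 2 = 6 */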
2087 
2088 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2089 		       threshold);
2090 
2091 		if (frames_to_ack >= threshold) {
2092 			__clear_ack_timer(chan);
2093 			control.super = L2CAP_SUPER_RR;
2094 			control.reqseq = chan->buffer_seq;
2095 			l2cap_send_sframe(chan, &control);
2096 			frames_to_ack = 0;
2097 		}
2098 
2099 		if (frames_to_ack)
2100 			__set_ack_timer(chan);
2101 	}
2102 }
2103 
2104 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2105 					 struct msghdr *msg, int len,
2106 					 int count, struct sk_buff *skb)
2107 {
2108 	struct l2cap_conn *conn = chan->conn;
2109 	struct sk_buff **frag;
2110 	int sent = 0;
2111 
2112 	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2113 		return -EFAULT;
2114 
2115 	sent += count;
2116 	len  -= count;
2117 
2118 	/* Continuation fragments (no L2CAP header) */
2119 	frag = &skb_shinfo(skb)->frag_list;
2120 	while (len) {
2121 		struct sk_buff *tmp;
2122 
2123 		count = min_t(unsigned int, conn->mtu, len);
2124 
2125 		tmp = chan->ops->alloc_skb(chan, count,
2126 					   msg->msg_flags & MSG_DONTWAIT);
2127 		if (IS_ERR(tmp))
2128 			return PTR_ERR(tmp);
2129 
2130 		*frag = tmp;
2131 
2132 		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2133 			return -EFAULT;
2134 
2135 		(*frag)->priority = skb->priority;
2136 
2137 		sent += count;
2138 		len  -= count;
2139 
2140 		skb->len += (*frag)->len;
2141 		skb->data_len += (*frag)->len;
2142 
2143 		frag = &(*frag)->next;
2144 	}
2145 
2146 	return sent;
2147 }
2148 
2149 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2150 						 struct msghdr *msg, size_t len,
2151 						 u32 priority)
2152 {
2153 	struct l2cap_conn *conn = chan->conn;
2154 	struct sk_buff *skb;
2155 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2156 	struct l2cap_hdr *lh;
2157 
2158 	BT_DBG("chan %p len %zu priority %u", chan, len, priority);
2159 
2160 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2161 
2162 	skb = chan->ops->alloc_skb(chan, count + hlen,
2163 				   msg->msg_flags & MSG_DONTWAIT);
2164 	if (IS_ERR(skb))
2165 		return skb;
2166 
2167 	skb->priority = priority;
2168 
2169 	/* Create L2CAP header */
2170 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2171 	lh->cid = cpu_to_le16(chan->dcid);
2172 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2173 	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2174 
2175 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2176 	if (unlikely(err < 0)) {
2177 		kfree_skb(skb);
2178 		return ERR_PTR(err);
2179 	}
2180 	return skb;
2181 }
2182 
2183 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2184 					      struct msghdr *msg, size_t len,
2185 					      u32 priority)
2186 {
2187 	struct l2cap_conn *conn = chan->conn;
2188 	struct sk_buff *skb;
2189 	int err, count;
2190 	struct l2cap_hdr *lh;
2191 
2192 	BT_DBG("chan %p len %zu", chan, len);
2193 
2194 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2195 
2196 	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2197 				   msg->msg_flags & MSG_DONTWAIT);
2198 	if (IS_ERR(skb))
2199 		return skb;
2200 
2201 	skb->priority = priority;
2202 
2203 	/* Create L2CAP header */
2204 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2205 	lh->cid = cpu_to_le16(chan->dcid);
2206 	lh->len = cpu_to_le16(len);
2207 
2208 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2209 	if (unlikely(err < 0)) {
2210 		kfree_skb(skb);
2211 		return ERR_PTR(err);
2212 	}
2213 	return skb;
2214 }
2215 
2216 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2217 					       struct msghdr *msg, size_t len,
2218 					       u16 sdulen)
2219 {
2220 	struct l2cap_conn *conn = chan->conn;
2221 	struct sk_buff *skb;
2222 	int err, count, hlen;
2223 	struct l2cap_hdr *lh;
2224 
2225 	BT_DBG("chan %p len %zu", chan, len);
2226 
2227 	if (!conn)
2228 		return ERR_PTR(-ENOTCONN);
2229 
2230 	hlen = __ertm_hdr_size(chan);
2231 
2232 	if (sdulen)
2233 		hlen += L2CAP_SDULEN_SIZE;
2234 
2235 	if (chan->fcs == L2CAP_FCS_CRC16)
2236 		hlen += L2CAP_FCS_SIZE;
2237 
2238 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2239 
2240 	skb = chan->ops->alloc_skb(chan, count + hlen,
2241 				   msg->msg_flags & MSG_DONTWAIT);
2242 	if (IS_ERR(skb))
2243 		return skb;
2244 
2245 	/* Create L2CAP header */
2246 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2247 	lh->cid = cpu_to_le16(chan->dcid);
2248 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2249 
2250 	/* Control header is populated later */
2251 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2252 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2253 	else
2254 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2255 
2256 	if (sdulen)
2257 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2258 
2259 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2260 	if (unlikely(err < 0)) {
2261 		kfree_skb(skb);
2262 		return ERR_PTR(err);
2263 	}
2264 
2265 	bt_cb(skb)->control.fcs = chan->fcs;
2266 	bt_cb(skb)->control.retries = 0;
2267 	return skb;
2268 }
2269 
2270 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2271 			     struct sk_buff_head *seg_queue,
2272 			     struct msghdr *msg, size_t len)
2273 {
2274 	struct sk_buff *skb;
2275 	u16 sdu_len;
2276 	size_t pdu_len;
2277 	u8 sar;
2278 
2279 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2280 
2281 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2282 	 * so fragmented skbs are not used.  The HCI layer's handling
2283 	 * of fragmented skbs is not compatible with ERTM's queueing.
2284 	 */
2285 
2286 	/* PDU size is derived from the HCI MTU */
2287 	pdu_len = chan->conn->mtu;
2288 
2289 	/* Constrain PDU size for BR/EDR connections */
2290 	if (!chan->hs_hcon)
2291 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2292 
2293 	/* Adjust for largest possible L2CAP overhead. */
2294 	if (chan->fcs)
2295 		pdu_len -= L2CAP_FCS_SIZE;
2296 
2297 	pdu_len -= __ertm_hdr_size(chan);
2298 
2299 	/* Remote device may have requested smaller PDUs */
2300 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2301 
2302 	if (len <= pdu_len) {
2303 		sar = L2CAP_SAR_UNSEGMENTED;
2304 		sdu_len = 0;
2305 		pdu_len = len;
2306 	} else {
2307 		sar = L2CAP_SAR_START;
2308 		sdu_len = len;
2309 		pdu_len -= L2CAP_SDULEN_SIZE;
2310 	}
2311 
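	/* Emit one PDU per iteration: a single UNSEGMENTED PDU for a short
	 * SDU, otherwise a START PDU carrying the SDU length followed by
	 * CONTINUE PDUs and a final END PDU.
	 */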
2312 	while (len > 0) {
2313 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2314 
2315 		if (IS_ERR(skb)) {
2316 			__skb_queue_purge(seg_queue);
2317 			return PTR_ERR(skb);
2318 		}
2319 
2320 		bt_cb(skb)->control.sar = sar;
2321 		__skb_queue_tail(seg_queue, skb);
2322 
2323 		len -= pdu_len;
2324 		if (sdu_len) {
2325 			sdu_len = 0;
2326 			pdu_len += L2CAP_SDULEN_SIZE;
2327 		}
2328 
2329 		if (len <= pdu_len) {
2330 			sar = L2CAP_SAR_END;
2331 			pdu_len = len;
2332 		} else {
2333 			sar = L2CAP_SAR_CONTINUE;
2334 		}
2335 	}
2336 
2337 	return 0;
2338 }
2339 
2340 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2341 		    u32 priority)
2342 {
2343 	struct sk_buff *skb;
2344 	int err;
2345 	struct sk_buff_head seg_queue;
2346 
2347 	/* Connectionless channel */
2348 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2349 		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2350 		if (IS_ERR(skb))
2351 			return PTR_ERR(skb);
2352 
2353 		l2cap_do_send(chan, skb);
2354 		return len;
2355 	}
2356 
2357 	switch (chan->mode) {
2358 	case L2CAP_MODE_BASIC:
2359 		/* Check outgoing MTU */
2360 		if (len > chan->omtu)
2361 			return -EMSGSIZE;
2362 
2363 		/* Create a basic PDU */
2364 		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2365 		if (IS_ERR(skb))
2366 			return PTR_ERR(skb);
2367 
2368 		l2cap_do_send(chan, skb);
2369 		err = len;
2370 		break;
2371 
2372 	case L2CAP_MODE_ERTM:
2373 	case L2CAP_MODE_STREAMING:
2374 		/* Check outgoing MTU */
2375 		if (len > chan->omtu) {
2376 			err = -EMSGSIZE;
2377 			break;
2378 		}
2379 
2380 		__skb_queue_head_init(&seg_queue);
2381 
2382 		/* Do segmentation before calling in to the state machine,
2383 		 * since it's possible to block while waiting for memory
2384 		 * allocation.
2385 		 */
2386 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2387 
2388 		/* The channel could have been closed while segmenting,
2389 		 * so check that it is still connected.
2390 		 */
2391 		if (chan->state != BT_CONNECTED) {
2392 			__skb_queue_purge(&seg_queue);
2393 			err = -ENOTCONN;
2394 		}
2395 
2396 		if (err)
2397 			break;
2398 
2399 		if (chan->mode == L2CAP_MODE_ERTM)
2400 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2401 		else
2402 			l2cap_streaming_send(chan, &seg_queue);
2403 
2404 		err = len;
2405 
2406 		/* If the skbs were not queued for sending, they'll still be in
2407 		 * seg_queue and need to be purged.
2408 		 */
2409 		__skb_queue_purge(&seg_queue);
2410 		break;
2411 
2412 	default:
2413 		BT_DBG("bad mode 0x%2.2x", chan->mode);
2414 		err = -EBADFD;
2415 	}
2416 
2417 	return err;
2418 }
2419 
2420 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2421 {
2422 	struct l2cap_ctrl control;
2423 	u16 seq;
2424 
2425 	BT_DBG("chan %p, txseq %u", chan, txseq);
2426 
2427 	memset(&control, 0, sizeof(control));
2428 	control.sframe = 1;
2429 	control.super = L2CAP_SUPER_SREJ;
2430 
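	/* Send an SREJ for each missing frame between the expected sequence
	 * number and the one just received, skipping frames that already
	 * arrived out of order and sit in srej_q.
	 */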
2431 	for (seq = chan->expected_tx_seq; seq != txseq;
2432 	     seq = __next_seq(chan, seq)) {
2433 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2434 			control.reqseq = seq;
2435 			l2cap_send_sframe(chan, &control);
2436 			l2cap_seq_list_append(&chan->srej_list, seq);
2437 		}
2438 	}
2439 
2440 	chan->expected_tx_seq = __next_seq(chan, txseq);
2441 }
2442 
2443 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2444 {
2445 	struct l2cap_ctrl control;
2446 
2447 	BT_DBG("chan %p", chan);
2448 
2449 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2450 		return;
2451 
2452 	memset(&control, 0, sizeof(control));
2453 	control.sframe = 1;
2454 	control.super = L2CAP_SUPER_SREJ;
2455 	control.reqseq = chan->srej_list.tail;
2456 	l2cap_send_sframe(chan, &control);
2457 }
2458 
2459 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2460 {
2461 	struct l2cap_ctrl control;
2462 	u16 initial_head;
2463 	u16 seq;
2464 
2465 	BT_DBG("chan %p, txseq %u", chan, txseq);
2466 
2467 	memset(&control, 0, sizeof(control));
2468 	control.sframe = 1;
2469 	control.super = L2CAP_SUPER_SREJ;
2470 
2471 	/* Capture initial list head to allow only one pass through the list. */
2472 	initial_head = chan->srej_list.head;
2473 
2474 	do {
2475 		seq = l2cap_seq_list_pop(&chan->srej_list);
2476 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2477 			break;
2478 
2479 		control.reqseq = seq;
2480 		l2cap_send_sframe(chan, &control);
2481 		l2cap_seq_list_append(&chan->srej_list, seq);
2482 	} while (chan->srej_list.head != initial_head);
2483 }
2484 
2485 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2486 {
2487 	struct sk_buff *acked_skb;
2488 	u16 ackseq;
2489 
2490 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2491 
2492 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2493 		return;
2494 
2495 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2496 	       chan->expected_ack_seq, chan->unacked_frames);
2497 
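	/* Frames up to (but not including) reqseq have been acknowledged;
	 * drop them from the transmit queue.
	 */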
2498 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2499 	     ackseq = __next_seq(chan, ackseq)) {
2500 
2501 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2502 		if (acked_skb) {
2503 			skb_unlink(acked_skb, &chan->tx_q);
2504 			kfree_skb(acked_skb);
2505 			chan->unacked_frames--;
2506 		}
2507 	}
2508 
2509 	chan->expected_ack_seq = reqseq;
2510 
2511 	if (chan->unacked_frames == 0)
2512 		__clear_retrans_timer(chan);
2513 
2514 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2515 }
2516 
2517 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2518 {
2519 	BT_DBG("chan %p", chan);
2520 
2521 	chan->expected_tx_seq = chan->buffer_seq;
2522 	l2cap_seq_list_clear(&chan->srej_list);
2523 	skb_queue_purge(&chan->srej_q);
2524 	chan->rx_state = L2CAP_RX_STATE_RECV;
2525 }
2526 
2527 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2528 				struct l2cap_ctrl *control,
2529 				struct sk_buff_head *skbs, u8 event)
2530 {
2531 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2532 	       event);
2533 
2534 	switch (event) {
2535 	case L2CAP_EV_DATA_REQUEST:
2536 		if (chan->tx_send_head == NULL)
2537 			chan->tx_send_head = skb_peek(skbs);
2538 
2539 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2540 		l2cap_ertm_send(chan);
2541 		break;
2542 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2543 		BT_DBG("Enter LOCAL_BUSY");
2544 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2545 
2546 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2547 			/* The SREJ_SENT state must be aborted if we are to
2548 			 * enter the LOCAL_BUSY state.
2549 			 */
2550 			l2cap_abort_rx_srej_sent(chan);
2551 		}
2552 
2553 		l2cap_send_ack(chan);
2554 
2555 		break;
2556 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2557 		BT_DBG("Exit LOCAL_BUSY");
2558 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2559 
2560 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2561 			struct l2cap_ctrl local_control;
2562 
2563 			memset(&local_control, 0, sizeof(local_control));
2564 			local_control.sframe = 1;
2565 			local_control.super = L2CAP_SUPER_RR;
2566 			local_control.poll = 1;
2567 			local_control.reqseq = chan->buffer_seq;
2568 			l2cap_send_sframe(chan, &local_control);
2569 
2570 			chan->retry_count = 1;
2571 			__set_monitor_timer(chan);
2572 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2573 		}
2574 		break;
2575 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2576 		l2cap_process_reqseq(chan, control->reqseq);
2577 		break;
2578 	case L2CAP_EV_EXPLICIT_POLL:
2579 		l2cap_send_rr_or_rnr(chan, 1);
2580 		chan->retry_count = 1;
2581 		__set_monitor_timer(chan);
2582 		__clear_ack_timer(chan);
2583 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2584 		break;
2585 	case L2CAP_EV_RETRANS_TO:
2586 		l2cap_send_rr_or_rnr(chan, 1);
2587 		chan->retry_count = 1;
2588 		__set_monitor_timer(chan);
2589 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2590 		break;
2591 	case L2CAP_EV_RECV_FBIT:
2592 		/* Nothing to process */
2593 		break;
2594 	default:
2595 		break;
2596 	}
2597 }
2598 
2599 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2600 				  struct l2cap_ctrl *control,
2601 				  struct sk_buff_head *skbs, u8 event)
2602 {
2603 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2604 	       event);
2605 
2606 	switch (event) {
2607 	case L2CAP_EV_DATA_REQUEST:
2608 		if (chan->tx_send_head == NULL)
2609 			chan->tx_send_head = skb_peek(skbs);
2610 		/* Queue data, but don't send. */
2611 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2612 		break;
2613 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2614 		BT_DBG("Enter LOCAL_BUSY");
2615 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2616 
2617 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2618 			/* The SREJ_SENT state must be aborted if we are to
2619 			 * enter the LOCAL_BUSY state.
2620 			 */
2621 			l2cap_abort_rx_srej_sent(chan);
2622 		}
2623 
2624 		l2cap_send_ack(chan);
2625 
2626 		break;
2627 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2628 		BT_DBG("Exit LOCAL_BUSY");
2629 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2630 
2631 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2632 			struct l2cap_ctrl local_control;
2633 			memset(&local_control, 0, sizeof(local_control));
2634 			local_control.sframe = 1;
2635 			local_control.super = L2CAP_SUPER_RR;
2636 			local_control.poll = 1;
2637 			local_control.reqseq = chan->buffer_seq;
2638 			l2cap_send_sframe(chan, &local_control);
2639 
2640 			chan->retry_count = 1;
2641 			__set_monitor_timer(chan);
2642 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2643 		}
2644 		break;
2645 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2646 		l2cap_process_reqseq(chan, control->reqseq);
2647 
2648 		/* Fall through */
2649 
2650 	case L2CAP_EV_RECV_FBIT:
2651 		if (control && control->final) {
2652 			__clear_monitor_timer(chan);
2653 			if (chan->unacked_frames > 0)
2654 				__set_retrans_timer(chan);
2655 			chan->retry_count = 0;
2656 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2657 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2658 		}
2659 		break;
2660 	case L2CAP_EV_EXPLICIT_POLL:
2661 		/* Ignore */
2662 		break;
2663 	case L2CAP_EV_MONITOR_TO:
2664 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2665 			l2cap_send_rr_or_rnr(chan, 1);
2666 			__set_monitor_timer(chan);
2667 			chan->retry_count++;
2668 		} else {
2669 			l2cap_send_disconn_req(chan, ECONNABORTED);
2670 		}
2671 		break;
2672 	default:
2673 		break;
2674 	}
2675 }
2676 
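/* ERTM transmitter state machine.  In XMIT, new data is queued and sent
 * as the window allows; in WAIT_F, new data is only queued until the peer
 * answers an outstanding poll with the F-bit set.
 */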
2677 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2678 		     struct sk_buff_head *skbs, u8 event)
2679 {
2680 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2681 	       chan, control, skbs, event, chan->tx_state);
2682 
2683 	switch (chan->tx_state) {
2684 	case L2CAP_TX_STATE_XMIT:
2685 		l2cap_tx_state_xmit(chan, control, skbs, event);
2686 		break;
2687 	case L2CAP_TX_STATE_WAIT_F:
2688 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2689 		break;
2690 	default:
2691 		/* Ignore event */
2692 		break;
2693 	}
2694 }
2695 
2696 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2697 			     struct l2cap_ctrl *control)
2698 {
2699 	BT_DBG("chan %p, control %p", chan, control);
2700 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2701 }
2702 
2703 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2704 				  struct l2cap_ctrl *control)
2705 {
2706 	BT_DBG("chan %p, control %p", chan, control);
2707 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2708 }
2709 
2710 /* Copy frame to all raw sockets on that connection */
2711 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2712 {
2713 	struct sk_buff *nskb;
2714 	struct l2cap_chan *chan;
2715 
2716 	BT_DBG("conn %p", conn);
2717 
2718 	mutex_lock(&conn->chan_lock);
2719 
2720 	list_for_each_entry(chan, &conn->chan_l, list) {
2721 		struct sock *sk = chan->sk;
2722 		if (chan->chan_type != L2CAP_CHAN_RAW)
2723 			continue;
2724 
2725 		/* Don't send frame to the socket it came from */
2726 		if (skb->sk == sk)
2727 			continue;
2728 		nskb = skb_clone(skb, GFP_KERNEL);
2729 		if (!nskb)
2730 			continue;
2731 
2732 		if (chan->ops->recv(chan, nskb))
2733 			kfree_skb(nskb);
2734 	}
2735 
2736 	mutex_unlock(&conn->chan_lock);
2737 }
2738 
2739 /* ---- L2CAP signalling commands ---- */
2740 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2741 				       u8 ident, u16 dlen, void *data)
2742 {
2743 	struct sk_buff *skb, **frag;
2744 	struct l2cap_cmd_hdr *cmd;
2745 	struct l2cap_hdr *lh;
2746 	int len, count;
2747 
2748 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2749 	       conn, code, ident, dlen);
2750 
2751 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2752 	count = min_t(unsigned int, conn->mtu, len);
2753 
2754 	skb = bt_skb_alloc(count, GFP_KERNEL);
2755 	if (!skb)
2756 		return NULL;
2757 
2758 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2759 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2760 
2761 	if (conn->hcon->type == LE_LINK)
2762 		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2763 	else
2764 		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2765 
2766 	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2767 	cmd->code  = code;
2768 	cmd->ident = ident;
2769 	cmd->len   = cpu_to_le16(dlen);
2770 
2771 	if (dlen) {
2772 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2773 		memcpy(skb_put(skb, count), data, count);
2774 		data += count;
2775 	}
2776 
2777 	len -= skb->len;
2778 
2779 	/* Continuation fragments (no L2CAP header) */
2780 	frag = &skb_shinfo(skb)->frag_list;
2781 	while (len) {
2782 		count = min_t(unsigned int, conn->mtu, len);
2783 
2784 		*frag = bt_skb_alloc(count, GFP_KERNEL);
2785 		if (!*frag)
2786 			goto fail;
2787 
2788 		memcpy(skb_put(*frag, count), data, count);
2789 
2790 		len  -= count;
2791 		data += count;
2792 
2793 		frag = &(*frag)->next;
2794 	}
2795 
2796 	return skb;
2797 
2798 fail:
2799 	kfree_skb(skb);
2800 	return NULL;
2801 }
2802 
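/* Extract one type/length/value option from a configuration request or
 * response and advance *ptr past it.  Values of length 1, 2 or 4 are
 * returned by value; longer options are returned as a pointer.
 */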
2803 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2804 				     unsigned long *val)
2805 {
2806 	struct l2cap_conf_opt *opt = *ptr;
2807 	int len;
2808 
2809 	len = L2CAP_CONF_OPT_SIZE + opt->len;
2810 	*ptr += len;
2811 
2812 	*type = opt->type;
2813 	*olen = opt->len;
2814 
2815 	switch (opt->len) {
2816 	case 1:
2817 		*val = *((u8 *) opt->val);
2818 		break;
2819 
2820 	case 2:
2821 		*val = get_unaligned_le16(opt->val);
2822 		break;
2823 
2824 	case 4:
2825 		*val = get_unaligned_le32(opt->val);
2826 		break;
2827 
2828 	default:
2829 		*val = (unsigned long) opt->val;
2830 		break;
2831 	}
2832 
2833 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2834 	return len;
2835 }
2836 
2837 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2838 {
2839 	struct l2cap_conf_opt *opt = *ptr;
2840 
2841 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2842 
2843 	opt->type = type;
2844 	opt->len  = len;
2845 
2846 	switch (len) {
2847 	case 1:
2848 		*((u8 *) opt->val)  = val;
2849 		break;
2850 
2851 	case 2:
2852 		put_unaligned_le16(val, opt->val);
2853 		break;
2854 
2855 	case 4:
2856 		put_unaligned_le32(val, opt->val);
2857 		break;
2858 
2859 	default:
2860 		memcpy(opt->val, (void *) val, len);
2861 		break;
2862 	}
2863 
2864 	*ptr += L2CAP_CONF_OPT_SIZE + len;
2865 }
2866 
2867 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2868 {
2869 	struct l2cap_conf_efs efs;
2870 
2871 	switch (chan->mode) {
2872 	case L2CAP_MODE_ERTM:
2873 		efs.id		= chan->local_id;
2874 		efs.stype	= chan->local_stype;
2875 		efs.msdu	= cpu_to_le16(chan->local_msdu);
2876 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
2877 		efs.acc_lat	= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2878 		efs.flush_to	= __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2879 		break;
2880 
2881 	case L2CAP_MODE_STREAMING:
2882 		efs.id		= 1;
2883 		efs.stype	= L2CAP_SERV_BESTEFFORT;
2884 		efs.msdu	= cpu_to_le16(chan->local_msdu);
2885 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
2886 		efs.acc_lat	= 0;
2887 		efs.flush_to	= 0;
2888 		break;
2889 
2890 	default:
2891 		return;
2892 	}
2893 
2894 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2895 			   (unsigned long) &efs);
2896 }
2897 
2898 static void l2cap_ack_timeout(struct work_struct *work)
2899 {
2900 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2901 					       ack_timer.work);
2902 	u16 frames_to_ack;
2903 
2904 	BT_DBG("chan %p", chan);
2905 
2906 	l2cap_chan_lock(chan);
2907 
2908 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2909 				     chan->last_acked_seq);
2910 
2911 	if (frames_to_ack)
2912 		l2cap_send_rr_or_rnr(chan, 0);
2913 
2914 	l2cap_chan_unlock(chan);
2915 	l2cap_chan_put(chan);
2916 }
2917 
2918 int l2cap_ertm_init(struct l2cap_chan *chan)
2919 {
2920 	int err;
2921 
2922 	chan->next_tx_seq = 0;
2923 	chan->expected_tx_seq = 0;
2924 	chan->expected_ack_seq = 0;
2925 	chan->unacked_frames = 0;
2926 	chan->buffer_seq = 0;
2927 	chan->frames_sent = 0;
2928 	chan->last_acked_seq = 0;
2929 	chan->sdu = NULL;
2930 	chan->sdu_last_frag = NULL;
2931 	chan->sdu_len = 0;
2932 
2933 	skb_queue_head_init(&chan->tx_q);
2934 
2935 	chan->local_amp_id = 0;
2936 	chan->move_id = 0;
2937 	chan->move_state = L2CAP_MOVE_STABLE;
2938 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
2939 
2940 	if (chan->mode != L2CAP_MODE_ERTM)
2941 		return 0;
2942 
2943 	chan->rx_state = L2CAP_RX_STATE_RECV;
2944 	chan->tx_state = L2CAP_TX_STATE_XMIT;
2945 
2946 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2947 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2948 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2949 
2950 	skb_queue_head_init(&chan->srej_q);
2951 
2952 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2953 	if (err < 0)
2954 		return err;
2955 
2956 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2957 	if (err < 0)
2958 		l2cap_seq_list_free(&chan->srej_list);
2959 
2960 	return err;
2961 }
2962 
2963 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2964 {
2965 	switch (mode) {
2966 	case L2CAP_MODE_STREAMING:
2967 	case L2CAP_MODE_ERTM:
2968 		if (l2cap_mode_supported(mode, remote_feat_mask))
2969 			return mode;
2970 		/* fall through */
2971 	default:
2972 		return L2CAP_MODE_BASIC;
2973 	}
2974 }
2975 
2976 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2977 {
2978 	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2979 }
2980 
2981 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2982 {
2983 	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2984 }
2985 
2986 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
2987 				      struct l2cap_conf_rfc *rfc)
2988 {
2989 	if (chan->local_amp_id && chan->hs_hcon) {
2990 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
2991 
2992 		/* Class 1 devices must have ERTM timeouts
2993 		 * exceeding the Link Supervision Timeout.  The
2994 		 * default Link Supervision Timeout for AMP
2995 		 * controllers is 10 seconds.
2996 		 *
2997 		 * Class 1 devices use 0xffffffff for their
2998 		 * best-effort flush timeout, so the clamping logic
2999 		 * will result in a timeout that meets the above
3000 		 * requirement.  ERTM timeouts are 16-bit values, so
3001 		 * the maximum timeout is 65.535 seconds.
3002 		 */
3003 
3004 		/* Convert timeout to milliseconds and round */
3005 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3006 
3007 		/* This is the recommended formula for class 2 devices
3008 		 * that start ERTM timers when packets are sent to the
3009 		 * controller.
3010 		 */
3011 		ertm_to = 3 * ertm_to + 500;
3012 
3013 		if (ertm_to > 0xffff)
3014 			ertm_to = 0xffff;
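		/* With the class 1 best-effort flush timeout of 0xffffffff us
		 * this evaluates to far more than 0xffff ms, so the clamp
		 * above yields the 65.535 second maximum.
		 */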
3015 
3016 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3017 		rfc->monitor_timeout = rfc->retrans_timeout;
3018 	} else {
3019 		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3020 		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3021 	}
3022 }
3023 
3024 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3025 {
3026 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3027 	    __l2cap_ews_supported(chan)) {
3028 		/* use extended control field */
3029 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3030 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3031 	} else {
3032 		chan->tx_win = min_t(u16, chan->tx_win,
3033 				     L2CAP_DEFAULT_TX_WINDOW);
3034 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3035 	}
3036 	chan->ack_win = chan->tx_win;
3037 }
3038 
3039 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3040 {
3041 	struct l2cap_conf_req *req = data;
3042 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3043 	void *ptr = req->data;
3044 	u16 size;
3045 
3046 	BT_DBG("chan %p", chan);
3047 
3048 	if (chan->num_conf_req || chan->num_conf_rsp)
3049 		goto done;
3050 
3051 	switch (chan->mode) {
3052 	case L2CAP_MODE_STREAMING:
3053 	case L2CAP_MODE_ERTM:
3054 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3055 			break;
3056 
3057 		if (__l2cap_efs_supported(chan))
3058 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3059 
3060 		/* fall through */
3061 	default:
3062 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3063 		break;
3064 	}
3065 
3066 done:
3067 	if (chan->imtu != L2CAP_DEFAULT_MTU)
3068 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3069 
3070 	switch (chan->mode) {
3071 	case L2CAP_MODE_BASIC:
3072 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3073 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3074 			break;
3075 
3076 		rfc.mode            = L2CAP_MODE_BASIC;
3077 		rfc.txwin_size      = 0;
3078 		rfc.max_transmit    = 0;
3079 		rfc.retrans_timeout = 0;
3080 		rfc.monitor_timeout = 0;
3081 		rfc.max_pdu_size    = 0;
3082 
3083 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3084 				   (unsigned long) &rfc);
3085 		break;
3086 
3087 	case L2CAP_MODE_ERTM:
3088 		rfc.mode            = L2CAP_MODE_ERTM;
3089 		rfc.max_transmit    = chan->max_tx;
3090 
3091 		__l2cap_set_ertm_timeouts(chan, &rfc);
3092 
3093 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3094 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3095 			     L2CAP_FCS_SIZE);
3096 		rfc.max_pdu_size = cpu_to_le16(size);
3097 
3098 		l2cap_txwin_setup(chan);
3099 
3100 		rfc.txwin_size = min_t(u16, chan->tx_win,
3101 				       L2CAP_DEFAULT_TX_WINDOW);
3102 
3103 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3104 				   (unsigned long) &rfc);
3105 
3106 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3107 			l2cap_add_opt_efs(&ptr, chan);
3108 
3109 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3110 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3111 					   chan->tx_win);
3112 
3113 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3114 			if (chan->fcs == L2CAP_FCS_NONE ||
3115 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3116 				chan->fcs = L2CAP_FCS_NONE;
3117 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3118 						   chan->fcs);
3119 			}
3120 		break;
3121 
3122 	case L2CAP_MODE_STREAMING:
3123 		l2cap_txwin_setup(chan);
3124 		rfc.mode            = L2CAP_MODE_STREAMING;
3125 		rfc.txwin_size      = 0;
3126 		rfc.max_transmit    = 0;
3127 		rfc.retrans_timeout = 0;
3128 		rfc.monitor_timeout = 0;
3129 
3130 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3131 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3132 			     L2CAP_FCS_SIZE);
3133 		rfc.max_pdu_size = cpu_to_le16(size);
3134 
3135 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3136 				   (unsigned long) &rfc);
3137 
3138 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3139 			l2cap_add_opt_efs(&ptr, chan);
3140 
3141 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3142 			if (chan->fcs == L2CAP_FCS_NONE ||
3143 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3144 				chan->fcs = L2CAP_FCS_NONE;
3145 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3146 						   chan->fcs);
3147 			}
3148 		break;
3149 	}
3150 
3151 	req->dcid  = cpu_to_le16(chan->dcid);
3152 	req->flags = __constant_cpu_to_le16(0);
3153 
3154 	return ptr - data;
3155 }
3156 
3157 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3158 {
3159 	struct l2cap_conf_rsp *rsp = data;
3160 	void *ptr = rsp->data;
3161 	void *req = chan->conf_req;
3162 	int len = chan->conf_len;
3163 	int type, hint, olen;
3164 	unsigned long val;
3165 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3166 	struct l2cap_conf_efs efs;
3167 	u8 remote_efs = 0;
3168 	u16 mtu = L2CAP_DEFAULT_MTU;
3169 	u16 result = L2CAP_CONF_SUCCESS;
3170 	u16 size;
3171 
3172 	BT_DBG("chan %p", chan);
3173 
3174 	while (len >= L2CAP_CONF_OPT_SIZE) {
3175 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3176 
3177 		hint  = type & L2CAP_CONF_HINT;
3178 		type &= L2CAP_CONF_MASK;
3179 
3180 		switch (type) {
3181 		case L2CAP_CONF_MTU:
3182 			mtu = val;
3183 			break;
3184 
3185 		case L2CAP_CONF_FLUSH_TO:
3186 			chan->flush_to = val;
3187 			break;
3188 
3189 		case L2CAP_CONF_QOS:
3190 			break;
3191 
3192 		case L2CAP_CONF_RFC:
3193 			if (olen == sizeof(rfc))
3194 				memcpy(&rfc, (void *) val, olen);
3195 			break;
3196 
3197 		case L2CAP_CONF_FCS:
3198 			if (val == L2CAP_FCS_NONE)
3199 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3200 			break;
3201 
3202 		case L2CAP_CONF_EFS:
3203 			remote_efs = 1;
3204 			if (olen == sizeof(efs))
3205 				memcpy(&efs, (void *) val, olen);
3206 			break;
3207 
3208 		case L2CAP_CONF_EWS:
3209 			if (!enable_hs)
3210 				return -ECONNREFUSED;
3211 
3212 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3213 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3214 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3215 			chan->remote_tx_win = val;
3216 			break;
3217 
3218 		default:
3219 			if (hint)
3220 				break;
3221 
3222 			result = L2CAP_CONF_UNKNOWN;
3223 			*((u8 *) ptr++) = type;
3224 			break;
3225 		}
3226 	}
3227 
3228 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3229 		goto done;
3230 
3231 	switch (chan->mode) {
3232 	case L2CAP_MODE_STREAMING:
3233 	case L2CAP_MODE_ERTM:
3234 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3235 			chan->mode = l2cap_select_mode(rfc.mode,
3236 						       chan->conn->feat_mask);
3237 			break;
3238 		}
3239 
3240 		if (remote_efs) {
3241 			if (__l2cap_efs_supported(chan))
3242 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3243 			else
3244 				return -ECONNREFUSED;
3245 		}
3246 
3247 		if (chan->mode != rfc.mode)
3248 			return -ECONNREFUSED;
3249 
3250 		break;
3251 	}
3252 
3253 done:
3254 	if (chan->mode != rfc.mode) {
3255 		result = L2CAP_CONF_UNACCEPT;
3256 		rfc.mode = chan->mode;
3257 
3258 		if (chan->num_conf_rsp == 1)
3259 			return -ECONNREFUSED;
3260 
3261 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3262 				   (unsigned long) &rfc);
3263 	}
3264 
3265 	if (result == L2CAP_CONF_SUCCESS) {
3266 		/* Configure output options and let the other side know
3267 		 * which ones we don't like. */
3268 
3269 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3270 			result = L2CAP_CONF_UNACCEPT;
3271 		else {
3272 			chan->omtu = mtu;
3273 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3274 		}
3275 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3276 
3277 		if (remote_efs) {
3278 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3279 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3280 			    efs.stype != chan->local_stype) {
3281 
3282 				result = L2CAP_CONF_UNACCEPT;
3283 
3284 				if (chan->num_conf_req >= 1)
3285 					return -ECONNREFUSED;
3286 
3287 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3288 						   sizeof(efs),
3289 						   (unsigned long) &efs);
3290 			} else {
3291 				/* Send PENDING Conf Rsp */
3292 				result = L2CAP_CONF_PENDING;
3293 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3294 			}
3295 		}
3296 
3297 		switch (rfc.mode) {
3298 		case L2CAP_MODE_BASIC:
3299 			chan->fcs = L2CAP_FCS_NONE;
3300 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3301 			break;
3302 
3303 		case L2CAP_MODE_ERTM:
3304 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3305 				chan->remote_tx_win = rfc.txwin_size;
3306 			else
3307 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3308 
3309 			chan->remote_max_tx = rfc.max_transmit;
3310 
3311 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3312 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3313 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3314 			rfc.max_pdu_size = cpu_to_le16(size);
3315 			chan->remote_mps = size;
3316 
3317 			__l2cap_set_ertm_timeouts(chan, &rfc);
3318 
3319 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3320 
3321 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3322 					   sizeof(rfc), (unsigned long) &rfc);
3323 
3324 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3325 				chan->remote_id = efs.id;
3326 				chan->remote_stype = efs.stype;
3327 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3328 				chan->remote_flush_to =
3329 					le32_to_cpu(efs.flush_to);
3330 				chan->remote_acc_lat =
3331 					le32_to_cpu(efs.acc_lat);
3332 				chan->remote_sdu_itime =
3333 					le32_to_cpu(efs.sdu_itime);
3334 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3335 						   sizeof(efs),
3336 						   (unsigned long) &efs);
3337 			}
3338 			break;
3339 
3340 		case L2CAP_MODE_STREAMING:
3341 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3342 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3343 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3344 			rfc.max_pdu_size = cpu_to_le16(size);
3345 			chan->remote_mps = size;
3346 
3347 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3348 
3349 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3350 					   (unsigned long) &rfc);
3351 
3352 			break;
3353 
3354 		default:
3355 			result = L2CAP_CONF_UNACCEPT;
3356 
3357 			memset(&rfc, 0, sizeof(rfc));
3358 			rfc.mode = chan->mode;
3359 		}
3360 
3361 		if (result == L2CAP_CONF_SUCCESS)
3362 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3363 	}
3364 	rsp->scid   = cpu_to_le16(chan->dcid);
3365 	rsp->result = cpu_to_le16(result);
3366 	rsp->flags  = __constant_cpu_to_le16(0);
3367 
3368 	return ptr - data;
3369 }
3370 
3371 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3372 				void *data, u16 *result)
3373 {
3374 	struct l2cap_conf_req *req = data;
3375 	void *ptr = req->data;
3376 	int type, olen;
3377 	unsigned long val;
3378 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3379 	struct l2cap_conf_efs efs;
3380 
3381 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3382 
3383 	while (len >= L2CAP_CONF_OPT_SIZE) {
3384 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3385 
3386 		switch (type) {
3387 		case L2CAP_CONF_MTU:
3388 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3389 				*result = L2CAP_CONF_UNACCEPT;
3390 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3391 			} else
3392 				chan->imtu = val;
3393 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3394 			break;
3395 
3396 		case L2CAP_CONF_FLUSH_TO:
3397 			chan->flush_to = val;
3398 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3399 					   2, chan->flush_to);
3400 			break;
3401 
3402 		case L2CAP_CONF_RFC:
3403 			if (olen == sizeof(rfc))
3404 				memcpy(&rfc, (void *)val, olen);
3405 
3406 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3407 			    rfc.mode != chan->mode)
3408 				return -ECONNREFUSED;
3409 
3410 			chan->fcs = 0;
3411 
3412 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3413 					   sizeof(rfc), (unsigned long) &rfc);
3414 			break;
3415 
3416 		case L2CAP_CONF_EWS:
3417 			chan->ack_win = min_t(u16, val, chan->ack_win);
3418 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3419 					   chan->tx_win);
3420 			break;
3421 
3422 		case L2CAP_CONF_EFS:
3423 			if (olen == sizeof(efs))
3424 				memcpy(&efs, (void *)val, olen);
3425 
3426 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3427 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3428 			    efs.stype != chan->local_stype)
3429 				return -ECONNREFUSED;
3430 
3431 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3432 					   (unsigned long) &efs);
3433 			break;
3434 
3435 		case L2CAP_CONF_FCS:
3436 			if (*result == L2CAP_CONF_PENDING)
3437 				if (val == L2CAP_FCS_NONE)
3438 					set_bit(CONF_RECV_NO_FCS,
3439 						&chan->conf_state);
3440 			break;
3441 		}
3442 	}
3443 
3444 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3445 		return -ECONNREFUSED;
3446 
3447 	chan->mode = rfc.mode;
3448 
3449 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3450 		switch (rfc.mode) {
3451 		case L2CAP_MODE_ERTM:
3452 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3453 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3454 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3455 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3456 				chan->ack_win = min_t(u16, chan->ack_win,
3457 						      rfc.txwin_size);
3458 
3459 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3460 				chan->local_msdu = le16_to_cpu(efs.msdu);
3461 				chan->local_sdu_itime =
3462 					le32_to_cpu(efs.sdu_itime);
3463 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3464 				chan->local_flush_to =
3465 					le32_to_cpu(efs.flush_to);
3466 			}
3467 			break;
3468 
3469 		case L2CAP_MODE_STREAMING:
3470 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3471 		}
3472 	}
3473 
3474 	req->dcid   = cpu_to_le16(chan->dcid);
3475 	req->flags  = __constant_cpu_to_le16(0);
3476 
3477 	return ptr - data;
3478 }
3479 
3480 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3481 				u16 result, u16 flags)
3482 {
3483 	struct l2cap_conf_rsp *rsp = data;
3484 	void *ptr = rsp->data;
3485 
3486 	BT_DBG("chan %p", chan);
3487 
3488 	rsp->scid   = cpu_to_le16(chan->dcid);
3489 	rsp->result = cpu_to_le16(result);
3490 	rsp->flags  = cpu_to_le16(flags);
3491 
3492 	return ptr - data;
3493 }
3494 
3495 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3496 {
3497 	struct l2cap_conn_rsp rsp;
3498 	struct l2cap_conn *conn = chan->conn;
3499 	u8 buf[128];
3500 	u8 rsp_code;
3501 
3502 	rsp.scid   = cpu_to_le16(chan->dcid);
3503 	rsp.dcid   = cpu_to_le16(chan->scid);
3504 	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3505 	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3506 
3507 	if (chan->hs_hcon)
3508 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3509 	else
3510 		rsp_code = L2CAP_CONN_RSP;
3511 
3512 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3513 
3514 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3515 
3516 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3517 		return;
3518 
3519 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3520 		       l2cap_build_conf_req(chan, buf), buf);
3521 	chan->num_conf_req++;
3522 }
3523 
3524 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3525 {
3526 	int type, olen;
3527 	unsigned long val;
3528 	/* Use sane default values in case a misbehaving remote device
3529 	 * did not send an RFC or extended window size option.
3530 	 */
3531 	u16 txwin_ext = chan->ack_win;
3532 	struct l2cap_conf_rfc rfc = {
3533 		.mode = chan->mode,
3534 		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3535 		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3536 		.max_pdu_size = cpu_to_le16(chan->imtu),
3537 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3538 	};
3539 
3540 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3541 
3542 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3543 		return;
3544 
3545 	while (len >= L2CAP_CONF_OPT_SIZE) {
3546 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3547 
3548 		switch (type) {
3549 		case L2CAP_CONF_RFC:
3550 			if (olen == sizeof(rfc))
3551 				memcpy(&rfc, (void *)val, olen);
3552 			break;
3553 		case L2CAP_CONF_EWS:
3554 			txwin_ext = val;
3555 			break;
3556 		}
3557 	}
3558 
3559 	switch (rfc.mode) {
3560 	case L2CAP_MODE_ERTM:
3561 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3562 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3563 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
3564 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3565 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3566 		else
3567 			chan->ack_win = min_t(u16, chan->ack_win,
3568 					      rfc.txwin_size);
3569 		break;
3570 	case L2CAP_MODE_STREAMING:
3571 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3572 	}
3573 }
3574 
3575 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3576 				    struct l2cap_cmd_hdr *cmd, u8 *data)
3577 {
3578 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3579 
3580 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3581 		return 0;
3582 
3583 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3584 	    cmd->ident == conn->info_ident) {
3585 		cancel_delayed_work(&conn->info_timer);
3586 
3587 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3588 		conn->info_ident = 0;
3589 
3590 		l2cap_conn_start(conn);
3591 	}
3592 
3593 	return 0;
3594 }
3595 
3596 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3597 					struct l2cap_cmd_hdr *cmd,
3598 					u8 *data, u8 rsp_code, u8 amp_id)
3599 {
3600 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3601 	struct l2cap_conn_rsp rsp;
3602 	struct l2cap_chan *chan = NULL, *pchan;
3603 	struct sock *parent, *sk = NULL;
3604 	int result, status = L2CAP_CS_NO_INFO;
3605 
3606 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3607 	__le16 psm = req->psm;
3608 
3609 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3610 
3611 	/* Check if we have a socket listening on this psm */
3612 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3613 	if (!pchan) {
3614 		result = L2CAP_CR_BAD_PSM;
3615 		goto sendresp;
3616 	}
3617 
3618 	parent = pchan->sk;
3619 
3620 	mutex_lock(&conn->chan_lock);
3621 	lock_sock(parent);
3622 
3623 	/* Check if the ACL is secure enough (if not SDP) */
3624 	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3625 	    !hci_conn_check_link_mode(conn->hcon)) {
3626 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3627 		result = L2CAP_CR_SEC_BLOCK;
3628 		goto response;
3629 	}
3630 
3631 	result = L2CAP_CR_NO_MEM;
3632 
3633 	/* Check if we already have a channel with that dcid */
3634 	if (__l2cap_get_chan_by_dcid(conn, scid))
3635 		goto response;
3636 
3637 	chan = pchan->ops->new_connection(pchan);
3638 	if (!chan)
3639 		goto response;
3640 
3641 	sk = chan->sk;
3642 
3643 	hci_conn_hold(conn->hcon);
3644 
3645 	bacpy(&bt_sk(sk)->src, conn->src);
3646 	bacpy(&bt_sk(sk)->dst, conn->dst);
3647 	chan->psm  = psm;
3648 	chan->dcid = scid;
3649 	chan->local_amp_id = amp_id;
3650 
3651 	__l2cap_chan_add(conn, chan);
3652 
3653 	dcid = chan->scid;
3654 
3655 	__set_chan_timer(chan, sk->sk_sndtimeo);
3656 
3657 	chan->ident = cmd->ident;
3658 
3659 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3660 		if (l2cap_chan_check_security(chan)) {
3661 			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3662 				__l2cap_state_change(chan, BT_CONNECT2);
3663 				result = L2CAP_CR_PEND;
3664 				status = L2CAP_CS_AUTHOR_PEND;
3665 				chan->ops->defer(chan);
3666 			} else {
3667 				/* Force pending result for AMP controllers.
3668 				 * The connection will succeed after the
3669 				 * physical link is up.
3670 				 */
3671 				if (amp_id) {
3672 					__l2cap_state_change(chan, BT_CONNECT2);
3673 					result = L2CAP_CR_PEND;
3674 				} else {
3675 					__l2cap_state_change(chan, BT_CONFIG);
3676 					result = L2CAP_CR_SUCCESS;
3677 				}
3678 				status = L2CAP_CS_NO_INFO;
3679 			}
3680 		} else {
3681 			__l2cap_state_change(chan, BT_CONNECT2);
3682 			result = L2CAP_CR_PEND;
3683 			status = L2CAP_CS_AUTHEN_PEND;
3684 		}
3685 	} else {
3686 		__l2cap_state_change(chan, BT_CONNECT2);
3687 		result = L2CAP_CR_PEND;
3688 		status = L2CAP_CS_NO_INFO;
3689 	}
3690 
3691 response:
3692 	release_sock(parent);
3693 	mutex_unlock(&conn->chan_lock);
3694 
3695 sendresp:
3696 	rsp.scid   = cpu_to_le16(scid);
3697 	rsp.dcid   = cpu_to_le16(dcid);
3698 	rsp.result = cpu_to_le16(result);
3699 	rsp.status = cpu_to_le16(status);
3700 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3701 
3702 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3703 		struct l2cap_info_req info;
3704 		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3705 
3706 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3707 		conn->info_ident = l2cap_get_ident(conn);
3708 
3709 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3710 
3711 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3712 			       sizeof(info), &info);
3713 	}
3714 
3715 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3716 	    result == L2CAP_CR_SUCCESS) {
3717 		u8 buf[128];
3718 		set_bit(CONF_REQ_SENT, &chan->conf_state);
3719 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3720 			       l2cap_build_conf_req(chan, buf), buf);
3721 		chan->num_conf_req++;
3722 	}
3723 
3724 	return chan;
3725 }
3726 
3727 static int l2cap_connect_req(struct l2cap_conn *conn,
3728 			     struct l2cap_cmd_hdr *cmd, u8 *data)
3729 {
3730 	struct hci_dev *hdev = conn->hcon->hdev;
3731 	struct hci_conn *hcon = conn->hcon;
3732 
3733 	hci_dev_lock(hdev);
3734 	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3735 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3736 		mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3737 				      hcon->dst_type, 0, NULL, 0,
3738 				      hcon->dev_class);
3739 	hci_dev_unlock(hdev);
3740 
3741 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3742 	return 0;
3743 }
3744 
3745 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3746 				    struct l2cap_cmd_hdr *cmd, u8 *data)
3747 {
3748 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3749 	u16 scid, dcid, result, status;
3750 	struct l2cap_chan *chan;
3751 	u8 req[128];
3752 	int err;
3753 
3754 	scid   = __le16_to_cpu(rsp->scid);
3755 	dcid   = __le16_to_cpu(rsp->dcid);
3756 	result = __le16_to_cpu(rsp->result);
3757 	status = __le16_to_cpu(rsp->status);
3758 
3759 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3760 	       dcid, scid, result, status);
3761 
3762 	mutex_lock(&conn->chan_lock);
3763 
3764 	if (scid) {
3765 		chan = __l2cap_get_chan_by_scid(conn, scid);
3766 		if (!chan) {
3767 			err = -EFAULT;
3768 			goto unlock;
3769 		}
3770 	} else {
3771 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3772 		if (!chan) {
3773 			err = -EFAULT;
3774 			goto unlock;
3775 		}
3776 	}
3777 
3778 	err = 0;
3779 
3780 	l2cap_chan_lock(chan);
3781 
3782 	switch (result) {
3783 	case L2CAP_CR_SUCCESS:
3784 		l2cap_state_change(chan, BT_CONFIG);
3785 		chan->ident = 0;
3786 		chan->dcid = dcid;
3787 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3788 
3789 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3790 			break;
3791 
3792 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3793 			       l2cap_build_conf_req(chan, req), req);
3794 		chan->num_conf_req++;
3795 		break;
3796 
3797 	case L2CAP_CR_PEND:
3798 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3799 		break;
3800 
3801 	default:
3802 		l2cap_chan_del(chan, ECONNREFUSED);
3803 		break;
3804 	}
3805 
3806 	l2cap_chan_unlock(chan);
3807 
3808 unlock:
3809 	mutex_unlock(&conn->chan_lock);
3810 
3811 	return err;
3812 }
3813 
3814 static inline void set_default_fcs(struct l2cap_chan *chan)
3815 {
3816 	/* FCS is enabled only in ERTM or streaming mode, if one or both
3817 	 * sides request it.
3818 	 */
3819 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3820 		chan->fcs = L2CAP_FCS_NONE;
3821 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3822 		chan->fcs = L2CAP_FCS_CRC16;
3823 }
3824 
3825 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3826 				    u8 ident, u16 flags)
3827 {
3828 	struct l2cap_conn *conn = chan->conn;
3829 
3830 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3831 	       flags);
3832 
3833 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3834 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3835 
3836 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3837 		       l2cap_build_conf_rsp(chan, data,
3838 					    L2CAP_CONF_SUCCESS, flags), data);
3839 }
3840 
3841 static inline int l2cap_config_req(struct l2cap_conn *conn,
3842 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3843 				   u8 *data)
3844 {
3845 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3846 	u16 dcid, flags;
3847 	u8 rsp[64];
3848 	struct l2cap_chan *chan;
3849 	int len, err = 0;
3850 
3851 	dcid  = __le16_to_cpu(req->dcid);
3852 	flags = __le16_to_cpu(req->flags);
3853 
3854 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3855 
3856 	chan = l2cap_get_chan_by_scid(conn, dcid);
3857 	if (!chan)
3858 		return -ENOENT;
3859 
3860 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3861 		struct l2cap_cmd_rej_cid rej;
3862 
3863 		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3864 		rej.scid = cpu_to_le16(chan->scid);
3865 		rej.dcid = cpu_to_le16(chan->dcid);
3866 
3867 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3868 			       sizeof(rej), &rej);
3869 		goto unlock;
3870 	}
3871 
3872 	/* Reject if config buffer is too small. */
3873 	len = cmd_len - sizeof(*req);
3874 	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3875 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3876 			       l2cap_build_conf_rsp(chan, rsp,
3877 			       L2CAP_CONF_REJECT, flags), rsp);
3878 		goto unlock;
3879 	}
3880 
3881 	/* Store config. */
3882 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
3883 	chan->conf_len += len;
3884 
3885 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3886 		/* Incomplete config. Send empty response. */
3887 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3888 			       l2cap_build_conf_rsp(chan, rsp,
3889 			       L2CAP_CONF_SUCCESS, flags), rsp);
3890 		goto unlock;
3891 	}
3892 
3893 	/* Complete config. */
3894 	len = l2cap_parse_conf_req(chan, rsp);
3895 	if (len < 0) {
3896 		l2cap_send_disconn_req(chan, ECONNRESET);
3897 		goto unlock;
3898 	}
3899 
3900 	chan->ident = cmd->ident;
3901 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3902 	chan->num_conf_rsp++;
3903 
3904 	/* Reset config buffer. */
3905 	chan->conf_len = 0;
3906 
3907 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
3908 		goto unlock;
3909 
3910 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3911 		set_default_fcs(chan);
3912 
3913 		if (chan->mode == L2CAP_MODE_ERTM ||
3914 		    chan->mode == L2CAP_MODE_STREAMING)
3915 			err = l2cap_ertm_init(chan);
3916 
3917 		if (err < 0)
3918 			l2cap_send_disconn_req(chan, -err);
3919 		else
3920 			l2cap_chan_ready(chan);
3921 
3922 		goto unlock;
3923 	}
3924 
3925 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3926 		u8 buf[64];
3927 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3928 			       l2cap_build_conf_req(chan, buf), buf);
3929 		chan->num_conf_req++;
3930 	}
3931 
3932 	/* Got Conf Rsp PENDING from the remote side and assume we sent
3933 	   Conf Rsp PENDING in the code above */
3934 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3935 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3936 
3937 		/* check compatibility */
3938 
3939 		/* Send rsp for BR/EDR channel */
3940 		if (!chan->hs_hcon)
3941 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
3942 		else
3943 			chan->ident = cmd->ident;
3944 	}
3945 
3946 unlock:
3947 	l2cap_chan_unlock(chan);
3948 	return err;
3949 }
3950 
3951 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
3952 				   struct l2cap_cmd_hdr *cmd, u8 *data)
3953 {
3954 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3955 	u16 scid, flags, result;
3956 	struct l2cap_chan *chan;
3957 	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3958 	int err = 0;
3959 
3960 	scid   = __le16_to_cpu(rsp->scid);
3961 	flags  = __le16_to_cpu(rsp->flags);
3962 	result = __le16_to_cpu(rsp->result);
3963 
3964 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3965 	       result, len);
3966 
3967 	chan = l2cap_get_chan_by_scid(conn, scid);
3968 	if (!chan)
3969 		return 0;
3970 
3971 	switch (result) {
3972 	case L2CAP_CONF_SUCCESS:
3973 		l2cap_conf_rfc_get(chan, rsp->data, len);
3974 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3975 		break;
3976 
3977 	case L2CAP_CONF_PENDING:
3978 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3979 
3980 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3981 			char buf[64];
3982 
3983 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3984 						   buf, &result);
3985 			if (len < 0) {
3986 				l2cap_send_disconn_req(chan, ECONNRESET);
3987 				goto done;
3988 			}
3989 
3990 			if (!chan->hs_hcon) {
3991 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
3992 							0);
3993 			} else {
3994 				if (l2cap_check_efs(chan)) {
3995 					amp_create_logical_link(chan);
3996 					chan->ident = cmd->ident;
3997 				}
3998 			}
3999 		}
4000 		goto done;
4001 
4002 	case L2CAP_CONF_UNACCEPT:
4003 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4004 			char req[64];
4005 
4006 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4007 				l2cap_send_disconn_req(chan, ECONNRESET);
4008 				goto done;
4009 			}
4010 
4011 			/* throw out any old stored conf requests */
4012 			result = L2CAP_CONF_SUCCESS;
4013 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4014 						   req, &result);
4015 			if (len < 0) {
4016 				l2cap_send_disconn_req(chan, ECONNRESET);
4017 				goto done;
4018 			}
4019 
4020 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4021 				       L2CAP_CONF_REQ, len, req);
4022 			chan->num_conf_req++;
4023 			if (result != L2CAP_CONF_SUCCESS)
4024 				goto done;
4025 			break;
4026 		}
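		/* Fall through when the maximum number of Configure
		 * Responses has been exceeded - treat it as a failure.
		 */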
4027 
4028 	default:
4029 		l2cap_chan_set_err(chan, ECONNRESET);
4030 
4031 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4032 		l2cap_send_disconn_req(chan, ECONNRESET);
4033 		goto done;
4034 	}
4035 
4036 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4037 		goto done;
4038 
4039 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4040 
4041 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4042 		set_default_fcs(chan);
4043 
4044 		if (chan->mode == L2CAP_MODE_ERTM ||
4045 		    chan->mode == L2CAP_MODE_STREAMING)
4046 			err = l2cap_ertm_init(chan);
4047 
4048 		if (err < 0)
4049 			l2cap_send_disconn_req(chan, -err);
4050 		else
4051 			l2cap_chan_ready(chan);
4052 	}
4053 
4054 done:
4055 	l2cap_chan_unlock(chan);
4056 	return err;
4057 }
4058 
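/* Handle a Disconnection Request: acknowledge it with a Disconnection
 * Response, shut down the owning socket and tear the channel down.
 */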
4059 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4060 				       struct l2cap_cmd_hdr *cmd, u8 *data)
4061 {
4062 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4063 	struct l2cap_disconn_rsp rsp;
4064 	u16 dcid, scid;
4065 	struct l2cap_chan *chan;
4066 	struct sock *sk;
4067 
4068 	scid = __le16_to_cpu(req->scid);
4069 	dcid = __le16_to_cpu(req->dcid);
4070 
4071 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4072 
4073 	mutex_lock(&conn->chan_lock);
4074 
4075 	chan = __l2cap_get_chan_by_scid(conn, dcid);
4076 	if (!chan) {
4077 		mutex_unlock(&conn->chan_lock);
4078 		return 0;
4079 	}
4080 
4081 	l2cap_chan_lock(chan);
4082 
4083 	sk = chan->sk;
4084 
4085 	rsp.dcid = cpu_to_le16(chan->scid);
4086 	rsp.scid = cpu_to_le16(chan->dcid);
4087 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4088 
4089 	lock_sock(sk);
4090 	sk->sk_shutdown = SHUTDOWN_MASK;
4091 	release_sock(sk);
4092 
4093 	l2cap_chan_hold(chan);
4094 	l2cap_chan_del(chan, ECONNRESET);
4095 
4096 	l2cap_chan_unlock(chan);
4097 
4098 	chan->ops->close(chan);
4099 	l2cap_chan_put(chan);
4100 
4101 	mutex_unlock(&conn->chan_lock);
4102 
4103 	return 0;
4104 }
4105 
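/* Handle a Disconnection Response: the remote side has acknowledged our
 * Disconnection Request, so the channel can now be removed.
 */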
4106 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4107 				       struct l2cap_cmd_hdr *cmd, u8 *data)
4108 {
4109 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4110 	u16 dcid, scid;
4111 	struct l2cap_chan *chan;
4112 
4113 	scid = __le16_to_cpu(rsp->scid);
4114 	dcid = __le16_to_cpu(rsp->dcid);
4115 
4116 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4117 
4118 	mutex_lock(&conn->chan_lock);
4119 
4120 	chan = __l2cap_get_chan_by_scid(conn, scid);
4121 	if (!chan) {
4122 		mutex_unlock(&conn->chan_lock);
4123 		return 0;
4124 	}
4125 
4126 	l2cap_chan_lock(chan);
4127 
4128 	l2cap_chan_hold(chan);
4129 	l2cap_chan_del(chan, 0);
4130 
4131 	l2cap_chan_unlock(chan);
4132 
4133 	chan->ops->close(chan);
4134 	l2cap_chan_put(chan);
4135 
4136 	mutex_unlock(&conn->chan_lock);
4137 
4138 	return 0;
4139 }
4140 
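/* Answer an Information Request.  Only the feature mask and the fixed
 * channel map are supported; any other type gets a "not supported" reply.
 */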
4141 static inline int l2cap_information_req(struct l2cap_conn *conn,
4142 					struct l2cap_cmd_hdr *cmd, u8 *data)
4143 {
4144 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4145 	u16 type;
4146 
4147 	type = __le16_to_cpu(req->type);
4148 
4149 	BT_DBG("type 0x%4.4x", type);
4150 
4151 	if (type == L2CAP_IT_FEAT_MASK) {
4152 		u8 buf[8];
4153 		u32 feat_mask = l2cap_feat_mask;
4154 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4155 		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4156 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4157 		if (!disable_ertm)
4158 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4159 				| L2CAP_FEAT_FCS;
4160 		if (enable_hs)
4161 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4162 				| L2CAP_FEAT_EXT_WINDOW;
4163 
4164 		put_unaligned_le32(feat_mask, rsp->data);
4165 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4166 			       buf);
4167 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4168 		u8 buf[12];
4169 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4170 
4171 		if (enable_hs)
4172 			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4173 		else
4174 			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4175 
4176 		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4177 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4178 		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4179 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4180 			       buf);
4181 	} else {
4182 		struct l2cap_info_rsp rsp;
4183 		rsp.type   = cpu_to_le16(type);
4184 		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4185 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4186 			       &rsp);
4187 	}
4188 
4189 	return 0;
4190 }
4191 
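/* Process an Information Response.  Once the feature mask (and, when
 * advertised, the fixed channel map) is known, pending connections on
 * this link are started.
 */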
4192 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4193 					struct l2cap_cmd_hdr *cmd, u8 *data)
4194 {
4195 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4196 	u16 type, result;
4197 
4198 	type   = __le16_to_cpu(rsp->type);
4199 	result = __le16_to_cpu(rsp->result);
4200 
4201 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4202 
4203 	/* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
4204 	if (cmd->ident != conn->info_ident ||
4205 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4206 		return 0;
4207 
4208 	cancel_delayed_work(&conn->info_timer);
4209 
4210 	if (result != L2CAP_IR_SUCCESS) {
4211 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4212 		conn->info_ident = 0;
4213 
4214 		l2cap_conn_start(conn);
4215 
4216 		return 0;
4217 	}
4218 
4219 	switch (type) {
4220 	case L2CAP_IT_FEAT_MASK:
4221 		conn->feat_mask = get_unaligned_le32(rsp->data);
4222 
4223 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4224 			struct l2cap_info_req req;
4225 			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4226 
4227 			conn->info_ident = l2cap_get_ident(conn);
4228 
4229 			l2cap_send_cmd(conn, conn->info_ident,
4230 				       L2CAP_INFO_REQ, sizeof(req), &req);
4231 		} else {
4232 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4233 			conn->info_ident = 0;
4234 
4235 			l2cap_conn_start(conn);
4236 		}
4237 		break;
4238 
4239 	case L2CAP_IT_FIXED_CHAN:
4240 		conn->fixed_chan_mask = rsp->data[0];
4241 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4242 		conn->info_ident = 0;
4243 
4244 		l2cap_conn_start(conn);
4245 		break;
4246 	}
4247 
4248 	return 0;
4249 }
4250 
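/* Handle an AMP Create Channel Request.  Controller id 0 falls back to a
 * plain BR/EDR connect; otherwise the AMP controller is validated and the
 * new channel is associated with the high-speed link.
 */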
4251 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4252 				    struct l2cap_cmd_hdr *cmd,
4253 				    u16 cmd_len, void *data)
4254 {
4255 	struct l2cap_create_chan_req *req = data;
4256 	struct l2cap_create_chan_rsp rsp;
4257 	struct l2cap_chan *chan;
4258 	struct hci_dev *hdev;
4259 	u16 psm, scid;
4260 
4261 	if (cmd_len != sizeof(*req))
4262 		return -EPROTO;
4263 
4264 	if (!enable_hs)
4265 		return -EINVAL;
4266 
4267 	psm = le16_to_cpu(req->psm);
4268 	scid = le16_to_cpu(req->scid);
4269 
4270 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4271 
4272 	/* For controller id 0 make BR/EDR connection */
4273 	if (req->amp_id == HCI_BREDR_ID) {
4274 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4275 			      req->amp_id);
4276 		return 0;
4277 	}
4278 
4279 	/* Validate AMP controller id */
4280 	hdev = hci_dev_get(req->amp_id);
4281 	if (!hdev)
4282 		goto error;
4283 
4284 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4285 		hci_dev_put(hdev);
4286 		goto error;
4287 	}
4288 
4289 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4290 			     req->amp_id);
4291 	if (chan) {
4292 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4293 		struct hci_conn *hs_hcon;
4294 
4295 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
4296 		if (!hs_hcon) {
4297 			hci_dev_put(hdev);
4298 			return -EFAULT;
4299 		}
4300 
4301 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4302 
4303 		mgr->bredr_chan = chan;
4304 		chan->hs_hcon = hs_hcon;
4305 		chan->fcs = L2CAP_FCS_NONE;
4306 		conn->mtu = hdev->block_mtu;
4307 	}
4308 
4309 	hci_dev_put(hdev);
4310 
4311 	return 0;
4312 
4313 error:
4314 	rsp.dcid = 0;
4315 	rsp.scid = cpu_to_le16(scid);
4316 	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4317 	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4318 
4319 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4320 		       sizeof(rsp), &rsp);
4321 
4322 	return -EFAULT;
4323 }
4324 
4325 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4326 {
4327 	struct l2cap_move_chan_req req;
4328 	u8 ident;
4329 
4330 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4331 
4332 	ident = l2cap_get_ident(chan->conn);
4333 	chan->ident = ident;
4334 
4335 	req.icid = cpu_to_le16(chan->scid);
4336 	req.dest_amp_id = dest_amp_id;
4337 
4338 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4339 		       &req);
4340 
4341 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4342 }
4343 
4344 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4345 {
4346 	struct l2cap_move_chan_rsp rsp;
4347 
4348 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4349 
4350 	rsp.icid = cpu_to_le16(chan->dcid);
4351 	rsp.result = cpu_to_le16(result);
4352 
4353 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4354 		       sizeof(rsp), &rsp);
4355 }
4356 
4357 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4358 {
4359 	struct l2cap_move_chan_cfm cfm;
4360 
4361 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4362 
4363 	chan->ident = l2cap_get_ident(chan->conn);
4364 
4365 	cfm.icid = cpu_to_le16(chan->scid);
4366 	cfm.result = cpu_to_le16(result);
4367 
4368 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4369 		       sizeof(cfm), &cfm);
4370 
4371 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4372 }
4373 
4374 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4375 {
4376 	struct l2cap_move_chan_cfm cfm;
4377 
4378 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4379 
4380 	cfm.icid = cpu_to_le16(icid);
4381 	cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4382 
4383 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4384 		       sizeof(cfm), &cfm);
4385 }
4386 
4387 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4388 					 u16 icid)
4389 {
4390 	struct l2cap_move_chan_cfm_rsp rsp;
4391 
4392 	BT_DBG("icid 0x%4.4x", icid);
4393 
4394 	rsp.icid = cpu_to_le16(icid);
4395 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4396 }
4397 
4398 static void __release_logical_link(struct l2cap_chan *chan)
4399 {
4400 	chan->hs_hchan = NULL;
4401 	chan->hs_hcon = NULL;
4402 
4403 	/* Placeholder - release the logical link */
4404 }
4405 
4406 static void l2cap_logical_fail(struct l2cap_chan *chan)
4407 {
4408 	/* Logical link setup failed */
4409 	if (chan->state != BT_CONNECTED) {
4410 		/* Create channel failure, disconnect */
4411 		l2cap_send_disconn_req(chan, ECONNRESET);
4412 		return;
4413 	}
4414 
4415 	switch (chan->move_role) {
4416 	case L2CAP_MOVE_ROLE_RESPONDER:
4417 		l2cap_move_done(chan);
4418 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4419 		break;
4420 	case L2CAP_MOVE_ROLE_INITIATOR:
4421 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4422 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4423 			/* Remote has only sent pending or
4424 			 * success responses, clean up
4425 			 */
4426 			l2cap_move_done(chan);
4427 		}
4428 
4429 		/* Other amp move states imply that the move
4430 		 * has already aborted
4431 		 */
4432 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4433 		break;
4434 	}
4435 }
4436 
4437 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4438 					struct hci_chan *hchan)
4439 {
4440 	struct l2cap_conf_rsp rsp;
4441 
4442 	chan->hs_hchan = hchan;
4443 	chan->hs_hcon->l2cap_data = chan->conn;
4444 
4445 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4446 
4447 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4448 		int err;
4449 
4450 		set_default_fcs(chan);
4451 
4452 		err = l2cap_ertm_init(chan);
4453 		if (err < 0)
4454 			l2cap_send_disconn_req(chan, -err);
4455 		else
4456 			l2cap_chan_ready(chan);
4457 	}
4458 }
4459 
4460 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4461 				      struct hci_chan *hchan)
4462 {
4463 	chan->hs_hcon = hchan->conn;
4464 	chan->hs_hcon->l2cap_data = chan->conn;
4465 
4466 	BT_DBG("move_state %d", chan->move_state);
4467 
4468 	switch (chan->move_state) {
4469 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4470 		/* Move confirm will be sent after a success
4471 		 * response is received
4472 		 */
4473 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4474 		break;
4475 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4476 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4477 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4478 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4479 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4480 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4481 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4482 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4483 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4484 		}
4485 		break;
4486 	default:
4487 		/* Move was not in expected state, free the channel */
4488 		__release_logical_link(chan);
4489 
4490 		chan->move_state = L2CAP_MOVE_STABLE;
4491 	}
4492 }
4493 
4494 /* Call with chan locked */
4495 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4496 		       u8 status)
4497 {
4498 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4499 
4500 	if (status) {
4501 		l2cap_logical_fail(chan);
4502 		__release_logical_link(chan);
4503 		return;
4504 	}
4505 
4506 	if (chan->state != BT_CONNECTED) {
4507 		/* Ignore logical link if channel is on BR/EDR */
4508 		if (chan->local_amp_id)
4509 			l2cap_logical_finish_create(chan, hchan);
4510 	} else {
4511 		l2cap_logical_finish_move(chan, hchan);
4512 	}
4513 }
4514 
4515 void l2cap_move_start(struct l2cap_chan *chan)
4516 {
4517 	BT_DBG("chan %p", chan);
4518 
4519 	if (chan->local_amp_id == HCI_BREDR_ID) {
4520 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4521 			return;
4522 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4523 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4524 		/* Placeholder - start physical link setup */
4525 	} else {
4526 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4527 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4528 		chan->move_id = 0;
4529 		l2cap_move_setup(chan);
4530 		l2cap_send_move_chan_req(chan, 0);
4531 	}
4532 }
4533 
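/* Physical link setup has completed for a channel being created on an AMP
 * controller.  For an outgoing channel send the Create Channel Request (or
 * fall back to a BR/EDR connect); for an incoming channel send the Create
 * Channel Response and start configuration.
 */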
4534 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4535 			    u8 local_amp_id, u8 remote_amp_id)
4536 {
4537 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4538 	       local_amp_id, remote_amp_id);
4539 
4540 	chan->fcs = L2CAP_FCS_NONE;
4541 
4542 	/* Outgoing channel on AMP */
4543 	if (chan->state == BT_CONNECT) {
4544 		if (result == L2CAP_CR_SUCCESS) {
4545 			chan->local_amp_id = local_amp_id;
4546 			l2cap_send_create_chan_req(chan, remote_amp_id);
4547 		} else {
4548 			/* Revert to BR/EDR connect */
4549 			l2cap_send_conn_req(chan);
4550 		}
4551 
4552 		return;
4553 	}
4554 
4555 	/* Incoming channel on AMP */
4556 	if (__l2cap_no_conn_pending(chan)) {
4557 		struct l2cap_conn_rsp rsp;
4558 		char buf[128];
4559 		rsp.scid = cpu_to_le16(chan->dcid);
4560 		rsp.dcid = cpu_to_le16(chan->scid);
4561 
4562 		if (result == L2CAP_CR_SUCCESS) {
4563 			/* Send successful response */
4564 			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4565 			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4566 		} else {
4567 			/* Send negative response */
4568 			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4569 			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4570 		}
4571 
4572 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4573 			       sizeof(rsp), &rsp);
4574 
4575 		if (result == L2CAP_CR_SUCCESS) {
4576 			__l2cap_state_change(chan, BT_CONFIG);
4577 			set_bit(CONF_REQ_SENT, &chan->conf_state);
4578 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4579 				       L2CAP_CONF_REQ,
4580 				       l2cap_build_conf_req(chan, buf), buf);
4581 			chan->num_conf_req++;
4582 		}
4583 	}
4584 }
4585 
4586 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4587 				   u8 remote_amp_id)
4588 {
4589 	l2cap_move_setup(chan);
4590 	chan->move_id = local_amp_id;
4591 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
4592 
4593 	l2cap_send_move_chan_req(chan, remote_amp_id);
4594 }
4595 
4596 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4597 {
4598 	struct hci_chan *hchan = NULL;
4599 
4600 	/* Placeholder - get hci_chan for logical link */
4601 
4602 	if (hchan) {
4603 		if (hchan->state == BT_CONNECTED) {
4604 			/* Logical link is ready to go */
4605 			chan->hs_hcon = hchan->conn;
4606 			chan->hs_hcon->l2cap_data = chan->conn;
4607 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4608 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4609 
4610 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4611 		} else {
4612 			/* Wait for logical link to be ready */
4613 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4614 		}
4615 	} else {
4616 		/* Logical link not available */
4617 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4618 	}
4619 }
4620 
4621 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4622 {
4623 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4624 		u8 rsp_result;
4625 		if (result == -EINVAL)
4626 			rsp_result = L2CAP_MR_BAD_ID;
4627 		else
4628 			rsp_result = L2CAP_MR_NOT_ALLOWED;
4629 
4630 		l2cap_send_move_chan_rsp(chan, rsp_result);
4631 	}
4632 
4633 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
4634 	chan->move_state = L2CAP_MOVE_STABLE;
4635 
4636 	/* Restart data transmission */
4637 	l2cap_ertm_send(chan);
4638 }
4639 
4640 /* Invoke with locked chan */
4641 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4642 {
4643 	u8 local_amp_id = chan->local_amp_id;
4644 	u8 remote_amp_id = chan->remote_amp_id;
4645 
4646 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4647 	       chan, result, local_amp_id, remote_amp_id);
4648 
4649 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4650 		l2cap_chan_unlock(chan);
4651 		return;
4652 	}
4653 
4654 	if (chan->state != BT_CONNECTED) {
4655 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4656 	} else if (result != L2CAP_MR_SUCCESS) {
4657 		l2cap_do_move_cancel(chan, result);
4658 	} else {
4659 		switch (chan->move_role) {
4660 		case L2CAP_MOVE_ROLE_INITIATOR:
4661 			l2cap_do_move_initiate(chan, local_amp_id,
4662 					       remote_amp_id);
4663 			break;
4664 		case L2CAP_MOVE_ROLE_RESPONDER:
4665 			l2cap_do_move_respond(chan, result);
4666 			break;
4667 		default:
4668 			l2cap_do_move_cancel(chan, result);
4669 			break;
4670 		}
4671 	}
4672 }
4673 
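/* Handle a Move Channel Request.  The request is rejected for channels
 * that cannot move (non-dynamic CIDs, BR/EDR-only policy, basic mode),
 * for bad controller ids and for lost move collisions; otherwise the
 * responder state machine is started.
 */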
4674 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4675 					 struct l2cap_cmd_hdr *cmd,
4676 					 u16 cmd_len, void *data)
4677 {
4678 	struct l2cap_move_chan_req *req = data;
4679 	struct l2cap_move_chan_rsp rsp;
4680 	struct l2cap_chan *chan;
4681 	u16 icid = 0;
4682 	u16 result = L2CAP_MR_NOT_ALLOWED;
4683 
4684 	if (cmd_len != sizeof(*req))
4685 		return -EPROTO;
4686 
4687 	icid = le16_to_cpu(req->icid);
4688 
4689 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4690 
4691 	if (!enable_hs)
4692 		return -EINVAL;
4693 
4694 	chan = l2cap_get_chan_by_dcid(conn, icid);
4695 	if (!chan) {
4696 		rsp.icid = cpu_to_le16(icid);
4697 		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4698 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4699 			       sizeof(rsp), &rsp);
4700 		return 0;
4701 	}
4702 
4703 	chan->ident = cmd->ident;
4704 
4705 	if (chan->scid < L2CAP_CID_DYN_START ||
4706 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4707 	    (chan->mode != L2CAP_MODE_ERTM &&
4708 	     chan->mode != L2CAP_MODE_STREAMING)) {
4709 		result = L2CAP_MR_NOT_ALLOWED;
4710 		goto send_move_response;
4711 	}
4712 
4713 	if (chan->local_amp_id == req->dest_amp_id) {
4714 		result = L2CAP_MR_SAME_ID;
4715 		goto send_move_response;
4716 	}
4717 
4718 	if (req->dest_amp_id) {
4719 		struct hci_dev *hdev;
4720 		hdev = hci_dev_get(req->dest_amp_id);
4721 		if (!hdev || hdev->dev_type != HCI_AMP ||
4722 		    !test_bit(HCI_UP, &hdev->flags)) {
4723 			if (hdev)
4724 				hci_dev_put(hdev);
4725 
4726 			result = L2CAP_MR_BAD_ID;
4727 			goto send_move_response;
4728 		}
4729 		hci_dev_put(hdev);
4730 	}
4731 
4732 	/* Detect a move collision.  Only send a collision response
4733 	 * if this side has "lost", otherwise proceed with the move.
4734 	 * The winner has the larger bd_addr.
4735 	 */
4736 	if ((__chan_is_moving(chan) ||
4737 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4738 	    bacmp(conn->src, conn->dst) > 0) {
4739 		result = L2CAP_MR_COLLISION;
4740 		goto send_move_response;
4741 	}
4742 
4743 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4744 	l2cap_move_setup(chan);
4745 	chan->move_id = req->dest_amp_id;
4746 	icid = chan->dcid;
4747 
4748 	if (!req->dest_amp_id) {
4749 		/* Moving to BR/EDR */
4750 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4751 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4752 			result = L2CAP_MR_PEND;
4753 		} else {
4754 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4755 			result = L2CAP_MR_SUCCESS;
4756 		}
4757 	} else {
4758 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4759 		/* Placeholder - uncomment when amp functions are available */
4760 		/*amp_accept_physical(chan, req->dest_amp_id);*/
4761 		result = L2CAP_MR_PEND;
4762 	}
4763 
4764 send_move_response:
4765 	l2cap_send_move_chan_rsp(chan, result);
4766 
4767 	l2cap_chan_unlock(chan);
4768 
4769 	return 0;
4770 }
4771 
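/* A Move Channel Response with a success or pending result was received;
 * advance the initiator's move state machine accordingly.
 */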
4772 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4773 {
4774 	struct l2cap_chan *chan;
4775 	struct hci_chan *hchan = NULL;
4776 
4777 	chan = l2cap_get_chan_by_scid(conn, icid);
4778 	if (!chan) {
4779 		l2cap_send_move_chan_cfm_icid(conn, icid);
4780 		return;
4781 	}
4782 
4783 	__clear_chan_timer(chan);
4784 	if (result == L2CAP_MR_PEND)
4785 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4786 
4787 	switch (chan->move_state) {
4788 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4789 		/* Move confirm will be sent when logical link
4790 		 * is complete.
4791 		 */
4792 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4793 		break;
4794 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4795 		if (result == L2CAP_MR_PEND) {
4796 			break;
4797 		} else if (test_bit(CONN_LOCAL_BUSY,
4798 				    &chan->conn_state)) {
4799 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4800 		} else {
4801 			/* Logical link is up or moving to BR/EDR,
4802 			 * proceed with move
4803 			 */
4804 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4805 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4806 		}
4807 		break;
4808 	case L2CAP_MOVE_WAIT_RSP:
4809 		/* Moving to AMP */
4810 		if (result == L2CAP_MR_SUCCESS) {
4811 			/* Remote is ready, send confirm immediately
4812 			 * after logical link is ready
4813 			 */
4814 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4815 		} else {
4816 			/* Both logical link and move success
4817 			 * are required to confirm
4818 			 */
4819 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4820 		}
4821 
4822 		/* Placeholder - get hci_chan for logical link */
4823 		if (!hchan) {
4824 			/* Logical link not available */
4825 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4826 			break;
4827 		}
4828 
4829 		/* If the logical link is not yet connected, do not
4830 		 * send confirmation.
4831 		 */
4832 		if (hchan->state != BT_CONNECTED)
4833 			break;
4834 
4835 		/* Logical link is already ready to go */
4836 
4837 		chan->hs_hcon = hchan->conn;
4838 		chan->hs_hcon->l2cap_data = chan->conn;
4839 
4840 		if (result == L2CAP_MR_SUCCESS) {
4841 			/* Can confirm now */
4842 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4843 		} else {
4844 			/* Now only need move success
4845 			 * to confirm
4846 			 */
4847 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4848 		}
4849 
4850 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4851 		break;
4852 	default:
4853 		/* Any other amp move state means the move failed. */
4854 		chan->move_id = chan->local_amp_id;
4855 		l2cap_move_done(chan);
4856 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4857 	}
4858 
4859 	l2cap_chan_unlock(chan);
4860 }
4861 
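/* A Move Channel Response reported failure; clean up or, on a collision,
 * switch to the responder role, then send an unconfirmed Move Confirm.
 */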
4862 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
4863 			    u16 result)
4864 {
4865 	struct l2cap_chan *chan;
4866 
4867 	chan = l2cap_get_chan_by_ident(conn, ident);
4868 	if (!chan) {
4869 		/* Could not locate channel, icid is best guess */
4870 		l2cap_send_move_chan_cfm_icid(conn, icid);
4871 		return;
4872 	}
4873 
4874 	__clear_chan_timer(chan);
4875 
4876 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4877 		if (result == L2CAP_MR_COLLISION) {
4878 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4879 		} else {
4880 			/* Cleanup - cancel move */
4881 			chan->move_id = chan->local_amp_id;
4882 			l2cap_move_done(chan);
4883 		}
4884 	}
4885 
4886 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4887 
4888 	l2cap_chan_unlock(chan);
4889 }
4890 
4891 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4892 				  struct l2cap_cmd_hdr *cmd,
4893 				  u16 cmd_len, void *data)
4894 {
4895 	struct l2cap_move_chan_rsp *rsp = data;
4896 	u16 icid, result;
4897 
4898 	if (cmd_len != sizeof(*rsp))
4899 		return -EPROTO;
4900 
4901 	icid = le16_to_cpu(rsp->icid);
4902 	result = le16_to_cpu(rsp->result);
4903 
4904 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4905 
4906 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
4907 		l2cap_move_continue(conn, icid, result);
4908 	else
4909 		l2cap_move_fail(conn, cmd->ident, icid, result);
4910 
4911 	return 0;
4912 }
4913 
4914 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4915 				      struct l2cap_cmd_hdr *cmd,
4916 				      u16 cmd_len, void *data)
4917 {
4918 	struct l2cap_move_chan_cfm *cfm = data;
4919 	struct l2cap_chan *chan;
4920 	u16 icid, result;
4921 
4922 	if (cmd_len != sizeof(*cfm))
4923 		return -EPROTO;
4924 
4925 	icid = le16_to_cpu(cfm->icid);
4926 	result = le16_to_cpu(cfm->result);
4927 
4928 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4929 
4930 	chan = l2cap_get_chan_by_dcid(conn, icid);
4931 	if (!chan) {
4932 		/* Spec requires a response even if the icid was not found */
4933 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4934 		return 0;
4935 	}
4936 
4937 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
4938 		if (result == L2CAP_MC_CONFIRMED) {
4939 			chan->local_amp_id = chan->move_id;
4940 			if (!chan->local_amp_id)
4941 				__release_logical_link(chan);
4942 		} else {
4943 			chan->move_id = chan->local_amp_id;
4944 		}
4945 
4946 		l2cap_move_done(chan);
4947 	}
4948 
4949 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4950 
4951 	l2cap_chan_unlock(chan);
4952 
4953 	return 0;
4954 }
4955 
4956 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4957 						 struct l2cap_cmd_hdr *cmd,
4958 						 u16 cmd_len, void *data)
4959 {
4960 	struct l2cap_move_chan_cfm_rsp *rsp = data;
4961 	struct l2cap_chan *chan;
4962 	u16 icid;
4963 
4964 	if (cmd_len != sizeof(*rsp))
4965 		return -EPROTO;
4966 
4967 	icid = le16_to_cpu(rsp->icid);
4968 
4969 	BT_DBG("icid 0x%4.4x", icid);
4970 
4971 	chan = l2cap_get_chan_by_scid(conn, icid);
4972 	if (!chan)
4973 		return 0;
4974 
4975 	__clear_chan_timer(chan);
4976 
4977 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
4978 		chan->local_amp_id = chan->move_id;
4979 
4980 		if (!chan->local_amp_id && chan->hs_hchan)
4981 			__release_logical_link(chan);
4982 
4983 		l2cap_move_done(chan);
4984 	}
4985 
4986 	l2cap_chan_unlock(chan);
4987 
4988 	return 0;
4989 }
4990 
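/* Validate the parameters of an LE Connection Parameter Update Request:
 * interval ordering and range, supervision timeout range, and a slave
 * latency small enough for the given interval and timeout.
 */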
4991 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4992 					 u16 to_multiplier)
4993 {
4994 	u16 max_latency;
4995 
4996 	if (min > max || min < 6 || max > 3200)
4997 		return -EINVAL;
4998 
4999 	if (to_multiplier < 10 || to_multiplier > 3200)
5000 		return -EINVAL;
5001 
5002 	if (max >= to_multiplier * 8)
5003 		return -EINVAL;
5004 
5005 	max_latency = (to_multiplier * 8 / max) - 1;
5006 	if (latency > 499 || latency > max_latency)
5007 		return -EINVAL;
5008 
5009 	return 0;
5010 }
5011 
5012 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5013 					      struct l2cap_cmd_hdr *cmd,
5014 					      u8 *data)
5015 {
5016 	struct hci_conn *hcon = conn->hcon;
5017 	struct l2cap_conn_param_update_req *req;
5018 	struct l2cap_conn_param_update_rsp rsp;
5019 	u16 min, max, latency, to_multiplier, cmd_len;
5020 	int err;
5021 
5022 	if (!(hcon->link_mode & HCI_LM_MASTER))
5023 		return -EINVAL;
5024 
5025 	cmd_len = __le16_to_cpu(cmd->len);
5026 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5027 		return -EPROTO;
5028 
5029 	req = (struct l2cap_conn_param_update_req *) data;
5030 	min		= __le16_to_cpu(req->min);
5031 	max		= __le16_to_cpu(req->max);
5032 	latency		= __le16_to_cpu(req->latency);
5033 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5034 
5035 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5036 	       min, max, latency, to_multiplier);
5037 
5038 	memset(&rsp, 0, sizeof(rsp));
5039 
5040 	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5041 	if (err)
5042 		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5043 	else
5044 		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5045 
5046 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5047 		       sizeof(rsp), &rsp);
5048 
5049 	if (!err)
5050 		hci_le_conn_update(hcon, min, max, latency, to_multiplier);
5051 
5052 	return 0;
5053 }
5054 
5055 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5056 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5057 				      u8 *data)
5058 {
5059 	int err = 0;
5060 
5061 	switch (cmd->code) {
5062 	case L2CAP_COMMAND_REJ:
5063 		l2cap_command_rej(conn, cmd, data);
5064 		break;
5065 
5066 	case L2CAP_CONN_REQ:
5067 		err = l2cap_connect_req(conn, cmd, data);
5068 		break;
5069 
5070 	case L2CAP_CONN_RSP:
5071 	case L2CAP_CREATE_CHAN_RSP:
5072 		err = l2cap_connect_create_rsp(conn, cmd, data);
5073 		break;
5074 
5075 	case L2CAP_CONF_REQ:
5076 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5077 		break;
5078 
5079 	case L2CAP_CONF_RSP:
5080 		err = l2cap_config_rsp(conn, cmd, data);
5081 		break;
5082 
5083 	case L2CAP_DISCONN_REQ:
5084 		err = l2cap_disconnect_req(conn, cmd, data);
5085 		break;
5086 
5087 	case L2CAP_DISCONN_RSP:
5088 		err = l2cap_disconnect_rsp(conn, cmd, data);
5089 		break;
5090 
5091 	case L2CAP_ECHO_REQ:
5092 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5093 		break;
5094 
5095 	case L2CAP_ECHO_RSP:
5096 		break;
5097 
5098 	case L2CAP_INFO_REQ:
5099 		err = l2cap_information_req(conn, cmd, data);
5100 		break;
5101 
5102 	case L2CAP_INFO_RSP:
5103 		err = l2cap_information_rsp(conn, cmd, data);
5104 		break;
5105 
5106 	case L2CAP_CREATE_CHAN_REQ:
5107 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5108 		break;
5109 
5110 	case L2CAP_MOVE_CHAN_REQ:
5111 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5112 		break;
5113 
5114 	case L2CAP_MOVE_CHAN_RSP:
5115 		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5116 		break;
5117 
5118 	case L2CAP_MOVE_CHAN_CFM:
5119 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5120 		break;
5121 
5122 	case L2CAP_MOVE_CHAN_CFM_RSP:
5123 		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5124 		break;
5125 
5126 	default:
5127 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5128 		err = -EINVAL;
5129 		break;
5130 	}
5131 
5132 	return err;
5133 }
5134 
5135 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5136 				   struct l2cap_cmd_hdr *cmd, u8 *data)
5137 {
5138 	switch (cmd->code) {
5139 	case L2CAP_COMMAND_REJ:
5140 		return 0;
5141 
5142 	case L2CAP_CONN_PARAM_UPDATE_REQ:
5143 		return l2cap_conn_param_update_req(conn, cmd, data);
5144 
5145 	case L2CAP_CONN_PARAM_UPDATE_RSP:
5146 		return 0;
5147 
5148 	default:
5149 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5150 		return -EINVAL;
5151 	}
5152 }
5153 
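/* The signaling channel may carry several commands in one frame; parse
 * them in sequence and send a Command Reject for anything that cannot be
 * handled on this link type.
 */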
5154 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5155 				     struct sk_buff *skb)
5156 {
5157 	u8 *data = skb->data;
5158 	int len = skb->len;
5159 	struct l2cap_cmd_hdr cmd;
5160 	int err;
5161 
5162 	l2cap_raw_recv(conn, skb);
5163 
5164 	while (len >= L2CAP_CMD_HDR_SIZE) {
5165 		u16 cmd_len;
5166 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5167 		data += L2CAP_CMD_HDR_SIZE;
5168 		len  -= L2CAP_CMD_HDR_SIZE;
5169 
5170 		cmd_len = le16_to_cpu(cmd.len);
5171 
5172 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5173 		       cmd.ident);
5174 
5175 		if (cmd_len > len || !cmd.ident) {
5176 			BT_DBG("corrupted command");
5177 			break;
5178 		}
5179 
5180 		if (conn->hcon->type == LE_LINK)
5181 			err = l2cap_le_sig_cmd(conn, &cmd, data);
5182 		else
5183 			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5184 
5185 		if (err) {
5186 			struct l2cap_cmd_rej_unk rej;
5187 
5188 			BT_ERR("Wrong link type (%d)", err);
5189 
5190 			/* FIXME: Map err to a valid reason */
5191 			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5192 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5193 				       sizeof(rej), &rej);
5194 		}
5195 
5196 		data += cmd_len;
5197 		len  -= cmd_len;
5198 	}
5199 
5200 	kfree_skb(skb);
5201 }
5202 
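/* When CRC16 FCS is enabled on the channel, strip the received FCS and
 * verify it against one computed over the frame header and payload.
 */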
5203 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
5204 {
5205 	u16 our_fcs, rcv_fcs;
5206 	int hdr_size;
5207 
5208 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5209 		hdr_size = L2CAP_EXT_HDR_SIZE;
5210 	else
5211 		hdr_size = L2CAP_ENH_HDR_SIZE;
5212 
5213 	if (chan->fcs == L2CAP_FCS_CRC16) {
5214 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5215 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5216 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5217 
5218 		if (our_fcs != rcv_fcs)
5219 			return -EBADMSG;
5220 	}
5221 	return 0;
5222 }
5223 
5224 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5225 {
5226 	struct l2cap_ctrl control;
5227 
5228 	BT_DBG("chan %p", chan);
5229 
5230 	memset(&control, 0, sizeof(control));
5231 	control.sframe = 1;
5232 	control.final = 1;
5233 	control.reqseq = chan->buffer_seq;
5234 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
5235 
5236 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5237 		control.super = L2CAP_SUPER_RNR;
5238 		l2cap_send_sframe(chan, &control);
5239 	}
5240 
5241 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5242 	    chan->unacked_frames > 0)
5243 		__set_retrans_timer(chan);
5244 
5245 	/* Send pending iframes */
5246 	l2cap_ertm_send(chan);
5247 
5248 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5249 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5250 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
5251 		 * send it now.
5252 		 */
5253 		control.super = L2CAP_SUPER_RR;
5254 		l2cap_send_sframe(chan, &control);
5255 	}
5256 }
5257 
5258 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5259 			    struct sk_buff **last_frag)
5260 {
5261 	/* skb->len reflects data in skb as well as all fragments
5262 	 * skb->data_len reflects only data in fragments
5263 	 */
5264 	if (!skb_has_frag_list(skb))
5265 		skb_shinfo(skb)->frag_list = new_frag;
5266 
5267 	new_frag->next = NULL;
5268 
5269 	(*last_frag)->next = new_frag;
5270 	*last_frag = new_frag;
5271 
5272 	skb->len += new_frag->len;
5273 	skb->data_len += new_frag->len;
5274 	skb->truesize += new_frag->truesize;
5275 }
5276 
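/* Reassemble a segmented SDU from its start, continuation and end
 * fragments, handing the complete SDU to the channel's recv callback.
 * Any error drops the partial SDU.
 */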
5277 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5278 				struct l2cap_ctrl *control)
5279 {
5280 	int err = -EINVAL;
5281 
5282 	switch (control->sar) {
5283 	case L2CAP_SAR_UNSEGMENTED:
5284 		if (chan->sdu)
5285 			break;
5286 
5287 		err = chan->ops->recv(chan, skb);
5288 		break;
5289 
5290 	case L2CAP_SAR_START:
5291 		if (chan->sdu)
5292 			break;
5293 
5294 		chan->sdu_len = get_unaligned_le16(skb->data);
5295 		skb_pull(skb, L2CAP_SDULEN_SIZE);
5296 
5297 		if (chan->sdu_len > chan->imtu) {
5298 			err = -EMSGSIZE;
5299 			break;
5300 		}
5301 
5302 		if (skb->len >= chan->sdu_len)
5303 			break;
5304 
5305 		chan->sdu = skb;
5306 		chan->sdu_last_frag = skb;
5307 
5308 		skb = NULL;
5309 		err = 0;
5310 		break;
5311 
5312 	case L2CAP_SAR_CONTINUE:
5313 		if (!chan->sdu)
5314 			break;
5315 
5316 		append_skb_frag(chan->sdu, skb,
5317 				&chan->sdu_last_frag);
5318 		skb = NULL;
5319 
5320 		if (chan->sdu->len >= chan->sdu_len)
5321 			break;
5322 
5323 		err = 0;
5324 		break;
5325 
5326 	case L2CAP_SAR_END:
5327 		if (!chan->sdu)
5328 			break;
5329 
5330 		append_skb_frag(chan->sdu, skb,
5331 				&chan->sdu_last_frag);
5332 		skb = NULL;
5333 
5334 		if (chan->sdu->len != chan->sdu_len)
5335 			break;
5336 
5337 		err = chan->ops->recv(chan, chan->sdu);
5338 
5339 		if (!err) {
5340 			/* Reassembly complete */
5341 			chan->sdu = NULL;
5342 			chan->sdu_last_frag = NULL;
5343 			chan->sdu_len = 0;
5344 		}
5345 		break;
5346 	}
5347 
5348 	if (err) {
5349 		kfree_skb(skb);
5350 		kfree_skb(chan->sdu);
5351 		chan->sdu = NULL;
5352 		chan->sdu_last_frag = NULL;
5353 		chan->sdu_len = 0;
5354 	}
5355 
5356 	return err;
5357 }
5358 
5359 static int l2cap_resegment(struct l2cap_chan *chan)
5360 {
5361 	/* Placeholder */
5362 	return 0;
5363 }
5364 
5365 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5366 {
5367 	u8 event;
5368 
5369 	if (chan->mode != L2CAP_MODE_ERTM)
5370 		return;
5371 
5372 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5373 	l2cap_tx(chan, NULL, NULL, event);
5374 }
5375 
5376 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5377 {
5378 	int err = 0;
5379 	/* Pass sequential frames to l2cap_reassemble_sdu()
5380 	 * until a gap is encountered.
5381 	 */
5382 
5383 	BT_DBG("chan %p", chan);
5384 
5385 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5386 		struct sk_buff *skb;
5387 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
5388 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
5389 
5390 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5391 
5392 		if (!skb)
5393 			break;
5394 
5395 		skb_unlink(skb, &chan->srej_q);
5396 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5397 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5398 		if (err)
5399 			break;
5400 	}
5401 
5402 	if (skb_queue_empty(&chan->srej_q)) {
5403 		chan->rx_state = L2CAP_RX_STATE_RECV;
5404 		l2cap_send_ack(chan);
5405 	}
5406 
5407 	return err;
5408 }
5409 
5410 static void l2cap_handle_srej(struct l2cap_chan *chan,
5411 			      struct l2cap_ctrl *control)
5412 {
5413 	struct sk_buff *skb;
5414 
5415 	BT_DBG("chan %p, control %p", chan, control);
5416 
5417 	if (control->reqseq == chan->next_tx_seq) {
5418 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5419 		l2cap_send_disconn_req(chan, ECONNRESET);
5420 		return;
5421 	}
5422 
5423 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5424 
5425 	if (skb == NULL) {
5426 		BT_DBG("Seq %d not available for retransmission",
5427 		       control->reqseq);
5428 		return;
5429 	}
5430 
5431 	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5432 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5433 		l2cap_send_disconn_req(chan, ECONNRESET);
5434 		return;
5435 	}
5436 
5437 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5438 
5439 	if (control->poll) {
5440 		l2cap_pass_to_tx(chan, control);
5441 
5442 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
5443 		l2cap_retransmit(chan, control);
5444 		l2cap_ertm_send(chan);
5445 
5446 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5447 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
5448 			chan->srej_save_reqseq = control->reqseq;
5449 		}
5450 	} else {
5451 		l2cap_pass_to_tx_fbit(chan, control);
5452 
5453 		if (control->final) {
5454 			if (chan->srej_save_reqseq != control->reqseq ||
5455 			    !test_and_clear_bit(CONN_SREJ_ACT,
5456 						&chan->conn_state))
5457 				l2cap_retransmit(chan, control);
5458 		} else {
5459 			l2cap_retransmit(chan, control);
5460 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5461 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
5462 				chan->srej_save_reqseq = control->reqseq;
5463 			}
5464 		}
5465 	}
5466 }
5467 
5468 static void l2cap_handle_rej(struct l2cap_chan *chan,
5469 			     struct l2cap_ctrl *control)
5470 {
5471 	struct sk_buff *skb;
5472 
5473 	BT_DBG("chan %p, control %p", chan, control);
5474 
5475 	if (control->reqseq == chan->next_tx_seq) {
5476 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5477 		l2cap_send_disconn_req(chan, ECONNRESET);
5478 		return;
5479 	}
5480 
5481 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5482 
5483 	if (chan->max_tx && skb &&
5484 	    bt_cb(skb)->control.retries >= chan->max_tx) {
5485 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5486 		l2cap_send_disconn_req(chan, ECONNRESET);
5487 		return;
5488 	}
5489 
5490 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5491 
5492 	l2cap_pass_to_tx(chan, control);
5493 
5494 	if (control->final) {
5495 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5496 			l2cap_retransmit_all(chan, control);
5497 	} else {
5498 		l2cap_retransmit_all(chan, control);
5499 		l2cap_ertm_send(chan);
5500 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5501 			set_bit(CONN_REJ_ACT, &chan->conn_state);
5502 	}
5503 }
5504 
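/* Classify the sequence number of a received I-frame relative to the
 * receive window and any outstanding SREJ state, so the caller knows
 * whether it is expected, a duplicate, evidence of missing frames, or
 * invalid.
 */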
5505 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5506 {
5507 	BT_DBG("chan %p, txseq %d", chan, txseq);
5508 
5509 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5510 	       chan->expected_tx_seq);
5511 
5512 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5513 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5514 		    chan->tx_win) {
5515 			/* See notes below regarding "double poll" and
5516 			 * invalid packets.
5517 			 */
5518 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5519 				BT_DBG("Invalid/Ignore - after SREJ");
5520 				return L2CAP_TXSEQ_INVALID_IGNORE;
5521 			} else {
5522 				BT_DBG("Invalid - in window after SREJ sent");
5523 				return L2CAP_TXSEQ_INVALID;
5524 			}
5525 		}
5526 
5527 		if (chan->srej_list.head == txseq) {
5528 			BT_DBG("Expected SREJ");
5529 			return L2CAP_TXSEQ_EXPECTED_SREJ;
5530 		}
5531 
5532 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5533 			BT_DBG("Duplicate SREJ - txseq already stored");
5534 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
5535 		}
5536 
5537 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5538 			BT_DBG("Unexpected SREJ - not requested");
5539 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5540 		}
5541 	}
5542 
5543 	if (chan->expected_tx_seq == txseq) {
5544 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5545 		    chan->tx_win) {
5546 			BT_DBG("Invalid - txseq outside tx window");
5547 			return L2CAP_TXSEQ_INVALID;
5548 		} else {
5549 			BT_DBG("Expected");
5550 			return L2CAP_TXSEQ_EXPECTED;
5551 		}
5552 	}
5553 
5554 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5555 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5556 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
5557 		return L2CAP_TXSEQ_DUPLICATE;
5558 	}
5559 
5560 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5561 		/* A source of invalid packets is a "double poll" condition,
5562 		 * where delays cause us to send multiple poll packets.  If
5563 		 * the remote stack receives and processes both polls,
5564 		 * sequence numbers can wrap around in such a way that a
5565 		 * resent frame has a sequence number that looks like new data
5566 		 * with a sequence gap.  This would trigger an erroneous SREJ
5567 		 * request.
5568 		 *
5569 		 * Fortunately, this is impossible with a tx window that's
5570 		 * less than half of the maximum sequence number, which allows
5571 		 * invalid frames to be safely ignored.
5572 		 *
5573 		 * With tx window sizes greater than half of the tx window
5574 		 * maximum, the frame is invalid and cannot be ignored.  This
5575 		 * causes a disconnect.
5576 		 */
5577 
5578 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5579 			BT_DBG("Invalid/Ignore - txseq outside tx window");
5580 			return L2CAP_TXSEQ_INVALID_IGNORE;
5581 		} else {
5582 			BT_DBG("Invalid - txseq outside tx window");
5583 			return L2CAP_TXSEQ_INVALID;
5584 		}
5585 	} else {
5586 		BT_DBG("Unexpected - txseq indicates missing frames");
5587 		return L2CAP_TXSEQ_UNEXPECTED;
5588 	}
5589 }
5590 
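/* ERTM receive state machine, RECV state: handle in-sequence and
 * out-of-sequence I-frames as well as the supervisory frames.
 */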
5591 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5592 			       struct l2cap_ctrl *control,
5593 			       struct sk_buff *skb, u8 event)
5594 {
5595 	int err = 0;
5596 	bool skb_in_use = false;
5597 
5598 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5599 	       event);
5600 
5601 	switch (event) {
5602 	case L2CAP_EV_RECV_IFRAME:
5603 		switch (l2cap_classify_txseq(chan, control->txseq)) {
5604 		case L2CAP_TXSEQ_EXPECTED:
5605 			l2cap_pass_to_tx(chan, control);
5606 
5607 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5608 				BT_DBG("Busy, discarding expected seq %d",
5609 				       control->txseq);
5610 				break;
5611 			}
5612 
5613 			chan->expected_tx_seq = __next_seq(chan,
5614 							   control->txseq);
5615 
5616 			chan->buffer_seq = chan->expected_tx_seq;
5617 			skb_in_use = true;
5618 
5619 			err = l2cap_reassemble_sdu(chan, skb, control);
5620 			if (err)
5621 				break;
5622 
5623 			if (control->final) {
5624 				if (!test_and_clear_bit(CONN_REJ_ACT,
5625 							&chan->conn_state)) {
5626 					control->final = 0;
5627 					l2cap_retransmit_all(chan, control);
5628 					l2cap_ertm_send(chan);
5629 				}
5630 			}
5631 
5632 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5633 				l2cap_send_ack(chan);
5634 			break;
5635 		case L2CAP_TXSEQ_UNEXPECTED:
5636 			l2cap_pass_to_tx(chan, control);
5637 
5638 			/* Can't issue SREJ frames in the local busy state.
5639 			 * Drop this frame, it will be seen as missing
5640 			 * when local busy is exited.
5641 			 */
5642 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5643 				BT_DBG("Busy, discarding unexpected seq %d",
5644 				       control->txseq);
5645 				break;
5646 			}
5647 
5648 			/* There was a gap in the sequence, so an SREJ
5649 			 * must be sent for each missing frame.  The
5650 			 * current frame is stored for later use.
5651 			 */
5652 			skb_queue_tail(&chan->srej_q, skb);
5653 			skb_in_use = true;
5654 			BT_DBG("Queued %p (queue len %d)", skb,
5655 			       skb_queue_len(&chan->srej_q));
5656 
5657 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5658 			l2cap_seq_list_clear(&chan->srej_list);
5659 			l2cap_send_srej(chan, control->txseq);
5660 
5661 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5662 			break;
5663 		case L2CAP_TXSEQ_DUPLICATE:
5664 			l2cap_pass_to_tx(chan, control);
5665 			break;
5666 		case L2CAP_TXSEQ_INVALID_IGNORE:
5667 			break;
5668 		case L2CAP_TXSEQ_INVALID:
5669 		default:
5670 			l2cap_send_disconn_req(chan, ECONNRESET);
5671 			break;
5672 		}
5673 		break;
5674 	case L2CAP_EV_RECV_RR:
5675 		l2cap_pass_to_tx(chan, control);
5676 		if (control->final) {
5677 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5678 
5679 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5680 			    !__chan_is_moving(chan)) {
5681 				control->final = 0;
5682 				l2cap_retransmit_all(chan, control);
5683 			}
5684 
5685 			l2cap_ertm_send(chan);
5686 		} else if (control->poll) {
5687 			l2cap_send_i_or_rr_or_rnr(chan);
5688 		} else {
5689 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
5690 					       &chan->conn_state) &&
5691 			    chan->unacked_frames)
5692 				__set_retrans_timer(chan);
5693 
5694 			l2cap_ertm_send(chan);
5695 		}
5696 		break;
5697 	case L2CAP_EV_RECV_RNR:
5698 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5699 		l2cap_pass_to_tx(chan, control);
5700 		if (control && control->poll) {
5701 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
5702 			l2cap_send_rr_or_rnr(chan, 0);
5703 		}
5704 		__clear_retrans_timer(chan);
5705 		l2cap_seq_list_clear(&chan->retrans_list);
5706 		break;
5707 	case L2CAP_EV_RECV_REJ:
5708 		l2cap_handle_rej(chan, control);
5709 		break;
5710 	case L2CAP_EV_RECV_SREJ:
5711 		l2cap_handle_srej(chan, control);
5712 		break;
5713 	default:
5714 		break;
5715 	}
5716 
5717 	if (skb && !skb_in_use) {
5718 		BT_DBG("Freeing %p", skb);
5719 		kfree_skb(skb);
5720 	}
5721 
5722 	return err;
5723 }
5724 
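/* ERTM receive state machine, SREJ_SENT state: frames are buffered in the
 * SREJ queue until the missing frames requested via SREJ have been
 * retransmitted.
 */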
5725 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5726 				    struct l2cap_ctrl *control,
5727 				    struct sk_buff *skb, u8 event)
5728 {
5729 	int err = 0;
5730 	u16 txseq = control->txseq;
5731 	bool skb_in_use = false;
5732 
5733 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5734 	       event);
5735 
5736 	switch (event) {
5737 	case L2CAP_EV_RECV_IFRAME:
5738 		switch (l2cap_classify_txseq(chan, txseq)) {
5739 		case L2CAP_TXSEQ_EXPECTED:
5740 			/* Keep frame for reassembly later */
5741 			l2cap_pass_to_tx(chan, control);
5742 			skb_queue_tail(&chan->srej_q, skb);
5743 			skb_in_use = true;
5744 			BT_DBG("Queued %p (queue len %d)", skb,
5745 			       skb_queue_len(&chan->srej_q));
5746 
5747 			chan->expected_tx_seq = __next_seq(chan, txseq);
5748 			break;
5749 		case L2CAP_TXSEQ_EXPECTED_SREJ:
5750 			l2cap_seq_list_pop(&chan->srej_list);
5751 
5752 			l2cap_pass_to_tx(chan, control);
5753 			skb_queue_tail(&chan->srej_q, skb);
5754 			skb_in_use = true;
5755 			BT_DBG("Queued %p (queue len %d)", skb,
5756 			       skb_queue_len(&chan->srej_q));
5757 
5758 			err = l2cap_rx_queued_iframes(chan);
5759 			if (err)
5760 				break;
5761 
5762 			break;
5763 		case L2CAP_TXSEQ_UNEXPECTED:
5764 			/* Got a frame that can't be reassembled yet.
5765 			 * Save it for later, and send SREJs to cover
5766 			 * the missing frames.
5767 			 */
5768 			skb_queue_tail(&chan->srej_q, skb);
5769 			skb_in_use = true;
5770 			BT_DBG("Queued %p (queue len %d)", skb,
5771 			       skb_queue_len(&chan->srej_q));
5772 
5773 			l2cap_pass_to_tx(chan, control);
5774 			l2cap_send_srej(chan, control->txseq);
5775 			break;
5776 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
5777 			/* This frame was requested with an SREJ, but
5778 			 * some expected retransmitted frames are
5779 			 * missing.  Request retransmission of missing
5780 			 * SREJ'd frames.
5781 			 */
5782 			skb_queue_tail(&chan->srej_q, skb);
5783 			skb_in_use = true;
5784 			BT_DBG("Queued %p (queue len %d)", skb,
5785 			       skb_queue_len(&chan->srej_q));
5786 
5787 			l2cap_pass_to_tx(chan, control);
5788 			l2cap_send_srej_list(chan, control->txseq);
5789 			break;
5790 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
5791 			/* We've already queued this frame.  Drop this copy. */
5792 			l2cap_pass_to_tx(chan, control);
5793 			break;
5794 		case L2CAP_TXSEQ_DUPLICATE:
5795 			/* Expecting a later sequence number, so this frame
5796 			 * was already received.  Ignore it completely.
5797 			 */
5798 			break;
5799 		case L2CAP_TXSEQ_INVALID_IGNORE:
5800 			break;
5801 		case L2CAP_TXSEQ_INVALID:
5802 		default:
5803 			l2cap_send_disconn_req(chan, ECONNRESET);
5804 			break;
5805 		}
5806 		break;
5807 	case L2CAP_EV_RECV_RR:
5808 		l2cap_pass_to_tx(chan, control);
5809 		if (control->final) {
5810 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5811 
5812 			if (!test_and_clear_bit(CONN_REJ_ACT,
5813 						&chan->conn_state)) {
5814 				control->final = 0;
5815 				l2cap_retransmit_all(chan, control);
5816 			}
5817 
5818 			l2cap_ertm_send(chan);
5819 		} else if (control->poll) {
5820 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
5821 					       &chan->conn_state) &&
5822 			    chan->unacked_frames) {
5823 				__set_retrans_timer(chan);
5824 			}
5825 
5826 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
5827 			l2cap_send_srej_tail(chan);
5828 		} else {
5829 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
5830 					       &chan->conn_state) &&
5831 			    chan->unacked_frames)
5832 				__set_retrans_timer(chan);
5833 
5834 			l2cap_send_ack(chan);
5835 		}
5836 		break;
5837 	case L2CAP_EV_RECV_RNR:
5838 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5839 		l2cap_pass_to_tx(chan, control);
5840 		if (control->poll) {
5841 			l2cap_send_srej_tail(chan);
5842 		} else {
5843 			struct l2cap_ctrl rr_control;
5844 			memset(&rr_control, 0, sizeof(rr_control));
5845 			rr_control.sframe = 1;
5846 			rr_control.super = L2CAP_SUPER_RR;
5847 			rr_control.reqseq = chan->buffer_seq;
5848 			l2cap_send_sframe(chan, &rr_control);
5849 		}
5850 
5851 		break;
5852 	case L2CAP_EV_RECV_REJ:
5853 		l2cap_handle_rej(chan, control);
5854 		break;
5855 	case L2CAP_EV_RECV_SREJ:
5856 		l2cap_handle_srej(chan, control);
5857 		break;
5858 	}
5859 
5860 	if (skb && !skb_in_use) {
5861 		BT_DBG("Freeing %p", skb);
5862 		kfree_skb(skb);
5863 	}
5864 
5865 	return err;
5866 }
5867 
5868 static int l2cap_finish_move(struct l2cap_chan *chan)
5869 {
5870 	BT_DBG("chan %p", chan);
5871 
5872 	chan->rx_state = L2CAP_RX_STATE_RECV;
5873 
5874 	if (chan->hs_hcon)
5875 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
5876 	else
5877 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
5878 
5879 	return l2cap_resegment(chan);
5880 }
5881 
5882 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
5883 				 struct l2cap_ctrl *control,
5884 				 struct sk_buff *skb, u8 event)
5885 {
5886 	int err;
5887 
5888 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5889 	       event);
5890 
5891 	if (!control->poll)
5892 		return -EPROTO;
5893 
5894 	l2cap_process_reqseq(chan, control->reqseq);
5895 
5896 	if (!skb_queue_empty(&chan->tx_q))
5897 		chan->tx_send_head = skb_peek(&chan->tx_q);
5898 	else
5899 		chan->tx_send_head = NULL;
5900 
5901 	/* Rewind next_tx_seq to the point expected
5902 	 * by the receiver.
5903 	 */
5904 	chan->next_tx_seq = control->reqseq;
5905 	chan->unacked_frames = 0;
5906 
5907 	err = l2cap_finish_move(chan);
5908 	if (err)
5909 		return err;
5910 
5911 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
5912 	l2cap_send_i_or_rr_or_rnr(chan);
5913 
5914 	if (event == L2CAP_EV_RECV_IFRAME)
5915 		return -EPROTO;
5916 
5917 	return l2cap_rx_state_recv(chan, control, NULL, event);
5918 }
5919 
5920 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
5921 				 struct l2cap_ctrl *control,
5922 				 struct sk_buff *skb, u8 event)
5923 {
5924 	int err;
5925 
5926 	if (!control->final)
5927 		return -EPROTO;
5928 
5929 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5930 
5931 	chan->rx_state = L2CAP_RX_STATE_RECV;
5932 	l2cap_process_reqseq(chan, control->reqseq);
5933 
5934 	if (!skb_queue_empty(&chan->tx_q))
5935 		chan->tx_send_head = skb_peek(&chan->tx_q);
5936 	else
5937 		chan->tx_send_head = NULL;
5938 
5939 	/* Rewind next_tx_seq to the point expected
5940 	 * by the receiver.
5941 	 */
5942 	chan->next_tx_seq = control->reqseq;
5943 	chan->unacked_frames = 0;
5944 
5945 	if (chan->hs_hcon)
5946 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
5947 	else
5948 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
5949 
5950 	err = l2cap_resegment(chan);
5951 
5952 	if (!err)
5953 		err = l2cap_rx_state_recv(chan, control, skb, event);
5954 
5955 	return err;
5956 }
5957 
5958 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5959 {
5960 	/* Make sure reqseq is for a packet that has been sent but not acked */
5961 	u16 unacked;
5962 
5963 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5964 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
5965 }
5966 
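/* Main ERTM receive entry point: validate the acknowledgement sequence
 * number and dispatch the event to the handler for the current RX
 * state.  An out-of-range reqseq is a protocol violation and causes a
 * disconnect.
 */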
5967 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5968 		    struct sk_buff *skb, u8 event)
5969 {
5970 	int err = 0;
5971 
5972 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5973 	       control, skb, event, chan->rx_state);
5974 
5975 	if (__valid_reqseq(chan, control->reqseq)) {
5976 		switch (chan->rx_state) {
5977 		case L2CAP_RX_STATE_RECV:
5978 			err = l2cap_rx_state_recv(chan, control, skb, event);
5979 			break;
5980 		case L2CAP_RX_STATE_SREJ_SENT:
5981 			err = l2cap_rx_state_srej_sent(chan, control, skb,
5982 						       event);
5983 			break;
5984 		case L2CAP_RX_STATE_WAIT_P:
5985 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
5986 			break;
5987 		case L2CAP_RX_STATE_WAIT_F:
5988 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
5989 			break;
5990 		default:
5991 			/* shut it down */
5992 			break;
5993 		}
5994 	} else {
5995 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
5996 		       control->reqseq, chan->next_tx_seq,
5997 		       chan->expected_ack_seq);
5998 		l2cap_send_disconn_req(chan, ECONNRESET);
5999 	}
6000 
6001 	return err;
6002 }
6003 
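/* Streaming mode receive: frames with the expected txseq are passed to
 * SDU reassembly; anything out of sequence simply discards the partial
 * SDU, since streaming mode has no retransmission.
 */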
6004 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6005 			   struct sk_buff *skb)
6006 {
6007 	int err = 0;
6008 
6009 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6010 	       chan->rx_state);
6011 
6012 	if (l2cap_classify_txseq(chan, control->txseq) ==
6013 	    L2CAP_TXSEQ_EXPECTED) {
6014 		l2cap_pass_to_tx(chan, control);
6015 
6016 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6017 		       __next_seq(chan, chan->buffer_seq));
6018 
6019 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6020 
6021 		l2cap_reassemble_sdu(chan, skb, control);
6022 	} else {
6023 		if (chan->sdu) {
6024 			kfree_skb(chan->sdu);
6025 			chan->sdu = NULL;
6026 		}
6027 		chan->sdu_last_frag = NULL;
6028 		chan->sdu_len = 0;
6029 
6030 		if (skb) {
6031 			BT_DBG("Freeing %p", skb);
6032 			kfree_skb(skb);
6033 		}
6034 	}
6035 
6036 	chan->last_acked_seq = control->txseq;
6037 	chan->expected_tx_seq = __next_seq(chan, control->txseq);
6038 
6039 	return err;
6040 }
6041 
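/* Common receive path for ERTM and streaming channels: verify the FCS
 * and the MPS, then feed I-frames into the RX state machine (or the
 * streaming handler) and map S-frame types to their RX events.  Any
 * error from the state machine tears the channel down.
 */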
6042 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6043 {
6044 	struct l2cap_ctrl *control = &bt_cb(skb)->control;
6045 	u16 len;
6046 	u8 event;
6047 
6048 	__unpack_control(chan, skb);
6049 
6050 	len = skb->len;
6051 
6052 	/*
6053 	 * We can just drop the corrupted I-frame here.
6054 	 * The receiver will miss it and start the proper recovery
6055 	 * procedure, asking for retransmission.
6056 	 */
6057 	if (l2cap_check_fcs(chan, skb))
6058 		goto drop;
6059 
6060 	if (!control->sframe && control->sar == L2CAP_SAR_START)
6061 		len -= L2CAP_SDULEN_SIZE;
6062 
6063 	if (chan->fcs == L2CAP_FCS_CRC16)
6064 		len -= L2CAP_FCS_SIZE;
6065 
6066 	if (len > chan->mps) {
6067 		l2cap_send_disconn_req(chan, ECONNRESET);
6068 		goto drop;
6069 	}
6070 
6071 	if (!control->sframe) {
6072 		int err;
6073 
6074 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6075 		       control->sar, control->reqseq, control->final,
6076 		       control->txseq);
6077 
6078 		/* Validate the F-bit: F=0 is always valid, F=1 is only
6079 		 * valid in the TX WAIT_F state.
6080 		 */
6081 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6082 			goto drop;
6083 
6084 		if (chan->mode != L2CAP_MODE_STREAMING) {
6085 			event = L2CAP_EV_RECV_IFRAME;
6086 			err = l2cap_rx(chan, control, skb, event);
6087 		} else {
6088 			err = l2cap_stream_rx(chan, control, skb);
6089 		}
6090 
6091 		if (err)
6092 			l2cap_send_disconn_req(chan, ECONNRESET);
6093 	} else {
6094 		const u8 rx_func_to_event[4] = {
6095 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6096 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6097 		};
6098 
6099 		/* Only I-frames are expected in streaming mode */
6100 		if (chan->mode == L2CAP_MODE_STREAMING)
6101 			goto drop;
6102 
6103 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6104 		       control->reqseq, control->final, control->poll,
6105 		       control->super);
6106 
6107 		if (len != 0) {
6108 			BT_ERR("Trailing bytes: %d in sframe", len);
6109 			l2cap_send_disconn_req(chan, ECONNRESET);
6110 			goto drop;
6111 		}
6112 
6113 		/* Validate F and P bits */
6114 		if (control->final && (control->poll ||
6115 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6116 			goto drop;
6117 
6118 		event = rx_func_to_event[control->super];
6119 		if (l2cap_rx(chan, control, skb, event))
6120 			l2cap_send_disconn_req(chan, ECONNRESET);
6121 	}
6122 
6123 	return 0;
6124 
6125 drop:
6126 	kfree_skb(skb);
6127 	return 0;
6128 }
6129 
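/* Deliver data received on a dynamically allocated CID.  The lookup by
 * source CID returns a locked channel; the A2MP fixed channel is
 * created on demand.  Basic mode frames are handed straight to the
 * channel, ERTM and streaming frames go through l2cap_data_rcv().
 */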
6130 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6131 			       struct sk_buff *skb)
6132 {
6133 	struct l2cap_chan *chan;
6134 
6135 	chan = l2cap_get_chan_by_scid(conn, cid);
6136 	if (!chan) {
6137 		if (cid == L2CAP_CID_A2MP) {
6138 			chan = a2mp_channel_create(conn, skb);
6139 			if (!chan) {
6140 				kfree_skb(skb);
6141 				return;
6142 			}
6143 
6144 			l2cap_chan_lock(chan);
6145 		} else {
6146 			BT_DBG("unknown cid 0x%4.4x", cid);
6147 			/* Drop packet and return */
6148 			kfree_skb(skb);
6149 			return;
6150 		}
6151 	}
6152 
6153 	BT_DBG("chan %p, len %d", chan, skb->len);
6154 
6155 	if (chan->state != BT_CONNECTED)
6156 		goto drop;
6157 
6158 	switch (chan->mode) {
6159 	case L2CAP_MODE_BASIC:
6160 		/* If the socket receive buffer overflows we drop data here,
6161 		 * which is *bad* because L2CAP has to be reliable.
6162 		 * But we don't have any other choice: Basic mode L2CAP
6163 		 * doesn't provide a flow control mechanism. */
6164 
6165 		if (chan->imtu < skb->len)
6166 			goto drop;
6167 
6168 		if (!chan->ops->recv(chan, skb))
6169 			goto done;
6170 		break;
6171 
6172 	case L2CAP_MODE_ERTM:
6173 	case L2CAP_MODE_STREAMING:
6174 		l2cap_data_rcv(chan, skb);
6175 		goto done;
6176 
6177 	default:
6178 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6179 		break;
6180 	}
6181 
6182 drop:
6183 	kfree_skb(skb);
6184 
6185 done:
6186 	l2cap_chan_unlock(chan);
6187 }
6188 
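/* Connectionless (G-frame) reception: deliver to the global channel
 * bound to the PSM, dropping the frame if no such channel exists, the
 * channel is not bound or connected, or the payload exceeds its MTU.
 */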
6189 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6190 				  struct sk_buff *skb)
6191 {
6192 	struct l2cap_chan *chan;
6193 
6194 	chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
6195 	if (!chan)
6196 		goto drop;
6197 
6198 	BT_DBG("chan %p, len %d", chan, skb->len);
6199 
6200 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6201 		goto drop;
6202 
6203 	if (chan->imtu < skb->len)
6204 		goto drop;
6205 
6206 	if (!chan->ops->recv(chan, skb))
6207 		return;
6208 
6209 drop:
6210 	kfree_skb(skb);
6211 }
6212 
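/* ATT fixed channel reception over LE: same checks as the
 * connectionless case, but the lookup is by fixed CID instead of PSM.
 */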
6213 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
6214 			      struct sk_buff *skb)
6215 {
6216 	struct l2cap_chan *chan;
6217 
6218 	chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
6219 	if (!chan)
6220 		goto drop;
6221 
6222 	BT_DBG("chan %p, len %d", chan, skb->len);
6223 
6224 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6225 		goto drop;
6226 
6227 	if (chan->imtu < skb->len)
6228 		goto drop;
6229 
6230 	if (!chan->ops->recv(chan, skb))
6231 		return;
6232 
6233 drop:
6234 	kfree_skb(skb);
6235 }
6236 
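/* Demultiplex a complete L2CAP frame: validate the basic header length
 * and route by CID to the signaling, connectionless, ATT or SMP
 * handlers, or to a data channel for dynamically allocated CIDs.
 */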
6237 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6238 {
6239 	struct l2cap_hdr *lh = (void *) skb->data;
6240 	u16 cid, len;
6241 	__le16 psm;
6242 
6243 	skb_pull(skb, L2CAP_HDR_SIZE);
6244 	cid = __le16_to_cpu(lh->cid);
6245 	len = __le16_to_cpu(lh->len);
6246 
6247 	if (len != skb->len) {
6248 		kfree_skb(skb);
6249 		return;
6250 	}
6251 
6252 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
6253 
6254 	switch (cid) {
6255 	case L2CAP_CID_LE_SIGNALING:
6256 	case L2CAP_CID_SIGNALING:
6257 		l2cap_sig_channel(conn, skb);
6258 		break;
6259 
6260 	case L2CAP_CID_CONN_LESS:
6261 		psm = get_unaligned((__le16 *) skb->data);
6262 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
6263 		l2cap_conless_channel(conn, psm, skb);
6264 		break;
6265 
6266 	case L2CAP_CID_LE_DATA:
6267 		l2cap_att_channel(conn, cid, skb);
6268 		break;
6269 
6270 	case L2CAP_CID_SMP:
6271 		if (smp_sig_channel(conn, skb))
6272 			l2cap_conn_del(conn->hcon, EACCES);
6273 		break;
6274 
6275 	default:
6276 		l2cap_data_channel(conn, cid, skb);
6277 		break;
6278 	}
6279 }
6280 
6281 /* ---- L2CAP interface with lower layer (HCI) ---- */
6282 
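/* Incoming ACL connection request: scan listening channels for a source
 * address match and report the accepted link mode.  An exact address
 * match takes precedence over channels bound to BDADDR_ANY.
 */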
6283 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6284 {
6285 	int exact = 0, lm1 = 0, lm2 = 0;
6286 	struct l2cap_chan *c;
6287 
6288 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6289 
6290 	/* Find listening sockets and check their link_mode */
6291 	read_lock(&chan_list_lock);
6292 	list_for_each_entry(c, &chan_list, global_l) {
6293 		struct sock *sk = c->sk;
6294 
6295 		if (c->state != BT_LISTEN)
6296 			continue;
6297 
6298 		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
6299 			lm1 |= HCI_LM_ACCEPT;
6300 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6301 				lm1 |= HCI_LM_MASTER;
6302 			exact++;
6303 		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
6304 			lm2 |= HCI_LM_ACCEPT;
6305 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6306 				lm2 |= HCI_LM_MASTER;
6307 		}
6308 	}
6309 	read_unlock(&chan_list_lock);
6310 
6311 	return exact ? lm1 : lm2;
6312 }
6313 
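/* ACL link establishment result: on success create the l2cap_conn and
 * mark it ready, otherwise tear the connection down with the translated
 * HCI error code.
 */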
6314 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6315 {
6316 	struct l2cap_conn *conn;
6317 
6318 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6319 
6320 	if (!status) {
6321 		conn = l2cap_conn_add(hcon, status);
6322 		if (conn)
6323 			l2cap_conn_ready(conn);
6324 	} else {
6325 		l2cap_conn_del(hcon, bt_to_errno(status));
6326 	}
6327 }
6328 
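/* Report the disconnect reason to use when the ACL link goes away. */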
6329 int l2cap_disconn_ind(struct hci_conn *hcon)
6330 {
6331 	struct l2cap_conn *conn = hcon->l2cap_data;
6332 
6333 	BT_DBG("hcon %p", hcon);
6334 
6335 	if (!conn)
6336 		return HCI_ERROR_REMOTE_USER_TERM;
6337 	return conn->disc_reason;
6338 }
6339 
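/* ACL link disconnected: tear down the L2CAP connection and its
 * channels with the translated HCI reason.
 */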
6340 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6341 {
6342 	BT_DBG("hcon %p reason %d", hcon, reason);
6343 
6344 	l2cap_conn_del(hcon, bt_to_errno(reason));
6345 }
6346 
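/* React to an encryption change on a connection-oriented channel: if
 * encryption was dropped, give MEDIUM security channels a grace timer
 * and close HIGH security channels outright; if encryption came back
 * up, clear the pending timer.
 */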
6347 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6348 {
6349 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6350 		return;
6351 
6352 	if (encrypt == 0x00) {
6353 		if (chan->sec_level == BT_SECURITY_MEDIUM)
6354 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6355 		else if (chan->sec_level == BT_SECURITY_HIGH)
6356 			l2cap_chan_close(chan, ECONNREFUSED);
6357 	} else {
6358 		if (chan->sec_level == BT_SECURITY_MEDIUM)
6359 			__clear_chan_timer(chan);
6360 	}
6361 }
6362 
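/* Security (authentication/encryption) change notification from HCI:
 * walk every channel on the connection and either resume pending
 * connection setup, complete LE channels, or reject and disconnect,
 * depending on the status and the channel state.
 */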
6363 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6364 {
6365 	struct l2cap_conn *conn = hcon->l2cap_data;
6366 	struct l2cap_chan *chan;
6367 
6368 	if (!conn)
6369 		return 0;
6370 
6371 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
6372 
6373 	if (hcon->type == LE_LINK) {
6374 		if (!status && encrypt)
6375 			smp_distribute_keys(conn, 0);
6376 		cancel_delayed_work(&conn->security_timer);
6377 	}
6378 
6379 	mutex_lock(&conn->chan_lock);
6380 
6381 	list_for_each_entry(chan, &conn->chan_l, list) {
6382 		l2cap_chan_lock(chan);
6383 
6384 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6385 		       state_to_string(chan->state));
6386 
6387 		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6388 			l2cap_chan_unlock(chan);
6389 			continue;
6390 		}
6391 
6392 		if (chan->scid == L2CAP_CID_LE_DATA) {
6393 			if (!status && encrypt) {
6394 				chan->sec_level = hcon->sec_level;
6395 				l2cap_chan_ready(chan);
6396 			}
6397 
6398 			l2cap_chan_unlock(chan);
6399 			continue;
6400 		}
6401 
6402 		if (!__l2cap_no_conn_pending(chan)) {
6403 			l2cap_chan_unlock(chan);
6404 			continue;
6405 		}
6406 
6407 		if (!status && (chan->state == BT_CONNECTED ||
6408 				chan->state == BT_CONFIG)) {
6409 			struct sock *sk = chan->sk;
6410 
6411 			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
6412 			sk->sk_state_change(sk);
6413 
6414 			l2cap_check_encryption(chan, encrypt);
6415 			l2cap_chan_unlock(chan);
6416 			continue;
6417 		}
6418 
6419 		if (chan->state == BT_CONNECT) {
6420 			if (!status) {
6421 				l2cap_start_connection(chan);
6422 			} else {
6423 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6424 			}
6425 		} else if (chan->state == BT_CONNECT2) {
6426 			struct sock *sk = chan->sk;
6427 			struct l2cap_conn_rsp rsp;
6428 			__u16 res, stat;
6429 
6430 			lock_sock(sk);
6431 
6432 			if (!status) {
6433 				if (test_bit(BT_SK_DEFER_SETUP,
6434 					     &bt_sk(sk)->flags)) {
6435 					res = L2CAP_CR_PEND;
6436 					stat = L2CAP_CS_AUTHOR_PEND;
6437 					chan->ops->defer(chan);
6438 				} else {
6439 					__l2cap_state_change(chan, BT_CONFIG);
6440 					res = L2CAP_CR_SUCCESS;
6441 					stat = L2CAP_CS_NO_INFO;
6442 				}
6443 			} else {
6444 				__l2cap_state_change(chan, BT_DISCONN);
6445 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6446 				res = L2CAP_CR_SEC_BLOCK;
6447 				stat = L2CAP_CS_NO_INFO;
6448 			}
6449 
6450 			release_sock(sk);
6451 
6452 			rsp.scid   = cpu_to_le16(chan->dcid);
6453 			rsp.dcid   = cpu_to_le16(chan->scid);
6454 			rsp.result = cpu_to_le16(res);
6455 			rsp.status = cpu_to_le16(stat);
6456 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
6457 				       sizeof(rsp), &rsp);
6458 
6459 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6460 			    res == L2CAP_CR_SUCCESS) {
6461 				char buf[128];
6462 				set_bit(CONF_REQ_SENT, &chan->conf_state);
6463 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
6464 					       L2CAP_CONF_REQ,
6465 					       l2cap_build_conf_req(chan, buf),
6466 					       buf);
6467 				chan->num_conf_req++;
6468 			}
6469 		}
6470 
6471 		l2cap_chan_unlock(chan);
6472 	}
6473 
6474 	mutex_unlock(&conn->chan_lock);
6475 
6476 	return 0;
6477 }
6478 
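/* Entry point for ACL data from HCI: reassemble L2CAP frames from
 * start/continuation fragments, guarding against short, oversized or
 * unexpected fragments, and hand each complete frame to
 * l2cap_recv_frame().  The fragment skb is always consumed.
 */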
6479 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6480 {
6481 	struct l2cap_conn *conn = hcon->l2cap_data;
6482 	struct l2cap_hdr *hdr;
6483 	int len;
6484 
6485 	/* For an AMP controller, do not create an l2cap conn */
6486 	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6487 		goto drop;
6488 
6489 	if (!conn)
6490 		conn = l2cap_conn_add(hcon, 0);
6491 
6492 	if (!conn)
6493 		goto drop;
6494 
6495 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6496 
6497 	switch (flags) {
6498 	case ACL_START:
6499 	case ACL_START_NO_FLUSH:
6500 	case ACL_COMPLETE:
6501 		if (conn->rx_len) {
6502 			BT_ERR("Unexpected start frame (len %d)", skb->len);
6503 			kfree_skb(conn->rx_skb);
6504 			conn->rx_skb = NULL;
6505 			conn->rx_len = 0;
6506 			l2cap_conn_unreliable(conn, ECOMM);
6507 		}
6508 
6509 		/* A start fragment always begins with the Basic L2CAP header */
6510 		if (skb->len < L2CAP_HDR_SIZE) {
6511 			BT_ERR("Frame is too short (len %d)", skb->len);
6512 			l2cap_conn_unreliable(conn, ECOMM);
6513 			goto drop;
6514 		}
6515 
6516 		hdr = (struct l2cap_hdr *) skb->data;
6517 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
6518 
6519 		if (len == skb->len) {
6520 			/* Complete frame received */
6521 			l2cap_recv_frame(conn, skb);
6522 			return 0;
6523 		}
6524 
6525 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
6526 
6527 		if (skb->len > len) {
6528 			BT_ERR("Frame is too long (len %d, expected len %d)",
6529 			       skb->len, len);
6530 			l2cap_conn_unreliable(conn, ECOMM);
6531 			goto drop;
6532 		}
6533 
6534 		/* Allocate skb for the complete frame (with header) */
6535 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
6536 		if (!conn->rx_skb)
6537 			goto drop;
6538 
6539 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6540 					  skb->len);
6541 		conn->rx_len = len - skb->len;
6542 		break;
6543 
6544 	case ACL_CONT:
6545 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
6546 
6547 		if (!conn->rx_len) {
6548 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
6549 			l2cap_conn_unreliable(conn, ECOMM);
6550 			goto drop;
6551 		}
6552 
6553 		if (skb->len > conn->rx_len) {
6554 			BT_ERR("Fragment is too long (len %d, expected %d)",
6555 			       skb->len, conn->rx_len);
6556 			kfree_skb(conn->rx_skb);
6557 			conn->rx_skb = NULL;
6558 			conn->rx_len = 0;
6559 			l2cap_conn_unreliable(conn, ECOMM);
6560 			goto drop;
6561 		}
6562 
6563 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6564 					  skb->len);
6565 		conn->rx_len -= skb->len;
6566 
6567 		if (!conn->rx_len) {
6568 			/* Complete frame received */
6569 			l2cap_recv_frame(conn, conn->rx_skb);
6570 			conn->rx_skb = NULL;
6571 		}
6572 		break;
6573 	}
6574 
6575 drop:
6576 	kfree_skb(skb);
6577 	return 0;
6578 }
6579 
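/* Dump the global channel list for debugfs: one line per channel with
 * addresses, state, PSM, CIDs, MTUs, security level and mode.
 */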
6580 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6581 {
6582 	struct l2cap_chan *c;
6583 
6584 	read_lock(&chan_list_lock);
6585 
6586 	list_for_each_entry(c, &chan_list, global_l) {
6587 		struct sock *sk = c->sk;
6588 
6589 		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6590 			   &bt_sk(sk)->src, &bt_sk(sk)->dst,
6591 			   c->state, __le16_to_cpu(c->psm),
6592 			   c->scid, c->dcid, c->imtu, c->omtu,
6593 			   c->sec_level, c->mode);
6594 	}
6595 
6596 	read_unlock(&chan_list_lock);
6597 
6598 	return 0;
6599 }
6600 
6601 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
6602 {
6603 	return single_open(file, l2cap_debugfs_show, inode->i_private);
6604 }
6605 
6606 static const struct file_operations l2cap_debugfs_fops = {
6607 	.open		= l2cap_debugfs_open,
6608 	.read		= seq_read,
6609 	.llseek		= seq_lseek,
6610 	.release	= single_release,
6611 };
6612 
6613 static struct dentry *l2cap_debugfs;
6614 
6615 int __init l2cap_init(void)
6616 {
6617 	int err;
6618 
6619 	err = l2cap_init_sockets();
6620 	if (err < 0)
6621 		return err;
6622 
6623 	if (bt_debugfs) {
6624 		l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6625 						    NULL, &l2cap_debugfs_fops);
6626 		if (!l2cap_debugfs)
6627 			BT_ERR("Failed to create L2CAP debug file");
6628 	}
6629 
6630 	return 0;
6631 }
6632 
6633 void l2cap_exit(void)
6634 {
6635 	debugfs_remove(l2cap_debugfs);
6636 	l2cap_cleanup_sockets();
6637 }
6638 
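/* The parameter is writable (mode 0644), so ERTM can be disabled at
 * load time or toggled at run time - typically via
 * /sys/module/bluetooth/parameters/disable_ertm when L2CAP is built
 * into the bluetooth module.
 */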
6639 module_param(disable_ertm, bool, 0644);
6640 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
6641