xref: /openbmc/linux/net/bluetooth/l2cap_core.c (revision 089a49b6)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41 #include <net/bluetooth/amp.h>
42 
43 bool disable_ertm;
44 
45 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
46 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
47 
48 static LIST_HEAD(chan_list);
49 static DEFINE_RWLOCK(chan_list_lock);
50 
51 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
52 				       u8 code, u8 ident, u16 dlen, void *data);
53 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
54 			   void *data);
55 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
56 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
57 
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 		     struct sk_buff_head *skbs, u8 event);
60 
61 /* ---- L2CAP channels ---- */
62 
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
64 						   u16 cid)
65 {
66 	struct l2cap_chan *c;
67 
68 	list_for_each_entry(c, &conn->chan_l, list) {
69 		if (c->dcid == cid)
70 			return c;
71 	}
72 	return NULL;
73 }
74 
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
76 						   u16 cid)
77 {
78 	struct l2cap_chan *c;
79 
80 	list_for_each_entry(c, &conn->chan_l, list) {
81 		if (c->scid == cid)
82 			return c;
83 	}
84 	return NULL;
85 }
86 
87 /* Find channel with given SCID.
88  * Returns locked channel. */
89 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
90 						 u16 cid)
91 {
92 	struct l2cap_chan *c;
93 
94 	mutex_lock(&conn->chan_lock);
95 	c = __l2cap_get_chan_by_scid(conn, cid);
96 	if (c)
97 		l2cap_chan_lock(c);
98 	mutex_unlock(&conn->chan_lock);
99 
100 	return c;
101 }
102 
103 /* Find channel with given DCID.
104  * Returns locked channel.
105  */
106 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
107 						 u16 cid)
108 {
109 	struct l2cap_chan *c;
110 
111 	mutex_lock(&conn->chan_lock);
112 	c = __l2cap_get_chan_by_dcid(conn, cid);
113 	if (c)
114 		l2cap_chan_lock(c);
115 	mutex_unlock(&conn->chan_lock);
116 
117 	return c;
118 }
119 
120 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
121 						    u8 ident)
122 {
123 	struct l2cap_chan *c;
124 
125 	list_for_each_entry(c, &conn->chan_l, list) {
126 		if (c->ident == ident)
127 			return c;
128 	}
129 	return NULL;
130 }
131 
132 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
133 						  u8 ident)
134 {
135 	struct l2cap_chan *c;
136 
137 	mutex_lock(&conn->chan_lock);
138 	c = __l2cap_get_chan_by_ident(conn, ident);
139 	if (c)
140 		l2cap_chan_lock(c);
141 	mutex_unlock(&conn->chan_lock);
142 
143 	return c;
144 }
145 
146 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
147 {
148 	struct l2cap_chan *c;
149 
150 	list_for_each_entry(c, &chan_list, global_l) {
151 		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
152 			return c;
153 	}
154 	return NULL;
155 }
156 
157 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
158 {
159 	int err;
160 
161 	write_lock(&chan_list_lock);
162 
163 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
164 		err = -EADDRINUSE;
165 		goto done;
166 	}
167 
168 	if (psm) {
169 		chan->psm = psm;
170 		chan->sport = psm;
171 		err = 0;
172 	} else {
173 		u16 p;
174 
175 		err = -EINVAL;
176 		for (p = 0x1001; p < 0x1100; p += 2)
177 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
178 				chan->psm   = cpu_to_le16(p);
179 				chan->sport = cpu_to_le16(p);
180 				err = 0;
181 				break;
182 			}
183 	}
184 
185 done:
186 	write_unlock(&chan_list_lock);
187 	return err;
188 }
189 
190 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
191 {
192 	write_lock(&chan_list_lock);
193 
194 	chan->scid = scid;
195 
196 	write_unlock(&chan_list_lock);
197 
198 	return 0;
199 }
200 
201 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
202 {
203 	u16 cid = L2CAP_CID_DYN_START;
204 
205 	for (; cid < L2CAP_CID_DYN_END; cid++) {
206 		if (!__l2cap_get_chan_by_scid(conn, cid))
207 			return cid;
208 	}
209 
210 	return 0;
211 }
212 
213 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
214 {
215 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
216 	       state_to_string(state));
217 
218 	chan->state = state;
219 	chan->ops->state_change(chan, state);
220 }
221 
222 static void l2cap_state_change(struct l2cap_chan *chan, int state)
223 {
224 	struct sock *sk = chan->sk;
225 
226 	lock_sock(sk);
227 	__l2cap_state_change(chan, state);
228 	release_sock(sk);
229 }
230 
231 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
232 {
233 	struct sock *sk = chan->sk;
234 
235 	sk->sk_err = err;
236 }
237 
238 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
239 {
240 	struct sock *sk = chan->sk;
241 
242 	lock_sock(sk);
243 	__l2cap_chan_set_err(chan, err);
244 	release_sock(sk);
245 }
246 
247 static void __set_retrans_timer(struct l2cap_chan *chan)
248 {
249 	if (!delayed_work_pending(&chan->monitor_timer) &&
250 	    chan->retrans_timeout) {
251 		l2cap_set_timer(chan, &chan->retrans_timer,
252 				msecs_to_jiffies(chan->retrans_timeout));
253 	}
254 }
255 
256 static void __set_monitor_timer(struct l2cap_chan *chan)
257 {
258 	__clear_retrans_timer(chan);
259 	if (chan->monitor_timeout) {
260 		l2cap_set_timer(chan, &chan->monitor_timer,
261 				msecs_to_jiffies(chan->monitor_timeout));
262 	}
263 }
264 
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
266 					       u16 seq)
267 {
268 	struct sk_buff *skb;
269 
270 	skb_queue_walk(head, skb) {
271 		if (bt_cb(skb)->control.txseq == seq)
272 			return skb;
273 	}
274 
275 	return NULL;
276 }
277 
278 /* ---- L2CAP sequence number lists ---- */
279 
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281  * SREJ requests that are received and for frames that are to be
282  * retransmitted. These seq_list functions implement a singly-linked
283  * list in an array, where membership in the list can also be checked
284  * in constant time. Items can also be added to the tail of the list
285  * and removed from the head in constant time, without further memory
286  * allocs or frees.
287  */
288 
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
290 {
291 	size_t alloc_size, i;
292 
293 	/* Allocated size is a power of 2 to map sequence numbers
294 	 * (which may be up to 14 bits) in to a smaller array that is
295 	 * sized for the negotiated ERTM transmit windows.
296 	 */
297 	alloc_size = roundup_pow_of_two(size);
298 
299 	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
300 	if (!seq_list->list)
301 		return -ENOMEM;
302 
303 	seq_list->mask = alloc_size - 1;
304 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 	for (i = 0; i < alloc_size; i++)
307 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
308 
309 	return 0;
310 }
311 
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
313 {
314 	kfree(seq_list->list);
315 }
316 
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
318 					   u16 seq)
319 {
320 	/* Constant-time check for list membership */
321 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
322 }
323 
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
325 {
326 	u16 mask = seq_list->mask;
327 
328 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 		/* In case someone tries to pop the head of an empty list */
330 		return L2CAP_SEQ_LIST_CLEAR;
331 	} else if (seq_list->head == seq) {
332 		/* Head can be removed in constant time */
333 		seq_list->head = seq_list->list[seq & mask];
334 		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
335 
336 		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
339 		}
340 	} else {
341 		/* Walk the list to find the sequence number */
342 		u16 prev = seq_list->head;
343 		while (seq_list->list[prev & mask] != seq) {
344 			prev = seq_list->list[prev & mask];
345 			if (prev == L2CAP_SEQ_LIST_TAIL)
346 				return L2CAP_SEQ_LIST_CLEAR;
347 		}
348 
349 		/* Unlink the number from the list and clear it */
350 		seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 		if (seq_list->tail == seq)
353 			seq_list->tail = prev;
354 	}
355 	return seq;
356 }
357 
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
359 {
360 	/* Remove the head in constant time */
361 	return l2cap_seq_list_remove(seq_list, seq_list->head);
362 }
363 
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
365 {
366 	u16 i;
367 
368 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
369 		return;
370 
371 	for (i = 0; i <= seq_list->mask; i++)
372 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
373 
374 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
376 }
377 
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
379 {
380 	u16 mask = seq_list->mask;
381 
382 	/* All appends happen in constant time */
383 
384 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
385 		return;
386 
387 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 		seq_list->head = seq;
389 	else
390 		seq_list->list[seq_list->tail & mask] = seq;
391 
392 	seq_list->tail = seq;
393 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
394 }
395 
396 static void l2cap_chan_timeout(struct work_struct *work)
397 {
398 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
399 					       chan_timer.work);
400 	struct l2cap_conn *conn = chan->conn;
401 	int reason;
402 
403 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
404 
405 	mutex_lock(&conn->chan_lock);
406 	l2cap_chan_lock(chan);
407 
408 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
409 		reason = ECONNREFUSED;
410 	else if (chan->state == BT_CONNECT &&
411 		 chan->sec_level != BT_SECURITY_SDP)
412 		reason = ECONNREFUSED;
413 	else
414 		reason = ETIMEDOUT;
415 
416 	l2cap_chan_close(chan, reason);
417 
418 	l2cap_chan_unlock(chan);
419 
420 	chan->ops->close(chan);
421 	mutex_unlock(&conn->chan_lock);
422 
423 	l2cap_chan_put(chan);
424 }
425 
426 struct l2cap_chan *l2cap_chan_create(void)
427 {
428 	struct l2cap_chan *chan;
429 
430 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
431 	if (!chan)
432 		return NULL;
433 
434 	mutex_init(&chan->lock);
435 
436 	write_lock(&chan_list_lock);
437 	list_add(&chan->global_l, &chan_list);
438 	write_unlock(&chan_list_lock);
439 
440 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
441 
442 	chan->state = BT_OPEN;
443 
444 	kref_init(&chan->kref);
445 
446 	/* This flag is cleared in l2cap_chan_ready() */
447 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
448 
449 	BT_DBG("chan %p", chan);
450 
451 	return chan;
452 }
453 
454 static void l2cap_chan_destroy(struct kref *kref)
455 {
456 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
457 
458 	BT_DBG("chan %p", chan);
459 
460 	write_lock(&chan_list_lock);
461 	list_del(&chan->global_l);
462 	write_unlock(&chan_list_lock);
463 
464 	kfree(chan);
465 }
466 
467 void l2cap_chan_hold(struct l2cap_chan *c)
468 {
469 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
470 
471 	kref_get(&c->kref);
472 }
473 
474 void l2cap_chan_put(struct l2cap_chan *c)
475 {
476 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
477 
478 	kref_put(&c->kref, l2cap_chan_destroy);
479 }
480 
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
482 {
483 	chan->fcs  = L2CAP_FCS_CRC16;
484 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 	chan->sec_level = BT_SECURITY_LOW;
489 
490 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
491 }
492 
493 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
494 {
495 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
496 	       __le16_to_cpu(chan->psm), chan->dcid);
497 
498 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
499 
500 	chan->conn = conn;
501 
502 	switch (chan->chan_type) {
503 	case L2CAP_CHAN_CONN_ORIENTED:
504 		if (conn->hcon->type == LE_LINK) {
505 			/* LE connection */
506 			chan->omtu = L2CAP_DEFAULT_MTU;
507 			if (chan->dcid == L2CAP_CID_ATT)
508 				chan->scid = L2CAP_CID_ATT;
509 			else
510 				chan->scid = l2cap_alloc_cid(conn);
511 		} else {
512 			/* Alloc CID for connection-oriented socket */
513 			chan->scid = l2cap_alloc_cid(conn);
514 			chan->omtu = L2CAP_DEFAULT_MTU;
515 		}
516 		break;
517 
518 	case L2CAP_CHAN_CONN_LESS:
519 		/* Connectionless socket */
520 		chan->scid = L2CAP_CID_CONN_LESS;
521 		chan->dcid = L2CAP_CID_CONN_LESS;
522 		chan->omtu = L2CAP_DEFAULT_MTU;
523 		break;
524 
525 	case L2CAP_CHAN_CONN_FIX_A2MP:
526 		chan->scid = L2CAP_CID_A2MP;
527 		chan->dcid = L2CAP_CID_A2MP;
528 		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
529 		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
530 		break;
531 
532 	default:
533 		/* Raw socket can send/recv signalling messages only */
534 		chan->scid = L2CAP_CID_SIGNALING;
535 		chan->dcid = L2CAP_CID_SIGNALING;
536 		chan->omtu = L2CAP_DEFAULT_MTU;
537 	}
538 
539 	chan->local_id		= L2CAP_BESTEFFORT_ID;
540 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
541 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
542 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
543 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
544 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
545 
546 	l2cap_chan_hold(chan);
547 
548 	hci_conn_hold(conn->hcon);
549 
550 	list_add(&chan->list, &conn->chan_l);
551 }
552 
553 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
554 {
555 	mutex_lock(&conn->chan_lock);
556 	__l2cap_chan_add(conn, chan);
557 	mutex_unlock(&conn->chan_lock);
558 }
559 
560 void l2cap_chan_del(struct l2cap_chan *chan, int err)
561 {
562 	struct l2cap_conn *conn = chan->conn;
563 
564 	__clear_chan_timer(chan);
565 
566 	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
567 
568 	if (conn) {
569 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
570 		/* Delete from channel list */
571 		list_del(&chan->list);
572 
573 		l2cap_chan_put(chan);
574 
575 		chan->conn = NULL;
576 
577 		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
578 			hci_conn_drop(conn->hcon);
579 
580 		if (mgr && mgr->bredr_chan == chan)
581 			mgr->bredr_chan = NULL;
582 	}
583 
584 	if (chan->hs_hchan) {
585 		struct hci_chan *hs_hchan = chan->hs_hchan;
586 
587 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
588 		amp_disconnect_logical_link(hs_hchan);
589 	}
590 
591 	chan->ops->teardown(chan, err);
592 
593 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
594 		return;
595 
596 	switch(chan->mode) {
597 	case L2CAP_MODE_BASIC:
598 		break;
599 
600 	case L2CAP_MODE_ERTM:
601 		__clear_retrans_timer(chan);
602 		__clear_monitor_timer(chan);
603 		__clear_ack_timer(chan);
604 
605 		skb_queue_purge(&chan->srej_q);
606 
607 		l2cap_seq_list_free(&chan->srej_list);
608 		l2cap_seq_list_free(&chan->retrans_list);
609 
610 		/* fall through */
611 
612 	case L2CAP_MODE_STREAMING:
613 		skb_queue_purge(&chan->tx_q);
614 		break;
615 	}
616 
617 	return;
618 }
619 
620 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
621 {
622 	struct l2cap_conn *conn = chan->conn;
623 	struct sock *sk = chan->sk;
624 
625 	BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
626 	       sk);
627 
628 	switch (chan->state) {
629 	case BT_LISTEN:
630 		chan->ops->teardown(chan, 0);
631 		break;
632 
633 	case BT_CONNECTED:
634 	case BT_CONFIG:
635 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
636 		    conn->hcon->type == ACL_LINK) {
637 			__set_chan_timer(chan, sk->sk_sndtimeo);
638 			l2cap_send_disconn_req(chan, reason);
639 		} else
640 			l2cap_chan_del(chan, reason);
641 		break;
642 
643 	case BT_CONNECT2:
644 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
645 		    conn->hcon->type == ACL_LINK) {
646 			struct l2cap_conn_rsp rsp;
647 			__u16 result;
648 
649 			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
650 				result = L2CAP_CR_SEC_BLOCK;
651 			else
652 				result = L2CAP_CR_BAD_PSM;
653 			l2cap_state_change(chan, BT_DISCONN);
654 
655 			rsp.scid   = cpu_to_le16(chan->dcid);
656 			rsp.dcid   = cpu_to_le16(chan->scid);
657 			rsp.result = cpu_to_le16(result);
658 			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
659 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
660 				       sizeof(rsp), &rsp);
661 		}
662 
663 		l2cap_chan_del(chan, reason);
664 		break;
665 
666 	case BT_CONNECT:
667 	case BT_DISCONN:
668 		l2cap_chan_del(chan, reason);
669 		break;
670 
671 	default:
672 		chan->ops->teardown(chan, 0);
673 		break;
674 	}
675 }
676 
677 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
678 {
679 	if (chan->chan_type == L2CAP_CHAN_RAW) {
680 		switch (chan->sec_level) {
681 		case BT_SECURITY_HIGH:
682 			return HCI_AT_DEDICATED_BONDING_MITM;
683 		case BT_SECURITY_MEDIUM:
684 			return HCI_AT_DEDICATED_BONDING;
685 		default:
686 			return HCI_AT_NO_BONDING;
687 		}
688 	} else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
689 		if (chan->sec_level == BT_SECURITY_LOW)
690 			chan->sec_level = BT_SECURITY_SDP;
691 
692 		if (chan->sec_level == BT_SECURITY_HIGH)
693 			return HCI_AT_NO_BONDING_MITM;
694 		else
695 			return HCI_AT_NO_BONDING;
696 	} else {
697 		switch (chan->sec_level) {
698 		case BT_SECURITY_HIGH:
699 			return HCI_AT_GENERAL_BONDING_MITM;
700 		case BT_SECURITY_MEDIUM:
701 			return HCI_AT_GENERAL_BONDING;
702 		default:
703 			return HCI_AT_NO_BONDING;
704 		}
705 	}
706 }
707 
708 /* Service level security */
709 int l2cap_chan_check_security(struct l2cap_chan *chan)
710 {
711 	struct l2cap_conn *conn = chan->conn;
712 	__u8 auth_type;
713 
714 	auth_type = l2cap_get_auth_type(chan);
715 
716 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
717 }
718 
719 static u8 l2cap_get_ident(struct l2cap_conn *conn)
720 {
721 	u8 id;
722 
723 	/* Get next available identificator.
724 	 *    1 - 128 are used by kernel.
725 	 *  129 - 199 are reserved.
726 	 *  200 - 254 are used by utilities like l2ping, etc.
727 	 */
728 
729 	spin_lock(&conn->lock);
730 
731 	if (++conn->tx_ident > 128)
732 		conn->tx_ident = 1;
733 
734 	id = conn->tx_ident;
735 
736 	spin_unlock(&conn->lock);
737 
738 	return id;
739 }
740 
741 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
742 			   void *data)
743 {
744 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
745 	u8 flags;
746 
747 	BT_DBG("code 0x%2.2x", code);
748 
749 	if (!skb)
750 		return;
751 
752 	if (lmp_no_flush_capable(conn->hcon->hdev))
753 		flags = ACL_START_NO_FLUSH;
754 	else
755 		flags = ACL_START;
756 
757 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
758 	skb->priority = HCI_PRIO_MAX;
759 
760 	hci_send_acl(conn->hchan, skb, flags);
761 }
762 
763 static bool __chan_is_moving(struct l2cap_chan *chan)
764 {
765 	return chan->move_state != L2CAP_MOVE_STABLE &&
766 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
767 }
768 
769 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
770 {
771 	struct hci_conn *hcon = chan->conn->hcon;
772 	u16 flags;
773 
774 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
775 	       skb->priority);
776 
777 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
778 		if (chan->hs_hchan)
779 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
780 		else
781 			kfree_skb(skb);
782 
783 		return;
784 	}
785 
786 	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
787 	    lmp_no_flush_capable(hcon->hdev))
788 		flags = ACL_START_NO_FLUSH;
789 	else
790 		flags = ACL_START;
791 
792 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
793 	hci_send_acl(chan->conn->hchan, skb, flags);
794 }
795 
796 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
797 {
798 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
799 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
800 
801 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
802 		/* S-Frame */
803 		control->sframe = 1;
804 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
805 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
806 
807 		control->sar = 0;
808 		control->txseq = 0;
809 	} else {
810 		/* I-Frame */
811 		control->sframe = 0;
812 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
813 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
814 
815 		control->poll = 0;
816 		control->super = 0;
817 	}
818 }
819 
820 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
821 {
822 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
823 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
824 
825 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
826 		/* S-Frame */
827 		control->sframe = 1;
828 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
829 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
830 
831 		control->sar = 0;
832 		control->txseq = 0;
833 	} else {
834 		/* I-Frame */
835 		control->sframe = 0;
836 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
837 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
838 
839 		control->poll = 0;
840 		control->super = 0;
841 	}
842 }
843 
844 static inline void __unpack_control(struct l2cap_chan *chan,
845 				    struct sk_buff *skb)
846 {
847 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
848 		__unpack_extended_control(get_unaligned_le32(skb->data),
849 					  &bt_cb(skb)->control);
850 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
851 	} else {
852 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
853 					  &bt_cb(skb)->control);
854 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
855 	}
856 }
857 
858 static u32 __pack_extended_control(struct l2cap_ctrl *control)
859 {
860 	u32 packed;
861 
862 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
863 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
864 
865 	if (control->sframe) {
866 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
867 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
868 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
869 	} else {
870 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
871 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
872 	}
873 
874 	return packed;
875 }
876 
877 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
878 {
879 	u16 packed;
880 
881 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
882 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
883 
884 	if (control->sframe) {
885 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
886 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
887 		packed |= L2CAP_CTRL_FRAME_TYPE;
888 	} else {
889 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
890 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
891 	}
892 
893 	return packed;
894 }
895 
896 static inline void __pack_control(struct l2cap_chan *chan,
897 				  struct l2cap_ctrl *control,
898 				  struct sk_buff *skb)
899 {
900 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
901 		put_unaligned_le32(__pack_extended_control(control),
902 				   skb->data + L2CAP_HDR_SIZE);
903 	} else {
904 		put_unaligned_le16(__pack_enhanced_control(control),
905 				   skb->data + L2CAP_HDR_SIZE);
906 	}
907 }
908 
909 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
910 {
911 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
912 		return L2CAP_EXT_HDR_SIZE;
913 	else
914 		return L2CAP_ENH_HDR_SIZE;
915 }
916 
917 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
918 					       u32 control)
919 {
920 	struct sk_buff *skb;
921 	struct l2cap_hdr *lh;
922 	int hlen = __ertm_hdr_size(chan);
923 
924 	if (chan->fcs == L2CAP_FCS_CRC16)
925 		hlen += L2CAP_FCS_SIZE;
926 
927 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
928 
929 	if (!skb)
930 		return ERR_PTR(-ENOMEM);
931 
932 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
933 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
934 	lh->cid = cpu_to_le16(chan->dcid);
935 
936 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
937 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
938 	else
939 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
940 
941 	if (chan->fcs == L2CAP_FCS_CRC16) {
942 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
943 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
944 	}
945 
946 	skb->priority = HCI_PRIO_MAX;
947 	return skb;
948 }
949 
950 static void l2cap_send_sframe(struct l2cap_chan *chan,
951 			      struct l2cap_ctrl *control)
952 {
953 	struct sk_buff *skb;
954 	u32 control_field;
955 
956 	BT_DBG("chan %p, control %p", chan, control);
957 
958 	if (!control->sframe)
959 		return;
960 
961 	if (__chan_is_moving(chan))
962 		return;
963 
964 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
965 	    !control->poll)
966 		control->final = 1;
967 
968 	if (control->super == L2CAP_SUPER_RR)
969 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
970 	else if (control->super == L2CAP_SUPER_RNR)
971 		set_bit(CONN_RNR_SENT, &chan->conn_state);
972 
973 	if (control->super != L2CAP_SUPER_SREJ) {
974 		chan->last_acked_seq = control->reqseq;
975 		__clear_ack_timer(chan);
976 	}
977 
978 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
979 	       control->final, control->poll, control->super);
980 
981 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
982 		control_field = __pack_extended_control(control);
983 	else
984 		control_field = __pack_enhanced_control(control);
985 
986 	skb = l2cap_create_sframe_pdu(chan, control_field);
987 	if (!IS_ERR(skb))
988 		l2cap_do_send(chan, skb);
989 }
990 
991 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
992 {
993 	struct l2cap_ctrl control;
994 
995 	BT_DBG("chan %p, poll %d", chan, poll);
996 
997 	memset(&control, 0, sizeof(control));
998 	control.sframe = 1;
999 	control.poll = poll;
1000 
1001 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1002 		control.super = L2CAP_SUPER_RNR;
1003 	else
1004 		control.super = L2CAP_SUPER_RR;
1005 
1006 	control.reqseq = chan->buffer_seq;
1007 	l2cap_send_sframe(chan, &control);
1008 }
1009 
1010 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1011 {
1012 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1013 }
1014 
1015 static bool __amp_capable(struct l2cap_chan *chan)
1016 {
1017 	struct l2cap_conn *conn = chan->conn;
1018 
1019 	if (enable_hs &&
1020 	    hci_amp_capable() &&
1021 	    chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
1022 	    conn->fixed_chan_mask & L2CAP_FC_A2MP)
1023 		return true;
1024 	else
1025 		return false;
1026 }
1027 
1028 static bool l2cap_check_efs(struct l2cap_chan *chan)
1029 {
1030 	/* Check EFS parameters */
1031 	return true;
1032 }
1033 
1034 void l2cap_send_conn_req(struct l2cap_chan *chan)
1035 {
1036 	struct l2cap_conn *conn = chan->conn;
1037 	struct l2cap_conn_req req;
1038 
1039 	req.scid = cpu_to_le16(chan->scid);
1040 	req.psm  = chan->psm;
1041 
1042 	chan->ident = l2cap_get_ident(conn);
1043 
1044 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1045 
1046 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1047 }
1048 
1049 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1050 {
1051 	struct l2cap_create_chan_req req;
1052 	req.scid = cpu_to_le16(chan->scid);
1053 	req.psm  = chan->psm;
1054 	req.amp_id = amp_id;
1055 
1056 	chan->ident = l2cap_get_ident(chan->conn);
1057 
1058 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1059 		       sizeof(req), &req);
1060 }
1061 
1062 static void l2cap_move_setup(struct l2cap_chan *chan)
1063 {
1064 	struct sk_buff *skb;
1065 
1066 	BT_DBG("chan %p", chan);
1067 
1068 	if (chan->mode != L2CAP_MODE_ERTM)
1069 		return;
1070 
1071 	__clear_retrans_timer(chan);
1072 	__clear_monitor_timer(chan);
1073 	__clear_ack_timer(chan);
1074 
1075 	chan->retry_count = 0;
1076 	skb_queue_walk(&chan->tx_q, skb) {
1077 		if (bt_cb(skb)->control.retries)
1078 			bt_cb(skb)->control.retries = 1;
1079 		else
1080 			break;
1081 	}
1082 
1083 	chan->expected_tx_seq = chan->buffer_seq;
1084 
1085 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1086 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1087 	l2cap_seq_list_clear(&chan->retrans_list);
1088 	l2cap_seq_list_clear(&chan->srej_list);
1089 	skb_queue_purge(&chan->srej_q);
1090 
1091 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1092 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1093 
1094 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1095 }
1096 
1097 static void l2cap_move_done(struct l2cap_chan *chan)
1098 {
1099 	u8 move_role = chan->move_role;
1100 	BT_DBG("chan %p", chan);
1101 
1102 	chan->move_state = L2CAP_MOVE_STABLE;
1103 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1104 
1105 	if (chan->mode != L2CAP_MODE_ERTM)
1106 		return;
1107 
1108 	switch (move_role) {
1109 	case L2CAP_MOVE_ROLE_INITIATOR:
1110 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1111 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1112 		break;
1113 	case L2CAP_MOVE_ROLE_RESPONDER:
1114 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1115 		break;
1116 	}
1117 }
1118 
1119 static void l2cap_chan_ready(struct l2cap_chan *chan)
1120 {
1121 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1122 	chan->conf_state = 0;
1123 	__clear_chan_timer(chan);
1124 
1125 	chan->state = BT_CONNECTED;
1126 
1127 	chan->ops->ready(chan);
1128 }
1129 
1130 static void l2cap_start_connection(struct l2cap_chan *chan)
1131 {
1132 	if (__amp_capable(chan)) {
1133 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1134 		a2mp_discover_amp(chan);
1135 	} else {
1136 		l2cap_send_conn_req(chan);
1137 	}
1138 }
1139 
1140 static void l2cap_do_start(struct l2cap_chan *chan)
1141 {
1142 	struct l2cap_conn *conn = chan->conn;
1143 
1144 	if (conn->hcon->type == LE_LINK) {
1145 		l2cap_chan_ready(chan);
1146 		return;
1147 	}
1148 
1149 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1150 		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1151 			return;
1152 
1153 		if (l2cap_chan_check_security(chan) &&
1154 		    __l2cap_no_conn_pending(chan)) {
1155 			l2cap_start_connection(chan);
1156 		}
1157 	} else {
1158 		struct l2cap_info_req req;
1159 		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1160 
1161 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1162 		conn->info_ident = l2cap_get_ident(conn);
1163 
1164 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1165 
1166 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1167 			       sizeof(req), &req);
1168 	}
1169 }
1170 
1171 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1172 {
1173 	u32 local_feat_mask = l2cap_feat_mask;
1174 	if (!disable_ertm)
1175 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1176 
1177 	switch (mode) {
1178 	case L2CAP_MODE_ERTM:
1179 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1180 	case L2CAP_MODE_STREAMING:
1181 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1182 	default:
1183 		return 0x00;
1184 	}
1185 }
1186 
1187 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1188 {
1189 	struct sock *sk = chan->sk;
1190 	struct l2cap_conn *conn = chan->conn;
1191 	struct l2cap_disconn_req req;
1192 
1193 	if (!conn)
1194 		return;
1195 
1196 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1197 		__clear_retrans_timer(chan);
1198 		__clear_monitor_timer(chan);
1199 		__clear_ack_timer(chan);
1200 	}
1201 
1202 	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1203 		l2cap_state_change(chan, BT_DISCONN);
1204 		return;
1205 	}
1206 
1207 	req.dcid = cpu_to_le16(chan->dcid);
1208 	req.scid = cpu_to_le16(chan->scid);
1209 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1210 		       sizeof(req), &req);
1211 
1212 	lock_sock(sk);
1213 	__l2cap_state_change(chan, BT_DISCONN);
1214 	__l2cap_chan_set_err(chan, err);
1215 	release_sock(sk);
1216 }
1217 
1218 /* ---- L2CAP connections ---- */
1219 static void l2cap_conn_start(struct l2cap_conn *conn)
1220 {
1221 	struct l2cap_chan *chan, *tmp;
1222 
1223 	BT_DBG("conn %p", conn);
1224 
1225 	mutex_lock(&conn->chan_lock);
1226 
1227 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1228 		struct sock *sk = chan->sk;
1229 
1230 		l2cap_chan_lock(chan);
1231 
1232 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1233 			l2cap_chan_unlock(chan);
1234 			continue;
1235 		}
1236 
1237 		if (chan->state == BT_CONNECT) {
1238 			if (!l2cap_chan_check_security(chan) ||
1239 			    !__l2cap_no_conn_pending(chan)) {
1240 				l2cap_chan_unlock(chan);
1241 				continue;
1242 			}
1243 
1244 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1245 			    && test_bit(CONF_STATE2_DEVICE,
1246 					&chan->conf_state)) {
1247 				l2cap_chan_close(chan, ECONNRESET);
1248 				l2cap_chan_unlock(chan);
1249 				continue;
1250 			}
1251 
1252 			l2cap_start_connection(chan);
1253 
1254 		} else if (chan->state == BT_CONNECT2) {
1255 			struct l2cap_conn_rsp rsp;
1256 			char buf[128];
1257 			rsp.scid = cpu_to_le16(chan->dcid);
1258 			rsp.dcid = cpu_to_le16(chan->scid);
1259 
1260 			if (l2cap_chan_check_security(chan)) {
1261 				lock_sock(sk);
1262 				if (test_bit(BT_SK_DEFER_SETUP,
1263 					     &bt_sk(sk)->flags)) {
1264 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1265 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1266 					chan->ops->defer(chan);
1267 
1268 				} else {
1269 					__l2cap_state_change(chan, BT_CONFIG);
1270 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1271 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1272 				}
1273 				release_sock(sk);
1274 			} else {
1275 				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1276 				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1277 			}
1278 
1279 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1280 				       sizeof(rsp), &rsp);
1281 
1282 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1283 			    rsp.result != L2CAP_CR_SUCCESS) {
1284 				l2cap_chan_unlock(chan);
1285 				continue;
1286 			}
1287 
1288 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1289 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1290 				       l2cap_build_conf_req(chan, buf), buf);
1291 			chan->num_conf_req++;
1292 		}
1293 
1294 		l2cap_chan_unlock(chan);
1295 	}
1296 
1297 	mutex_unlock(&conn->chan_lock);
1298 }
1299 
1300 /* Find socket with cid and source/destination bdaddr.
1301  * Returns closest match, locked.
1302  */
1303 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1304 						    bdaddr_t *src,
1305 						    bdaddr_t *dst)
1306 {
1307 	struct l2cap_chan *c, *c1 = NULL;
1308 
1309 	read_lock(&chan_list_lock);
1310 
1311 	list_for_each_entry(c, &chan_list, global_l) {
1312 		struct sock *sk = c->sk;
1313 
1314 		if (state && c->state != state)
1315 			continue;
1316 
1317 		if (c->scid == cid) {
1318 			int src_match, dst_match;
1319 			int src_any, dst_any;
1320 
1321 			/* Exact match. */
1322 			src_match = !bacmp(&bt_sk(sk)->src, src);
1323 			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1324 			if (src_match && dst_match) {
1325 				read_unlock(&chan_list_lock);
1326 				return c;
1327 			}
1328 
1329 			/* Closest match */
1330 			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1331 			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1332 			if ((src_match && dst_any) || (src_any && dst_match) ||
1333 			    (src_any && dst_any))
1334 				c1 = c;
1335 		}
1336 	}
1337 
1338 	read_unlock(&chan_list_lock);
1339 
1340 	return c1;
1341 }
1342 
1343 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1344 {
1345 	struct sock *parent;
1346 	struct l2cap_chan *chan, *pchan;
1347 
1348 	BT_DBG("");
1349 
1350 	/* Check if we have socket listening on cid */
1351 	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
1352 					  conn->src, conn->dst);
1353 	if (!pchan)
1354 		return;
1355 
1356 	/* Client ATT sockets should override the server one */
1357 	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
1358 		return;
1359 
1360 	parent = pchan->sk;
1361 
1362 	lock_sock(parent);
1363 
1364 	chan = pchan->ops->new_connection(pchan);
1365 	if (!chan)
1366 		goto clean;
1367 
1368 	chan->dcid = L2CAP_CID_ATT;
1369 
1370 	bacpy(&bt_sk(chan->sk)->src, conn->src);
1371 	bacpy(&bt_sk(chan->sk)->dst, conn->dst);
1372 
1373 	__l2cap_chan_add(conn, chan);
1374 
1375 clean:
1376 	release_sock(parent);
1377 }
1378 
1379 static void l2cap_conn_ready(struct l2cap_conn *conn)
1380 {
1381 	struct l2cap_chan *chan;
1382 	struct hci_conn *hcon = conn->hcon;
1383 
1384 	BT_DBG("conn %p", conn);
1385 
1386 	/* For outgoing pairing which doesn't necessarily have an
1387 	 * associated socket (e.g. mgmt_pair_device).
1388 	 */
1389 	if (hcon->out && hcon->type == LE_LINK)
1390 		smp_conn_security(hcon, hcon->pending_sec_level);
1391 
1392 	mutex_lock(&conn->chan_lock);
1393 
1394 	if (hcon->type == LE_LINK)
1395 		l2cap_le_conn_ready(conn);
1396 
1397 	list_for_each_entry(chan, &conn->chan_l, list) {
1398 
1399 		l2cap_chan_lock(chan);
1400 
1401 		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1402 			l2cap_chan_unlock(chan);
1403 			continue;
1404 		}
1405 
1406 		if (hcon->type == LE_LINK) {
1407 			if (smp_conn_security(hcon, chan->sec_level))
1408 				l2cap_chan_ready(chan);
1409 
1410 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1411 			struct sock *sk = chan->sk;
1412 			__clear_chan_timer(chan);
1413 			lock_sock(sk);
1414 			__l2cap_state_change(chan, BT_CONNECTED);
1415 			sk->sk_state_change(sk);
1416 			release_sock(sk);
1417 
1418 		} else if (chan->state == BT_CONNECT) {
1419 			l2cap_do_start(chan);
1420 		}
1421 
1422 		l2cap_chan_unlock(chan);
1423 	}
1424 
1425 	mutex_unlock(&conn->chan_lock);
1426 }
1427 
1428 /* Notify sockets that we cannot guaranty reliability anymore */
1429 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1430 {
1431 	struct l2cap_chan *chan;
1432 
1433 	BT_DBG("conn %p", conn);
1434 
1435 	mutex_lock(&conn->chan_lock);
1436 
1437 	list_for_each_entry(chan, &conn->chan_l, list) {
1438 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1439 			l2cap_chan_set_err(chan, err);
1440 	}
1441 
1442 	mutex_unlock(&conn->chan_lock);
1443 }
1444 
1445 static void l2cap_info_timeout(struct work_struct *work)
1446 {
1447 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1448 					       info_timer.work);
1449 
1450 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1451 	conn->info_ident = 0;
1452 
1453 	l2cap_conn_start(conn);
1454 }
1455 
1456 /*
1457  * l2cap_user
1458  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1459  * callback is called during registration. The ->remove callback is called
1460  * during unregistration.
1461  * An l2cap_user object can either be explicitly unregistered or when the
1462  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1463  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1464  * External modules must own a reference to the l2cap_conn object if they intend
1465  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1466  * any time if they don't.
1467  */
1468 
1469 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1470 {
1471 	struct hci_dev *hdev = conn->hcon->hdev;
1472 	int ret;
1473 
1474 	/* We need to check whether l2cap_conn is registered. If it is not, we
1475 	 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1476 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1477 	 * relies on the parent hci_conn object to be locked. This itself relies
1478 	 * on the hci_dev object to be locked. So we must lock the hci device
1479 	 * here, too. */
1480 
1481 	hci_dev_lock(hdev);
1482 
1483 	if (user->list.next || user->list.prev) {
1484 		ret = -EINVAL;
1485 		goto out_unlock;
1486 	}
1487 
1488 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1489 	if (!conn->hchan) {
1490 		ret = -ENODEV;
1491 		goto out_unlock;
1492 	}
1493 
1494 	ret = user->probe(conn, user);
1495 	if (ret)
1496 		goto out_unlock;
1497 
1498 	list_add(&user->list, &conn->users);
1499 	ret = 0;
1500 
1501 out_unlock:
1502 	hci_dev_unlock(hdev);
1503 	return ret;
1504 }
1505 EXPORT_SYMBOL(l2cap_register_user);
1506 
1507 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1508 {
1509 	struct hci_dev *hdev = conn->hcon->hdev;
1510 
1511 	hci_dev_lock(hdev);
1512 
1513 	if (!user->list.next || !user->list.prev)
1514 		goto out_unlock;
1515 
1516 	list_del(&user->list);
1517 	user->list.next = NULL;
1518 	user->list.prev = NULL;
1519 	user->remove(conn, user);
1520 
1521 out_unlock:
1522 	hci_dev_unlock(hdev);
1523 }
1524 EXPORT_SYMBOL(l2cap_unregister_user);
1525 
1526 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1527 {
1528 	struct l2cap_user *user;
1529 
1530 	while (!list_empty(&conn->users)) {
1531 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1532 		list_del(&user->list);
1533 		user->list.next = NULL;
1534 		user->list.prev = NULL;
1535 		user->remove(conn, user);
1536 	}
1537 }
1538 
1539 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1540 {
1541 	struct l2cap_conn *conn = hcon->l2cap_data;
1542 	struct l2cap_chan *chan, *l;
1543 
1544 	if (!conn)
1545 		return;
1546 
1547 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1548 
1549 	kfree_skb(conn->rx_skb);
1550 
1551 	l2cap_unregister_all_users(conn);
1552 
1553 	mutex_lock(&conn->chan_lock);
1554 
1555 	/* Kill channels */
1556 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1557 		l2cap_chan_hold(chan);
1558 		l2cap_chan_lock(chan);
1559 
1560 		l2cap_chan_del(chan, err);
1561 
1562 		l2cap_chan_unlock(chan);
1563 
1564 		chan->ops->close(chan);
1565 		l2cap_chan_put(chan);
1566 	}
1567 
1568 	mutex_unlock(&conn->chan_lock);
1569 
1570 	hci_chan_del(conn->hchan);
1571 
1572 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1573 		cancel_delayed_work_sync(&conn->info_timer);
1574 
1575 	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1576 		cancel_delayed_work_sync(&conn->security_timer);
1577 		smp_chan_destroy(conn);
1578 	}
1579 
1580 	hcon->l2cap_data = NULL;
1581 	conn->hchan = NULL;
1582 	l2cap_conn_put(conn);
1583 }
1584 
1585 static void security_timeout(struct work_struct *work)
1586 {
1587 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1588 					       security_timer.work);
1589 
1590 	BT_DBG("conn %p", conn);
1591 
1592 	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1593 		smp_chan_destroy(conn);
1594 		l2cap_conn_del(conn->hcon, ETIMEDOUT);
1595 	}
1596 }
1597 
1598 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1599 {
1600 	struct l2cap_conn *conn = hcon->l2cap_data;
1601 	struct hci_chan *hchan;
1602 
1603 	if (conn)
1604 		return conn;
1605 
1606 	hchan = hci_chan_create(hcon);
1607 	if (!hchan)
1608 		return NULL;
1609 
1610 	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
1611 	if (!conn) {
1612 		hci_chan_del(hchan);
1613 		return NULL;
1614 	}
1615 
1616 	kref_init(&conn->ref);
1617 	hcon->l2cap_data = conn;
1618 	conn->hcon = hcon;
1619 	hci_conn_get(conn->hcon);
1620 	conn->hchan = hchan;
1621 
1622 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1623 
1624 	switch (hcon->type) {
1625 	case LE_LINK:
1626 		if (hcon->hdev->le_mtu) {
1627 			conn->mtu = hcon->hdev->le_mtu;
1628 			break;
1629 		}
1630 		/* fall through */
1631 	default:
1632 		conn->mtu = hcon->hdev->acl_mtu;
1633 		break;
1634 	}
1635 
1636 	conn->src = &hcon->hdev->bdaddr;
1637 	conn->dst = &hcon->dst;
1638 
1639 	conn->feat_mask = 0;
1640 
1641 	spin_lock_init(&conn->lock);
1642 	mutex_init(&conn->chan_lock);
1643 
1644 	INIT_LIST_HEAD(&conn->chan_l);
1645 	INIT_LIST_HEAD(&conn->users);
1646 
1647 	if (hcon->type == LE_LINK)
1648 		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1649 	else
1650 		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1651 
1652 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1653 
1654 	return conn;
1655 }
1656 
1657 static void l2cap_conn_free(struct kref *ref)
1658 {
1659 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1660 
1661 	hci_conn_put(conn->hcon);
1662 	kfree(conn);
1663 }
1664 
1665 void l2cap_conn_get(struct l2cap_conn *conn)
1666 {
1667 	kref_get(&conn->ref);
1668 }
1669 EXPORT_SYMBOL(l2cap_conn_get);
1670 
1671 void l2cap_conn_put(struct l2cap_conn *conn)
1672 {
1673 	kref_put(&conn->ref, l2cap_conn_free);
1674 }
1675 EXPORT_SYMBOL(l2cap_conn_put);
1676 
1677 /* ---- Socket interface ---- */
1678 
1679 /* Find socket with psm and source / destination bdaddr.
1680  * Returns closest match.
1681  */
1682 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1683 						   bdaddr_t *src,
1684 						   bdaddr_t *dst)
1685 {
1686 	struct l2cap_chan *c, *c1 = NULL;
1687 
1688 	read_lock(&chan_list_lock);
1689 
1690 	list_for_each_entry(c, &chan_list, global_l) {
1691 		struct sock *sk = c->sk;
1692 
1693 		if (state && c->state != state)
1694 			continue;
1695 
1696 		if (c->psm == psm) {
1697 			int src_match, dst_match;
1698 			int src_any, dst_any;
1699 
1700 			/* Exact match. */
1701 			src_match = !bacmp(&bt_sk(sk)->src, src);
1702 			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1703 			if (src_match && dst_match) {
1704 				read_unlock(&chan_list_lock);
1705 				return c;
1706 			}
1707 
1708 			/* Closest match */
1709 			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1710 			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1711 			if ((src_match && dst_any) || (src_any && dst_match) ||
1712 			    (src_any && dst_any))
1713 				c1 = c;
1714 		}
1715 	}
1716 
1717 	read_unlock(&chan_list_lock);
1718 
1719 	return c1;
1720 }
1721 
1722 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1723 		       bdaddr_t *dst, u8 dst_type)
1724 {
1725 	struct sock *sk = chan->sk;
1726 	bdaddr_t *src = &bt_sk(sk)->src;
1727 	struct l2cap_conn *conn;
1728 	struct hci_conn *hcon;
1729 	struct hci_dev *hdev;
1730 	__u8 auth_type;
1731 	int err;
1732 
1733 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
1734 	       dst_type, __le16_to_cpu(psm));
1735 
1736 	hdev = hci_get_route(dst, src);
1737 	if (!hdev)
1738 		return -EHOSTUNREACH;
1739 
1740 	hci_dev_lock(hdev);
1741 
1742 	l2cap_chan_lock(chan);
1743 
1744 	/* PSM must be odd and lsb of upper byte must be 0 */
1745 	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1746 	    chan->chan_type != L2CAP_CHAN_RAW) {
1747 		err = -EINVAL;
1748 		goto done;
1749 	}
1750 
1751 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1752 		err = -EINVAL;
1753 		goto done;
1754 	}
1755 
1756 	switch (chan->mode) {
1757 	case L2CAP_MODE_BASIC:
1758 		break;
1759 	case L2CAP_MODE_ERTM:
1760 	case L2CAP_MODE_STREAMING:
1761 		if (!disable_ertm)
1762 			break;
1763 		/* fall through */
1764 	default:
1765 		err = -ENOTSUPP;
1766 		goto done;
1767 	}
1768 
1769 	switch (chan->state) {
1770 	case BT_CONNECT:
1771 	case BT_CONNECT2:
1772 	case BT_CONFIG:
1773 		/* Already connecting */
1774 		err = 0;
1775 		goto done;
1776 
1777 	case BT_CONNECTED:
1778 		/* Already connected */
1779 		err = -EISCONN;
1780 		goto done;
1781 
1782 	case BT_OPEN:
1783 	case BT_BOUND:
1784 		/* Can connect */
1785 		break;
1786 
1787 	default:
1788 		err = -EBADFD;
1789 		goto done;
1790 	}
1791 
1792 	/* Set destination address and psm */
1793 	lock_sock(sk);
1794 	bacpy(&bt_sk(sk)->dst, dst);
1795 	release_sock(sk);
1796 
1797 	chan->psm = psm;
1798 	chan->dcid = cid;
1799 
1800 	auth_type = l2cap_get_auth_type(chan);
1801 
1802 	if (bdaddr_type_is_le(dst_type))
1803 		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1804 				   chan->sec_level, auth_type);
1805 	else
1806 		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1807 				   chan->sec_level, auth_type);
1808 
1809 	if (IS_ERR(hcon)) {
1810 		err = PTR_ERR(hcon);
1811 		goto done;
1812 	}
1813 
1814 	conn = l2cap_conn_add(hcon);
1815 	if (!conn) {
1816 		hci_conn_drop(hcon);
1817 		err = -ENOMEM;
1818 		goto done;
1819 	}
1820 
1821 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
1822 		hci_conn_drop(hcon);
1823 		err = -EBUSY;
1824 		goto done;
1825 	}
1826 
1827 	/* Update source addr of the socket */
1828 	bacpy(src, conn->src);
1829 
1830 	l2cap_chan_unlock(chan);
1831 	l2cap_chan_add(conn, chan);
1832 	l2cap_chan_lock(chan);
1833 
1834 	/* l2cap_chan_add takes its own ref so we can drop this one */
1835 	hci_conn_drop(hcon);
1836 
1837 	l2cap_state_change(chan, BT_CONNECT);
1838 	__set_chan_timer(chan, sk->sk_sndtimeo);
1839 
1840 	if (hcon->state == BT_CONNECTED) {
1841 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1842 			__clear_chan_timer(chan);
1843 			if (l2cap_chan_check_security(chan))
1844 				l2cap_state_change(chan, BT_CONNECTED);
1845 		} else
1846 			l2cap_do_start(chan);
1847 	}
1848 
1849 	err = 0;
1850 
1851 done:
1852 	l2cap_chan_unlock(chan);
1853 	hci_dev_unlock(hdev);
1854 	hci_dev_put(hdev);
1855 	return err;
1856 }
1857 
1858 int __l2cap_wait_ack(struct sock *sk)
1859 {
1860 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1861 	DECLARE_WAITQUEUE(wait, current);
1862 	int err = 0;
1863 	int timeo = HZ/5;
1864 
1865 	add_wait_queue(sk_sleep(sk), &wait);
1866 	set_current_state(TASK_INTERRUPTIBLE);
1867 	while (chan->unacked_frames > 0 && chan->conn) {
1868 		if (!timeo)
1869 			timeo = HZ/5;
1870 
1871 		if (signal_pending(current)) {
1872 			err = sock_intr_errno(timeo);
1873 			break;
1874 		}
1875 
1876 		release_sock(sk);
1877 		timeo = schedule_timeout(timeo);
1878 		lock_sock(sk);
1879 		set_current_state(TASK_INTERRUPTIBLE);
1880 
1881 		err = sock_error(sk);
1882 		if (err)
1883 			break;
1884 	}
1885 	set_current_state(TASK_RUNNING);
1886 	remove_wait_queue(sk_sleep(sk), &wait);
1887 	return err;
1888 }
1889 
1890 static void l2cap_monitor_timeout(struct work_struct *work)
1891 {
1892 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1893 					       monitor_timer.work);
1894 
1895 	BT_DBG("chan %p", chan);
1896 
1897 	l2cap_chan_lock(chan);
1898 
1899 	if (!chan->conn) {
1900 		l2cap_chan_unlock(chan);
1901 		l2cap_chan_put(chan);
1902 		return;
1903 	}
1904 
1905 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1906 
1907 	l2cap_chan_unlock(chan);
1908 	l2cap_chan_put(chan);
1909 }
1910 
1911 static void l2cap_retrans_timeout(struct work_struct *work)
1912 {
1913 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1914 					       retrans_timer.work);
1915 
1916 	BT_DBG("chan %p", chan);
1917 
1918 	l2cap_chan_lock(chan);
1919 
1920 	if (!chan->conn) {
1921 		l2cap_chan_unlock(chan);
1922 		l2cap_chan_put(chan);
1923 		return;
1924 	}
1925 
1926 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1927 	l2cap_chan_unlock(chan);
1928 	l2cap_chan_put(chan);
1929 }
1930 
1931 static void l2cap_streaming_send(struct l2cap_chan *chan,
1932 				 struct sk_buff_head *skbs)
1933 {
1934 	struct sk_buff *skb;
1935 	struct l2cap_ctrl *control;
1936 
1937 	BT_DBG("chan %p, skbs %p", chan, skbs);
1938 
1939 	if (__chan_is_moving(chan))
1940 		return;
1941 
1942 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
1943 
1944 	while (!skb_queue_empty(&chan->tx_q)) {
1945 
1946 		skb = skb_dequeue(&chan->tx_q);
1947 
1948 		bt_cb(skb)->control.retries = 1;
1949 		control = &bt_cb(skb)->control;
1950 
1951 		control->reqseq = 0;
1952 		control->txseq = chan->next_tx_seq;
1953 
1954 		__pack_control(chan, control, skb);
1955 
1956 		if (chan->fcs == L2CAP_FCS_CRC16) {
1957 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1958 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1959 		}
1960 
1961 		l2cap_do_send(chan, skb);
1962 
1963 		BT_DBG("Sent txseq %u", control->txseq);
1964 
1965 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1966 		chan->frames_sent++;
1967 	}
1968 }
1969 
1970 static int l2cap_ertm_send(struct l2cap_chan *chan)
1971 {
1972 	struct sk_buff *skb, *tx_skb;
1973 	struct l2cap_ctrl *control;
1974 	int sent = 0;
1975 
1976 	BT_DBG("chan %p", chan);
1977 
1978 	if (chan->state != BT_CONNECTED)
1979 		return -ENOTCONN;
1980 
1981 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1982 		return 0;
1983 
1984 	if (__chan_is_moving(chan))
1985 		return 0;
1986 
1987 	while (chan->tx_send_head &&
1988 	       chan->unacked_frames < chan->remote_tx_win &&
1989 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
1990 
1991 		skb = chan->tx_send_head;
1992 
1993 		bt_cb(skb)->control.retries = 1;
1994 		control = &bt_cb(skb)->control;
1995 
1996 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1997 			control->final = 1;
1998 
1999 		control->reqseq = chan->buffer_seq;
2000 		chan->last_acked_seq = chan->buffer_seq;
2001 		control->txseq = chan->next_tx_seq;
2002 
2003 		__pack_control(chan, control, skb);
2004 
2005 		if (chan->fcs == L2CAP_FCS_CRC16) {
2006 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2007 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2008 		}
2009 
2010 		/* Clone after data has been modified. Data is assumed to be
2011 		   read-only (for locking purposes) on cloned sk_buffs.
2012 		 */
2013 		tx_skb = skb_clone(skb, GFP_KERNEL);
2014 
2015 		if (!tx_skb)
2016 			break;
2017 
2018 		__set_retrans_timer(chan);
2019 
2020 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2021 		chan->unacked_frames++;
2022 		chan->frames_sent++;
2023 		sent++;
2024 
2025 		if (skb_queue_is_last(&chan->tx_q, skb))
2026 			chan->tx_send_head = NULL;
2027 		else
2028 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2029 
2030 		l2cap_do_send(chan, tx_skb);
2031 		BT_DBG("Sent txseq %u", control->txseq);
2032 	}
2033 
2034 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2035 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
2036 
2037 	return sent;
2038 }
2039 
2040 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2041 {
2042 	struct l2cap_ctrl control;
2043 	struct sk_buff *skb;
2044 	struct sk_buff *tx_skb;
2045 	u16 seq;
2046 
2047 	BT_DBG("chan %p", chan);
2048 
2049 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2050 		return;
2051 
2052 	if (__chan_is_moving(chan))
2053 		return;
2054 
2055 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2056 		seq = l2cap_seq_list_pop(&chan->retrans_list);
2057 
2058 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2059 		if (!skb) {
2060 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
2061 			       seq);
2062 			continue;
2063 		}
2064 
2065 		bt_cb(skb)->control.retries++;
2066 		control = bt_cb(skb)->control;
2067 
2068 		if (chan->max_tx != 0 &&
2069 		    bt_cb(skb)->control.retries > chan->max_tx) {
2070 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2071 			l2cap_send_disconn_req(chan, ECONNRESET);
2072 			l2cap_seq_list_clear(&chan->retrans_list);
2073 			break;
2074 		}
2075 
2076 		control.reqseq = chan->buffer_seq;
2077 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2078 			control.final = 1;
2079 		else
2080 			control.final = 0;
2081 
2082 		if (skb_cloned(skb)) {
2083 			/* Cloned sk_buffs are read-only, so we need a
2084 			 * writeable copy
2085 			 */
2086 			tx_skb = skb_copy(skb, GFP_KERNEL);
2087 		} else {
2088 			tx_skb = skb_clone(skb, GFP_KERNEL);
2089 		}
2090 
2091 		if (!tx_skb) {
2092 			l2cap_seq_list_clear(&chan->retrans_list);
2093 			break;
2094 		}
2095 
2096 		/* Update skb contents */
2097 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2098 			put_unaligned_le32(__pack_extended_control(&control),
2099 					   tx_skb->data + L2CAP_HDR_SIZE);
2100 		} else {
2101 			put_unaligned_le16(__pack_enhanced_control(&control),
2102 					   tx_skb->data + L2CAP_HDR_SIZE);
2103 		}
2104 
2105 		if (chan->fcs == L2CAP_FCS_CRC16) {
2106 			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2107 			put_unaligned_le16(fcs, skb_put(tx_skb,
2108 							L2CAP_FCS_SIZE));
2109 		}
2110 
2111 		l2cap_do_send(chan, tx_skb);
2112 
2113 		BT_DBG("Resent txseq %d", control.txseq);
2114 
2115 		chan->last_acked_seq = chan->buffer_seq;
2116 	}
2117 }
2118 
2119 static void l2cap_retransmit(struct l2cap_chan *chan,
2120 			     struct l2cap_ctrl *control)
2121 {
2122 	BT_DBG("chan %p, control %p", chan, control);
2123 
2124 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2125 	l2cap_ertm_resend(chan);
2126 }
2127 
2128 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2129 				 struct l2cap_ctrl *control)
2130 {
2131 	struct sk_buff *skb;
2132 
2133 	BT_DBG("chan %p, control %p", chan, control);
2134 
2135 	if (control->poll)
2136 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2137 
2138 	l2cap_seq_list_clear(&chan->retrans_list);
2139 
2140 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2141 		return;
2142 
2143 	if (chan->unacked_frames) {
2144 		skb_queue_walk(&chan->tx_q, skb) {
2145 			if (bt_cb(skb)->control.txseq == control->reqseq ||
2146 			    skb == chan->tx_send_head)
2147 				break;
2148 		}
2149 
2150 		skb_queue_walk_from(&chan->tx_q, skb) {
2151 			if (skb == chan->tx_send_head)
2152 				break;
2153 
2154 			l2cap_seq_list_append(&chan->retrans_list,
2155 					      bt_cb(skb)->control.txseq);
2156 		}
2157 
2158 		l2cap_ertm_resend(chan);
2159 	}
2160 }
2161 
2162 static void l2cap_send_ack(struct l2cap_chan *chan)
2163 {
2164 	struct l2cap_ctrl control;
2165 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2166 					 chan->last_acked_seq);
2167 	int threshold;
2168 
2169 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2170 	       chan, chan->last_acked_seq, chan->buffer_seq);
2171 
2172 	memset(&control, 0, sizeof(control));
2173 	control.sframe = 1;
2174 
2175 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2176 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2177 		__clear_ack_timer(chan);
2178 		control.super = L2CAP_SUPER_RNR;
2179 		control.reqseq = chan->buffer_seq;
2180 		l2cap_send_sframe(chan, &control);
2181 	} else {
2182 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2183 			l2cap_ertm_send(chan);
2184 			/* If any i-frames were sent, they included an ack */
2185 			if (chan->buffer_seq == chan->last_acked_seq)
2186 				frames_to_ack = 0;
2187 		}
2188 
2189 		/* Ack now if the window is 3/4ths full.
2190 		 * Calculate without mul or div
2191 		 */
2192 		threshold = chan->ack_win;
2193 		threshold += threshold << 1;
2194 		threshold >>= 2;
2195 
2196 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2197 		       threshold);
2198 
2199 		if (frames_to_ack >= threshold) {
2200 			__clear_ack_timer(chan);
2201 			control.super = L2CAP_SUPER_RR;
2202 			control.reqseq = chan->buffer_seq;
2203 			l2cap_send_sframe(chan, &control);
2204 			frames_to_ack = 0;
2205 		}
2206 
2207 		if (frames_to_ack)
2208 			__set_ack_timer(chan);
2209 	}
2210 }
2211 
2212 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2213 					 struct msghdr *msg, int len,
2214 					 int count, struct sk_buff *skb)
2215 {
2216 	struct l2cap_conn *conn = chan->conn;
2217 	struct sk_buff **frag;
2218 	int sent = 0;
2219 
2220 	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2221 		return -EFAULT;
2222 
2223 	sent += count;
2224 	len  -= count;
2225 
2226 	/* Continuation fragments (no L2CAP header) */
2227 	frag = &skb_shinfo(skb)->frag_list;
2228 	while (len) {
2229 		struct sk_buff *tmp;
2230 
2231 		count = min_t(unsigned int, conn->mtu, len);
2232 
2233 		tmp = chan->ops->alloc_skb(chan, count,
2234 					   msg->msg_flags & MSG_DONTWAIT);
2235 		if (IS_ERR(tmp))
2236 			return PTR_ERR(tmp);
2237 
2238 		*frag = tmp;
2239 
2240 		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2241 			return -EFAULT;
2242 
2243 		(*frag)->priority = skb->priority;
2244 
2245 		sent += count;
2246 		len  -= count;
2247 
2248 		skb->len += (*frag)->len;
2249 		skb->data_len += (*frag)->len;
2250 
2251 		frag = &(*frag)->next;
2252 	}
2253 
2254 	return sent;
2255 }
2256 
2257 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2258 						 struct msghdr *msg, size_t len,
2259 						 u32 priority)
2260 {
2261 	struct l2cap_conn *conn = chan->conn;
2262 	struct sk_buff *skb;
2263 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2264 	struct l2cap_hdr *lh;
2265 
2266 	BT_DBG("chan %p len %zu priority %u", chan, len, priority);
2267 
2268 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2269 
2270 	skb = chan->ops->alloc_skb(chan, count + hlen,
2271 				   msg->msg_flags & MSG_DONTWAIT);
2272 	if (IS_ERR(skb))
2273 		return skb;
2274 
2275 	skb->priority = priority;
2276 
2277 	/* Create L2CAP header */
2278 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2279 	lh->cid = cpu_to_le16(chan->dcid);
2280 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2281 	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2282 
2283 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2284 	if (unlikely(err < 0)) {
2285 		kfree_skb(skb);
2286 		return ERR_PTR(err);
2287 	}
2288 	return skb;
2289 }
2290 
2291 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2292 					      struct msghdr *msg, size_t len,
2293 					      u32 priority)
2294 {
2295 	struct l2cap_conn *conn = chan->conn;
2296 	struct sk_buff *skb;
2297 	int err, count;
2298 	struct l2cap_hdr *lh;
2299 
2300 	BT_DBG("chan %p len %zu", chan, len);
2301 
2302 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2303 
2304 	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2305 				   msg->msg_flags & MSG_DONTWAIT);
2306 	if (IS_ERR(skb))
2307 		return skb;
2308 
2309 	skb->priority = priority;
2310 
2311 	/* Create L2CAP header */
2312 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2313 	lh->cid = cpu_to_le16(chan->dcid);
2314 	lh->len = cpu_to_le16(len);
2315 
2316 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2317 	if (unlikely(err < 0)) {
2318 		kfree_skb(skb);
2319 		return ERR_PTR(err);
2320 	}
2321 	return skb;
2322 }
2323 
2324 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2325 					       struct msghdr *msg, size_t len,
2326 					       u16 sdulen)
2327 {
2328 	struct l2cap_conn *conn = chan->conn;
2329 	struct sk_buff *skb;
2330 	int err, count, hlen;
2331 	struct l2cap_hdr *lh;
2332 
2333 	BT_DBG("chan %p len %zu", chan, len);
2334 
2335 	if (!conn)
2336 		return ERR_PTR(-ENOTCONN);
2337 
2338 	hlen = __ertm_hdr_size(chan);
2339 
2340 	if (sdulen)
2341 		hlen += L2CAP_SDULEN_SIZE;
2342 
2343 	if (chan->fcs == L2CAP_FCS_CRC16)
2344 		hlen += L2CAP_FCS_SIZE;
2345 
2346 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2347 
2348 	skb = chan->ops->alloc_skb(chan, count + hlen,
2349 				   msg->msg_flags & MSG_DONTWAIT);
2350 	if (IS_ERR(skb))
2351 		return skb;
2352 
2353 	/* Create L2CAP header */
2354 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2355 	lh->cid = cpu_to_le16(chan->dcid);
2356 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2357 
2358 	/* Control header is populated later */
2359 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2360 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2361 	else
2362 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2363 
2364 	if (sdulen)
2365 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2366 
2367 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2368 	if (unlikely(err < 0)) {
2369 		kfree_skb(skb);
2370 		return ERR_PTR(err);
2371 	}
2372 
2373 	bt_cb(skb)->control.fcs = chan->fcs;
2374 	bt_cb(skb)->control.retries = 0;
2375 	return skb;
2376 }
2377 
2378 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2379 			     struct sk_buff_head *seg_queue,
2380 			     struct msghdr *msg, size_t len)
2381 {
2382 	struct sk_buff *skb;
2383 	u16 sdu_len;
2384 	size_t pdu_len;
2385 	u8 sar;
2386 
2387 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2388 
2389 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2390 	 * so fragmented skbs are not used.  The HCI layer's handling
2391 	 * of fragmented skbs is not compatible with ERTM's queueing.
2392 	 */
2393 
2394 	/* PDU size is derived from the HCI MTU */
2395 	pdu_len = chan->conn->mtu;
2396 
2397 	/* Constrain PDU size for BR/EDR connections */
2398 	if (!chan->hs_hcon)
2399 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2400 
2401 	/* Adjust for largest possible L2CAP overhead. */
2402 	if (chan->fcs)
2403 		pdu_len -= L2CAP_FCS_SIZE;
2404 
2405 	pdu_len -= __ertm_hdr_size(chan);
2406 
2407 	/* Remote device may have requested smaller PDUs */
2408 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2409 
2410 	if (len <= pdu_len) {
2411 		sar = L2CAP_SAR_UNSEGMENTED;
2412 		sdu_len = 0;
2413 		pdu_len = len;
2414 	} else {
2415 		sar = L2CAP_SAR_START;
2416 		sdu_len = len;
2417 		pdu_len -= L2CAP_SDULEN_SIZE;
2418 	}
2419 
2420 	while (len > 0) {
2421 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2422 
2423 		if (IS_ERR(skb)) {
2424 			__skb_queue_purge(seg_queue);
2425 			return PTR_ERR(skb);
2426 		}
2427 
2428 		bt_cb(skb)->control.sar = sar;
2429 		__skb_queue_tail(seg_queue, skb);
2430 
2431 		len -= pdu_len;
2432 		if (sdu_len) {
2433 			sdu_len = 0;
2434 			pdu_len += L2CAP_SDULEN_SIZE;
2435 		}
2436 
2437 		if (len <= pdu_len) {
2438 			sar = L2CAP_SAR_END;
2439 			pdu_len = len;
2440 		} else {
2441 			sar = L2CAP_SAR_CONTINUE;
2442 		}
2443 	}
2444 
2445 	return 0;
2446 }
2447 
2448 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2449 		    u32 priority)
2450 {
2451 	struct sk_buff *skb;
2452 	int err;
2453 	struct sk_buff_head seg_queue;
2454 
2455 	/* Connectionless channel */
2456 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2457 		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2458 		if (IS_ERR(skb))
2459 			return PTR_ERR(skb);
2460 
2461 		l2cap_do_send(chan, skb);
2462 		return len;
2463 	}
2464 
2465 	switch (chan->mode) {
2466 	case L2CAP_MODE_BASIC:
2467 		/* Check outgoing MTU */
2468 		if (len > chan->omtu)
2469 			return -EMSGSIZE;
2470 
2471 		/* Create a basic PDU */
2472 		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2473 		if (IS_ERR(skb))
2474 			return PTR_ERR(skb);
2475 
2476 		l2cap_do_send(chan, skb);
2477 		err = len;
2478 		break;
2479 
2480 	case L2CAP_MODE_ERTM:
2481 	case L2CAP_MODE_STREAMING:
2482 		/* Check outgoing MTU */
2483 		if (len > chan->omtu) {
2484 			err = -EMSGSIZE;
2485 			break;
2486 		}
2487 
2488 		__skb_queue_head_init(&seg_queue);
2489 
2490 		/* Do segmentation before calling in to the state machine,
2491 		 * since it's possible to block while waiting for memory
2492 		 * allocation.
2493 		 */
2494 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2495 
2496 		/* The channel could have been closed while segmenting,
2497 		 * check that it is still connected.
2498 		 */
2499 		if (chan->state != BT_CONNECTED) {
2500 			__skb_queue_purge(&seg_queue);
2501 			err = -ENOTCONN;
2502 		}
2503 
2504 		if (err)
2505 			break;
2506 
2507 		if (chan->mode == L2CAP_MODE_ERTM)
2508 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2509 		else
2510 			l2cap_streaming_send(chan, &seg_queue);
2511 
2512 		err = len;
2513 
2514 		/* If the skbs were not queued for sending, they'll still be in
2515 		 * seg_queue and need to be purged.
2516 		 */
2517 		__skb_queue_purge(&seg_queue);
2518 		break;
2519 
2520 	default:
2521 		BT_DBG("bad state %1.1x", chan->mode);
2522 		err = -EBADFD;
2523 	}
2524 
2525 	return err;
2526 }
2527 
2528 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2529 {
2530 	struct l2cap_ctrl control;
2531 	u16 seq;
2532 
2533 	BT_DBG("chan %p, txseq %u", chan, txseq);
2534 
2535 	memset(&control, 0, sizeof(control));
2536 	control.sframe = 1;
2537 	control.super = L2CAP_SUPER_SREJ;
2538 
2539 	for (seq = chan->expected_tx_seq; seq != txseq;
2540 	     seq = __next_seq(chan, seq)) {
2541 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2542 			control.reqseq = seq;
2543 			l2cap_send_sframe(chan, &control);
2544 			l2cap_seq_list_append(&chan->srej_list, seq);
2545 		}
2546 	}
2547 
2548 	chan->expected_tx_seq = __next_seq(chan, txseq);
2549 }
2550 
2551 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2552 {
2553 	struct l2cap_ctrl control;
2554 
2555 	BT_DBG("chan %p", chan);
2556 
2557 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2558 		return;
2559 
2560 	memset(&control, 0, sizeof(control));
2561 	control.sframe = 1;
2562 	control.super = L2CAP_SUPER_SREJ;
2563 	control.reqseq = chan->srej_list.tail;
2564 	l2cap_send_sframe(chan, &control);
2565 }
2566 
2567 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2568 {
2569 	struct l2cap_ctrl control;
2570 	u16 initial_head;
2571 	u16 seq;
2572 
2573 	BT_DBG("chan %p, txseq %u", chan, txseq);
2574 
2575 	memset(&control, 0, sizeof(control));
2576 	control.sframe = 1;
2577 	control.super = L2CAP_SUPER_SREJ;
2578 
2579 	/* Capture initial list head to allow only one pass through the list. */
2580 	initial_head = chan->srej_list.head;
2581 
2582 	do {
2583 		seq = l2cap_seq_list_pop(&chan->srej_list);
2584 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2585 			break;
2586 
2587 		control.reqseq = seq;
2588 		l2cap_send_sframe(chan, &control);
2589 		l2cap_seq_list_append(&chan->srej_list, seq);
2590 	} while (chan->srej_list.head != initial_head);
2591 }
2592 
2593 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2594 {
2595 	struct sk_buff *acked_skb;
2596 	u16 ackseq;
2597 
2598 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2599 
2600 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2601 		return;
2602 
2603 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2604 	       chan->expected_ack_seq, chan->unacked_frames);
2605 
2606 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2607 	     ackseq = __next_seq(chan, ackseq)) {
2608 
2609 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2610 		if (acked_skb) {
2611 			skb_unlink(acked_skb, &chan->tx_q);
2612 			kfree_skb(acked_skb);
2613 			chan->unacked_frames--;
2614 		}
2615 	}
2616 
2617 	chan->expected_ack_seq = reqseq;
2618 
2619 	if (chan->unacked_frames == 0)
2620 		__clear_retrans_timer(chan);
2621 
2622 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2623 }
2624 
2625 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2626 {
2627 	BT_DBG("chan %p", chan);
2628 
2629 	chan->expected_tx_seq = chan->buffer_seq;
2630 	l2cap_seq_list_clear(&chan->srej_list);
2631 	skb_queue_purge(&chan->srej_q);
2632 	chan->rx_state = L2CAP_RX_STATE_RECV;
2633 }
2634 
2635 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2636 				struct l2cap_ctrl *control,
2637 				struct sk_buff_head *skbs, u8 event)
2638 {
2639 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2640 	       event);
2641 
2642 	switch (event) {
2643 	case L2CAP_EV_DATA_REQUEST:
2644 		if (chan->tx_send_head == NULL)
2645 			chan->tx_send_head = skb_peek(skbs);
2646 
2647 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2648 		l2cap_ertm_send(chan);
2649 		break;
2650 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2651 		BT_DBG("Enter LOCAL_BUSY");
2652 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2653 
2654 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2655 			/* The SREJ_SENT state must be aborted if we are to
2656 			 * enter the LOCAL_BUSY state.
2657 			 */
2658 			l2cap_abort_rx_srej_sent(chan);
2659 		}
2660 
2661 		l2cap_send_ack(chan);
2662 
2663 		break;
2664 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2665 		BT_DBG("Exit LOCAL_BUSY");
2666 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2667 
2668 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2669 			struct l2cap_ctrl local_control;
2670 
2671 			memset(&local_control, 0, sizeof(local_control));
2672 			local_control.sframe = 1;
2673 			local_control.super = L2CAP_SUPER_RR;
2674 			local_control.poll = 1;
2675 			local_control.reqseq = chan->buffer_seq;
2676 			l2cap_send_sframe(chan, &local_control);
2677 
2678 			chan->retry_count = 1;
2679 			__set_monitor_timer(chan);
2680 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2681 		}
2682 		break;
2683 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2684 		l2cap_process_reqseq(chan, control->reqseq);
2685 		break;
2686 	case L2CAP_EV_EXPLICIT_POLL:
2687 		l2cap_send_rr_or_rnr(chan, 1);
2688 		chan->retry_count = 1;
2689 		__set_monitor_timer(chan);
2690 		__clear_ack_timer(chan);
2691 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2692 		break;
2693 	case L2CAP_EV_RETRANS_TO:
2694 		l2cap_send_rr_or_rnr(chan, 1);
2695 		chan->retry_count = 1;
2696 		__set_monitor_timer(chan);
2697 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2698 		break;
2699 	case L2CAP_EV_RECV_FBIT:
2700 		/* Nothing to process */
2701 		break;
2702 	default:
2703 		break;
2704 	}
2705 }
2706 
2707 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2708 				  struct l2cap_ctrl *control,
2709 				  struct sk_buff_head *skbs, u8 event)
2710 {
2711 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2712 	       event);
2713 
2714 	switch (event) {
2715 	case L2CAP_EV_DATA_REQUEST:
2716 		if (chan->tx_send_head == NULL)
2717 			chan->tx_send_head = skb_peek(skbs);
2718 		/* Queue data, but don't send. */
2719 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2720 		break;
2721 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2722 		BT_DBG("Enter LOCAL_BUSY");
2723 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2724 
2725 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2726 			/* The SREJ_SENT state must be aborted if we are to
2727 			 * enter the LOCAL_BUSY state.
2728 			 */
2729 			l2cap_abort_rx_srej_sent(chan);
2730 		}
2731 
2732 		l2cap_send_ack(chan);
2733 
2734 		break;
2735 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2736 		BT_DBG("Exit LOCAL_BUSY");
2737 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2738 
2739 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2740 			struct l2cap_ctrl local_control;
2741 			memset(&local_control, 0, sizeof(local_control));
2742 			local_control.sframe = 1;
2743 			local_control.super = L2CAP_SUPER_RR;
2744 			local_control.poll = 1;
2745 			local_control.reqseq = chan->buffer_seq;
2746 			l2cap_send_sframe(chan, &local_control);
2747 
2748 			chan->retry_count = 1;
2749 			__set_monitor_timer(chan);
2750 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2751 		}
2752 		break;
2753 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2754 		l2cap_process_reqseq(chan, control->reqseq);
2755 
2756 		/* Fall through */
2757 
2758 	case L2CAP_EV_RECV_FBIT:
2759 		if (control && control->final) {
2760 			__clear_monitor_timer(chan);
2761 			if (chan->unacked_frames > 0)
2762 				__set_retrans_timer(chan);
2763 			chan->retry_count = 0;
2764 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2765 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2766 		}
2767 		break;
2768 	case L2CAP_EV_EXPLICIT_POLL:
2769 		/* Ignore */
2770 		break;
2771 	case L2CAP_EV_MONITOR_TO:
2772 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2773 			l2cap_send_rr_or_rnr(chan, 1);
2774 			__set_monitor_timer(chan);
2775 			chan->retry_count++;
2776 		} else {
2777 			l2cap_send_disconn_req(chan, ECONNABORTED);
2778 		}
2779 		break;
2780 	default:
2781 		break;
2782 	}
2783 }
2784 
2785 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2786 		     struct sk_buff_head *skbs, u8 event)
2787 {
2788 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2789 	       chan, control, skbs, event, chan->tx_state);
2790 
2791 	switch (chan->tx_state) {
2792 	case L2CAP_TX_STATE_XMIT:
2793 		l2cap_tx_state_xmit(chan, control, skbs, event);
2794 		break;
2795 	case L2CAP_TX_STATE_WAIT_F:
2796 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2797 		break;
2798 	default:
2799 		/* Ignore event */
2800 		break;
2801 	}
2802 }
2803 
2804 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2805 			     struct l2cap_ctrl *control)
2806 {
2807 	BT_DBG("chan %p, control %p", chan, control);
2808 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2809 }
2810 
2811 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2812 				  struct l2cap_ctrl *control)
2813 {
2814 	BT_DBG("chan %p, control %p", chan, control);
2815 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2816 }
2817 
2818 /* Copy frame to all raw sockets on that connection */
2819 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2820 {
2821 	struct sk_buff *nskb;
2822 	struct l2cap_chan *chan;
2823 
2824 	BT_DBG("conn %p", conn);
2825 
2826 	mutex_lock(&conn->chan_lock);
2827 
2828 	list_for_each_entry(chan, &conn->chan_l, list) {
2829 		struct sock *sk = chan->sk;
2830 		if (chan->chan_type != L2CAP_CHAN_RAW)
2831 			continue;
2832 
2833 		/* Don't send frame to the socket it came from */
2834 		if (skb->sk == sk)
2835 			continue;
2836 		nskb = skb_clone(skb, GFP_KERNEL);
2837 		if (!nskb)
2838 			continue;
2839 
2840 		if (chan->ops->recv(chan, nskb))
2841 			kfree_skb(nskb);
2842 	}
2843 
2844 	mutex_unlock(&conn->chan_lock);
2845 }
2846 
2847 /* ---- L2CAP signalling commands ---- */
2848 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2849 				       u8 ident, u16 dlen, void *data)
2850 {
2851 	struct sk_buff *skb, **frag;
2852 	struct l2cap_cmd_hdr *cmd;
2853 	struct l2cap_hdr *lh;
2854 	int len, count;
2855 
2856 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2857 	       conn, code, ident, dlen);
2858 
2859 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2860 		return NULL;
2861 
2862 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2863 	count = min_t(unsigned int, conn->mtu, len);
2864 
2865 	skb = bt_skb_alloc(count, GFP_KERNEL);
2866 	if (!skb)
2867 		return NULL;
2868 
2869 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2870 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2871 
2872 	if (conn->hcon->type == LE_LINK)
2873 		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2874 	else
2875 		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2876 
2877 	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2878 	cmd->code  = code;
2879 	cmd->ident = ident;
2880 	cmd->len   = cpu_to_le16(dlen);
2881 
2882 	if (dlen) {
2883 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2884 		memcpy(skb_put(skb, count), data, count);
2885 		data += count;
2886 	}
2887 
2888 	len -= skb->len;
2889 
2890 	/* Continuation fragments (no L2CAP header) */
2891 	frag = &skb_shinfo(skb)->frag_list;
2892 	while (len) {
2893 		count = min_t(unsigned int, conn->mtu, len);
2894 
2895 		*frag = bt_skb_alloc(count, GFP_KERNEL);
2896 		if (!*frag)
2897 			goto fail;
2898 
2899 		memcpy(skb_put(*frag, count), data, count);
2900 
2901 		len  -= count;
2902 		data += count;
2903 
2904 		frag = &(*frag)->next;
2905 	}
2906 
2907 	return skb;
2908 
2909 fail:
2910 	kfree_skb(skb);
2911 	return NULL;
2912 }
2913 
2914 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2915 				     unsigned long *val)
2916 {
2917 	struct l2cap_conf_opt *opt = *ptr;
2918 	int len;
2919 
2920 	len = L2CAP_CONF_OPT_SIZE + opt->len;
2921 	*ptr += len;
2922 
2923 	*type = opt->type;
2924 	*olen = opt->len;
2925 
2926 	switch (opt->len) {
2927 	case 1:
2928 		*val = *((u8 *) opt->val);
2929 		break;
2930 
2931 	case 2:
2932 		*val = get_unaligned_le16(opt->val);
2933 		break;
2934 
2935 	case 4:
2936 		*val = get_unaligned_le32(opt->val);
2937 		break;
2938 
2939 	default:
2940 		*val = (unsigned long) opt->val;
2941 		break;
2942 	}
2943 
2944 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2945 	return len;
2946 }
2947 
2948 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2949 {
2950 	struct l2cap_conf_opt *opt = *ptr;
2951 
2952 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2953 
2954 	opt->type = type;
2955 	opt->len  = len;
2956 
2957 	switch (len) {
2958 	case 1:
2959 		*((u8 *) opt->val)  = val;
2960 		break;
2961 
2962 	case 2:
2963 		put_unaligned_le16(val, opt->val);
2964 		break;
2965 
2966 	case 4:
2967 		put_unaligned_le32(val, opt->val);
2968 		break;
2969 
2970 	default:
2971 		memcpy(opt->val, (void *) val, len);
2972 		break;
2973 	}
2974 
2975 	*ptr += L2CAP_CONF_OPT_SIZE + len;
2976 }
2977 
2978 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2979 {
2980 	struct l2cap_conf_efs efs;
2981 
2982 	switch (chan->mode) {
2983 	case L2CAP_MODE_ERTM:
2984 		efs.id		= chan->local_id;
2985 		efs.stype	= chan->local_stype;
2986 		efs.msdu	= cpu_to_le16(chan->local_msdu);
2987 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
2988 		efs.acc_lat	= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2989 		efs.flush_to	= __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2990 		break;
2991 
2992 	case L2CAP_MODE_STREAMING:
2993 		efs.id		= 1;
2994 		efs.stype	= L2CAP_SERV_BESTEFFORT;
2995 		efs.msdu	= cpu_to_le16(chan->local_msdu);
2996 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
2997 		efs.acc_lat	= 0;
2998 		efs.flush_to	= 0;
2999 		break;
3000 
3001 	default:
3002 		return;
3003 	}
3004 
3005 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3006 			   (unsigned long) &efs);
3007 }
3008 
3009 static void l2cap_ack_timeout(struct work_struct *work)
3010 {
3011 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3012 					       ack_timer.work);
3013 	u16 frames_to_ack;
3014 
3015 	BT_DBG("chan %p", chan);
3016 
3017 	l2cap_chan_lock(chan);
3018 
3019 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3020 				     chan->last_acked_seq);
3021 
3022 	if (frames_to_ack)
3023 		l2cap_send_rr_or_rnr(chan, 0);
3024 
3025 	l2cap_chan_unlock(chan);
3026 	l2cap_chan_put(chan);
3027 }
3028 
3029 int l2cap_ertm_init(struct l2cap_chan *chan)
3030 {
3031 	int err;
3032 
3033 	chan->next_tx_seq = 0;
3034 	chan->expected_tx_seq = 0;
3035 	chan->expected_ack_seq = 0;
3036 	chan->unacked_frames = 0;
3037 	chan->buffer_seq = 0;
3038 	chan->frames_sent = 0;
3039 	chan->last_acked_seq = 0;
3040 	chan->sdu = NULL;
3041 	chan->sdu_last_frag = NULL;
3042 	chan->sdu_len = 0;
3043 
3044 	skb_queue_head_init(&chan->tx_q);
3045 
3046 	chan->local_amp_id = 0;
3047 	chan->move_id = 0;
3048 	chan->move_state = L2CAP_MOVE_STABLE;
3049 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3050 
3051 	if (chan->mode != L2CAP_MODE_ERTM)
3052 		return 0;
3053 
3054 	chan->rx_state = L2CAP_RX_STATE_RECV;
3055 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3056 
3057 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3058 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3059 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3060 
3061 	skb_queue_head_init(&chan->srej_q);
3062 
3063 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3064 	if (err < 0)
3065 		return err;
3066 
3067 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3068 	if (err < 0)
3069 		l2cap_seq_list_free(&chan->srej_list);
3070 
3071 	return err;
3072 }
3073 
3074 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3075 {
3076 	switch (mode) {
3077 	case L2CAP_MODE_STREAMING:
3078 	case L2CAP_MODE_ERTM:
3079 		if (l2cap_mode_supported(mode, remote_feat_mask))
3080 			return mode;
3081 		/* fall through */
3082 	default:
3083 		return L2CAP_MODE_BASIC;
3084 	}
3085 }
3086 
3087 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
3088 {
3089 	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3090 }
3091 
3092 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
3093 {
3094 	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3095 }
3096 
3097 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3098 				      struct l2cap_conf_rfc *rfc)
3099 {
3100 	if (chan->local_amp_id && chan->hs_hcon) {
3101 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3102 
3103 		/* Class 1 devices have must have ERTM timeouts
3104 		 * exceeding the Link Supervision Timeout.  The
3105 		 * default Link Supervision Timeout for AMP
3106 		 * controllers is 10 seconds.
3107 		 *
3108 		 * Class 1 devices use 0xffffffff for their
3109 		 * best-effort flush timeout, so the clamping logic
3110 		 * will result in a timeout that meets the above
3111 		 * requirement.  ERTM timeouts are 16-bit values, so
3112 		 * the maximum timeout is 65.535 seconds.
3113 		 */
3114 
3115 		/* Convert timeout to milliseconds and round */
3116 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3117 
3118 		/* This is the recommended formula for class 2 devices
3119 		 * that start ERTM timers when packets are sent to the
3120 		 * controller.
3121 		 */
3122 		ertm_to = 3 * ertm_to + 500;
3123 
3124 		if (ertm_to > 0xffff)
3125 			ertm_to = 0xffff;
3126 
3127 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3128 		rfc->monitor_timeout = rfc->retrans_timeout;
3129 	} else {
3130 		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3131 		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3132 	}
3133 }
3134 
3135 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3136 {
3137 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3138 	    __l2cap_ews_supported(chan)) {
3139 		/* use extended control field */
3140 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3141 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3142 	} else {
3143 		chan->tx_win = min_t(u16, chan->tx_win,
3144 				     L2CAP_DEFAULT_TX_WINDOW);
3145 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3146 	}
3147 	chan->ack_win = chan->tx_win;
3148 }
3149 
3150 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3151 {
3152 	struct l2cap_conf_req *req = data;
3153 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3154 	void *ptr = req->data;
3155 	u16 size;
3156 
3157 	BT_DBG("chan %p", chan);
3158 
3159 	if (chan->num_conf_req || chan->num_conf_rsp)
3160 		goto done;
3161 
3162 	switch (chan->mode) {
3163 	case L2CAP_MODE_STREAMING:
3164 	case L2CAP_MODE_ERTM:
3165 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3166 			break;
3167 
3168 		if (__l2cap_efs_supported(chan))
3169 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3170 
3171 		/* fall through */
3172 	default:
3173 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3174 		break;
3175 	}
3176 
3177 done:
3178 	if (chan->imtu != L2CAP_DEFAULT_MTU)
3179 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3180 
3181 	switch (chan->mode) {
3182 	case L2CAP_MODE_BASIC:
3183 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3184 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3185 			break;
3186 
3187 		rfc.mode            = L2CAP_MODE_BASIC;
3188 		rfc.txwin_size      = 0;
3189 		rfc.max_transmit    = 0;
3190 		rfc.retrans_timeout = 0;
3191 		rfc.monitor_timeout = 0;
3192 		rfc.max_pdu_size    = 0;
3193 
3194 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3195 				   (unsigned long) &rfc);
3196 		break;
3197 
3198 	case L2CAP_MODE_ERTM:
3199 		rfc.mode            = L2CAP_MODE_ERTM;
3200 		rfc.max_transmit    = chan->max_tx;
3201 
3202 		__l2cap_set_ertm_timeouts(chan, &rfc);
3203 
3204 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3205 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3206 			     L2CAP_FCS_SIZE);
3207 		rfc.max_pdu_size = cpu_to_le16(size);
3208 
3209 		l2cap_txwin_setup(chan);
3210 
3211 		rfc.txwin_size = min_t(u16, chan->tx_win,
3212 				       L2CAP_DEFAULT_TX_WINDOW);
3213 
3214 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3215 				   (unsigned long) &rfc);
3216 
3217 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3218 			l2cap_add_opt_efs(&ptr, chan);
3219 
3220 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3221 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3222 					   chan->tx_win);
3223 
3224 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3225 			if (chan->fcs == L2CAP_FCS_NONE ||
3226 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3227 				chan->fcs = L2CAP_FCS_NONE;
3228 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3229 						   chan->fcs);
3230 			}
3231 		break;
3232 
3233 	case L2CAP_MODE_STREAMING:
3234 		l2cap_txwin_setup(chan);
3235 		rfc.mode            = L2CAP_MODE_STREAMING;
3236 		rfc.txwin_size      = 0;
3237 		rfc.max_transmit    = 0;
3238 		rfc.retrans_timeout = 0;
3239 		rfc.monitor_timeout = 0;
3240 
3241 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3242 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3243 			     L2CAP_FCS_SIZE);
3244 		rfc.max_pdu_size = cpu_to_le16(size);
3245 
3246 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3247 				   (unsigned long) &rfc);
3248 
3249 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3250 			l2cap_add_opt_efs(&ptr, chan);
3251 
3252 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3253 			if (chan->fcs == L2CAP_FCS_NONE ||
3254 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3255 				chan->fcs = L2CAP_FCS_NONE;
3256 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3257 						   chan->fcs);
3258 			}
3259 		break;
3260 	}
3261 
3262 	req->dcid  = cpu_to_le16(chan->dcid);
3263 	req->flags = __constant_cpu_to_le16(0);
3264 
3265 	return ptr - data;
3266 }
3267 
3268 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3269 {
3270 	struct l2cap_conf_rsp *rsp = data;
3271 	void *ptr = rsp->data;
3272 	void *req = chan->conf_req;
3273 	int len = chan->conf_len;
3274 	int type, hint, olen;
3275 	unsigned long val;
3276 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3277 	struct l2cap_conf_efs efs;
3278 	u8 remote_efs = 0;
3279 	u16 mtu = L2CAP_DEFAULT_MTU;
3280 	u16 result = L2CAP_CONF_SUCCESS;
3281 	u16 size;
3282 
3283 	BT_DBG("chan %p", chan);
3284 
3285 	while (len >= L2CAP_CONF_OPT_SIZE) {
3286 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3287 
3288 		hint  = type & L2CAP_CONF_HINT;
3289 		type &= L2CAP_CONF_MASK;
3290 
3291 		switch (type) {
3292 		case L2CAP_CONF_MTU:
3293 			mtu = val;
3294 			break;
3295 
3296 		case L2CAP_CONF_FLUSH_TO:
3297 			chan->flush_to = val;
3298 			break;
3299 
3300 		case L2CAP_CONF_QOS:
3301 			break;
3302 
3303 		case L2CAP_CONF_RFC:
3304 			if (olen == sizeof(rfc))
3305 				memcpy(&rfc, (void *) val, olen);
3306 			break;
3307 
3308 		case L2CAP_CONF_FCS:
3309 			if (val == L2CAP_FCS_NONE)
3310 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3311 			break;
3312 
3313 		case L2CAP_CONF_EFS:
3314 			remote_efs = 1;
3315 			if (olen == sizeof(efs))
3316 				memcpy(&efs, (void *) val, olen);
3317 			break;
3318 
3319 		case L2CAP_CONF_EWS:
3320 			if (!enable_hs)
3321 				return -ECONNREFUSED;
3322 
3323 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3324 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3325 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3326 			chan->remote_tx_win = val;
3327 			break;
3328 
3329 		default:
3330 			if (hint)
3331 				break;
3332 
3333 			result = L2CAP_CONF_UNKNOWN;
3334 			*((u8 *) ptr++) = type;
3335 			break;
3336 		}
3337 	}
3338 
3339 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3340 		goto done;
3341 
3342 	switch (chan->mode) {
3343 	case L2CAP_MODE_STREAMING:
3344 	case L2CAP_MODE_ERTM:
3345 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3346 			chan->mode = l2cap_select_mode(rfc.mode,
3347 						       chan->conn->feat_mask);
3348 			break;
3349 		}
3350 
3351 		if (remote_efs) {
3352 			if (__l2cap_efs_supported(chan))
3353 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3354 			else
3355 				return -ECONNREFUSED;
3356 		}
3357 
3358 		if (chan->mode != rfc.mode)
3359 			return -ECONNREFUSED;
3360 
3361 		break;
3362 	}
3363 
3364 done:
3365 	if (chan->mode != rfc.mode) {
3366 		result = L2CAP_CONF_UNACCEPT;
3367 		rfc.mode = chan->mode;
3368 
3369 		if (chan->num_conf_rsp == 1)
3370 			return -ECONNREFUSED;
3371 
3372 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3373 				   (unsigned long) &rfc);
3374 	}
3375 
3376 	if (result == L2CAP_CONF_SUCCESS) {
3377 		/* Configure output options and let the other side know
3378 		 * which ones we don't like. */
3379 
3380 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3381 			result = L2CAP_CONF_UNACCEPT;
3382 		else {
3383 			chan->omtu = mtu;
3384 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3385 		}
3386 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3387 
3388 		if (remote_efs) {
3389 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3390 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3391 			    efs.stype != chan->local_stype) {
3392 
3393 				result = L2CAP_CONF_UNACCEPT;
3394 
3395 				if (chan->num_conf_req >= 1)
3396 					return -ECONNREFUSED;
3397 
3398 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3399 						   sizeof(efs),
3400 						   (unsigned long) &efs);
3401 			} else {
3402 				/* Send PENDING Conf Rsp */
3403 				result = L2CAP_CONF_PENDING;
3404 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3405 			}
3406 		}
3407 
3408 		switch (rfc.mode) {
3409 		case L2CAP_MODE_BASIC:
3410 			chan->fcs = L2CAP_FCS_NONE;
3411 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3412 			break;
3413 
3414 		case L2CAP_MODE_ERTM:
3415 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3416 				chan->remote_tx_win = rfc.txwin_size;
3417 			else
3418 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3419 
3420 			chan->remote_max_tx = rfc.max_transmit;
3421 
3422 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3423 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3424 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3425 			rfc.max_pdu_size = cpu_to_le16(size);
3426 			chan->remote_mps = size;
3427 
3428 			__l2cap_set_ertm_timeouts(chan, &rfc);
3429 
3430 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3431 
3432 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3433 					   sizeof(rfc), (unsigned long) &rfc);
3434 
3435 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3436 				chan->remote_id = efs.id;
3437 				chan->remote_stype = efs.stype;
3438 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3439 				chan->remote_flush_to =
3440 					le32_to_cpu(efs.flush_to);
3441 				chan->remote_acc_lat =
3442 					le32_to_cpu(efs.acc_lat);
3443 				chan->remote_sdu_itime =
3444 					le32_to_cpu(efs.sdu_itime);
3445 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3446 						   sizeof(efs),
3447 						   (unsigned long) &efs);
3448 			}
3449 			break;
3450 
3451 		case L2CAP_MODE_STREAMING:
3452 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3453 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3454 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3455 			rfc.max_pdu_size = cpu_to_le16(size);
3456 			chan->remote_mps = size;
3457 
3458 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3459 
3460 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3461 					   (unsigned long) &rfc);
3462 
3463 			break;
3464 
3465 		default:
3466 			result = L2CAP_CONF_UNACCEPT;
3467 
3468 			memset(&rfc, 0, sizeof(rfc));
3469 			rfc.mode = chan->mode;
3470 		}
3471 
3472 		if (result == L2CAP_CONF_SUCCESS)
3473 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3474 	}
3475 	rsp->scid   = cpu_to_le16(chan->dcid);
3476 	rsp->result = cpu_to_le16(result);
3477 	rsp->flags  = __constant_cpu_to_le16(0);
3478 
3479 	return ptr - data;
3480 }
3481 
3482 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3483 				void *data, u16 *result)
3484 {
3485 	struct l2cap_conf_req *req = data;
3486 	void *ptr = req->data;
3487 	int type, olen;
3488 	unsigned long val;
3489 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3490 	struct l2cap_conf_efs efs;
3491 
3492 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3493 
3494 	while (len >= L2CAP_CONF_OPT_SIZE) {
3495 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3496 
3497 		switch (type) {
3498 		case L2CAP_CONF_MTU:
3499 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3500 				*result = L2CAP_CONF_UNACCEPT;
3501 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3502 			} else
3503 				chan->imtu = val;
3504 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3505 			break;
3506 
3507 		case L2CAP_CONF_FLUSH_TO:
3508 			chan->flush_to = val;
3509 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3510 					   2, chan->flush_to);
3511 			break;
3512 
3513 		case L2CAP_CONF_RFC:
3514 			if (olen == sizeof(rfc))
3515 				memcpy(&rfc, (void *)val, olen);
3516 
3517 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3518 			    rfc.mode != chan->mode)
3519 				return -ECONNREFUSED;
3520 
3521 			chan->fcs = 0;
3522 
3523 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3524 					   sizeof(rfc), (unsigned long) &rfc);
3525 			break;
3526 
3527 		case L2CAP_CONF_EWS:
3528 			chan->ack_win = min_t(u16, val, chan->ack_win);
3529 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3530 					   chan->tx_win);
3531 			break;
3532 
3533 		case L2CAP_CONF_EFS:
3534 			if (olen == sizeof(efs))
3535 				memcpy(&efs, (void *)val, olen);
3536 
3537 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3538 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3539 			    efs.stype != chan->local_stype)
3540 				return -ECONNREFUSED;
3541 
3542 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3543 					   (unsigned long) &efs);
3544 			break;
3545 
3546 		case L2CAP_CONF_FCS:
3547 			if (*result == L2CAP_CONF_PENDING)
3548 				if (val == L2CAP_FCS_NONE)
3549 					set_bit(CONF_RECV_NO_FCS,
3550 						&chan->conf_state);
3551 			break;
3552 		}
3553 	}
3554 
3555 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3556 		return -ECONNREFUSED;
3557 
3558 	chan->mode = rfc.mode;
3559 
3560 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3561 		switch (rfc.mode) {
3562 		case L2CAP_MODE_ERTM:
3563 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3564 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3565 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3566 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3567 				chan->ack_win = min_t(u16, chan->ack_win,
3568 						      rfc.txwin_size);
3569 
3570 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3571 				chan->local_msdu = le16_to_cpu(efs.msdu);
3572 				chan->local_sdu_itime =
3573 					le32_to_cpu(efs.sdu_itime);
3574 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3575 				chan->local_flush_to =
3576 					le32_to_cpu(efs.flush_to);
3577 			}
3578 			break;
3579 
3580 		case L2CAP_MODE_STREAMING:
3581 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3582 		}
3583 	}
3584 
3585 	req->dcid   = cpu_to_le16(chan->dcid);
3586 	req->flags  = __constant_cpu_to_le16(0);
3587 
3588 	return ptr - data;
3589 }
3590 
3591 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3592 				u16 result, u16 flags)
3593 {
3594 	struct l2cap_conf_rsp *rsp = data;
3595 	void *ptr = rsp->data;
3596 
3597 	BT_DBG("chan %p", chan);
3598 
3599 	rsp->scid   = cpu_to_le16(chan->dcid);
3600 	rsp->result = cpu_to_le16(result);
3601 	rsp->flags  = cpu_to_le16(flags);
3602 
3603 	return ptr - data;
3604 }
3605 
3606 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3607 {
3608 	struct l2cap_conn_rsp rsp;
3609 	struct l2cap_conn *conn = chan->conn;
3610 	u8 buf[128];
3611 	u8 rsp_code;
3612 
3613 	rsp.scid   = cpu_to_le16(chan->dcid);
3614 	rsp.dcid   = cpu_to_le16(chan->scid);
3615 	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3616 	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3617 
3618 	if (chan->hs_hcon)
3619 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3620 	else
3621 		rsp_code = L2CAP_CONN_RSP;
3622 
3623 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3624 
3625 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3626 
3627 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3628 		return;
3629 
3630 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3631 		       l2cap_build_conf_req(chan, buf), buf);
3632 	chan->num_conf_req++;
3633 }
3634 
3635 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3636 {
3637 	int type, olen;
3638 	unsigned long val;
3639 	/* Use sane default values in case a misbehaving remote device
3640 	 * did not send an RFC or extended window size option.
3641 	 */
3642 	u16 txwin_ext = chan->ack_win;
3643 	struct l2cap_conf_rfc rfc = {
3644 		.mode = chan->mode,
3645 		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3646 		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3647 		.max_pdu_size = cpu_to_le16(chan->imtu),
3648 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3649 	};
3650 
3651 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3652 
3653 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3654 		return;
3655 
3656 	while (len >= L2CAP_CONF_OPT_SIZE) {
3657 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3658 
3659 		switch (type) {
3660 		case L2CAP_CONF_RFC:
3661 			if (olen == sizeof(rfc))
3662 				memcpy(&rfc, (void *)val, olen);
3663 			break;
3664 		case L2CAP_CONF_EWS:
3665 			txwin_ext = val;
3666 			break;
3667 		}
3668 	}
3669 
3670 	switch (rfc.mode) {
3671 	case L2CAP_MODE_ERTM:
3672 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3673 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3674 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
3675 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3676 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3677 		else
3678 			chan->ack_win = min_t(u16, chan->ack_win,
3679 					      rfc.txwin_size);
3680 		break;
3681 	case L2CAP_MODE_STREAMING:
3682 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3683 	}
3684 }
3685 
3686 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3687 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3688 				    u8 *data)
3689 {
3690 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3691 
3692 	if (cmd_len < sizeof(*rej))
3693 		return -EPROTO;
3694 
3695 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3696 		return 0;
3697 
3698 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3699 	    cmd->ident == conn->info_ident) {
3700 		cancel_delayed_work(&conn->info_timer);
3701 
3702 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3703 		conn->info_ident = 0;
3704 
3705 		l2cap_conn_start(conn);
3706 	}
3707 
3708 	return 0;
3709 }
3710 
3711 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3712 					struct l2cap_cmd_hdr *cmd,
3713 					u8 *data, u8 rsp_code, u8 amp_id)
3714 {
3715 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3716 	struct l2cap_conn_rsp rsp;
3717 	struct l2cap_chan *chan = NULL, *pchan;
3718 	struct sock *parent, *sk = NULL;
3719 	int result, status = L2CAP_CS_NO_INFO;
3720 
3721 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3722 	__le16 psm = req->psm;
3723 
3724 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3725 
3726 	/* Check if we have socket listening on psm */
3727 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3728 	if (!pchan) {
3729 		result = L2CAP_CR_BAD_PSM;
3730 		goto sendresp;
3731 	}
3732 
3733 	parent = pchan->sk;
3734 
3735 	mutex_lock(&conn->chan_lock);
3736 	lock_sock(parent);
3737 
3738 	/* Check if the ACL is secure enough (if not SDP) */
3739 	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3740 	    !hci_conn_check_link_mode(conn->hcon)) {
3741 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3742 		result = L2CAP_CR_SEC_BLOCK;
3743 		goto response;
3744 	}
3745 
3746 	result = L2CAP_CR_NO_MEM;
3747 
3748 	/* Check if we already have channel with that dcid */
3749 	if (__l2cap_get_chan_by_dcid(conn, scid))
3750 		goto response;
3751 
3752 	chan = pchan->ops->new_connection(pchan);
3753 	if (!chan)
3754 		goto response;
3755 
3756 	sk = chan->sk;
3757 
3758 	bacpy(&bt_sk(sk)->src, conn->src);
3759 	bacpy(&bt_sk(sk)->dst, conn->dst);
3760 	chan->psm  = psm;
3761 	chan->dcid = scid;
3762 	chan->local_amp_id = amp_id;
3763 
3764 	__l2cap_chan_add(conn, chan);
3765 
3766 	dcid = chan->scid;
3767 
3768 	__set_chan_timer(chan, sk->sk_sndtimeo);
3769 
3770 	chan->ident = cmd->ident;
3771 
3772 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3773 		if (l2cap_chan_check_security(chan)) {
3774 			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3775 				__l2cap_state_change(chan, BT_CONNECT2);
3776 				result = L2CAP_CR_PEND;
3777 				status = L2CAP_CS_AUTHOR_PEND;
3778 				chan->ops->defer(chan);
3779 			} else {
3780 				/* Force pending result for AMP controllers.
3781 				 * The connection will succeed after the
3782 				 * physical link is up.
3783 				 */
3784 				if (amp_id) {
3785 					__l2cap_state_change(chan, BT_CONNECT2);
3786 					result = L2CAP_CR_PEND;
3787 				} else {
3788 					__l2cap_state_change(chan, BT_CONFIG);
3789 					result = L2CAP_CR_SUCCESS;
3790 				}
3791 				status = L2CAP_CS_NO_INFO;
3792 			}
3793 		} else {
3794 			__l2cap_state_change(chan, BT_CONNECT2);
3795 			result = L2CAP_CR_PEND;
3796 			status = L2CAP_CS_AUTHEN_PEND;
3797 		}
3798 	} else {
3799 		__l2cap_state_change(chan, BT_CONNECT2);
3800 		result = L2CAP_CR_PEND;
3801 		status = L2CAP_CS_NO_INFO;
3802 	}
3803 
3804 response:
3805 	release_sock(parent);
3806 	mutex_unlock(&conn->chan_lock);
3807 
3808 sendresp:
3809 	rsp.scid   = cpu_to_le16(scid);
3810 	rsp.dcid   = cpu_to_le16(dcid);
3811 	rsp.result = cpu_to_le16(result);
3812 	rsp.status = cpu_to_le16(status);
3813 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3814 
3815 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3816 		struct l2cap_info_req info;
3817 		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3818 
3819 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3820 		conn->info_ident = l2cap_get_ident(conn);
3821 
3822 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3823 
3824 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3825 			       sizeof(info), &info);
3826 	}
3827 
3828 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3829 	    result == L2CAP_CR_SUCCESS) {
3830 		u8 buf[128];
3831 		set_bit(CONF_REQ_SENT, &chan->conf_state);
3832 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3833 			       l2cap_build_conf_req(chan, buf), buf);
3834 		chan->num_conf_req++;
3835 	}
3836 
3837 	return chan;
3838 }
3839 
3840 static int l2cap_connect_req(struct l2cap_conn *conn,
3841 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3842 {
3843 	struct hci_dev *hdev = conn->hcon->hdev;
3844 	struct hci_conn *hcon = conn->hcon;
3845 
3846 	if (cmd_len < sizeof(struct l2cap_conn_req))
3847 		return -EPROTO;
3848 
3849 	hci_dev_lock(hdev);
3850 	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3851 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3852 		mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3853 				      hcon->dst_type, 0, NULL, 0,
3854 				      hcon->dev_class);
3855 	hci_dev_unlock(hdev);
3856 
3857 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3858 	return 0;
3859 }
3860 
3861 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3862 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3863 				    u8 *data)
3864 {
3865 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3866 	u16 scid, dcid, result, status;
3867 	struct l2cap_chan *chan;
3868 	u8 req[128];
3869 	int err;
3870 
3871 	if (cmd_len < sizeof(*rsp))
3872 		return -EPROTO;
3873 
3874 	scid   = __le16_to_cpu(rsp->scid);
3875 	dcid   = __le16_to_cpu(rsp->dcid);
3876 	result = __le16_to_cpu(rsp->result);
3877 	status = __le16_to_cpu(rsp->status);
3878 
3879 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3880 	       dcid, scid, result, status);
3881 
3882 	mutex_lock(&conn->chan_lock);
3883 
3884 	if (scid) {
3885 		chan = __l2cap_get_chan_by_scid(conn, scid);
3886 		if (!chan) {
3887 			err = -EFAULT;
3888 			goto unlock;
3889 		}
3890 	} else {
3891 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3892 		if (!chan) {
3893 			err = -EFAULT;
3894 			goto unlock;
3895 		}
3896 	}
3897 
3898 	err = 0;
3899 
3900 	l2cap_chan_lock(chan);
3901 
3902 	switch (result) {
3903 	case L2CAP_CR_SUCCESS:
3904 		l2cap_state_change(chan, BT_CONFIG);
3905 		chan->ident = 0;
3906 		chan->dcid = dcid;
3907 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3908 
3909 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3910 			break;
3911 
3912 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3913 			       l2cap_build_conf_req(chan, req), req);
3914 		chan->num_conf_req++;
3915 		break;
3916 
3917 	case L2CAP_CR_PEND:
3918 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3919 		break;
3920 
3921 	default:
3922 		l2cap_chan_del(chan, ECONNREFUSED);
3923 		break;
3924 	}
3925 
3926 	l2cap_chan_unlock(chan);
3927 
3928 unlock:
3929 	mutex_unlock(&conn->chan_lock);
3930 
3931 	return err;
3932 }
3933 
3934 static inline void set_default_fcs(struct l2cap_chan *chan)
3935 {
3936 	/* FCS is enabled only in ERTM or streaming mode, if one or both
3937 	 * sides request it.
3938 	 */
3939 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3940 		chan->fcs = L2CAP_FCS_NONE;
3941 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3942 		chan->fcs = L2CAP_FCS_CRC16;
3943 }
3944 
3945 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3946 				    u8 ident, u16 flags)
3947 {
3948 	struct l2cap_conn *conn = chan->conn;
3949 
3950 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3951 	       flags);
3952 
3953 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3954 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3955 
3956 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3957 		       l2cap_build_conf_rsp(chan, data,
3958 					    L2CAP_CONF_SUCCESS, flags), data);
3959 }
3960 
3961 static inline int l2cap_config_req(struct l2cap_conn *conn,
3962 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3963 				   u8 *data)
3964 {
3965 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3966 	u16 dcid, flags;
3967 	u8 rsp[64];
3968 	struct l2cap_chan *chan;
3969 	int len, err = 0;
3970 
3971 	if (cmd_len < sizeof(*req))
3972 		return -EPROTO;
3973 
3974 	dcid  = __le16_to_cpu(req->dcid);
3975 	flags = __le16_to_cpu(req->flags);
3976 
3977 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3978 
3979 	chan = l2cap_get_chan_by_scid(conn, dcid);
3980 	if (!chan)
3981 		return -ENOENT;
3982 
3983 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3984 		struct l2cap_cmd_rej_cid rej;
3985 
3986 		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3987 		rej.scid = cpu_to_le16(chan->scid);
3988 		rej.dcid = cpu_to_le16(chan->dcid);
3989 
3990 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3991 			       sizeof(rej), &rej);
3992 		goto unlock;
3993 	}
3994 
3995 	/* Reject if config buffer is too small. */
3996 	len = cmd_len - sizeof(*req);
3997 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
3998 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3999 			       l2cap_build_conf_rsp(chan, rsp,
4000 			       L2CAP_CONF_REJECT, flags), rsp);
4001 		goto unlock;
4002 	}
4003 
4004 	/* Store config. */
4005 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
4006 	chan->conf_len += len;
4007 
4008 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4009 		/* Incomplete config. Send empty response. */
4010 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4011 			       l2cap_build_conf_rsp(chan, rsp,
4012 			       L2CAP_CONF_SUCCESS, flags), rsp);
4013 		goto unlock;
4014 	}
4015 
4016 	/* Complete config. */
4017 	len = l2cap_parse_conf_req(chan, rsp);
4018 	if (len < 0) {
4019 		l2cap_send_disconn_req(chan, ECONNRESET);
4020 		goto unlock;
4021 	}
4022 
4023 	chan->ident = cmd->ident;
4024 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4025 	chan->num_conf_rsp++;
4026 
4027 	/* Reset config buffer. */
4028 	chan->conf_len = 0;
4029 
4030 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4031 		goto unlock;
4032 
4033 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4034 		set_default_fcs(chan);
4035 
4036 		if (chan->mode == L2CAP_MODE_ERTM ||
4037 		    chan->mode == L2CAP_MODE_STREAMING)
4038 			err = l2cap_ertm_init(chan);
4039 
4040 		if (err < 0)
4041 			l2cap_send_disconn_req(chan, -err);
4042 		else
4043 			l2cap_chan_ready(chan);
4044 
4045 		goto unlock;
4046 	}
4047 
4048 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4049 		u8 buf[64];
4050 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4051 			       l2cap_build_conf_req(chan, buf), buf);
4052 		chan->num_conf_req++;
4053 	}
4054 
4055 	/* Got Conf Rsp PENDING from remote side and asume we sent
4056 	   Conf Rsp PENDING in the code above */
4057 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4058 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4059 
4060 		/* check compatibility */
4061 
4062 		/* Send rsp for BR/EDR channel */
4063 		if (!chan->hs_hcon)
4064 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4065 		else
4066 			chan->ident = cmd->ident;
4067 	}
4068 
4069 unlock:
4070 	l2cap_chan_unlock(chan);
4071 	return err;
4072 }
4073 
4074 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4075 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4076 				   u8 *data)
4077 {
4078 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4079 	u16 scid, flags, result;
4080 	struct l2cap_chan *chan;
4081 	int len = cmd_len - sizeof(*rsp);
4082 	int err = 0;
4083 
4084 	if (cmd_len < sizeof(*rsp))
4085 		return -EPROTO;
4086 
4087 	scid   = __le16_to_cpu(rsp->scid);
4088 	flags  = __le16_to_cpu(rsp->flags);
4089 	result = __le16_to_cpu(rsp->result);
4090 
4091 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4092 	       result, len);
4093 
4094 	chan = l2cap_get_chan_by_scid(conn, scid);
4095 	if (!chan)
4096 		return 0;
4097 
4098 	switch (result) {
4099 	case L2CAP_CONF_SUCCESS:
4100 		l2cap_conf_rfc_get(chan, rsp->data, len);
4101 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4102 		break;
4103 
4104 	case L2CAP_CONF_PENDING:
4105 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4106 
4107 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4108 			char buf[64];
4109 
4110 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4111 						   buf, &result);
4112 			if (len < 0) {
4113 				l2cap_send_disconn_req(chan, ECONNRESET);
4114 				goto done;
4115 			}
4116 
4117 			if (!chan->hs_hcon) {
4118 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4119 							0);
4120 			} else {
4121 				if (l2cap_check_efs(chan)) {
4122 					amp_create_logical_link(chan);
4123 					chan->ident = cmd->ident;
4124 				}
4125 			}
4126 		}
4127 		goto done;
4128 
4129 	case L2CAP_CONF_UNACCEPT:
4130 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4131 			char req[64];
4132 
4133 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4134 				l2cap_send_disconn_req(chan, ECONNRESET);
4135 				goto done;
4136 			}
4137 
4138 			/* throw out any old stored conf requests */
4139 			result = L2CAP_CONF_SUCCESS;
4140 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4141 						   req, &result);
4142 			if (len < 0) {
4143 				l2cap_send_disconn_req(chan, ECONNRESET);
4144 				goto done;
4145 			}
4146 
4147 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4148 				       L2CAP_CONF_REQ, len, req);
4149 			chan->num_conf_req++;
4150 			if (result != L2CAP_CONF_SUCCESS)
4151 				goto done;
4152 			break;
4153 		}
4154 
4155 	default:
4156 		l2cap_chan_set_err(chan, ECONNRESET);
4157 
4158 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4159 		l2cap_send_disconn_req(chan, ECONNRESET);
4160 		goto done;
4161 	}
4162 
4163 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4164 		goto done;
4165 
4166 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4167 
4168 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4169 		set_default_fcs(chan);
4170 
4171 		if (chan->mode == L2CAP_MODE_ERTM ||
4172 		    chan->mode == L2CAP_MODE_STREAMING)
4173 			err = l2cap_ertm_init(chan);
4174 
4175 		if (err < 0)
4176 			l2cap_send_disconn_req(chan, -err);
4177 		else
4178 			l2cap_chan_ready(chan);
4179 	}
4180 
4181 done:
4182 	l2cap_chan_unlock(chan);
4183 	return err;
4184 }
4185 
4186 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4187 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4188 				       u8 *data)
4189 {
4190 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4191 	struct l2cap_disconn_rsp rsp;
4192 	u16 dcid, scid;
4193 	struct l2cap_chan *chan;
4194 	struct sock *sk;
4195 
4196 	if (cmd_len != sizeof(*req))
4197 		return -EPROTO;
4198 
4199 	scid = __le16_to_cpu(req->scid);
4200 	dcid = __le16_to_cpu(req->dcid);
4201 
4202 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4203 
4204 	mutex_lock(&conn->chan_lock);
4205 
4206 	chan = __l2cap_get_chan_by_scid(conn, dcid);
4207 	if (!chan) {
4208 		mutex_unlock(&conn->chan_lock);
4209 		return 0;
4210 	}
4211 
4212 	l2cap_chan_lock(chan);
4213 
4214 	sk = chan->sk;
4215 
4216 	rsp.dcid = cpu_to_le16(chan->scid);
4217 	rsp.scid = cpu_to_le16(chan->dcid);
4218 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4219 
4220 	lock_sock(sk);
4221 	sk->sk_shutdown = SHUTDOWN_MASK;
4222 	release_sock(sk);
4223 
4224 	l2cap_chan_hold(chan);
4225 	l2cap_chan_del(chan, ECONNRESET);
4226 
4227 	l2cap_chan_unlock(chan);
4228 
4229 	chan->ops->close(chan);
4230 	l2cap_chan_put(chan);
4231 
4232 	mutex_unlock(&conn->chan_lock);
4233 
4234 	return 0;
4235 }
4236 
4237 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4238 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4239 				       u8 *data)
4240 {
4241 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4242 	u16 dcid, scid;
4243 	struct l2cap_chan *chan;
4244 
4245 	if (cmd_len != sizeof(*rsp))
4246 		return -EPROTO;
4247 
4248 	scid = __le16_to_cpu(rsp->scid);
4249 	dcid = __le16_to_cpu(rsp->dcid);
4250 
4251 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4252 
4253 	mutex_lock(&conn->chan_lock);
4254 
4255 	chan = __l2cap_get_chan_by_scid(conn, scid);
4256 	if (!chan) {
4257 		mutex_unlock(&conn->chan_lock);
4258 		return 0;
4259 	}
4260 
4261 	l2cap_chan_lock(chan);
4262 
4263 	l2cap_chan_hold(chan);
4264 	l2cap_chan_del(chan, 0);
4265 
4266 	l2cap_chan_unlock(chan);
4267 
4268 	chan->ops->close(chan);
4269 	l2cap_chan_put(chan);
4270 
4271 	mutex_unlock(&conn->chan_lock);
4272 
4273 	return 0;
4274 }
4275 
4276 static inline int l2cap_information_req(struct l2cap_conn *conn,
4277 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4278 					u8 *data)
4279 {
4280 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4281 	u16 type;
4282 
4283 	if (cmd_len != sizeof(*req))
4284 		return -EPROTO;
4285 
4286 	type = __le16_to_cpu(req->type);
4287 
4288 	BT_DBG("type 0x%4.4x", type);
4289 
4290 	if (type == L2CAP_IT_FEAT_MASK) {
4291 		u8 buf[8];
4292 		u32 feat_mask = l2cap_feat_mask;
4293 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4294 		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4295 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4296 		if (!disable_ertm)
4297 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4298 				| L2CAP_FEAT_FCS;
4299 		if (enable_hs)
4300 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4301 				| L2CAP_FEAT_EXT_WINDOW;
4302 
4303 		put_unaligned_le32(feat_mask, rsp->data);
4304 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4305 			       buf);
4306 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4307 		u8 buf[12];
4308 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4309 
4310 		if (enable_hs)
4311 			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4312 		else
4313 			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4314 
4315 		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4316 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4317 		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4318 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4319 			       buf);
4320 	} else {
4321 		struct l2cap_info_rsp rsp;
4322 		rsp.type   = cpu_to_le16(type);
4323 		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4324 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4325 			       &rsp);
4326 	}
4327 
4328 	return 0;
4329 }
4330 
4331 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4332 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4333 					u8 *data)
4334 {
4335 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4336 	u16 type, result;
4337 
4338 	if (cmd_len < sizeof(*rsp))
4339 		return -EPROTO;
4340 
4341 	type   = __le16_to_cpu(rsp->type);
4342 	result = __le16_to_cpu(rsp->result);
4343 
4344 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4345 
4346 	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
4347 	if (cmd->ident != conn->info_ident ||
4348 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4349 		return 0;
4350 
4351 	cancel_delayed_work(&conn->info_timer);
4352 
4353 	if (result != L2CAP_IR_SUCCESS) {
4354 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4355 		conn->info_ident = 0;
4356 
4357 		l2cap_conn_start(conn);
4358 
4359 		return 0;
4360 	}
4361 
4362 	switch (type) {
4363 	case L2CAP_IT_FEAT_MASK:
4364 		conn->feat_mask = get_unaligned_le32(rsp->data);
4365 
4366 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4367 			struct l2cap_info_req req;
4368 			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4369 
4370 			conn->info_ident = l2cap_get_ident(conn);
4371 
4372 			l2cap_send_cmd(conn, conn->info_ident,
4373 				       L2CAP_INFO_REQ, sizeof(req), &req);
4374 		} else {
4375 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4376 			conn->info_ident = 0;
4377 
4378 			l2cap_conn_start(conn);
4379 		}
4380 		break;
4381 
4382 	case L2CAP_IT_FIXED_CHAN:
4383 		conn->fixed_chan_mask = rsp->data[0];
4384 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4385 		conn->info_ident = 0;
4386 
4387 		l2cap_conn_start(conn);
4388 		break;
4389 	}
4390 
4391 	return 0;
4392 }
4393 
4394 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4395 				    struct l2cap_cmd_hdr *cmd,
4396 				    u16 cmd_len, void *data)
4397 {
4398 	struct l2cap_create_chan_req *req = data;
4399 	struct l2cap_create_chan_rsp rsp;
4400 	struct l2cap_chan *chan;
4401 	struct hci_dev *hdev;
4402 	u16 psm, scid;
4403 
4404 	if (cmd_len != sizeof(*req))
4405 		return -EPROTO;
4406 
4407 	if (!enable_hs)
4408 		return -EINVAL;
4409 
4410 	psm = le16_to_cpu(req->psm);
4411 	scid = le16_to_cpu(req->scid);
4412 
4413 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4414 
4415 	/* For controller id 0 make BR/EDR connection */
4416 	if (req->amp_id == HCI_BREDR_ID) {
4417 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4418 			      req->amp_id);
4419 		return 0;
4420 	}
4421 
4422 	/* Validate AMP controller id */
4423 	hdev = hci_dev_get(req->amp_id);
4424 	if (!hdev)
4425 		goto error;
4426 
4427 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4428 		hci_dev_put(hdev);
4429 		goto error;
4430 	}
4431 
4432 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4433 			     req->amp_id);
4434 	if (chan) {
4435 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4436 		struct hci_conn *hs_hcon;
4437 
4438 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
4439 		if (!hs_hcon) {
4440 			hci_dev_put(hdev);
4441 			return -EFAULT;
4442 		}
4443 
4444 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4445 
4446 		mgr->bredr_chan = chan;
4447 		chan->hs_hcon = hs_hcon;
4448 		chan->fcs = L2CAP_FCS_NONE;
4449 		conn->mtu = hdev->block_mtu;
4450 	}
4451 
4452 	hci_dev_put(hdev);
4453 
4454 	return 0;
4455 
4456 error:
4457 	rsp.dcid = 0;
4458 	rsp.scid = cpu_to_le16(scid);
4459 	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4460 	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4461 
4462 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4463 		       sizeof(rsp), &rsp);
4464 
4465 	return -EFAULT;
4466 }
4467 
4468 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4469 {
4470 	struct l2cap_move_chan_req req;
4471 	u8 ident;
4472 
4473 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4474 
4475 	ident = l2cap_get_ident(chan->conn);
4476 	chan->ident = ident;
4477 
4478 	req.icid = cpu_to_le16(chan->scid);
4479 	req.dest_amp_id = dest_amp_id;
4480 
4481 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4482 		       &req);
4483 
4484 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4485 }
4486 
4487 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4488 {
4489 	struct l2cap_move_chan_rsp rsp;
4490 
4491 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4492 
4493 	rsp.icid = cpu_to_le16(chan->dcid);
4494 	rsp.result = cpu_to_le16(result);
4495 
4496 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4497 		       sizeof(rsp), &rsp);
4498 }
4499 
4500 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4501 {
4502 	struct l2cap_move_chan_cfm cfm;
4503 
4504 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4505 
4506 	chan->ident = l2cap_get_ident(chan->conn);
4507 
4508 	cfm.icid = cpu_to_le16(chan->scid);
4509 	cfm.result = cpu_to_le16(result);
4510 
4511 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4512 		       sizeof(cfm), &cfm);
4513 
4514 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4515 }
4516 
4517 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4518 {
4519 	struct l2cap_move_chan_cfm cfm;
4520 
4521 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4522 
4523 	cfm.icid = cpu_to_le16(icid);
4524 	cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4525 
4526 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4527 		       sizeof(cfm), &cfm);
4528 }
4529 
4530 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4531 					 u16 icid)
4532 {
4533 	struct l2cap_move_chan_cfm_rsp rsp;
4534 
4535 	BT_DBG("icid 0x%4.4x", icid);
4536 
4537 	rsp.icid = cpu_to_le16(icid);
4538 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4539 }
4540 
4541 static void __release_logical_link(struct l2cap_chan *chan)
4542 {
4543 	chan->hs_hchan = NULL;
4544 	chan->hs_hcon = NULL;
4545 
4546 	/* Placeholder - release the logical link */
4547 }
4548 
4549 static void l2cap_logical_fail(struct l2cap_chan *chan)
4550 {
4551 	/* Logical link setup failed */
4552 	if (chan->state != BT_CONNECTED) {
4553 		/* Create channel failure, disconnect */
4554 		l2cap_send_disconn_req(chan, ECONNRESET);
4555 		return;
4556 	}
4557 
4558 	switch (chan->move_role) {
4559 	case L2CAP_MOVE_ROLE_RESPONDER:
4560 		l2cap_move_done(chan);
4561 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4562 		break;
4563 	case L2CAP_MOVE_ROLE_INITIATOR:
4564 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4565 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4566 			/* Remote has only sent pending or
4567 			 * success responses, clean up
4568 			 */
4569 			l2cap_move_done(chan);
4570 		}
4571 
4572 		/* Other amp move states imply that the move
4573 		 * has already aborted
4574 		 */
4575 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4576 		break;
4577 	}
4578 }
4579 
4580 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4581 					struct hci_chan *hchan)
4582 {
4583 	struct l2cap_conf_rsp rsp;
4584 
4585 	chan->hs_hchan = hchan;
4586 	chan->hs_hcon->l2cap_data = chan->conn;
4587 
4588 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4589 
4590 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4591 		int err;
4592 
4593 		set_default_fcs(chan);
4594 
4595 		err = l2cap_ertm_init(chan);
4596 		if (err < 0)
4597 			l2cap_send_disconn_req(chan, -err);
4598 		else
4599 			l2cap_chan_ready(chan);
4600 	}
4601 }
4602 
4603 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4604 				      struct hci_chan *hchan)
4605 {
4606 	chan->hs_hcon = hchan->conn;
4607 	chan->hs_hcon->l2cap_data = chan->conn;
4608 
4609 	BT_DBG("move_state %d", chan->move_state);
4610 
4611 	switch (chan->move_state) {
4612 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4613 		/* Move confirm will be sent after a success
4614 		 * response is received
4615 		 */
4616 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4617 		break;
4618 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4619 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4620 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4621 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4622 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4623 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4624 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4625 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4626 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4627 		}
4628 		break;
4629 	default:
4630 		/* Move was not in expected state, free the channel */
4631 		__release_logical_link(chan);
4632 
4633 		chan->move_state = L2CAP_MOVE_STABLE;
4634 	}
4635 }
4636 
4637 /* Call with chan locked */
4638 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4639 		       u8 status)
4640 {
4641 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4642 
4643 	if (status) {
4644 		l2cap_logical_fail(chan);
4645 		__release_logical_link(chan);
4646 		return;
4647 	}
4648 
4649 	if (chan->state != BT_CONNECTED) {
4650 		/* Ignore logical link if channel is on BR/EDR */
4651 		if (chan->local_amp_id)
4652 			l2cap_logical_finish_create(chan, hchan);
4653 	} else {
4654 		l2cap_logical_finish_move(chan, hchan);
4655 	}
4656 }
4657 
4658 void l2cap_move_start(struct l2cap_chan *chan)
4659 {
4660 	BT_DBG("chan %p", chan);
4661 
4662 	if (chan->local_amp_id == HCI_BREDR_ID) {
4663 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4664 			return;
4665 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4666 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4667 		/* Placeholder - start physical link setup */
4668 	} else {
4669 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4670 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4671 		chan->move_id = 0;
4672 		l2cap_move_setup(chan);
4673 		l2cap_send_move_chan_req(chan, 0);
4674 	}
4675 }
4676 
4677 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4678 			    u8 local_amp_id, u8 remote_amp_id)
4679 {
4680 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4681 	       local_amp_id, remote_amp_id);
4682 
4683 	chan->fcs = L2CAP_FCS_NONE;
4684 
4685 	/* Outgoing channel on AMP */
4686 	if (chan->state == BT_CONNECT) {
4687 		if (result == L2CAP_CR_SUCCESS) {
4688 			chan->local_amp_id = local_amp_id;
4689 			l2cap_send_create_chan_req(chan, remote_amp_id);
4690 		} else {
4691 			/* Revert to BR/EDR connect */
4692 			l2cap_send_conn_req(chan);
4693 		}
4694 
4695 		return;
4696 	}
4697 
4698 	/* Incoming channel on AMP */
4699 	if (__l2cap_no_conn_pending(chan)) {
4700 		struct l2cap_conn_rsp rsp;
4701 		char buf[128];
4702 		rsp.scid = cpu_to_le16(chan->dcid);
4703 		rsp.dcid = cpu_to_le16(chan->scid);
4704 
4705 		if (result == L2CAP_CR_SUCCESS) {
4706 			/* Send successful response */
4707 			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4708 			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4709 		} else {
4710 			/* Send negative response */
4711 			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4712 			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4713 		}
4714 
4715 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4716 			       sizeof(rsp), &rsp);
4717 
4718 		if (result == L2CAP_CR_SUCCESS) {
4719 			__l2cap_state_change(chan, BT_CONFIG);
4720 			set_bit(CONF_REQ_SENT, &chan->conf_state);
4721 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4722 				       L2CAP_CONF_REQ,
4723 				       l2cap_build_conf_req(chan, buf), buf);
4724 			chan->num_conf_req++;
4725 		}
4726 	}
4727 }
4728 
4729 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4730 				   u8 remote_amp_id)
4731 {
4732 	l2cap_move_setup(chan);
4733 	chan->move_id = local_amp_id;
4734 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
4735 
4736 	l2cap_send_move_chan_req(chan, remote_amp_id);
4737 }
4738 
4739 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4740 {
4741 	struct hci_chan *hchan = NULL;
4742 
4743 	/* Placeholder - get hci_chan for logical link */
4744 
4745 	if (hchan) {
4746 		if (hchan->state == BT_CONNECTED) {
4747 			/* Logical link is ready to go */
4748 			chan->hs_hcon = hchan->conn;
4749 			chan->hs_hcon->l2cap_data = chan->conn;
4750 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4751 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4752 
4753 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4754 		} else {
4755 			/* Wait for logical link to be ready */
4756 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4757 		}
4758 	} else {
4759 		/* Logical link not available */
4760 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4761 	}
4762 }
4763 
4764 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4765 {
4766 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4767 		u8 rsp_result;
4768 		if (result == -EINVAL)
4769 			rsp_result = L2CAP_MR_BAD_ID;
4770 		else
4771 			rsp_result = L2CAP_MR_NOT_ALLOWED;
4772 
4773 		l2cap_send_move_chan_rsp(chan, rsp_result);
4774 	}
4775 
4776 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
4777 	chan->move_state = L2CAP_MOVE_STABLE;
4778 
4779 	/* Restart data transmission */
4780 	l2cap_ertm_send(chan);
4781 }
4782 
4783 /* Invoke with locked chan */
4784 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4785 {
4786 	u8 local_amp_id = chan->local_amp_id;
4787 	u8 remote_amp_id = chan->remote_amp_id;
4788 
4789 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4790 	       chan, result, local_amp_id, remote_amp_id);
4791 
4792 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4793 		l2cap_chan_unlock(chan);
4794 		return;
4795 	}
4796 
4797 	if (chan->state != BT_CONNECTED) {
4798 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4799 	} else if (result != L2CAP_MR_SUCCESS) {
4800 		l2cap_do_move_cancel(chan, result);
4801 	} else {
4802 		switch (chan->move_role) {
4803 		case L2CAP_MOVE_ROLE_INITIATOR:
4804 			l2cap_do_move_initiate(chan, local_amp_id,
4805 					       remote_amp_id);
4806 			break;
4807 		case L2CAP_MOVE_ROLE_RESPONDER:
4808 			l2cap_do_move_respond(chan, result);
4809 			break;
4810 		default:
4811 			l2cap_do_move_cancel(chan, result);
4812 			break;
4813 		}
4814 	}
4815 }
4816 
4817 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4818 					 struct l2cap_cmd_hdr *cmd,
4819 					 u16 cmd_len, void *data)
4820 {
4821 	struct l2cap_move_chan_req *req = data;
4822 	struct l2cap_move_chan_rsp rsp;
4823 	struct l2cap_chan *chan;
4824 	u16 icid = 0;
4825 	u16 result = L2CAP_MR_NOT_ALLOWED;
4826 
4827 	if (cmd_len != sizeof(*req))
4828 		return -EPROTO;
4829 
4830 	icid = le16_to_cpu(req->icid);
4831 
4832 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4833 
4834 	if (!enable_hs)
4835 		return -EINVAL;
4836 
4837 	chan = l2cap_get_chan_by_dcid(conn, icid);
4838 	if (!chan) {
4839 		rsp.icid = cpu_to_le16(icid);
4840 		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4841 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4842 			       sizeof(rsp), &rsp);
4843 		return 0;
4844 	}
4845 
4846 	chan->ident = cmd->ident;
4847 
4848 	if (chan->scid < L2CAP_CID_DYN_START ||
4849 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4850 	    (chan->mode != L2CAP_MODE_ERTM &&
4851 	     chan->mode != L2CAP_MODE_STREAMING)) {
4852 		result = L2CAP_MR_NOT_ALLOWED;
4853 		goto send_move_response;
4854 	}
4855 
4856 	if (chan->local_amp_id == req->dest_amp_id) {
4857 		result = L2CAP_MR_SAME_ID;
4858 		goto send_move_response;
4859 	}
4860 
4861 	if (req->dest_amp_id) {
4862 		struct hci_dev *hdev;
4863 		hdev = hci_dev_get(req->dest_amp_id);
4864 		if (!hdev || hdev->dev_type != HCI_AMP ||
4865 		    !test_bit(HCI_UP, &hdev->flags)) {
4866 			if (hdev)
4867 				hci_dev_put(hdev);
4868 
4869 			result = L2CAP_MR_BAD_ID;
4870 			goto send_move_response;
4871 		}
4872 		hci_dev_put(hdev);
4873 	}
4874 
4875 	/* Detect a move collision.  Only send a collision response
4876 	 * if this side has "lost", otherwise proceed with the move.
4877 	 * The winner has the larger bd_addr.
4878 	 */
4879 	if ((__chan_is_moving(chan) ||
4880 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4881 	    bacmp(conn->src, conn->dst) > 0) {
4882 		result = L2CAP_MR_COLLISION;
4883 		goto send_move_response;
4884 	}
4885 
4886 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4887 	l2cap_move_setup(chan);
4888 	chan->move_id = req->dest_amp_id;
4889 	icid = chan->dcid;
4890 
4891 	if (!req->dest_amp_id) {
4892 		/* Moving to BR/EDR */
4893 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4894 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4895 			result = L2CAP_MR_PEND;
4896 		} else {
4897 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4898 			result = L2CAP_MR_SUCCESS;
4899 		}
4900 	} else {
4901 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4902 		/* Placeholder - uncomment when amp functions are available */
4903 		/*amp_accept_physical(chan, req->dest_amp_id);*/
4904 		result = L2CAP_MR_PEND;
4905 	}
4906 
4907 send_move_response:
4908 	l2cap_send_move_chan_rsp(chan, result);
4909 
4910 	l2cap_chan_unlock(chan);
4911 
4912 	return 0;
4913 }
4914 
4915 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4916 {
4917 	struct l2cap_chan *chan;
4918 	struct hci_chan *hchan = NULL;
4919 
4920 	chan = l2cap_get_chan_by_scid(conn, icid);
4921 	if (!chan) {
4922 		l2cap_send_move_chan_cfm_icid(conn, icid);
4923 		return;
4924 	}
4925 
4926 	__clear_chan_timer(chan);
4927 	if (result == L2CAP_MR_PEND)
4928 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4929 
4930 	switch (chan->move_state) {
4931 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4932 		/* Move confirm will be sent when logical link
4933 		 * is complete.
4934 		 */
4935 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4936 		break;
4937 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4938 		if (result == L2CAP_MR_PEND) {
4939 			break;
4940 		} else if (test_bit(CONN_LOCAL_BUSY,
4941 				    &chan->conn_state)) {
4942 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4943 		} else {
4944 			/* Logical link is up or moving to BR/EDR,
4945 			 * proceed with move
4946 			 */
4947 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4948 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4949 		}
4950 		break;
4951 	case L2CAP_MOVE_WAIT_RSP:
4952 		/* Moving to AMP */
4953 		if (result == L2CAP_MR_SUCCESS) {
4954 			/* Remote is ready, send confirm immediately
4955 			 * after logical link is ready
4956 			 */
4957 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4958 		} else {
4959 			/* Both logical link and move success
4960 			 * are required to confirm
4961 			 */
4962 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4963 		}
4964 
4965 		/* Placeholder - get hci_chan for logical link */
4966 		if (!hchan) {
4967 			/* Logical link not available */
4968 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4969 			break;
4970 		}
4971 
4972 		/* If the logical link is not yet connected, do not
4973 		 * send confirmation.
4974 		 */
4975 		if (hchan->state != BT_CONNECTED)
4976 			break;
4977 
4978 		/* Logical link is already ready to go */
4979 
4980 		chan->hs_hcon = hchan->conn;
4981 		chan->hs_hcon->l2cap_data = chan->conn;
4982 
4983 		if (result == L2CAP_MR_SUCCESS) {
4984 			/* Can confirm now */
4985 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4986 		} else {
4987 			/* Now only need move success
4988 			 * to confirm
4989 			 */
4990 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4991 		}
4992 
4993 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4994 		break;
4995 	default:
4996 		/* Any other amp move state means the move failed. */
4997 		chan->move_id = chan->local_amp_id;
4998 		l2cap_move_done(chan);
4999 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5000 	}
5001 
5002 	l2cap_chan_unlock(chan);
5003 }
5004 
5005 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5006 			    u16 result)
5007 {
5008 	struct l2cap_chan *chan;
5009 
5010 	chan = l2cap_get_chan_by_ident(conn, ident);
5011 	if (!chan) {
5012 		/* Could not locate channel, icid is best guess */
5013 		l2cap_send_move_chan_cfm_icid(conn, icid);
5014 		return;
5015 	}
5016 
5017 	__clear_chan_timer(chan);
5018 
5019 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5020 		if (result == L2CAP_MR_COLLISION) {
5021 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5022 		} else {
5023 			/* Cleanup - cancel move */
5024 			chan->move_id = chan->local_amp_id;
5025 			l2cap_move_done(chan);
5026 		}
5027 	}
5028 
5029 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5030 
5031 	l2cap_chan_unlock(chan);
5032 }
5033 
5034 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5035 				  struct l2cap_cmd_hdr *cmd,
5036 				  u16 cmd_len, void *data)
5037 {
5038 	struct l2cap_move_chan_rsp *rsp = data;
5039 	u16 icid, result;
5040 
5041 	if (cmd_len != sizeof(*rsp))
5042 		return -EPROTO;
5043 
5044 	icid = le16_to_cpu(rsp->icid);
5045 	result = le16_to_cpu(rsp->result);
5046 
5047 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5048 
5049 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5050 		l2cap_move_continue(conn, icid, result);
5051 	else
5052 		l2cap_move_fail(conn, cmd->ident, icid, result);
5053 
5054 	return 0;
5055 }
5056 
5057 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5058 				      struct l2cap_cmd_hdr *cmd,
5059 				      u16 cmd_len, void *data)
5060 {
5061 	struct l2cap_move_chan_cfm *cfm = data;
5062 	struct l2cap_chan *chan;
5063 	u16 icid, result;
5064 
5065 	if (cmd_len != sizeof(*cfm))
5066 		return -EPROTO;
5067 
5068 	icid = le16_to_cpu(cfm->icid);
5069 	result = le16_to_cpu(cfm->result);
5070 
5071 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5072 
5073 	chan = l2cap_get_chan_by_dcid(conn, icid);
5074 	if (!chan) {
5075 		/* Spec requires a response even if the icid was not found */
5076 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5077 		return 0;
5078 	}
5079 
5080 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5081 		if (result == L2CAP_MC_CONFIRMED) {
5082 			chan->local_amp_id = chan->move_id;
5083 			if (!chan->local_amp_id)
5084 				__release_logical_link(chan);
5085 		} else {
5086 			chan->move_id = chan->local_amp_id;
5087 		}
5088 
5089 		l2cap_move_done(chan);
5090 	}
5091 
5092 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5093 
5094 	l2cap_chan_unlock(chan);
5095 
5096 	return 0;
5097 }
5098 
5099 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5100 						 struct l2cap_cmd_hdr *cmd,
5101 						 u16 cmd_len, void *data)
5102 {
5103 	struct l2cap_move_chan_cfm_rsp *rsp = data;
5104 	struct l2cap_chan *chan;
5105 	u16 icid;
5106 
5107 	if (cmd_len != sizeof(*rsp))
5108 		return -EPROTO;
5109 
5110 	icid = le16_to_cpu(rsp->icid);
5111 
5112 	BT_DBG("icid 0x%4.4x", icid);
5113 
5114 	chan = l2cap_get_chan_by_scid(conn, icid);
5115 	if (!chan)
5116 		return 0;
5117 
5118 	__clear_chan_timer(chan);
5119 
5120 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5121 		chan->local_amp_id = chan->move_id;
5122 
5123 		if (!chan->local_amp_id && chan->hs_hchan)
5124 			__release_logical_link(chan);
5125 
5126 		l2cap_move_done(chan);
5127 	}
5128 
5129 	l2cap_chan_unlock(chan);
5130 
5131 	return 0;
5132 }
5133 
5134 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5135 					 u16 to_multiplier)
5136 {
5137 	u16 max_latency;
5138 
5139 	if (min > max || min < 6 || max > 3200)
5140 		return -EINVAL;
5141 
5142 	if (to_multiplier < 10 || to_multiplier > 3200)
5143 		return -EINVAL;
5144 
5145 	if (max >= to_multiplier * 8)
5146 		return -EINVAL;
5147 
5148 	max_latency = (to_multiplier * 8 / max) - 1;
5149 	if (latency > 499 || latency > max_latency)
5150 		return -EINVAL;
5151 
5152 	return 0;
5153 }
5154 
5155 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5156 					      struct l2cap_cmd_hdr *cmd,
5157 					      u8 *data)
5158 {
5159 	struct hci_conn *hcon = conn->hcon;
5160 	struct l2cap_conn_param_update_req *req;
5161 	struct l2cap_conn_param_update_rsp rsp;
5162 	u16 min, max, latency, to_multiplier, cmd_len;
5163 	int err;
5164 
5165 	if (!(hcon->link_mode & HCI_LM_MASTER))
5166 		return -EINVAL;
5167 
5168 	cmd_len = __le16_to_cpu(cmd->len);
5169 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5170 		return -EPROTO;
5171 
5172 	req = (struct l2cap_conn_param_update_req *) data;
5173 	min		= __le16_to_cpu(req->min);
5174 	max		= __le16_to_cpu(req->max);
5175 	latency		= __le16_to_cpu(req->latency);
5176 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5177 
5178 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5179 	       min, max, latency, to_multiplier);
5180 
5181 	memset(&rsp, 0, sizeof(rsp));
5182 
5183 	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5184 	if (err)
5185 		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5186 	else
5187 		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5188 
5189 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5190 		       sizeof(rsp), &rsp);
5191 
5192 	if (!err)
5193 		hci_le_conn_update(hcon, min, max, latency, to_multiplier);
5194 
5195 	return 0;
5196 }
5197 
5198 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5199 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5200 				      u8 *data)
5201 {
5202 	int err = 0;
5203 
5204 	switch (cmd->code) {
5205 	case L2CAP_COMMAND_REJ:
5206 		l2cap_command_rej(conn, cmd, cmd_len, data);
5207 		break;
5208 
5209 	case L2CAP_CONN_REQ:
5210 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5211 		break;
5212 
5213 	case L2CAP_CONN_RSP:
5214 	case L2CAP_CREATE_CHAN_RSP:
5215 		err = l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5216 		break;
5217 
5218 	case L2CAP_CONF_REQ:
5219 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5220 		break;
5221 
5222 	case L2CAP_CONF_RSP:
5223 		err = l2cap_config_rsp(conn, cmd, cmd_len, data);
5224 		break;
5225 
5226 	case L2CAP_DISCONN_REQ:
5227 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5228 		break;
5229 
5230 	case L2CAP_DISCONN_RSP:
5231 		err = l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5232 		break;
5233 
5234 	case L2CAP_ECHO_REQ:
5235 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5236 		break;
5237 
5238 	case L2CAP_ECHO_RSP:
5239 		break;
5240 
5241 	case L2CAP_INFO_REQ:
5242 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5243 		break;
5244 
5245 	case L2CAP_INFO_RSP:
5246 		err = l2cap_information_rsp(conn, cmd, cmd_len, data);
5247 		break;
5248 
5249 	case L2CAP_CREATE_CHAN_REQ:
5250 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5251 		break;
5252 
5253 	case L2CAP_MOVE_CHAN_REQ:
5254 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5255 		break;
5256 
5257 	case L2CAP_MOVE_CHAN_RSP:
5258 		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5259 		break;
5260 
5261 	case L2CAP_MOVE_CHAN_CFM:
5262 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5263 		break;
5264 
5265 	case L2CAP_MOVE_CHAN_CFM_RSP:
5266 		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5267 		break;
5268 
5269 	default:
5270 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5271 		err = -EINVAL;
5272 		break;
5273 	}
5274 
5275 	return err;
5276 }
5277 
5278 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5279 				   struct l2cap_cmd_hdr *cmd, u8 *data)
5280 {
5281 	switch (cmd->code) {
5282 	case L2CAP_COMMAND_REJ:
5283 		return 0;
5284 
5285 	case L2CAP_CONN_PARAM_UPDATE_REQ:
5286 		return l2cap_conn_param_update_req(conn, cmd, data);
5287 
5288 	case L2CAP_CONN_PARAM_UPDATE_RSP:
5289 		return 0;
5290 
5291 	default:
5292 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5293 		return -EINVAL;
5294 	}
5295 }
5296 
5297 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5298 					struct sk_buff *skb)
5299 {
5300 	u8 *data = skb->data;
5301 	int len = skb->len;
5302 	struct l2cap_cmd_hdr cmd;
5303 	int err;
5304 
5305 	l2cap_raw_recv(conn, skb);
5306 
5307 	while (len >= L2CAP_CMD_HDR_SIZE) {
5308 		u16 cmd_len;
5309 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5310 		data += L2CAP_CMD_HDR_SIZE;
5311 		len  -= L2CAP_CMD_HDR_SIZE;
5312 
5313 		cmd_len = le16_to_cpu(cmd.len);
5314 
5315 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5316 		       cmd.ident);
5317 
5318 		if (cmd_len > len || !cmd.ident) {
5319 			BT_DBG("corrupted command");
5320 			break;
5321 		}
5322 
5323 		err = l2cap_le_sig_cmd(conn, &cmd, data);
5324 		if (err) {
5325 			struct l2cap_cmd_rej_unk rej;
5326 
5327 			BT_ERR("Wrong link type (%d)", err);
5328 
5329 			/* FIXME: Map err to a valid reason */
5330 			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5331 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5332 				       sizeof(rej), &rej);
5333 		}
5334 
5335 		data += cmd_len;
5336 		len  -= cmd_len;
5337 	}
5338 
5339 	kfree_skb(skb);
5340 }
5341 
5342 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5343 				     struct sk_buff *skb)
5344 {
5345 	u8 *data = skb->data;
5346 	int len = skb->len;
5347 	struct l2cap_cmd_hdr cmd;
5348 	int err;
5349 
5350 	l2cap_raw_recv(conn, skb);
5351 
5352 	while (len >= L2CAP_CMD_HDR_SIZE) {
5353 		u16 cmd_len;
5354 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5355 		data += L2CAP_CMD_HDR_SIZE;
5356 		len  -= L2CAP_CMD_HDR_SIZE;
5357 
5358 		cmd_len = le16_to_cpu(cmd.len);
5359 
5360 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5361 		       cmd.ident);
5362 
5363 		if (cmd_len > len || !cmd.ident) {
5364 			BT_DBG("corrupted command");
5365 			break;
5366 		}
5367 
5368 		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5369 		if (err) {
5370 			struct l2cap_cmd_rej_unk rej;
5371 
5372 			BT_ERR("Wrong link type (%d)", err);
5373 
5374 			/* FIXME: Map err to a valid reason */
5375 			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5376 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5377 				       sizeof(rej), &rej);
5378 		}
5379 
5380 		data += cmd_len;
5381 		len  -= cmd_len;
5382 	}
5383 
5384 	kfree_skb(skb);
5385 }
5386 
5387 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
5388 {
5389 	u16 our_fcs, rcv_fcs;
5390 	int hdr_size;
5391 
5392 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5393 		hdr_size = L2CAP_EXT_HDR_SIZE;
5394 	else
5395 		hdr_size = L2CAP_ENH_HDR_SIZE;
5396 
5397 	if (chan->fcs == L2CAP_FCS_CRC16) {
5398 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5399 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5400 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5401 
5402 		if (our_fcs != rcv_fcs)
5403 			return -EBADMSG;
5404 	}
5405 	return 0;
5406 }
5407 
5408 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5409 {
5410 	struct l2cap_ctrl control;
5411 
5412 	BT_DBG("chan %p", chan);
5413 
5414 	memset(&control, 0, sizeof(control));
5415 	control.sframe = 1;
5416 	control.final = 1;
5417 	control.reqseq = chan->buffer_seq;
5418 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
5419 
5420 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5421 		control.super = L2CAP_SUPER_RNR;
5422 		l2cap_send_sframe(chan, &control);
5423 	}
5424 
5425 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5426 	    chan->unacked_frames > 0)
5427 		__set_retrans_timer(chan);
5428 
5429 	/* Send pending iframes */
5430 	l2cap_ertm_send(chan);
5431 
5432 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5433 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5434 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
5435 		 * send it now.
5436 		 */
5437 		control.super = L2CAP_SUPER_RR;
5438 		l2cap_send_sframe(chan, &control);
5439 	}
5440 }
5441 
5442 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5443 			    struct sk_buff **last_frag)
5444 {
5445 	/* skb->len reflects data in skb as well as all fragments
5446 	 * skb->data_len reflects only data in fragments
5447 	 */
5448 	if (!skb_has_frag_list(skb))
5449 		skb_shinfo(skb)->frag_list = new_frag;
5450 
5451 	new_frag->next = NULL;
5452 
5453 	(*last_frag)->next = new_frag;
5454 	*last_frag = new_frag;
5455 
5456 	skb->len += new_frag->len;
5457 	skb->data_len += new_frag->len;
5458 	skb->truesize += new_frag->truesize;
5459 }
5460 
5461 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5462 				struct l2cap_ctrl *control)
5463 {
5464 	int err = -EINVAL;
5465 
5466 	switch (control->sar) {
5467 	case L2CAP_SAR_UNSEGMENTED:
5468 		if (chan->sdu)
5469 			break;
5470 
5471 		err = chan->ops->recv(chan, skb);
5472 		break;
5473 
5474 	case L2CAP_SAR_START:
5475 		if (chan->sdu)
5476 			break;
5477 
5478 		chan->sdu_len = get_unaligned_le16(skb->data);
5479 		skb_pull(skb, L2CAP_SDULEN_SIZE);
5480 
5481 		if (chan->sdu_len > chan->imtu) {
5482 			err = -EMSGSIZE;
5483 			break;
5484 		}
5485 
5486 		if (skb->len >= chan->sdu_len)
5487 			break;
5488 
5489 		chan->sdu = skb;
5490 		chan->sdu_last_frag = skb;
5491 
5492 		skb = NULL;
5493 		err = 0;
5494 		break;
5495 
5496 	case L2CAP_SAR_CONTINUE:
5497 		if (!chan->sdu)
5498 			break;
5499 
5500 		append_skb_frag(chan->sdu, skb,
5501 				&chan->sdu_last_frag);
5502 		skb = NULL;
5503 
5504 		if (chan->sdu->len >= chan->sdu_len)
5505 			break;
5506 
5507 		err = 0;
5508 		break;
5509 
5510 	case L2CAP_SAR_END:
5511 		if (!chan->sdu)
5512 			break;
5513 
5514 		append_skb_frag(chan->sdu, skb,
5515 				&chan->sdu_last_frag);
5516 		skb = NULL;
5517 
5518 		if (chan->sdu->len != chan->sdu_len)
5519 			break;
5520 
5521 		err = chan->ops->recv(chan, chan->sdu);
5522 
5523 		if (!err) {
5524 			/* Reassembly complete */
5525 			chan->sdu = NULL;
5526 			chan->sdu_last_frag = NULL;
5527 			chan->sdu_len = 0;
5528 		}
5529 		break;
5530 	}
5531 
5532 	if (err) {
5533 		kfree_skb(skb);
5534 		kfree_skb(chan->sdu);
5535 		chan->sdu = NULL;
5536 		chan->sdu_last_frag = NULL;
5537 		chan->sdu_len = 0;
5538 	}
5539 
5540 	return err;
5541 }
5542 
5543 static int l2cap_resegment(struct l2cap_chan *chan)
5544 {
5545 	/* Placeholder */
5546 	return 0;
5547 }
5548 
5549 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5550 {
5551 	u8 event;
5552 
5553 	if (chan->mode != L2CAP_MODE_ERTM)
5554 		return;
5555 
5556 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5557 	l2cap_tx(chan, NULL, NULL, event);
5558 }
5559 
5560 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5561 {
5562 	int err = 0;
5563 	/* Pass sequential frames to l2cap_reassemble_sdu()
5564 	 * until a gap is encountered.
5565 	 */
5566 
5567 	BT_DBG("chan %p", chan);
5568 
5569 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5570 		struct sk_buff *skb;
5571 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
5572 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
5573 
5574 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5575 
5576 		if (!skb)
5577 			break;
5578 
5579 		skb_unlink(skb, &chan->srej_q);
5580 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5581 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5582 		if (err)
5583 			break;
5584 	}
5585 
5586 	if (skb_queue_empty(&chan->srej_q)) {
5587 		chan->rx_state = L2CAP_RX_STATE_RECV;
5588 		l2cap_send_ack(chan);
5589 	}
5590 
5591 	return err;
5592 }
5593 
5594 static void l2cap_handle_srej(struct l2cap_chan *chan,
5595 			      struct l2cap_ctrl *control)
5596 {
5597 	struct sk_buff *skb;
5598 
5599 	BT_DBG("chan %p, control %p", chan, control);
5600 
5601 	if (control->reqseq == chan->next_tx_seq) {
5602 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5603 		l2cap_send_disconn_req(chan, ECONNRESET);
5604 		return;
5605 	}
5606 
5607 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5608 
5609 	if (skb == NULL) {
5610 		BT_DBG("Seq %d not available for retransmission",
5611 		       control->reqseq);
5612 		return;
5613 	}
5614 
5615 	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5616 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5617 		l2cap_send_disconn_req(chan, ECONNRESET);
5618 		return;
5619 	}
5620 
5621 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5622 
5623 	if (control->poll) {
5624 		l2cap_pass_to_tx(chan, control);
5625 
5626 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
5627 		l2cap_retransmit(chan, control);
5628 		l2cap_ertm_send(chan);
5629 
5630 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5631 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
5632 			chan->srej_save_reqseq = control->reqseq;
5633 		}
5634 	} else {
5635 		l2cap_pass_to_tx_fbit(chan, control);
5636 
5637 		if (control->final) {
5638 			if (chan->srej_save_reqseq != control->reqseq ||
5639 			    !test_and_clear_bit(CONN_SREJ_ACT,
5640 						&chan->conn_state))
5641 				l2cap_retransmit(chan, control);
5642 		} else {
5643 			l2cap_retransmit(chan, control);
5644 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5645 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
5646 				chan->srej_save_reqseq = control->reqseq;
5647 			}
5648 		}
5649 	}
5650 }
5651 
5652 static void l2cap_handle_rej(struct l2cap_chan *chan,
5653 			     struct l2cap_ctrl *control)
5654 {
5655 	struct sk_buff *skb;
5656 
5657 	BT_DBG("chan %p, control %p", chan, control);
5658 
5659 	if (control->reqseq == chan->next_tx_seq) {
5660 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5661 		l2cap_send_disconn_req(chan, ECONNRESET);
5662 		return;
5663 	}
5664 
5665 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5666 
5667 	if (chan->max_tx && skb &&
5668 	    bt_cb(skb)->control.retries >= chan->max_tx) {
5669 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5670 		l2cap_send_disconn_req(chan, ECONNRESET);
5671 		return;
5672 	}
5673 
5674 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5675 
5676 	l2cap_pass_to_tx(chan, control);
5677 
5678 	if (control->final) {
5679 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5680 			l2cap_retransmit_all(chan, control);
5681 	} else {
5682 		l2cap_retransmit_all(chan, control);
5683 		l2cap_ertm_send(chan);
5684 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5685 			set_bit(CONN_REJ_ACT, &chan->conn_state);
5686 	}
5687 }
5688 
5689 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5690 {
5691 	BT_DBG("chan %p, txseq %d", chan, txseq);
5692 
5693 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5694 	       chan->expected_tx_seq);
5695 
5696 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5697 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5698 		    chan->tx_win) {
5699 			/* See notes below regarding "double poll" and
5700 			 * invalid packets.
5701 			 */
5702 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5703 				BT_DBG("Invalid/Ignore - after SREJ");
5704 				return L2CAP_TXSEQ_INVALID_IGNORE;
5705 			} else {
5706 				BT_DBG("Invalid - in window after SREJ sent");
5707 				return L2CAP_TXSEQ_INVALID;
5708 			}
5709 		}
5710 
5711 		if (chan->srej_list.head == txseq) {
5712 			BT_DBG("Expected SREJ");
5713 			return L2CAP_TXSEQ_EXPECTED_SREJ;
5714 		}
5715 
5716 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5717 			BT_DBG("Duplicate SREJ - txseq already stored");
5718 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
5719 		}
5720 
5721 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5722 			BT_DBG("Unexpected SREJ - not requested");
5723 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5724 		}
5725 	}
5726 
5727 	if (chan->expected_tx_seq == txseq) {
5728 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5729 		    chan->tx_win) {
5730 			BT_DBG("Invalid - txseq outside tx window");
5731 			return L2CAP_TXSEQ_INVALID;
5732 		} else {
5733 			BT_DBG("Expected");
5734 			return L2CAP_TXSEQ_EXPECTED;
5735 		}
5736 	}
5737 
5738 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5739 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5740 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
5741 		return L2CAP_TXSEQ_DUPLICATE;
5742 	}
5743 
5744 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5745 		/* A source of invalid packets is a "double poll" condition,
5746 		 * where delays cause us to send multiple poll packets.  If
5747 		 * the remote stack receives and processes both polls,
5748 		 * sequence numbers can wrap around in such a way that a
5749 		 * resent frame has a sequence number that looks like new data
5750 		 * with a sequence gap.  This would trigger an erroneous SREJ
5751 		 * request.
5752 		 *
5753 		 * Fortunately, this is impossible with a tx window that's
5754 		 * less than half of the maximum sequence number, which allows
5755 		 * invalid frames to be safely ignored.
5756 		 *
5757 		 * With tx window sizes greater than half of the tx window
5758 		 * maximum, the frame is invalid and cannot be ignored.  This
5759 		 * causes a disconnect.
5760 		 */
5761 
5762 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5763 			BT_DBG("Invalid/Ignore - txseq outside tx window");
5764 			return L2CAP_TXSEQ_INVALID_IGNORE;
5765 		} else {
5766 			BT_DBG("Invalid - txseq outside tx window");
5767 			return L2CAP_TXSEQ_INVALID;
5768 		}
5769 	} else {
5770 		BT_DBG("Unexpected - txseq indicates missing frames");
5771 		return L2CAP_TXSEQ_UNEXPECTED;
5772 	}
5773 }
5774 
5775 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5776 			       struct l2cap_ctrl *control,
5777 			       struct sk_buff *skb, u8 event)
5778 {
5779 	int err = 0;
5780 	bool skb_in_use = 0;
5781 
5782 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5783 	       event);
5784 
5785 	switch (event) {
5786 	case L2CAP_EV_RECV_IFRAME:
5787 		switch (l2cap_classify_txseq(chan, control->txseq)) {
5788 		case L2CAP_TXSEQ_EXPECTED:
5789 			l2cap_pass_to_tx(chan, control);
5790 
5791 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5792 				BT_DBG("Busy, discarding expected seq %d",
5793 				       control->txseq);
5794 				break;
5795 			}
5796 
5797 			chan->expected_tx_seq = __next_seq(chan,
5798 							   control->txseq);
5799 
5800 			chan->buffer_seq = chan->expected_tx_seq;
5801 			skb_in_use = 1;
5802 
5803 			err = l2cap_reassemble_sdu(chan, skb, control);
5804 			if (err)
5805 				break;
5806 
5807 			if (control->final) {
5808 				if (!test_and_clear_bit(CONN_REJ_ACT,
5809 							&chan->conn_state)) {
5810 					control->final = 0;
5811 					l2cap_retransmit_all(chan, control);
5812 					l2cap_ertm_send(chan);
5813 				}
5814 			}
5815 
5816 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5817 				l2cap_send_ack(chan);
5818 			break;
5819 		case L2CAP_TXSEQ_UNEXPECTED:
5820 			l2cap_pass_to_tx(chan, control);
5821 
5822 			/* Can't issue SREJ frames in the local busy state.
5823 			 * Drop this frame, it will be seen as missing
5824 			 * when local busy is exited.
5825 			 */
5826 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5827 				BT_DBG("Busy, discarding unexpected seq %d",
5828 				       control->txseq);
5829 				break;
5830 			}
5831 
5832 			/* There was a gap in the sequence, so an SREJ
5833 			 * must be sent for each missing frame.  The
5834 			 * current frame is stored for later use.
5835 			 */
5836 			skb_queue_tail(&chan->srej_q, skb);
5837 			skb_in_use = 1;
5838 			BT_DBG("Queued %p (queue len %d)", skb,
5839 			       skb_queue_len(&chan->srej_q));
5840 
5841 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5842 			l2cap_seq_list_clear(&chan->srej_list);
5843 			l2cap_send_srej(chan, control->txseq);
5844 
5845 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5846 			break;
5847 		case L2CAP_TXSEQ_DUPLICATE:
5848 			l2cap_pass_to_tx(chan, control);
5849 			break;
5850 		case L2CAP_TXSEQ_INVALID_IGNORE:
5851 			break;
5852 		case L2CAP_TXSEQ_INVALID:
5853 		default:
5854 			l2cap_send_disconn_req(chan, ECONNRESET);
5855 			break;
5856 		}
5857 		break;
5858 	case L2CAP_EV_RECV_RR:
5859 		l2cap_pass_to_tx(chan, control);
5860 		if (control->final) {
5861 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5862 
5863 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5864 			    !__chan_is_moving(chan)) {
5865 				control->final = 0;
5866 				l2cap_retransmit_all(chan, control);
5867 			}
5868 
5869 			l2cap_ertm_send(chan);
5870 		} else if (control->poll) {
5871 			l2cap_send_i_or_rr_or_rnr(chan);
5872 		} else {
5873 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
5874 					       &chan->conn_state) &&
5875 			    chan->unacked_frames)
5876 				__set_retrans_timer(chan);
5877 
5878 			l2cap_ertm_send(chan);
5879 		}
5880 		break;
5881 	case L2CAP_EV_RECV_RNR:
5882 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5883 		l2cap_pass_to_tx(chan, control);
5884 		if (control && control->poll) {
5885 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
5886 			l2cap_send_rr_or_rnr(chan, 0);
5887 		}
5888 		__clear_retrans_timer(chan);
5889 		l2cap_seq_list_clear(&chan->retrans_list);
5890 		break;
5891 	case L2CAP_EV_RECV_REJ:
5892 		l2cap_handle_rej(chan, control);
5893 		break;
5894 	case L2CAP_EV_RECV_SREJ:
5895 		l2cap_handle_srej(chan, control);
5896 		break;
5897 	default:
5898 		break;
5899 	}
5900 
5901 	if (skb && !skb_in_use) {
5902 		BT_DBG("Freeing %p", skb);
5903 		kfree_skb(skb);
5904 	}
5905 
5906 	return err;
5907 }
5908 
5909 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5910 				    struct l2cap_ctrl *control,
5911 				    struct sk_buff *skb, u8 event)
5912 {
5913 	int err = 0;
5914 	u16 txseq = control->txseq;
5915 	bool skb_in_use = 0;
5916 
5917 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5918 	       event);
5919 
5920 	switch (event) {
5921 	case L2CAP_EV_RECV_IFRAME:
5922 		switch (l2cap_classify_txseq(chan, txseq)) {
5923 		case L2CAP_TXSEQ_EXPECTED:
5924 			/* Keep frame for reassembly later */
5925 			l2cap_pass_to_tx(chan, control);
5926 			skb_queue_tail(&chan->srej_q, skb);
5927 			skb_in_use = 1;
5928 			BT_DBG("Queued %p (queue len %d)", skb,
5929 			       skb_queue_len(&chan->srej_q));
5930 
5931 			chan->expected_tx_seq = __next_seq(chan, txseq);
5932 			break;
5933 		case L2CAP_TXSEQ_EXPECTED_SREJ:
5934 			l2cap_seq_list_pop(&chan->srej_list);
5935 
5936 			l2cap_pass_to_tx(chan, control);
5937 			skb_queue_tail(&chan->srej_q, skb);
5938 			skb_in_use = 1;
5939 			BT_DBG("Queued %p (queue len %d)", skb,
5940 			       skb_queue_len(&chan->srej_q));
5941 
5942 			err = l2cap_rx_queued_iframes(chan);
5943 			if (err)
5944 				break;
5945 
5946 			break;
5947 		case L2CAP_TXSEQ_UNEXPECTED:
5948 			/* Got a frame that can't be reassembled yet.
5949 			 * Save it for later, and send SREJs to cover
5950 			 * the missing frames.
5951 			 */
5952 			skb_queue_tail(&chan->srej_q, skb);
5953 			skb_in_use = 1;
5954 			BT_DBG("Queued %p (queue len %d)", skb,
5955 			       skb_queue_len(&chan->srej_q));
5956 
5957 			l2cap_pass_to_tx(chan, control);
5958 			l2cap_send_srej(chan, control->txseq);
5959 			break;
5960 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
5961 			/* This frame was requested with an SREJ, but
5962 			 * some expected retransmitted frames are
5963 			 * missing.  Request retransmission of missing
5964 			 * SREJ'd frames.
5965 			 */
5966 			skb_queue_tail(&chan->srej_q, skb);
5967 			skb_in_use = 1;
5968 			BT_DBG("Queued %p (queue len %d)", skb,
5969 			       skb_queue_len(&chan->srej_q));
5970 
5971 			l2cap_pass_to_tx(chan, control);
5972 			l2cap_send_srej_list(chan, control->txseq);
5973 			break;
5974 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
5975 			/* We've already queued this frame.  Drop this copy. */
5976 			l2cap_pass_to_tx(chan, control);
5977 			break;
5978 		case L2CAP_TXSEQ_DUPLICATE:
5979 			/* Expecting a later sequence number, so this frame
5980 			 * was already received.  Ignore it completely.
5981 			 */
5982 			break;
5983 		case L2CAP_TXSEQ_INVALID_IGNORE:
5984 			break;
5985 		case L2CAP_TXSEQ_INVALID:
5986 		default:
5987 			l2cap_send_disconn_req(chan, ECONNRESET);
5988 			break;
5989 		}
5990 		break;
5991 	case L2CAP_EV_RECV_RR:
5992 		l2cap_pass_to_tx(chan, control);
5993 		if (control->final) {
5994 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5995 
5996 			if (!test_and_clear_bit(CONN_REJ_ACT,
5997 						&chan->conn_state)) {
5998 				control->final = 0;
5999 				l2cap_retransmit_all(chan, control);
6000 			}
6001 
6002 			l2cap_ertm_send(chan);
6003 		} else if (control->poll) {
6004 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6005 					       &chan->conn_state) &&
6006 			    chan->unacked_frames) {
6007 				__set_retrans_timer(chan);
6008 			}
6009 
6010 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6011 			l2cap_send_srej_tail(chan);
6012 		} else {
6013 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6014 					       &chan->conn_state) &&
6015 			    chan->unacked_frames)
6016 				__set_retrans_timer(chan);
6017 
6018 			l2cap_send_ack(chan);
6019 		}
6020 		break;
6021 	case L2CAP_EV_RECV_RNR:
6022 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6023 		l2cap_pass_to_tx(chan, control);
6024 		if (control->poll) {
6025 			l2cap_send_srej_tail(chan);
6026 		} else {
6027 			struct l2cap_ctrl rr_control;
6028 			memset(&rr_control, 0, sizeof(rr_control));
6029 			rr_control.sframe = 1;
6030 			rr_control.super = L2CAP_SUPER_RR;
6031 			rr_control.reqseq = chan->buffer_seq;
6032 			l2cap_send_sframe(chan, &rr_control);
6033 		}
6034 
6035 		break;
6036 	case L2CAP_EV_RECV_REJ:
6037 		l2cap_handle_rej(chan, control);
6038 		break;
6039 	case L2CAP_EV_RECV_SREJ:
6040 		l2cap_handle_srej(chan, control);
6041 		break;
6042 	}
6043 
6044 	if (skb && !skb_in_use) {
6045 		BT_DBG("Freeing %p", skb);
6046 		kfree_skb(skb);
6047 	}
6048 
6049 	return err;
6050 }
6051 
6052 static int l2cap_finish_move(struct l2cap_chan *chan)
6053 {
6054 	BT_DBG("chan %p", chan);
6055 
6056 	chan->rx_state = L2CAP_RX_STATE_RECV;
6057 
6058 	if (chan->hs_hcon)
6059 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6060 	else
6061 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6062 
6063 	return l2cap_resegment(chan);
6064 }
6065 
6066 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6067 				 struct l2cap_ctrl *control,
6068 				 struct sk_buff *skb, u8 event)
6069 {
6070 	int err;
6071 
6072 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6073 	       event);
6074 
6075 	if (!control->poll)
6076 		return -EPROTO;
6077 
6078 	l2cap_process_reqseq(chan, control->reqseq);
6079 
6080 	if (!skb_queue_empty(&chan->tx_q))
6081 		chan->tx_send_head = skb_peek(&chan->tx_q);
6082 	else
6083 		chan->tx_send_head = NULL;
6084 
6085 	/* Rewind next_tx_seq to the point expected
6086 	 * by the receiver.
6087 	 */
6088 	chan->next_tx_seq = control->reqseq;
6089 	chan->unacked_frames = 0;
6090 
6091 	err = l2cap_finish_move(chan);
6092 	if (err)
6093 		return err;
6094 
6095 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
6096 	l2cap_send_i_or_rr_or_rnr(chan);
6097 
6098 	if (event == L2CAP_EV_RECV_IFRAME)
6099 		return -EPROTO;
6100 
6101 	return l2cap_rx_state_recv(chan, control, NULL, event);
6102 }
6103 
6104 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6105 				 struct l2cap_ctrl *control,
6106 				 struct sk_buff *skb, u8 event)
6107 {
6108 	int err;
6109 
6110 	if (!control->final)
6111 		return -EPROTO;
6112 
6113 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6114 
6115 	chan->rx_state = L2CAP_RX_STATE_RECV;
6116 	l2cap_process_reqseq(chan, control->reqseq);
6117 
6118 	if (!skb_queue_empty(&chan->tx_q))
6119 		chan->tx_send_head = skb_peek(&chan->tx_q);
6120 	else
6121 		chan->tx_send_head = NULL;
6122 
6123 	/* Rewind next_tx_seq to the point expected
6124 	 * by the receiver.
6125 	 */
6126 	chan->next_tx_seq = control->reqseq;
6127 	chan->unacked_frames = 0;
6128 
6129 	if (chan->hs_hcon)
6130 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6131 	else
6132 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6133 
6134 	err = l2cap_resegment(chan);
6135 
6136 	if (!err)
6137 		err = l2cap_rx_state_recv(chan, control, skb, event);
6138 
6139 	return err;
6140 }
6141 
6142 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6143 {
6144 	/* Make sure reqseq is for a packet that has been sent but not acked */
6145 	u16 unacked;
6146 
6147 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6148 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6149 }
6150 
6151 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6152 		    struct sk_buff *skb, u8 event)
6153 {
6154 	int err = 0;
6155 
6156 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6157 	       control, skb, event, chan->rx_state);
6158 
6159 	if (__valid_reqseq(chan, control->reqseq)) {
6160 		switch (chan->rx_state) {
6161 		case L2CAP_RX_STATE_RECV:
6162 			err = l2cap_rx_state_recv(chan, control, skb, event);
6163 			break;
6164 		case L2CAP_RX_STATE_SREJ_SENT:
6165 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6166 						       event);
6167 			break;
6168 		case L2CAP_RX_STATE_WAIT_P:
6169 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6170 			break;
6171 		case L2CAP_RX_STATE_WAIT_F:
6172 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6173 			break;
6174 		default:
6175 			/* shut it down */
6176 			break;
6177 		}
6178 	} else {
6179 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6180 		       control->reqseq, chan->next_tx_seq,
6181 		       chan->expected_ack_seq);
6182 		l2cap_send_disconn_req(chan, ECONNRESET);
6183 	}
6184 
6185 	return err;
6186 }
6187 
6188 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6189 			   struct sk_buff *skb)
6190 {
6191 	int err = 0;
6192 
6193 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6194 	       chan->rx_state);
6195 
6196 	if (l2cap_classify_txseq(chan, control->txseq) ==
6197 	    L2CAP_TXSEQ_EXPECTED) {
6198 		l2cap_pass_to_tx(chan, control);
6199 
6200 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6201 		       __next_seq(chan, chan->buffer_seq));
6202 
6203 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6204 
6205 		l2cap_reassemble_sdu(chan, skb, control);
6206 	} else {
6207 		if (chan->sdu) {
6208 			kfree_skb(chan->sdu);
6209 			chan->sdu = NULL;
6210 		}
6211 		chan->sdu_last_frag = NULL;
6212 		chan->sdu_len = 0;
6213 
6214 		if (skb) {
6215 			BT_DBG("Freeing %p", skb);
6216 			kfree_skb(skb);
6217 		}
6218 	}
6219 
6220 	chan->last_acked_seq = control->txseq;
6221 	chan->expected_tx_seq = __next_seq(chan, control->txseq);
6222 
6223 	return err;
6224 }
6225 
6226 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6227 {
6228 	struct l2cap_ctrl *control = &bt_cb(skb)->control;
6229 	u16 len;
6230 	u8 event;
6231 
6232 	__unpack_control(chan, skb);
6233 
6234 	len = skb->len;
6235 
6236 	/*
6237 	 * We can just drop the corrupted I-frame here.
6238 	 * Receiver will miss it and start proper recovery
6239 	 * procedures and ask for retransmission.
6240 	 */
6241 	if (l2cap_check_fcs(chan, skb))
6242 		goto drop;
6243 
6244 	if (!control->sframe && control->sar == L2CAP_SAR_START)
6245 		len -= L2CAP_SDULEN_SIZE;
6246 
6247 	if (chan->fcs == L2CAP_FCS_CRC16)
6248 		len -= L2CAP_FCS_SIZE;
6249 
6250 	if (len > chan->mps) {
6251 		l2cap_send_disconn_req(chan, ECONNRESET);
6252 		goto drop;
6253 	}
6254 
6255 	if (!control->sframe) {
6256 		int err;
6257 
6258 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6259 		       control->sar, control->reqseq, control->final,
6260 		       control->txseq);
6261 
6262 		/* Validate F-bit - F=0 always valid, F=1 only
6263 		 * valid in TX WAIT_F
6264 		 */
6265 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6266 			goto drop;
6267 
6268 		if (chan->mode != L2CAP_MODE_STREAMING) {
6269 			event = L2CAP_EV_RECV_IFRAME;
6270 			err = l2cap_rx(chan, control, skb, event);
6271 		} else {
6272 			err = l2cap_stream_rx(chan, control, skb);
6273 		}
6274 
6275 		if (err)
6276 			l2cap_send_disconn_req(chan, ECONNRESET);
6277 	} else {
6278 		const u8 rx_func_to_event[4] = {
6279 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6280 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6281 		};
6282 
6283 		/* Only I-frames are expected in streaming mode */
6284 		if (chan->mode == L2CAP_MODE_STREAMING)
6285 			goto drop;
6286 
6287 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6288 		       control->reqseq, control->final, control->poll,
6289 		       control->super);
6290 
6291 		if (len != 0) {
6292 			BT_ERR("Trailing bytes: %d in sframe", len);
6293 			l2cap_send_disconn_req(chan, ECONNRESET);
6294 			goto drop;
6295 		}
6296 
6297 		/* Validate F and P bits */
6298 		if (control->final && (control->poll ||
6299 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6300 			goto drop;
6301 
6302 		event = rx_func_to_event[control->super];
6303 		if (l2cap_rx(chan, control, skb, event))
6304 			l2cap_send_disconn_req(chan, ECONNRESET);
6305 	}
6306 
6307 	return 0;
6308 
6309 drop:
6310 	kfree_skb(skb);
6311 	return 0;
6312 }
6313 
6314 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6315 			       struct sk_buff *skb)
6316 {
6317 	struct l2cap_chan *chan;
6318 
6319 	chan = l2cap_get_chan_by_scid(conn, cid);
6320 	if (!chan) {
6321 		if (cid == L2CAP_CID_A2MP) {
6322 			chan = a2mp_channel_create(conn, skb);
6323 			if (!chan) {
6324 				kfree_skb(skb);
6325 				return;
6326 			}
6327 
6328 			l2cap_chan_lock(chan);
6329 		} else {
6330 			BT_DBG("unknown cid 0x%4.4x", cid);
6331 			/* Drop packet and return */
6332 			kfree_skb(skb);
6333 			return;
6334 		}
6335 	}
6336 
6337 	BT_DBG("chan %p, len %d", chan, skb->len);
6338 
6339 	if (chan->state != BT_CONNECTED)
6340 		goto drop;
6341 
6342 	switch (chan->mode) {
6343 	case L2CAP_MODE_BASIC:
6344 		/* If socket recv buffers overflows we drop data here
6345 		 * which is *bad* because L2CAP has to be reliable.
6346 		 * But we don't have any other choice. L2CAP doesn't
6347 		 * provide flow control mechanism. */
6348 
6349 		if (chan->imtu < skb->len)
6350 			goto drop;
6351 
6352 		if (!chan->ops->recv(chan, skb))
6353 			goto done;
6354 		break;
6355 
6356 	case L2CAP_MODE_ERTM:
6357 	case L2CAP_MODE_STREAMING:
6358 		l2cap_data_rcv(chan, skb);
6359 		goto done;
6360 
6361 	default:
6362 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6363 		break;
6364 	}
6365 
6366 drop:
6367 	kfree_skb(skb);
6368 
6369 done:
6370 	l2cap_chan_unlock(chan);
6371 }
6372 
6373 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6374 				  struct sk_buff *skb)
6375 {
6376 	struct l2cap_chan *chan;
6377 
6378 	chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
6379 	if (!chan)
6380 		goto drop;
6381 
6382 	BT_DBG("chan %p, len %d", chan, skb->len);
6383 
6384 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6385 		goto drop;
6386 
6387 	if (chan->imtu < skb->len)
6388 		goto drop;
6389 
6390 	if (!chan->ops->recv(chan, skb))
6391 		return;
6392 
6393 drop:
6394 	kfree_skb(skb);
6395 }
6396 
6397 static void l2cap_att_channel(struct l2cap_conn *conn,
6398 			      struct sk_buff *skb)
6399 {
6400 	struct l2cap_chan *chan;
6401 
6402 	chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6403 					 conn->src, conn->dst);
6404 	if (!chan)
6405 		goto drop;
6406 
6407 	BT_DBG("chan %p, len %d", chan, skb->len);
6408 
6409 	if (chan->imtu < skb->len)
6410 		goto drop;
6411 
6412 	if (!chan->ops->recv(chan, skb))
6413 		return;
6414 
6415 drop:
6416 	kfree_skb(skb);
6417 }
6418 
6419 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6420 {
6421 	struct l2cap_hdr *lh = (void *) skb->data;
6422 	u16 cid, len;
6423 	__le16 psm;
6424 
6425 	skb_pull(skb, L2CAP_HDR_SIZE);
6426 	cid = __le16_to_cpu(lh->cid);
6427 	len = __le16_to_cpu(lh->len);
6428 
6429 	if (len != skb->len) {
6430 		kfree_skb(skb);
6431 		return;
6432 	}
6433 
6434 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
6435 
6436 	switch (cid) {
6437 	case L2CAP_CID_LE_SIGNALING:
6438 		l2cap_le_sig_channel(conn, skb);
6439 		break;
6440 	case L2CAP_CID_SIGNALING:
6441 		l2cap_sig_channel(conn, skb);
6442 		break;
6443 
6444 	case L2CAP_CID_CONN_LESS:
6445 		psm = get_unaligned((__le16 *) skb->data);
6446 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
6447 		l2cap_conless_channel(conn, psm, skb);
6448 		break;
6449 
6450 	case L2CAP_CID_ATT:
6451 		l2cap_att_channel(conn, skb);
6452 		break;
6453 
6454 	case L2CAP_CID_SMP:
6455 		if (smp_sig_channel(conn, skb))
6456 			l2cap_conn_del(conn->hcon, EACCES);
6457 		break;
6458 
6459 	default:
6460 		l2cap_data_channel(conn, cid, skb);
6461 		break;
6462 	}
6463 }
6464 
6465 /* ---- L2CAP interface with lower layer (HCI) ---- */
6466 
6467 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6468 {
6469 	int exact = 0, lm1 = 0, lm2 = 0;
6470 	struct l2cap_chan *c;
6471 
6472 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6473 
6474 	/* Find listening sockets and check their link_mode */
6475 	read_lock(&chan_list_lock);
6476 	list_for_each_entry(c, &chan_list, global_l) {
6477 		struct sock *sk = c->sk;
6478 
6479 		if (c->state != BT_LISTEN)
6480 			continue;
6481 
6482 		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
6483 			lm1 |= HCI_LM_ACCEPT;
6484 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6485 				lm1 |= HCI_LM_MASTER;
6486 			exact++;
6487 		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
6488 			lm2 |= HCI_LM_ACCEPT;
6489 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6490 				lm2 |= HCI_LM_MASTER;
6491 		}
6492 	}
6493 	read_unlock(&chan_list_lock);
6494 
6495 	return exact ? lm1 : lm2;
6496 }
6497 
6498 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6499 {
6500 	struct l2cap_conn *conn;
6501 
6502 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6503 
6504 	if (!status) {
6505 		conn = l2cap_conn_add(hcon);
6506 		if (conn)
6507 			l2cap_conn_ready(conn);
6508 	} else {
6509 		l2cap_conn_del(hcon, bt_to_errno(status));
6510 	}
6511 }
6512 
6513 int l2cap_disconn_ind(struct hci_conn *hcon)
6514 {
6515 	struct l2cap_conn *conn = hcon->l2cap_data;
6516 
6517 	BT_DBG("hcon %p", hcon);
6518 
6519 	if (!conn)
6520 		return HCI_ERROR_REMOTE_USER_TERM;
6521 	return conn->disc_reason;
6522 }
6523 
6524 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6525 {
6526 	BT_DBG("hcon %p reason %d", hcon, reason);
6527 
6528 	l2cap_conn_del(hcon, bt_to_errno(reason));
6529 }
6530 
6531 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6532 {
6533 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6534 		return;
6535 
6536 	if (encrypt == 0x00) {
6537 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
6538 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6539 		} else if (chan->sec_level == BT_SECURITY_HIGH)
6540 			l2cap_chan_close(chan, ECONNREFUSED);
6541 	} else {
6542 		if (chan->sec_level == BT_SECURITY_MEDIUM)
6543 			__clear_chan_timer(chan);
6544 	}
6545 }
6546 
6547 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6548 {
6549 	struct l2cap_conn *conn = hcon->l2cap_data;
6550 	struct l2cap_chan *chan;
6551 
6552 	if (!conn)
6553 		return 0;
6554 
6555 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
6556 
6557 	if (hcon->type == LE_LINK) {
6558 		if (!status && encrypt)
6559 			smp_distribute_keys(conn, 0);
6560 		cancel_delayed_work(&conn->security_timer);
6561 	}
6562 
6563 	mutex_lock(&conn->chan_lock);
6564 
6565 	list_for_each_entry(chan, &conn->chan_l, list) {
6566 		l2cap_chan_lock(chan);
6567 
6568 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6569 		       state_to_string(chan->state));
6570 
6571 		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6572 			l2cap_chan_unlock(chan);
6573 			continue;
6574 		}
6575 
6576 		if (chan->scid == L2CAP_CID_ATT) {
6577 			if (!status && encrypt) {
6578 				chan->sec_level = hcon->sec_level;
6579 				l2cap_chan_ready(chan);
6580 			}
6581 
6582 			l2cap_chan_unlock(chan);
6583 			continue;
6584 		}
6585 
6586 		if (!__l2cap_no_conn_pending(chan)) {
6587 			l2cap_chan_unlock(chan);
6588 			continue;
6589 		}
6590 
6591 		if (!status && (chan->state == BT_CONNECTED ||
6592 				chan->state == BT_CONFIG)) {
6593 			struct sock *sk = chan->sk;
6594 
6595 			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
6596 			sk->sk_state_change(sk);
6597 
6598 			l2cap_check_encryption(chan, encrypt);
6599 			l2cap_chan_unlock(chan);
6600 			continue;
6601 		}
6602 
6603 		if (chan->state == BT_CONNECT) {
6604 			if (!status) {
6605 				l2cap_start_connection(chan);
6606 			} else {
6607 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6608 			}
6609 		} else if (chan->state == BT_CONNECT2) {
6610 			struct sock *sk = chan->sk;
6611 			struct l2cap_conn_rsp rsp;
6612 			__u16 res, stat;
6613 
6614 			lock_sock(sk);
6615 
6616 			if (!status) {
6617 				if (test_bit(BT_SK_DEFER_SETUP,
6618 					     &bt_sk(sk)->flags)) {
6619 					res = L2CAP_CR_PEND;
6620 					stat = L2CAP_CS_AUTHOR_PEND;
6621 					chan->ops->defer(chan);
6622 				} else {
6623 					__l2cap_state_change(chan, BT_CONFIG);
6624 					res = L2CAP_CR_SUCCESS;
6625 					stat = L2CAP_CS_NO_INFO;
6626 				}
6627 			} else {
6628 				__l2cap_state_change(chan, BT_DISCONN);
6629 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6630 				res = L2CAP_CR_SEC_BLOCK;
6631 				stat = L2CAP_CS_NO_INFO;
6632 			}
6633 
6634 			release_sock(sk);
6635 
6636 			rsp.scid   = cpu_to_le16(chan->dcid);
6637 			rsp.dcid   = cpu_to_le16(chan->scid);
6638 			rsp.result = cpu_to_le16(res);
6639 			rsp.status = cpu_to_le16(stat);
6640 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
6641 				       sizeof(rsp), &rsp);
6642 
6643 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6644 			    res == L2CAP_CR_SUCCESS) {
6645 				char buf[128];
6646 				set_bit(CONF_REQ_SENT, &chan->conf_state);
6647 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
6648 					       L2CAP_CONF_REQ,
6649 					       l2cap_build_conf_req(chan, buf),
6650 					       buf);
6651 				chan->num_conf_req++;
6652 			}
6653 		}
6654 
6655 		l2cap_chan_unlock(chan);
6656 	}
6657 
6658 	mutex_unlock(&conn->chan_lock);
6659 
6660 	return 0;
6661 }
6662 
6663 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6664 {
6665 	struct l2cap_conn *conn = hcon->l2cap_data;
6666 	struct l2cap_hdr *hdr;
6667 	int len;
6668 
6669 	/* For AMP controller do not create l2cap conn */
6670 	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6671 		goto drop;
6672 
6673 	if (!conn)
6674 		conn = l2cap_conn_add(hcon);
6675 
6676 	if (!conn)
6677 		goto drop;
6678 
6679 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6680 
6681 	switch (flags) {
6682 	case ACL_START:
6683 	case ACL_START_NO_FLUSH:
6684 	case ACL_COMPLETE:
6685 		if (conn->rx_len) {
6686 			BT_ERR("Unexpected start frame (len %d)", skb->len);
6687 			kfree_skb(conn->rx_skb);
6688 			conn->rx_skb = NULL;
6689 			conn->rx_len = 0;
6690 			l2cap_conn_unreliable(conn, ECOMM);
6691 		}
6692 
6693 		/* Start fragment always begin with Basic L2CAP header */
6694 		if (skb->len < L2CAP_HDR_SIZE) {
6695 			BT_ERR("Frame is too short (len %d)", skb->len);
6696 			l2cap_conn_unreliable(conn, ECOMM);
6697 			goto drop;
6698 		}
6699 
6700 		hdr = (struct l2cap_hdr *) skb->data;
6701 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
6702 
6703 		if (len == skb->len) {
6704 			/* Complete frame received */
6705 			l2cap_recv_frame(conn, skb);
6706 			return 0;
6707 		}
6708 
6709 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
6710 
6711 		if (skb->len > len) {
6712 			BT_ERR("Frame is too long (len %d, expected len %d)",
6713 			       skb->len, len);
6714 			l2cap_conn_unreliable(conn, ECOMM);
6715 			goto drop;
6716 		}
6717 
6718 		/* Allocate skb for the complete frame (with header) */
6719 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
6720 		if (!conn->rx_skb)
6721 			goto drop;
6722 
6723 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6724 					  skb->len);
6725 		conn->rx_len = len - skb->len;
6726 		break;
6727 
6728 	case ACL_CONT:
6729 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
6730 
6731 		if (!conn->rx_len) {
6732 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
6733 			l2cap_conn_unreliable(conn, ECOMM);
6734 			goto drop;
6735 		}
6736 
6737 		if (skb->len > conn->rx_len) {
6738 			BT_ERR("Fragment is too long (len %d, expected %d)",
6739 			       skb->len, conn->rx_len);
6740 			kfree_skb(conn->rx_skb);
6741 			conn->rx_skb = NULL;
6742 			conn->rx_len = 0;
6743 			l2cap_conn_unreliable(conn, ECOMM);
6744 			goto drop;
6745 		}
6746 
6747 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6748 					  skb->len);
6749 		conn->rx_len -= skb->len;
6750 
6751 		if (!conn->rx_len) {
6752 			/* Complete frame received */
6753 			l2cap_recv_frame(conn, conn->rx_skb);
6754 			conn->rx_skb = NULL;
6755 		}
6756 		break;
6757 	}
6758 
6759 drop:
6760 	kfree_skb(skb);
6761 	return 0;
6762 }
6763 
6764 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6765 {
6766 	struct l2cap_chan *c;
6767 
6768 	read_lock(&chan_list_lock);
6769 
6770 	list_for_each_entry(c, &chan_list, global_l) {
6771 		struct sock *sk = c->sk;
6772 
6773 		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6774 			   &bt_sk(sk)->src, &bt_sk(sk)->dst,
6775 			   c->state, __le16_to_cpu(c->psm),
6776 			   c->scid, c->dcid, c->imtu, c->omtu,
6777 			   c->sec_level, c->mode);
6778 	}
6779 
6780 	read_unlock(&chan_list_lock);
6781 
6782 	return 0;
6783 }
6784 
6785 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
6786 {
6787 	return single_open(file, l2cap_debugfs_show, inode->i_private);
6788 }
6789 
6790 static const struct file_operations l2cap_debugfs_fops = {
6791 	.open		= l2cap_debugfs_open,
6792 	.read		= seq_read,
6793 	.llseek		= seq_lseek,
6794 	.release	= single_release,
6795 };
6796 
6797 static struct dentry *l2cap_debugfs;
6798 
6799 int __init l2cap_init(void)
6800 {
6801 	int err;
6802 
6803 	err = l2cap_init_sockets();
6804 	if (err < 0)
6805 		return err;
6806 
6807 	if (bt_debugfs) {
6808 		l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6809 						    NULL, &l2cap_debugfs_fops);
6810 		if (!l2cap_debugfs)
6811 			BT_ERR("Failed to create L2CAP debug file");
6812 	}
6813 
6814 	return 0;
6815 }
6816 
6817 void l2cap_exit(void)
6818 {
6819 	debugfs_remove(l2cap_debugfs);
6820 	l2cap_cleanup_sockets();
6821 }
6822 
6823 module_param(disable_ertm, bool, 0644);
6824 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
6825