xref: /openbmc/linux/net/bluetooth/l2cap_core.c (revision cfe560c7)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 
43 #define LE_FLOWCTL_MAX_CREDITS 65535
44 
45 bool disable_ertm;
46 bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);
47 
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49 
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
52 
53 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
54 				       u8 code, u8 ident, u16 dlen, void *data);
55 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56 			   void *data);
57 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
58 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59 
60 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
61 		     struct sk_buff_head *skbs, u8 event);
62 static void l2cap_retrans_timeout(struct work_struct *work);
63 static void l2cap_monitor_timeout(struct work_struct *work);
64 static void l2cap_ack_timeout(struct work_struct *work);
65 
66 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
67 {
68 	if (link_type == LE_LINK) {
69 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
70 			return BDADDR_LE_PUBLIC;
71 		else
72 			return BDADDR_LE_RANDOM;
73 	}
74 
75 	return BDADDR_BREDR;
76 }
77 
/* Socket-level address type of the local (source) side of @hcon. */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
82 
/* Socket-level address type of the remote (destination) side of @hcon. */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
87 
88 /* ---- L2CAP channels ---- */
89 
/* Look up a channel on @conn by its destination CID.
 * Caller must hold conn->chan_lock (see the locked wrappers below);
 * no reference is taken on the returned channel.
 */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}
101 
/* Look up a channel on @conn by its source CID.
 * Caller must hold conn->chan_lock (see the locked wrappers below);
 * no reference is taken on the returned channel.
 */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}
113 
/* Find channel with given SCID.
 * Returns a reference locked channel.
 * The caller is responsible for l2cap_chan_unlock() + l2cap_chan_put()
 * on a non-NULL result.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	return c;
}
134 
/* Find channel with given DCID.
 * Returns a reference locked channel.
 * The caller is responsible for l2cap_chan_unlock() + l2cap_chan_put()
 * on a non-NULL result.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	return c;
}
155 
/* Look up a channel on @conn by the signalling command identifier it is
 * currently using. Caller must hold conn->chan_lock; no reference is
 * taken on the returned channel.
 */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
	}
	return NULL;
}
167 
168 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
169 						      u8 src_type)
170 {
171 	struct l2cap_chan *c;
172 
173 	list_for_each_entry(c, &chan_list, global_l) {
174 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
175 			continue;
176 
177 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
178 			continue;
179 
180 		if (c->sport == psm && !bacmp(&c->src, src))
181 			return c;
182 	}
183 	return NULL;
184 }
185 
/* Bind @chan to @psm on source address @src, or — when @psm is 0 —
 * auto-allocate a free dynamic PSM (odd BR/EDR values stepping by 2,
 * LE dynamic range stepping by 1).
 *
 * Returns 0 on success, -EADDRINUSE if the requested PSM is taken, or
 * -EINVAL if the dynamic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
230 
/* Turn @chan into a fixed channel bound to source CID @scid.
 * Always returns 0.
 */
int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
245 
246 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
247 {
248 	u16 cid, dyn_end;
249 
250 	if (conn->hcon->type == LE_LINK)
251 		dyn_end = L2CAP_CID_LE_DYN_END;
252 	else
253 		dyn_end = L2CAP_CID_DYN_END;
254 
255 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
256 		if (!__l2cap_get_chan_by_scid(conn, cid))
257 			return cid;
258 	}
259 
260 	return 0;
261 }
262 
/* Move @chan to @state and notify the channel's owner via the
 * state_change callback (with no error).
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
271 
/* As l2cap_state_change() but also reports @err to the owner. */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
278 
/* Report @err to the channel's owner without changing state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
283 
/* Arm the ERTM retransmission timer, unless the monitor timer is
 * already pending or no retransmission timeout is configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
292 
/* Arm the ERTM monitor timer (if configured), cancelling any pending
 * retransmission timer first — the two are mutually exclusive.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
301 
/* Linear search of @head for the skb whose ERTM TxSeq equals @seq.
 * Returns NULL if no frame in the queue carries that sequence number.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->l2cap.txseq == seq)
			return skb;
	}

	return NULL;
}
314 
315 /* ---- L2CAP sequence number lists ---- */
316 
317 /* For ERTM, ordered lists of sequence numbers must be tracked for
318  * SREJ requests that are received and for frames that are to be
319  * retransmitted. These seq_list functions implement a singly-linked
320  * list in an array, where membership in the list can also be checked
321  * in constant time. Items can also be added to the tail of the list
322  * and removed from the head in constant time, without further memory
323  * allocs or frees.
324  */
325 
/* Allocate and reset a sequence list sized for @size entries.
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* Power-of-two size lets "seq & mask" index the array directly */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
348 
/* Release the backing array of a sequence list. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
353 
/* O(1) membership test: a slot holding anything other than the CLEAR
 * sentinel means @seq is currently on the list.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
360 
/* Remove and return the sequence number at the head of the list.
 * The caller must ensure the list is non-empty. When the popped entry
 * was the last one (its slot held the TAIL sentinel), the list is
 * reset to the empty state.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry and clear the old slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
376 
/* Empty the list, resetting every slot and both sentinels.
 * O(array size), but a no-op when the list is already empty.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
390 
/* Append @seq to the tail of the list in O(1).
 * Duplicate appends are ignored (the slot is already linked).
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
408 
/* Delayed-work handler for the per-channel timer: close the channel
 * with an error derived from its current state. Drops the reference
 * taken when the timer was scheduled.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	if (!conn)
		return;

	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	/* A timeout while connected/configuring, or while connecting at a
	 * security level above SDP, is treated as a refused connection;
	 * everything else is a plain timeout.
	 */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}
444 
/* Allocate and initialise a new channel in BT_OPEN state with a single
 * reference, and register it on the global channel list.
 * Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
481 
/* kref release callback: unlink the channel from the global list and
 * free it. Invoked when the last reference is dropped via
 * l2cap_chan_put().
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
494 
/* Take an additional reference on @c. The caller must already hold one. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
501 
502 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
503 {
504 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
505 
506 	if (!kref_get_unless_zero(&c->kref))
507 		return NULL;
508 
509 	return c;
510 }
511 
/* Drop a reference on @c; frees the channel via l2cap_chan_destroy()
 * when the count reaches zero.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
519 
/* Reset @chan to spec default parameters (FCS, ERTM window/retry
 * limits, timeouts, security level) and mark configuration as not yet
 * complete.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	chan->conf_state = 0;
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
540 
/* Initialise LE credit-based flow control state on @chan: reset SDU
 * reassembly, record the peer's initial @tx_credits, and derive MPS
 * and our rx credit grant from the MTUs.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	/* Give enough credits for a full packet */
	chan->rx_credits = (chan->imtu / chan->mps) + 1;

	skb_queue_head_init(&chan->tx_q);
}
554 
/* Initialise enhanced-credit (ECRED) flow control: same as LE flow
 * control but enforcing the mandated minimum MPS.
 */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		/* Recompute the rx credit grant for the adjusted MPS */
		chan->rx_credits = (chan->imtu / chan->mps) + 1;
	}
}
565 
/* Attach @chan to @conn: assign CIDs/MTU according to the channel
 * type, set extended flow spec defaults, take a channel reference and
 * (conditionally) an hci_conn reference, then add the channel to the
 * connection's list. Caller must hold conn->chan_lock (see
 * l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
617 
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
624 
/* Detach @chan from its connection and tear down mode-specific state.
 * Cancels the channel timer, notifies the owner via ops->teardown(@err),
 * unlinks the channel (dropping the reference and hci_conn hold taken in
 * __l2cap_chan_add()), and purges queues/timers for the channel's mode —
 * unless configuration never completed, in which case those were never
 * set up.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);
	}

	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
682 
/* Invoke @func on every channel of @conn whose signalling ident matches
 * @id. Uses the _safe iterator so @func may remove the channel from the
 * list. Caller must hold conn->chan_lock.
 */
static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
				 l2cap_chan_func_t func, void *data)
{
	struct l2cap_chan *chan, *l;

	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		if (chan->ident == id)
			func(chan, data);
	}
}
693 
/* Invoke @func on every channel of @conn. Caller must hold
 * conn->chan_lock; @func must not remove entries from the list.
 */
static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
			      void *data)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		func(chan, data);
	}
}
703 
704 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
705 		     void *data)
706 {
707 	if (!conn)
708 		return;
709 
710 	mutex_lock(&conn->chan_lock);
711 	__l2cap_chan_list(conn, func, data);
712 	mutex_unlock(&conn->chan_lock);
713 }
714 
715 EXPORT_SYMBOL_GPL(l2cap_chan_list);
716 
/* Delayed-work handler: propagate the hci_conn's (possibly updated)
 * destination address and address type to every channel on the
 * connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_timer.work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
735 
/* Reject a pending LE credit-based connection request: move the channel
 * to BT_DISCONN and send an LE Connection Response carrying an
 * authorization or bad-PSM result, depending on whether setup was
 * deferred.
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_LE_AUTHORIZATION;
	else
		result = L2CAP_CR_LE_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid    = cpu_to_le16(chan->scid);
	rsp.mtu     = cpu_to_le16(chan->imtu);
	rsp.mps     = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
758 
/* Reject a pending enhanced-credit (ECRED) connection: move to
 * BT_DISCONN and let the deferred response path send the reply.
 */
static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
{
	l2cap_state_change(chan, BT_DISCONN);

	__l2cap_ecred_conn_rsp_defer(chan);
}
765 
/* Reject a pending BR/EDR connection request: move the channel to
 * BT_DISCONN and send a Connection Response with a security-block or
 * bad-PSM result, depending on whether setup was deferred.
 */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
786 
/* Close @chan with error @reason, taking the action appropriate for its
 * current state: tear down listeners, send a disconnect request for
 * established connection-oriented channels, reject half-open incoming
 * connections, or simply delete the channel.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Give the peer time to answer the disconnect req */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming connection awaiting our response: reject it in a
		 * transport- and mode-specific way before deleting.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					/* ECRED reply is deferred; skip del */
					return;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
837 
/* Translate the channel type + security level into the HCI
 * authentication requirement to request. May downgrade the security
 * level to BT_SECURITY_SDP for SDP/3DSP channels as a side effect.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
889 
/* Service level security */
/* Request the security level configured on @chan from the underlying
 * link: SMP for LE links, HCI authentication for BR/EDR.
 * Return value comes straight from smp_conn_security() /
 * hci_conn_security().
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
904 
/* Allocate the next signalling command identifier for @conn,
 * wrapping within the kernel-reserved 1..128 range.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}
926 
/* Build and transmit a signalling command on @conn at maximum HCI
 * priority. Silently drops the command if the skb cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
951 
/* Hand a fully-built data skb for @chan down to the HCI layer,
 * selecting the appropriate ACL flush flags for the link type.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
974 
/* Decode a 16-bit enhanced control field into @control. Fields that do
 * not apply to the decoded frame type are zeroed so stale values never
 * leak through.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
998 
/* Decode a 32-bit extended control field into @control; the extended
 * counterpart of __unpack_enhanced_control() with wider sequence
 * numbers. Non-applicable fields are zeroed per frame type.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
1022 
/* Parse the control field at the front of @skb into the skb's control
 * block and strip it from the data, choosing extended vs. enhanced
 * format from the channel's FLAG_EXT_CTRL flag.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
1036 
1037 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1038 {
1039 	u32 packed;
1040 
1041 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1042 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1043 
1044 	if (control->sframe) {
1045 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1046 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1047 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1048 	} else {
1049 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1050 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1051 	}
1052 
1053 	return packed;
1054 }
1055 
1056 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1057 {
1058 	u16 packed;
1059 
1060 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1061 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1062 
1063 	if (control->sframe) {
1064 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1065 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1066 		packed |= L2CAP_CTRL_FRAME_TYPE;
1067 	} else {
1068 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1069 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1070 	}
1071 
1072 	return packed;
1073 }
1074 
/* Write @control into @skb's control field slot (just past the basic
 * L2CAP header), in extended or enhanced format depending on the
 * channel's FLAG_EXT_CTRL flag.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
1087 
1088 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1089 {
1090 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1091 		return L2CAP_EXT_HDR_SIZE;
1092 	else
1093 		return L2CAP_ENH_HDR_SIZE;
1094 }
1095 
/* Build an ERTM S-frame PDU for @chan carrying the already-packed
 * @control field: basic L2CAP header + control field + optional CRC16
 * FCS. Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* S-frames carry no payload: length is header minus basic header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything built so far */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1128 
/* Build and transmit the supervisory frame described by @control,
 * updating the channel's ack/busy bookkeeping as a side effect.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A pending F-bit is piggybacked on any non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether the peer last saw us busy (RNR) or ready (RR) */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Any frame except SREJ acknowledges everything up to reqseq */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1166 
1167 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1168 {
1169 	struct l2cap_ctrl control;
1170 
1171 	BT_DBG("chan %p, poll %d", chan, poll);
1172 
1173 	memset(&control, 0, sizeof(control));
1174 	control.sframe = 1;
1175 	control.poll = poll;
1176 
1177 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1178 		control.super = L2CAP_SUPER_RNR;
1179 	else
1180 		control.super = L2CAP_SUPER_RR;
1181 
1182 	control.reqseq = chan->buffer_seq;
1183 	l2cap_send_sframe(chan, &control);
1184 }
1185 
1186 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1187 {
1188 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1189 		return true;
1190 
1191 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1192 }
1193 
1194 void l2cap_send_conn_req(struct l2cap_chan *chan)
1195 {
1196 	struct l2cap_conn *conn = chan->conn;
1197 	struct l2cap_conn_req req;
1198 
1199 	req.scid = cpu_to_le16(chan->scid);
1200 	req.psm  = chan->psm;
1201 
1202 	chan->ident = l2cap_get_ident(conn);
1203 
1204 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1205 
1206 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1207 }
1208 
/* Transition a channel to BT_CONNECTED and notify its owner. */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* No credits to send with yet: stop the owner from queuing */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1234 
/* Send an LE credit-based connection request for @chan, at most once
 * (guarded by FLAG_LE_CONN_REQ_SENT).
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Fall back to the link MTU if no MTU was configured */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1260 
/* Scratch state for building an enhanced credit-based (ECRED)
 * connection request that may cover multiple deferred channels.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req req;
		__le16 scid[5];		/* room for up to 5 CIDs per request */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel that initiated the request */
	struct pid *pid;		/* peer PID; only same-PID channels join */
	int count;			/* number of scid[] entries filled */
};
1270 
1271 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1272 {
1273 	struct l2cap_ecred_conn_data *conn = data;
1274 	struct pid *pid;
1275 
1276 	if (chan == conn->chan)
1277 		return;
1278 
1279 	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1280 		return;
1281 
1282 	pid = chan->ops->get_peer_pid(chan);
1283 
1284 	/* Only add deferred channels with the same PID/PSM */
1285 	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1286 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1287 		return;
1288 
1289 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1290 		return;
1291 
1292 	l2cap_ecred_init(chan, 0);
1293 
1294 	/* Set the same ident so we can match on the rsp */
1295 	chan->ident = conn->chan->ident;
1296 
1297 	/* Include all channels deferred */
1298 	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1299 
1300 	conn->count++;
1301 }
1302 
/* Send an enhanced credit-based connection request for @chan,
 * batching in any other deferred channels with the same PID/PSM.
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	/* Deferred channels are collected by an initiating channel */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm     = chan->psm;
	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.req.mps     = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0]     = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	/* Fold matching deferred channels into the same PDU */
	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1335 
1336 static void l2cap_le_start(struct l2cap_chan *chan)
1337 {
1338 	struct l2cap_conn *conn = chan->conn;
1339 
1340 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1341 		return;
1342 
1343 	if (!chan->psm) {
1344 		l2cap_chan_ready(chan);
1345 		return;
1346 	}
1347 
1348 	if (chan->state == BT_CONNECT) {
1349 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1350 			l2cap_ecred_connect(chan);
1351 		else
1352 			l2cap_le_connect(chan);
1353 	}
1354 }
1355 
1356 static void l2cap_start_connection(struct l2cap_chan *chan)
1357 {
1358 	if (chan->conn->hcon->type == LE_LINK) {
1359 		l2cap_le_start(chan);
1360 	} else {
1361 		l2cap_send_conn_req(chan);
1362 	}
1363 }
1364 
/* Send the L2CAP information request (feature mask) once per
 * connection, arming a timeout in case the peer never answers.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1382 
1383 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1384 {
1385 	/* The minimum encryption key size needs to be enforced by the
1386 	 * host stack before establishing any L2CAP connections. The
1387 	 * specification in theory allows a minimum of 1, but to align
1388 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1389 	 *
1390 	 * This check might also be called for unencrypted connections
1391 	 * that have no key size requirements. Ensure that the link is
1392 	 * actually encrypted before enforcing a key size.
1393 	 */
1394 	int min_key_size = hcon->hdev->min_enc_key_size;
1395 
1396 	/* On FIPS security level, key size must be 16 bytes */
1397 	if (hcon->sec_level == BT_SECURITY_FIPS)
1398 		min_key_size = 16;
1399 
1400 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1401 		hcon->enc_key_size >= min_key_size);
1402 }
1403 
/* Kick off channel establishment once the transport and the L2CAP
 * information exchange allow it.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* BR/EDR: learn the remote feature mask before connecting */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	/* Weak encryption key: arm a disconnect timer instead */
	if (l2cap_check_enc_key_size(conn->hcon))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1430 
1431 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1432 {
1433 	u32 local_feat_mask = l2cap_feat_mask;
1434 	if (!disable_ertm)
1435 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1436 
1437 	switch (mode) {
1438 	case L2CAP_MODE_ERTM:
1439 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1440 	case L2CAP_MODE_STREAMING:
1441 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1442 	default:
1443 		return 0x00;
1444 	}
1445 }
1446 
/* Send a disconnect request for @chan and move it to BT_DISCONN,
 * recording @err as the reason reported to the owner.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* ERTM timers are meaningless once teardown begins */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1468 
1469 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its connection state
 * machine, typically after the information exchange has completed.
 * Takes conn->chan_lock and each channel lock in turn.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		/* Non connection-oriented channels need no handshake */
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close if a STATE2_DEVICE channel requires a
			 * mode the peer does not support.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connection awaiting our response */
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only a fresh success response triggers config */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1549 
/* LE link is up: trigger outgoing pairing if needed and correct the
 * connection parameters when we are the peripheral.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1582 
/* Underlying link is established: start or ready every channel
 * according to link type, then resume pending RX processing.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* BR/EDR needs the feature mask exchange before channel setup */
	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed/connectionless channels become usable as
			 * soon as the info exchange has finished.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process any frames that arrived before we were ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1618 
1619 /* Notify sockets that we cannot guaranty reliability anymore */
1620 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1621 {
1622 	struct l2cap_chan *chan;
1623 
1624 	BT_DBG("conn %p", conn);
1625 
1626 	mutex_lock(&conn->chan_lock);
1627 
1628 	list_for_each_entry(chan, &conn->chan_l, list) {
1629 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1630 			l2cap_chan_set_err(chan, err);
1631 	}
1632 
1633 	mutex_unlock(&conn->chan_lock);
1634 }
1635 
/* Info req/rsp timed out: give up waiting for the peer's feature
 * mask and proceed as if the exchange had completed.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1646 
1647 /*
1648  * l2cap_user
1649  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1650  * callback is called during registration. The ->remove callback is called
1651  * during unregistration.
1652  * An l2cap_user object can either be explicitly unregistered or when the
1653  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1654  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1655  * External modules must own a reference to the l2cap_conn object if they intend
1656  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1657  * any time if they don't.
1658  */
1659 
/* Register an external user on @conn. Returns 0 on success, -EINVAL
 * if @user is already registered, -ENODEV if the connection is being
 * torn down, or the error from the user's ->probe() callback.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1697 
1698 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1699 {
1700 	struct hci_dev *hdev = conn->hcon->hdev;
1701 
1702 	hci_dev_lock(hdev);
1703 
1704 	if (list_empty(&user->list))
1705 		goto out_unlock;
1706 
1707 	list_del_init(&user->list);
1708 	user->remove(conn, user);
1709 
1710 out_unlock:
1711 	hci_dev_unlock(hdev);
1712 }
1713 EXPORT_SYMBOL(l2cap_unregister_user);
1714 
1715 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1716 {
1717 	struct l2cap_user *user;
1718 
1719 	while (!list_empty(&conn->users)) {
1720 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1721 		list_del_init(&user->list);
1722 		user->remove(conn, user);
1723 	}
1724 }
1725 
/* Tear down the L2CAP state attached to @hcon: stop RX processing,
 * unregister users, close every channel with @err, and drop the
 * connection reference. Caller holds the hci_conn/hci_dev locks
 * (see l2cap_register_user() for the locking contract).
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	cancel_delayed_work_sync(&conn->id_addr_timer);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so it survives l2cap_chan_del() */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* Detach from the hci_conn before dropping our reference */
	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1780 
/* Refcount release callback: drop the hci_conn reference held by the
 * l2cap_conn, then free it.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1788 
/* Take a reference on @conn; paired with l2cap_conn_put(). */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1795 
/* Drop a reference on @conn; frees it via l2cap_conn_free() at zero. */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1801 
1802 /* ---- Socket interface ---- */
1803 
1804 /* Find socket with psm and source / destination bdaddr.
1805  * Returns closest match.
1806  */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		/* state == 0 matches channels in any state */
		if (state && c->state != state)
			continue;

		/* The channel's source address type must fit the link */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Skip channels already being released */
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	/* Hold the wildcard match, if any, before dropping the lock */
	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1857 
/* ERTM monitor timer expired: feed the event into the TX state
 * machine. The timer holds a channel reference, released here.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel was detached from its connection in the meantime */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1878 
/* ERTM retransmission timer expired: feed the event into the TX
 * state machine. The timer holds a channel reference, released here.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel was detached from its connection in the meantime */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1898 
/* Transmit all queued I-frames in streaming mode: frames are numbered
 * and sent immediately, with no retransmission bookkeeping.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming mode never acknowledges received frames */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1934 
/* Transmit as many queued I-frames as the remote TX window allows in
 * ERTM mode. Returns the number of frames sent, or -ENOTCONN if the
 * channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Peer signalled RNR: hold transmission until it recovers */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggyback a pending F-bit on this I-frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Each I-frame also acknowledges up to buffer_seq */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; the original skb stays queued
		 * for possible retransmission.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2001 
/* Retransmit every frame queued on chan->retrans_list, enforcing the
 * channel's max_tx retry limit.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	/* Peer signalled RNR: hold retransmission until it recovers */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		/* Too many retries: give up and disconnect the channel */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack and F-bit for the retransmission */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2079 
/* Queue the single frame identified by control->reqseq for
 * retransmission and kick the resend machinery.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2088 
/* Queue every unacked frame starting at control->reqseq for
 * retransmission (e.g. after an REJ or poll response).
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Rebuild the retransmit list from scratch */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame at or after reqseq */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything up to (not including) tx_send_head */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2122 
/* Acknowledge received I-frames: send RNR when locally busy, send RR
 * once 3/4 of the ack window is pending, otherwise defer via the ack
 * timer (pending acks may also piggyback on outgoing I-frames).
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Anything still unacked gets picked up by the timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2172 
/* Copy @len bytes of user data from @msg into @skb, spilling anything
 * beyond the first @count bytes into MTU-sized fragments chained on
 * skb's frag_list. Returns bytes consumed or a negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
				   &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Account the fragment in the head skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2216 
2217 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2218 						 struct msghdr *msg, size_t len)
2219 {
2220 	struct l2cap_conn *conn = chan->conn;
2221 	struct sk_buff *skb;
2222 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2223 	struct l2cap_hdr *lh;
2224 
2225 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2226 	       __le16_to_cpu(chan->psm), len);
2227 
2228 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2229 
2230 	skb = chan->ops->alloc_skb(chan, hlen, count,
2231 				   msg->msg_flags & MSG_DONTWAIT);
2232 	if (IS_ERR(skb))
2233 		return skb;
2234 
2235 	/* Create L2CAP header */
2236 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2237 	lh->cid = cpu_to_le16(chan->dcid);
2238 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2239 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2240 
2241 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2242 	if (unlikely(err < 0)) {
2243 		kfree_skb(skb);
2244 		return ERR_PTR(err);
2245 	}
2246 	return skb;
2247 }
2248 
2249 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2250 					      struct msghdr *msg, size_t len)
2251 {
2252 	struct l2cap_conn *conn = chan->conn;
2253 	struct sk_buff *skb;
2254 	int err, count;
2255 	struct l2cap_hdr *lh;
2256 
2257 	BT_DBG("chan %p len %zu", chan, len);
2258 
2259 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2260 
2261 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2262 				   msg->msg_flags & MSG_DONTWAIT);
2263 	if (IS_ERR(skb))
2264 		return skb;
2265 
2266 	/* Create L2CAP header */
2267 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2268 	lh->cid = cpu_to_le16(chan->dcid);
2269 	lh->len = cpu_to_le16(len);
2270 
2271 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2272 	if (unlikely(err < 0)) {
2273 		kfree_skb(skb);
2274 		return ERR_PTR(err);
2275 	}
2276 	return skb;
2277 }
2278 
/* Build an ERTM/streaming I-frame PDU.
 *
 * Reserves headroom for the basic header, the (enhanced or extended)
 * control field, an optional SDU-length field (first PDU of a
 * segmented SDU, when @sdulen != 0) and an optional FCS trailer.  The
 * control field is zero-filled here and populated at transmit time.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Control field size depends on enhanced vs extended mode */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header; lh->len covers everything after the
	 * basic header (control, SDU length, payload, FCS).
	 */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2332 
/* Segment an outgoing SDU into ERTM/streaming I-frame PDUs.
 *
 * Each PDU is tagged with a SAR value (unsegmented, start, continue,
 * end); only the start PDU carries the total SDU length.  On success
 * all PDUs are on @seg_queue; on failure the queue is purged and a
 * negative errno is returned.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the first (start) PDU carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2398 
/* Build one LE (or enhanced) credit-based flow-control PDU (K-frame).
 *
 * The first PDU of an SDU carries a 2-byte SDU-length field (when
 * @sdulen != 0); continuation PDUs carry only the basic header and
 * payload.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header; lh->len covers the optional SDU-length
	 * field plus the payload.
	 */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2441 
/* Segment an SDU into LE flow-control PDUs on @seg_queue.
 *
 * The first PDU reserves room for the SDU-length field, so its
 * payload budget is remote_mps - 2; subsequent PDUs regain those two
 * bytes.  On failure the queue is purged and a negative errno is
 * returned.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		/* After the first PDU, drop the SDU-length field and
		 * give its two bytes back to the payload budget.
		 */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2477 
2478 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2479 {
2480 	int sent = 0;
2481 
2482 	BT_DBG("chan %p", chan);
2483 
2484 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2485 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2486 		chan->tx_credits--;
2487 		sent++;
2488 	}
2489 
2490 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2491 	       skb_queue_len(&chan->tx_q));
2492 }
2493 
/* Queue or transmit an outgoing SDU on @chan according to the channel
 * mode.  Returns the number of bytes accepted (== @len on success) or
 * a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have gone down while segmenting (skb
		 * allocation can block), so re-check before queueing.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: stop the caller from writing more */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2597 
/* Send an SREJ S-frame for every sequence number between
 * expected_tx_seq and @txseq that has not already been received into
 * the srej queue, recording each on srej_list, then advance
 * expected_tx_seq past @txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Skip frames already buffered out of order */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2620 
2621 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2622 {
2623 	struct l2cap_ctrl control;
2624 
2625 	BT_DBG("chan %p", chan);
2626 
2627 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2628 		return;
2629 
2630 	memset(&control, 0, sizeof(control));
2631 	control.sframe = 1;
2632 	control.super = L2CAP_SUPER_SREJ;
2633 	control.reqseq = chan->srej_list.tail;
2634 	l2cap_send_sframe(chan, &control);
2635 }
2636 
/* Re-send SREJ frames for every outstanding entry on the SREJ list,
 * rotating each popped entry back to the tail.  Stops when @txseq is
 * reached, the list empties, or one full pass has been made (detected
 * via the captured initial head).
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Keep the entry outstanding for a later pass */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2662 
/* Process an acknowledgment (ReqSeq) from the peer: release every
 * frame on the TX queue with a sequence number up to (but excluding)
 * @reqseq, update the expected-ack bookkeeping and stop the
 * retransmission timer once nothing is outstanding.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack brings no news */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2694 
2695 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2696 {
2697 	BT_DBG("chan %p", chan);
2698 
2699 	chan->expected_tx_seq = chan->buffer_seq;
2700 	l2cap_seq_list_clear(&chan->srej_list);
2701 	skb_queue_purge(&chan->srej_q);
2702 	chan->rx_state = L2CAP_RX_STATE_RECV;
2703 }
2704 
/* ERTM transmit-side state machine handler for the XMIT state: data
 * requests are sent immediately, polls and retransmission timeouts
 * move the channel to WAIT_F.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* An RNR was sent while busy; poll the peer with
			 * RR so it learns we can receive again.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2776 
2777 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2778 				  struct l2cap_ctrl *control,
2779 				  struct sk_buff_head *skbs, u8 event)
2780 {
2781 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2782 	       event);
2783 
2784 	switch (event) {
2785 	case L2CAP_EV_DATA_REQUEST:
2786 		if (chan->tx_send_head == NULL)
2787 			chan->tx_send_head = skb_peek(skbs);
2788 		/* Queue data, but don't send. */
2789 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2790 		break;
2791 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2792 		BT_DBG("Enter LOCAL_BUSY");
2793 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2794 
2795 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2796 			/* The SREJ_SENT state must be aborted if we are to
2797 			 * enter the LOCAL_BUSY state.
2798 			 */
2799 			l2cap_abort_rx_srej_sent(chan);
2800 		}
2801 
2802 		l2cap_send_ack(chan);
2803 
2804 		break;
2805 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2806 		BT_DBG("Exit LOCAL_BUSY");
2807 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2808 
2809 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2810 			struct l2cap_ctrl local_control;
2811 			memset(&local_control, 0, sizeof(local_control));
2812 			local_control.sframe = 1;
2813 			local_control.super = L2CAP_SUPER_RR;
2814 			local_control.poll = 1;
2815 			local_control.reqseq = chan->buffer_seq;
2816 			l2cap_send_sframe(chan, &local_control);
2817 
2818 			chan->retry_count = 1;
2819 			__set_monitor_timer(chan);
2820 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2821 		}
2822 		break;
2823 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2824 		l2cap_process_reqseq(chan, control->reqseq);
2825 		fallthrough;
2826 
2827 	case L2CAP_EV_RECV_FBIT:
2828 		if (control && control->final) {
2829 			__clear_monitor_timer(chan);
2830 			if (chan->unacked_frames > 0)
2831 				__set_retrans_timer(chan);
2832 			chan->retry_count = 0;
2833 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2834 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2835 		}
2836 		break;
2837 	case L2CAP_EV_EXPLICIT_POLL:
2838 		/* Ignore */
2839 		break;
2840 	case L2CAP_EV_MONITOR_TO:
2841 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2842 			l2cap_send_rr_or_rnr(chan, 1);
2843 			__set_monitor_timer(chan);
2844 			chan->retry_count++;
2845 		} else {
2846 			l2cap_send_disconn_req(chan, ECONNABORTED);
2847 		}
2848 		break;
2849 	default:
2850 		break;
2851 	}
2852 }
2853 
2854 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2855 		     struct sk_buff_head *skbs, u8 event)
2856 {
2857 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2858 	       chan, control, skbs, event, chan->tx_state);
2859 
2860 	switch (chan->tx_state) {
2861 	case L2CAP_TX_STATE_XMIT:
2862 		l2cap_tx_state_xmit(chan, control, skbs, event);
2863 		break;
2864 	case L2CAP_TX_STATE_WAIT_F:
2865 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2866 		break;
2867 	default:
2868 		/* Ignore event */
2869 		break;
2870 	}
2871 }
2872 
/* Feed a received frame's ReqSeq and F-bit into the TX state machine. */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2879 
/* Feed only a received frame's F-bit into the TX state machine. */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2886 
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Hold chan_lock while walking the connection's channel list */
	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->l2cap.chan == chan)
			continue;

		/* Best effort: a failed clone just skips this channel */
		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		/* recv takes ownership on success; free on rejection */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
2914 
2915 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: basic L2CAP header on the signalling
 * CID, command header (@code, @ident, @dlen), then @dlen bytes of
 * @data.  Payload beyond the first HCI fragment is chained on the
 * frag_list.  Returns NULL on allocation failure or if the MTU cannot
 * even hold the headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use a dedicated signalling channel */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the first fragment with payload */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees any fragments already chained on skb as well */
	kfree_skb(skb);
	return NULL;
}
2981 
/* Decode one configuration option at *@ptr, returning its type,
 * length and value and advancing *@ptr past it.
 *
 * 1/2/4-byte options are returned by value in *@val; any other length
 * returns a pointer to the in-place payload cast into *@val.  Returns
 * the total number of bytes consumed.  NOTE: callers are responsible
 * for bounds-checking the advertised opt->len against the remaining
 * buffer.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3015 
/* Append one configuration option at *@ptr and advance the pointer.
 *
 * 1/2/4-byte values are stored little-endian; other lengths treat
 * @val as a pointer to @len bytes to copy.  Silently drops the option
 * if fewer than @size bytes remain in the output buffer.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Not enough room left: skip the option rather than overflow */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: @val is a pointer to the data */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3048 
3049 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3050 {
3051 	struct l2cap_conf_efs efs;
3052 
3053 	switch (chan->mode) {
3054 	case L2CAP_MODE_ERTM:
3055 		efs.id		= chan->local_id;
3056 		efs.stype	= chan->local_stype;
3057 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3058 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3059 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3060 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3061 		break;
3062 
3063 	case L2CAP_MODE_STREAMING:
3064 		efs.id		= 1;
3065 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3066 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3067 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3068 		efs.acc_lat	= 0;
3069 		efs.flush_to	= 0;
3070 		break;
3071 
3072 	default:
3073 		return;
3074 	}
3075 
3076 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3077 			   (unsigned long) &efs, size);
3078 }
3079 
/* Deferred-ack timer: if any received frames are still unacked when
 * the timer fires, send an RR/RNR to acknowledge them.  Drops the
 * channel reference taken when the timer was armed.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* Balance the reference held for this work item */
	l2cap_chan_put(chan);
}
3099 
/* Reset sequence-number state and queues for a (re)configured channel.
 *
 * Streaming mode needs only the common reset; ERTM additionally
 * initializes the RX/TX state machines and the SREJ/retransmit
 * sequence lists.  Returns 0 or a negative errno from list allocation
 * (the srej list is freed again if the retrans list fails).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Unwind the first allocation on partial failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3135 
3136 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3137 {
3138 	switch (mode) {
3139 	case L2CAP_MODE_STREAMING:
3140 	case L2CAP_MODE_ERTM:
3141 		if (l2cap_mode_supported(mode, remote_feat_mask))
3142 			return mode;
3143 		fallthrough;
3144 	default:
3145 		return L2CAP_MODE_BASIC;
3146 	}
3147 }
3148 
/* True if the remote advertised Extended Window Size support. */
static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
{
	return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
}
3153 
/* True if the remote advertised Extended Flow Specification support. */
static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
{
	return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
}
3158 
/* Fill in the default ERTM retransmission and monitor timeouts
 * (little-endian, as carried in the RFC configuration option).
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
}
3165 
3166 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3167 {
3168 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3169 	    __l2cap_ews_supported(chan->conn)) {
3170 		/* use extended control field */
3171 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3172 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3173 	} else {
3174 		chan->tx_win = min_t(u16, chan->tx_win,
3175 				     L2CAP_DEFAULT_TX_WINDOW);
3176 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3177 	}
3178 	chan->ack_win = chan->tx_win;
3179 }
3180 
3181 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3182 {
3183 	struct hci_conn *conn = chan->conn->hcon;
3184 
3185 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3186 
3187 	/* The 2-DH1 packet has between 2 and 56 information bytes
3188 	 * (including the 2-byte payload header)
3189 	 */
3190 	if (!(conn->pkt_type & HCI_2DH1))
3191 		chan->imtu = 54;
3192 
3193 	/* The 3-DH1 packet has between 2 and 85 information bytes
3194 	 * (including the 2-byte payload header)
3195 	 */
3196 	if (!(conn->pkt_type & HCI_3DH1))
3197 		chan->imtu = 83;
3198 
3199 	/* The 2-DH3 packet has between 2 and 369 information bytes
3200 	 * (including the 2-byte payload header)
3201 	 */
3202 	if (!(conn->pkt_type & HCI_2DH3))
3203 		chan->imtu = 367;
3204 
3205 	/* The 3-DH3 packet has between 2 and 554 information bytes
3206 	 * (including the 2-byte payload header)
3207 	 */
3208 	if (!(conn->pkt_type & HCI_3DH3))
3209 		chan->imtu = 552;
3210 
3211 	/* The 2-DH5 packet has between 2 and 681 information bytes
3212 	 * (including the 2-byte payload header)
3213 	 */
3214 	if (!(conn->pkt_type & HCI_2DH5))
3215 		chan->imtu = 679;
3216 
3217 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3218 	 * (including the 2-byte payload header)
3219 	 */
3220 	if (!(conn->pkt_type & HCI_3DH5))
3221 		chan->imtu = 1021;
3222 }
3223 
/* Build an outgoing configuration request into @data (at most
 * @data_size bytes): selects the channel mode on the first request,
 * then appends MTU, RFC, and optionally EFS/EWS/FCS options according
 * to the chosen mode and the peer's feature mask.  Returns the number
 * of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only on the very first config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		/* Downgrade to a mode the remote actually supports */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only send an explicit basic-mode RFC option if the
		 * peer could have negotiated ERTM or streaming.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* PDU must fit in the HCI MTU with worst-case overhead */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* Full window goes in the EWS option when extended
		 * control is in use.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3349 
/* Parse the peer's accumulated Configure Request (chan->conf_req /
 * chan->conf_len) and build our Configure Response into @data, which can
 * hold at most @data_size bytes.
 *
 * Returns the number of bytes written to @data, or -ECONNREFUSED when
 * the requested configuration cannot be negotiated (e.g. mode mismatch
 * after the allowed number of exchanges, or an unsupported EWS option).
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	/* Defaults used when the peer omits the corresponding options */
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: collect all options from the request.  Options
	 * carrying an unexpected length are silently ignored.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS option is accepted but not acted upon */
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended window size not supported: refuse */
			return -ECONNREFUSED;

		default:
			if (hint)
				break;
			/* Echo unknown non-hint options back as rejected */
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	/* Mode may only be (re)selected during the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* A state-2 device must not switch away from its mode */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Give up after one failed renegotiation round */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				/* Incompatible service type */
				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the PDU size to what fits in our ACL MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (remote_efs &&
			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				/* Adopt the peer's extended flow spec */
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			/* Clamp the PDU size to what fits in our ACL MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3569 
/* Process the peer's Configure Response (@rsp, @len bytes) and build a
 * follow-up Configure Request into @data (at most @size bytes), adopting
 * the parameter values the peer proposed where acceptable.
 *
 * *result may be downgraded (e.g. to L2CAP_CONF_UNACCEPT when the peer's
 * MTU is below the minimum).  Returns the length of the request written
 * into @data, or -ECONNREFUSED when the response is unacceptable.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	/* Default when the peer sends no RFC option */
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	/* Walk the options; bad-length options are silently ignored */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* A state-2 device cannot change its mode */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Refuse an incompatible service type */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Basic mode cannot be upgraded by the peer's response */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				/* NOTE: efs is only guaranteed valid here
				 * when the peer actually sent the EFS
				 * option above.
				 */
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3687 
3688 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3689 				u16 result, u16 flags)
3690 {
3691 	struct l2cap_conf_rsp *rsp = data;
3692 	void *ptr = rsp->data;
3693 
3694 	BT_DBG("chan %p", chan);
3695 
3696 	rsp->scid   = cpu_to_le16(chan->dcid);
3697 	rsp->result = cpu_to_le16(result);
3698 	rsp->flags  = cpu_to_le16(flags);
3699 
3700 	return ptr - data;
3701 }
3702 
3703 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3704 {
3705 	struct l2cap_le_conn_rsp rsp;
3706 	struct l2cap_conn *conn = chan->conn;
3707 
3708 	BT_DBG("chan %p", chan);
3709 
3710 	rsp.dcid    = cpu_to_le16(chan->scid);
3711 	rsp.mtu     = cpu_to_le16(chan->imtu);
3712 	rsp.mps     = cpu_to_le16(chan->mps);
3713 	rsp.credits = cpu_to_le16(chan->rx_credits);
3714 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3715 
3716 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3717 		       &rsp);
3718 }
3719 
/* Per-channel iterator used to decide whether an ECRED response can be
 * sent yet.  @data points to an int accumulator: a positive count means
 * channels are still pending accept, -ECONNREFUSED means at least one
 * channel was refused.
 */
static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
	int *result = data;

	/* Stop once an error is recorded, and skip channels that
	 * initiated the request themselves.
	 */
	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	if (chan->state == BT_CONNECT2) {
		/* If channel still pending accept add to result */
		(*result)++;
	} else if (chan->state != BT_CONNECTED) {
		/* If not connected or pending accept it has been refused */
		*result = -ECONNREFUSED;
	}
}
3740 
/* Scratch state for building one Enhanced Credit Based Connection
 * Response covering every channel created with the same ident.
 * The nested pdu layout mirrors the wire format and must stay packed.
 */
struct l2cap_ecred_rsp_data {
	struct {
		struct l2cap_ecred_conn_rsp rsp;
		/* room for one destination CID per accepted channel */
		__le16 scid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	int count;	/* number of CIDs filled into pdu.rsp.dcid[] */
};
3748 
3749 static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
3750 {
3751 	struct l2cap_ecred_rsp_data *rsp = data;
3752 
3753 	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
3754 		return;
3755 
3756 	/* Reset ident so only one response is sent */
3757 	chan->ident = 0;
3758 
3759 	/* Include all channels pending with the same ident */
3760 	if (!rsp->pdu.rsp.result)
3761 		rsp->pdu.rsp.dcid[rsp->count++] = cpu_to_le16(chan->scid);
3762 	else
3763 		l2cap_chan_del(chan, ECONNRESET);
3764 }
3765 
3766 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3767 {
3768 	struct l2cap_conn *conn = chan->conn;
3769 	struct l2cap_ecred_rsp_data data;
3770 	u16 id = chan->ident;
3771 	int result = 0;
3772 
3773 	if (!id)
3774 		return;
3775 
3776 	BT_DBG("chan %p id %d", chan, id);
3777 
3778 	memset(&data, 0, sizeof(data));
3779 
3780 	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
3781 	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
3782 	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
3783 	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3784 
3785 	/* Verify that all channels are ready */
3786 	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);
3787 
3788 	if (result > 0)
3789 		return;
3790 
3791 	if (result < 0)
3792 		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);
3793 
3794 	/* Build response */
3795 	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);
3796 
3797 	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
3798 		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
3799 		       &data.pdu);
3800 }
3801 
3802 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3803 {
3804 	struct l2cap_conn_rsp rsp;
3805 	struct l2cap_conn *conn = chan->conn;
3806 	u8 buf[128];
3807 	u8 rsp_code;
3808 
3809 	rsp.scid   = cpu_to_le16(chan->dcid);
3810 	rsp.dcid   = cpu_to_le16(chan->scid);
3811 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3812 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3813 	rsp_code = L2CAP_CONN_RSP;
3814 
3815 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3816 
3817 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3818 
3819 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3820 		return;
3821 
3822 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3823 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3824 	chan->num_conf_req++;
3825 }
3826 
/* Extract the finally agreed RFC (and extended window size) options from
 * a successful Configure Response and apply them to the channel.  Only
 * meaningful for ERTM and streaming modes; other modes return early.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Walk the options; bad-length options are ignored */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control the EWS option carries the window */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
3882 
3883 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3884 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3885 				    u8 *data)
3886 {
3887 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3888 
3889 	if (cmd_len < sizeof(*rej))
3890 		return -EPROTO;
3891 
3892 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3893 		return 0;
3894 
3895 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3896 	    cmd->ident == conn->info_ident) {
3897 		cancel_delayed_work(&conn->info_timer);
3898 
3899 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3900 		conn->info_ident = 0;
3901 
3902 		l2cap_conn_start(conn);
3903 	}
3904 
3905 	return 0;
3906 }
3907 
/* Handle an incoming BR/EDR Connection Request: look up a channel
 * listening on the requested PSM, run security and CID validity checks,
 * create the new channel and answer with a Connection Response
 * (@rsp_code).  A pending response with no info also triggers the
 * feature-mask Information Request exchange.
 */
static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
			  u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Inherit addressing from the underlying ACL link */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Accept is deferred to the owner */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange not finished yet */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Without a parent channel the locks above were never taken */
	if (!pchan)
		return;

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);
}
4048 
4049 static int l2cap_connect_req(struct l2cap_conn *conn,
4050 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4051 {
4052 	struct hci_dev *hdev = conn->hcon->hdev;
4053 	struct hci_conn *hcon = conn->hcon;
4054 
4055 	if (cmd_len < sizeof(struct l2cap_conn_req))
4056 		return -EPROTO;
4057 
4058 	hci_dev_lock(hdev);
4059 	if (hci_dev_test_flag(hdev, HCI_MGMT))
4060 		mgmt_device_connected(hdev, hcon, NULL, 0);
4061 	hci_dev_unlock(hdev);
4062 
4063 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4064 	return 0;
4065 }
4066 
/* Handle a Connection Response from the peer: locate the local channel
 * (by our source CID, or by the command ident while the CID is still
 * unknown) and either move it to configuration, mark it pending, or
 * delete it on refusal.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	/* A successful response must carry a dynamic destination CID */
	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		/* No source CID yet: fall back to the command ident */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	/* Take a reference unless the channel is already being freed */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Reject a destination CID that is already in use */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only one Configure Request may be outstanding */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result code means the connection was refused */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4155 
4156 static inline void set_default_fcs(struct l2cap_chan *chan)
4157 {
4158 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4159 	 * sides request it.
4160 	 */
4161 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4162 		chan->fcs = L2CAP_FCS_NONE;
4163 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4164 		chan->fcs = L2CAP_FCS_CRC16;
4165 }
4166 
4167 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4168 				    u8 ident, u16 flags)
4169 {
4170 	struct l2cap_conn *conn = chan->conn;
4171 
4172 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4173 	       flags);
4174 
4175 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4176 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4177 
4178 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4179 		       l2cap_build_conf_rsp(chan, data,
4180 					    L2CAP_CONF_SUCCESS, flags), data);
4181 }
4182 
4183 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4184 				   u16 scid, u16 dcid)
4185 {
4186 	struct l2cap_cmd_rej_cid rej;
4187 
4188 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4189 	rej.scid = __cpu_to_le16(scid);
4190 	rej.dcid = __cpu_to_le16(dcid);
4191 
4192 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4193 }
4194 
/* Handle a Configure Request: accumulate (possibly fragmented) option
 * data in chan->conf_req, and once the final fragment arrives parse it,
 * send the Configure Response and drive the configuration state machine
 * (including our own Configure Request and ERTM initialization).
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only valid in these states */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
		chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Send our own Configure Request if not done already */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4303 
/* Handle a Configure Response: on success apply the agreed options; on
 * PENDING wait (possibly answering the EFS handshake); on UNKNOWN or
 * UNACCEPT retry with an adjusted request up to the retry limit; any
 * other result tears the channel down.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Adopt the negotiated RFC parameters */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* Both sides pending: answer the EFS handshake */
			l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			/* Make sure the adjusted request fits the buffer */
			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		/* Retry limit exceeded or hard failure: disconnect */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More option fragments to come: wait for them */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4410 
/* Handle an incoming L2CAP Disconnection Request.
 *
 * Looks up the local channel addressed by the request, echoes a
 * Disconnection Response, and tears the channel down.  Returns 0 on
 * success (an unknown CID is answered with a Command Reject, not an
 * error) or -EPROTO on a malformed PDU.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	/* Fixed-size PDU: anything else is a protocol violation */
	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's DCID names our local channel, i.e. our SCID.  On
	 * success the channel is returned locked with a reference held
	 * (released at the bottom of this function).
	 */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Drop the channel lock before taking conn->chan_lock so the
	 * chan_lock -> channel lock ordering is preserved, then remove the
	 * channel from the connection list.
	 */
	l2cap_chan_unlock(chan);
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNRESET);
	mutex_unlock(&conn->chan_lock);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);	/* balances l2cap_get_chan_by_scid() */

	return 0;
}
4453 
/* Handle an incoming L2CAP Disconnection Response.
 *
 * Completes a disconnect we initiated: the channel must exist and be in
 * BT_DISCONN, otherwise the response is silently ignored.  Returns 0
 * except for a malformed PDU (-EPROTO).
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Returns the channel locked with a reference held, or NULL */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		return 0;
	}

	/* Only accept the response if we actually sent a disconnect */
	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return 0;
	}

	/* Re-acquire locks in chan_lock -> channel lock order before
	 * unlinking the channel from the connection.
	 */
	l2cap_chan_unlock(chan);
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, 0);
	mutex_unlock(&conn->chan_lock);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);	/* balances l2cap_get_chan_by_scid() */

	return 0;
}
4494 
4495 static inline int l2cap_information_req(struct l2cap_conn *conn,
4496 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4497 					u8 *data)
4498 {
4499 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4500 	u16 type;
4501 
4502 	if (cmd_len != sizeof(*req))
4503 		return -EPROTO;
4504 
4505 	type = __le16_to_cpu(req->type);
4506 
4507 	BT_DBG("type 0x%4.4x", type);
4508 
4509 	if (type == L2CAP_IT_FEAT_MASK) {
4510 		u8 buf[8];
4511 		u32 feat_mask = l2cap_feat_mask;
4512 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4513 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4514 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4515 		if (!disable_ertm)
4516 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4517 				| L2CAP_FEAT_FCS;
4518 
4519 		put_unaligned_le32(feat_mask, rsp->data);
4520 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4521 			       buf);
4522 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4523 		u8 buf[12];
4524 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4525 
4526 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4527 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4528 		rsp->data[0] = conn->local_fixed_chan;
4529 		memset(rsp->data + 1, 0, 7);
4530 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4531 			       buf);
4532 	} else {
4533 		struct l2cap_info_rsp rsp;
4534 		rsp.type   = cpu_to_le16(type);
4535 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4536 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4537 			       &rsp);
4538 	}
4539 
4540 	return 0;
4541 }
4542 
/* Handle an Information Response from the peer.
 *
 * Only the response matching our outstanding request ident is accepted;
 * stale or duplicate responses are silently dropped.  A successful
 * feature-mask reply may trigger a follow-up fixed-channels request;
 * otherwise the information exchange is marked done and pending
 * channels are started.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused: proceed without extended feature info */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: ask which ones */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		/* First octet of the bitmap carries the fixed channels */
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4605 
/* Handle an LE Connection Parameter Update Request.
 *
 * Only the LE central may grant this request; -EINVAL makes the caller
 * reject the command when we are not master.  Validates the requested
 * parameters, replies accept/reject, and on accept asks the controller
 * to apply them and notifies mgmt so userspace may persist them.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	/* Fixed-size PDU */
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	/* Reject intervals above the current bound for this link; otherwise
	 * defer to the generic parameter sanity check.
	 */
	if (max > hcon->le_conn_max_interval) {
		BT_DBG("requested connection interval exceeds current bounds.");
		err = -EINVAL;
	} else {
		err = hci_check_conn_params(min, max, latency, to_multiplier);
	}

	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		/* Apply in the controller; the hint tells mgmt whether the
		 * parameters are worth storing for future connections.
		 */
		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
4661 
/* Handle an LE Credit Based Connection Response.
 *
 * Matches the response to our pending channel via the command ident.
 * On success the channel is completed with the peer's parameters; on a
 * security failure we raise our security level and retry via SMP; any
 * other result tears the channel down.  Returns 0, -EPROTO for a
 * malformed PDU, or -EBADSLT when no matching channel exists.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* A successful response must carry spec-minimum MTU/MPS (23) and a
	 * DCID inside the LE dynamic range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	/* Find the channel whose connect request used this ident */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Refuse a DCID that is already bound to another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Bump security one level above the current link level */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4748 
4749 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4750 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4751 				      u8 *data)
4752 {
4753 	int err = 0;
4754 
4755 	switch (cmd->code) {
4756 	case L2CAP_COMMAND_REJ:
4757 		l2cap_command_rej(conn, cmd, cmd_len, data);
4758 		break;
4759 
4760 	case L2CAP_CONN_REQ:
4761 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
4762 		break;
4763 
4764 	case L2CAP_CONN_RSP:
4765 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
4766 		break;
4767 
4768 	case L2CAP_CONF_REQ:
4769 		err = l2cap_config_req(conn, cmd, cmd_len, data);
4770 		break;
4771 
4772 	case L2CAP_CONF_RSP:
4773 		l2cap_config_rsp(conn, cmd, cmd_len, data);
4774 		break;
4775 
4776 	case L2CAP_DISCONN_REQ:
4777 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
4778 		break;
4779 
4780 	case L2CAP_DISCONN_RSP:
4781 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
4782 		break;
4783 
4784 	case L2CAP_ECHO_REQ:
4785 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4786 		break;
4787 
4788 	case L2CAP_ECHO_RSP:
4789 		break;
4790 
4791 	case L2CAP_INFO_REQ:
4792 		err = l2cap_information_req(conn, cmd, cmd_len, data);
4793 		break;
4794 
4795 	case L2CAP_INFO_RSP:
4796 		l2cap_information_rsp(conn, cmd, cmd_len, data);
4797 		break;
4798 
4799 	default:
4800 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
4801 		err = -EINVAL;
4802 		break;
4803 	}
4804 
4805 	return err;
4806 }
4807 
/* Handle an LE Credit Based Connection Request.
 *
 * Validates the request, finds a listening channel for the PSM, creates
 * a new child channel and replies with our parameters (or an error
 * result).  With FLAG_DEFER_SETUP the response is postponed until
 * userspace accepts, signalled internally by the L2CAP_CR_PEND result.
 * Returns 0, or -EPROTO for a malformed PDU.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* Spec minimum for LE CoC is 23 octets for both MTU and MPS */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* The listener's security requirements must already be met */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	/* Create the child channel for this connection */
	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;	/* the peer's SCID is our DCID */
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	/* Seed the flow control state with the peer's initial credits */
	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);	/* balances l2cap_global_chan_by_psm() */

	/* Deferred setup: the response is sent later on accept */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
4946 
/* Handle an LE Flow Control Credit packet.
 *
 * Adds the peer-granted credits to the channel's TX budget and resumes
 * any transmission that was stalled on credits.  A grant that would
 * overflow the 16-bit credit counter is a protocol violation and
 * disconnects the channel.  Returns 0, -EPROTO for a malformed PDU, or
 * -EBADSLT for an unknown CID.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid	= __le16_to_cpu(pkt->cid);
	credits	= __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	/* Returns the channel locked with a reference held */
	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* Total outstanding credits must never exceed 65535 */
	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		goto unlock;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);	/* balances l2cap_get_chan_by_dcid() */

	return 0;
}
4993 
4994 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
4995 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4996 				       u8 *data)
4997 {
4998 	struct l2cap_ecred_conn_req *req = (void *) data;
4999 	struct {
5000 		struct l2cap_ecred_conn_rsp rsp;
5001 		__le16 dcid[L2CAP_ECRED_MAX_CID];
5002 	} __packed pdu;
5003 	struct l2cap_chan *chan, *pchan;
5004 	u16 mtu, mps;
5005 	__le16 psm;
5006 	u8 result, len = 0;
5007 	int i, num_scid;
5008 	bool defer = false;
5009 
5010 	if (!enable_ecred)
5011 		return -EINVAL;
5012 
5013 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
5014 		result = L2CAP_CR_LE_INVALID_PARAMS;
5015 		goto response;
5016 	}
5017 
5018 	cmd_len -= sizeof(*req);
5019 	num_scid = cmd_len / sizeof(u16);
5020 
5021 	if (num_scid > ARRAY_SIZE(pdu.dcid)) {
5022 		result = L2CAP_CR_LE_INVALID_PARAMS;
5023 		goto response;
5024 	}
5025 
5026 	mtu  = __le16_to_cpu(req->mtu);
5027 	mps  = __le16_to_cpu(req->mps);
5028 
5029 	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
5030 		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
5031 		goto response;
5032 	}
5033 
5034 	psm  = req->psm;
5035 
5036 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
5037 	 * page 1059:
5038 	 *
5039 	 * Valid range: 0x0001-0x00ff
5040 	 *
5041 	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
5042 	 */
5043 	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
5044 		result = L2CAP_CR_LE_BAD_PSM;
5045 		goto response;
5046 	}
5047 
5048 	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
5049 
5050 	memset(&pdu, 0, sizeof(pdu));
5051 
5052 	/* Check if we have socket listening on psm */
5053 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5054 					 &conn->hcon->dst, LE_LINK);
5055 	if (!pchan) {
5056 		result = L2CAP_CR_LE_BAD_PSM;
5057 		goto response;
5058 	}
5059 
5060 	mutex_lock(&conn->chan_lock);
5061 	l2cap_chan_lock(pchan);
5062 
5063 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5064 				     SMP_ALLOW_STK)) {
5065 		result = L2CAP_CR_LE_AUTHENTICATION;
5066 		goto unlock;
5067 	}
5068 
5069 	result = L2CAP_CR_LE_SUCCESS;
5070 
5071 	for (i = 0; i < num_scid; i++) {
5072 		u16 scid = __le16_to_cpu(req->scid[i]);
5073 
5074 		BT_DBG("scid[%d] 0x%4.4x", i, scid);
5075 
5076 		pdu.dcid[i] = 0x0000;
5077 		len += sizeof(*pdu.dcid);
5078 
5079 		/* Check for valid dynamic CID range */
5080 		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5081 			result = L2CAP_CR_LE_INVALID_SCID;
5082 			continue;
5083 		}
5084 
5085 		/* Check if we already have channel with that dcid */
5086 		if (__l2cap_get_chan_by_dcid(conn, scid)) {
5087 			result = L2CAP_CR_LE_SCID_IN_USE;
5088 			continue;
5089 		}
5090 
5091 		chan = pchan->ops->new_connection(pchan);
5092 		if (!chan) {
5093 			result = L2CAP_CR_LE_NO_MEM;
5094 			continue;
5095 		}
5096 
5097 		bacpy(&chan->src, &conn->hcon->src);
5098 		bacpy(&chan->dst, &conn->hcon->dst);
5099 		chan->src_type = bdaddr_src_type(conn->hcon);
5100 		chan->dst_type = bdaddr_dst_type(conn->hcon);
5101 		chan->psm  = psm;
5102 		chan->dcid = scid;
5103 		chan->omtu = mtu;
5104 		chan->remote_mps = mps;
5105 
5106 		__l2cap_chan_add(conn, chan);
5107 
5108 		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
5109 
5110 		/* Init response */
5111 		if (!pdu.rsp.credits) {
5112 			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
5113 			pdu.rsp.mps = cpu_to_le16(chan->mps);
5114 			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
5115 		}
5116 
5117 		pdu.dcid[i] = cpu_to_le16(chan->scid);
5118 
5119 		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5120 
5121 		chan->ident = cmd->ident;
5122 		chan->mode = L2CAP_MODE_EXT_FLOWCTL;
5123 
5124 		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5125 			l2cap_state_change(chan, BT_CONNECT2);
5126 			defer = true;
5127 			chan->ops->defer(chan);
5128 		} else {
5129 			l2cap_chan_ready(chan);
5130 		}
5131 	}
5132 
5133 unlock:
5134 	l2cap_chan_unlock(pchan);
5135 	mutex_unlock(&conn->chan_lock);
5136 	l2cap_chan_put(pchan);
5137 
5138 response:
5139 	pdu.rsp.result = cpu_to_le16(result);
5140 
5141 	if (defer)
5142 		return 0;
5143 
5144 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
5145 		       sizeof(pdu.rsp) + len, &pdu);
5146 
5147 	return 0;
5148 }
5149 
/* Handle an Enhanced Credit Based Connection Response.
 *
 * Walks every channel pending on this command ident and completes or
 * tears it down according to the per-channel DCID and the shared result
 * code.  A DCID that collides with an existing channel discards both
 * channels, as required by the spec.  Returns 0, or -EPROTO for a
 * malformed PDU.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	mutex_lock(&conn->chan_lock);

	/* Remaining length is the variable dcid[] array */
	cmd_len -= sizeof(*rsp);

	/* _safe iteration: channels may be deleted from the list below */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		/* Only channels pending on this exact request */
		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Raise security one level and retry via SMP */
			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return err;
}
5263 
5264 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
5265 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5266 					 u8 *data)
5267 {
5268 	struct l2cap_ecred_reconf_req *req = (void *) data;
5269 	struct l2cap_ecred_reconf_rsp rsp;
5270 	u16 mtu, mps, result;
5271 	struct l2cap_chan *chan;
5272 	int i, num_scid;
5273 
5274 	if (!enable_ecred)
5275 		return -EINVAL;
5276 
5277 	if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) {
5278 		result = L2CAP_CR_LE_INVALID_PARAMS;
5279 		goto respond;
5280 	}
5281 
5282 	mtu = __le16_to_cpu(req->mtu);
5283 	mps = __le16_to_cpu(req->mps);
5284 
5285 	BT_DBG("mtu %u mps %u", mtu, mps);
5286 
5287 	if (mtu < L2CAP_ECRED_MIN_MTU) {
5288 		result = L2CAP_RECONF_INVALID_MTU;
5289 		goto respond;
5290 	}
5291 
5292 	if (mps < L2CAP_ECRED_MIN_MPS) {
5293 		result = L2CAP_RECONF_INVALID_MPS;
5294 		goto respond;
5295 	}
5296 
5297 	cmd_len -= sizeof(*req);
5298 	num_scid = cmd_len / sizeof(u16);
5299 	result = L2CAP_RECONF_SUCCESS;
5300 
5301 	for (i = 0; i < num_scid; i++) {
5302 		u16 scid;
5303 
5304 		scid = __le16_to_cpu(req->scid[i]);
5305 		if (!scid)
5306 			return -EPROTO;
5307 
5308 		chan = __l2cap_get_chan_by_dcid(conn, scid);
5309 		if (!chan)
5310 			continue;
5311 
5312 		/* If the MTU value is decreased for any of the included
5313 		 * channels, then the receiver shall disconnect all
5314 		 * included channels.
5315 		 */
5316 		if (chan->omtu > mtu) {
5317 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
5318 			       chan->omtu, mtu);
5319 			result = L2CAP_RECONF_INVALID_MTU;
5320 		}
5321 
5322 		chan->omtu = mtu;
5323 		chan->remote_mps = mps;
5324 	}
5325 
5326 respond:
5327 	rsp.result = cpu_to_le16(result);
5328 
5329 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
5330 		       &rsp);
5331 
5332 	return 0;
5333 }
5334 
5335 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
5336 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5337 					 u8 *data)
5338 {
5339 	struct l2cap_chan *chan, *tmp;
5340 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
5341 	u16 result;
5342 
5343 	if (cmd_len < sizeof(*rsp))
5344 		return -EPROTO;
5345 
5346 	result = __le16_to_cpu(rsp->result);
5347 
5348 	BT_DBG("result 0x%4.4x", rsp->result);
5349 
5350 	if (!result)
5351 		return 0;
5352 
5353 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
5354 		if (chan->ident != cmd->ident)
5355 			continue;
5356 
5357 		l2cap_chan_del(chan, ECONNRESET);
5358 	}
5359 
5360 	return 0;
5361 }
5362 
/* Handle a Command Reject received on the LE signaling channel.
 *
 * A reject means the peer did not understand one of our requests; any
 * channel still pending on that request ident is torn down.  Returns 0,
 * or -EPROTO for a malformed PDU.
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		goto done;

	/* Take a reference only if the channel is not already being
	 * released; a zero refcount means it is going away.
	 */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		goto done;

	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNREFUSED);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);	/* balances hold_unless_zero above */

done:
	mutex_unlock(&conn->chan_lock);
	return 0;
}
5392 
5393 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5394 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5395 				   u8 *data)
5396 {
5397 	int err = 0;
5398 
5399 	switch (cmd->code) {
5400 	case L2CAP_COMMAND_REJ:
5401 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
5402 		break;
5403 
5404 	case L2CAP_CONN_PARAM_UPDATE_REQ:
5405 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5406 		break;
5407 
5408 	case L2CAP_CONN_PARAM_UPDATE_RSP:
5409 		break;
5410 
5411 	case L2CAP_LE_CONN_RSP:
5412 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5413 		break;
5414 
5415 	case L2CAP_LE_CONN_REQ:
5416 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5417 		break;
5418 
5419 	case L2CAP_LE_CREDITS:
5420 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
5421 		break;
5422 
5423 	case L2CAP_ECRED_CONN_REQ:
5424 		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
5425 		break;
5426 
5427 	case L2CAP_ECRED_CONN_RSP:
5428 		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
5429 		break;
5430 
5431 	case L2CAP_ECRED_RECONF_REQ:
5432 		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
5433 		break;
5434 
5435 	case L2CAP_ECRED_RECONF_RSP:
5436 		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
5437 		break;
5438 
5439 	case L2CAP_DISCONN_REQ:
5440 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5441 		break;
5442 
5443 	case L2CAP_DISCONN_RSP:
5444 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5445 		break;
5446 
5447 	default:
5448 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5449 		err = -EINVAL;
5450 		break;
5451 	}
5452 
5453 	return err;
5454 }
5455 
/* Process one PDU received on the LE signaling channel.
 *
 * Unlike BR/EDR, LE allows exactly one command per signaling PDU, so
 * the header length must match the remaining payload exactly.  Consumes
 * the skb in all cases.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	/* LE signaling CID is only valid on an LE link */
	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Exactly one command per PDU; ident 0 is reserved */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): message text is misleading — this fires for
		 * any handler failure, not only a link-type mismatch.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5496 
5497 static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
5498 {
5499 	struct l2cap_cmd_rej_unk rej;
5500 
5501 	rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5502 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
5503 }
5504 
/* Process a PDU received on the BR/EDR signaling channel.
 *
 * A BR/EDR signaling PDU may carry several commands back to back; each
 * is parsed, bounds-checked against the remaining skb, and dispatched.
 * Malformed commands are answered with a Command Reject rather than
 * aborting the whole PDU.  Consumes the skb in all cases.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	/* Mirror the raw PDU to any raw (sniffer) sockets first */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	/* Iterate over every complete command header in the PDU */
	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* A length overrunning the skb, or the reserved ident 0,
		 * is corrupt: reject it, skip what we can, and continue
		 * with the next command.
		 */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			l2cap_sig_send_rej(conn, cmd->ident);
			skb_pull(skb, len > skb->len ? skb->len : len);
			continue;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			BT_ERR("Wrong link type (%d)", err);
			l2cap_sig_send_rej(conn, cmd->ident);
		}

		skb_pull(skb, len);
	}

	/* Trailing bytes too short for a header are also corruption */
	if (skb->len > 0) {
		BT_DBG("corrupted command");
		l2cap_sig_send_rej(conn, 0);
	}

drop:
	kfree_skb(skb);
}
5552 
/* Verify the CRC16 FCS trailer of a received ERTM/streaming PDU.
 *
 * The CRC covers the L2CAP header (enhanced or extended control,
 * depending on FLAG_EXT_CTRL) plus the payload.  As a side effect the
 * two FCS octets are trimmed from the skb.  Returns 0 on success (or
 * when the channel does not use FCS), -EBADMSG on a mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the trailer first, then read it from just past
		 * the new end of data.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		/* skb->data points past the header; back up hdr_size so
		 * the CRC includes it.
		 */
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5573 
/* Report the local receiver state with the F bit set: an RNR when
 * locally busy, then any pending I-frames, and finally a bare RR if
 * no frame has carried the F bit yet (CONN_SEND_FBIT still set).
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote busy just cleared with frames still unacked: restart
	 * the retransmission timer.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5607 
/* Append new_frag to skb's frag_list and update skb's length totals.
 * *last_frag caches the current list tail so appends stay O(1); it is
 * advanced to point at new_frag.  Ownership of new_frag transfers to
 * skb.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5626 
/* Feed one I-frame payload into SDU reassembly.
 *
 * Depending on control->sar the skb is delivered directly
 * (unsegmented), stored as the start of a new SDU, appended as a
 * continuation, or appended and delivered as the final fragment.
 * Ownership of skb is taken on every path; on error both skb and any
 * partially assembled SDU are freed.  Returns 0 on success, -EINVAL
 * on a SAR sequencing violation, -EMSGSIZE on an oversized SDU, or
 * the recv callback's error.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A whole SDU while a partial one is pending is a
		 * protocol error (err stays -EINVAL).
		 */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* Start-of-SDU while one is already in progress */
		if (chan->sdu)
			break;

		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		/* First fragment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* First fragment must not already cover the announced
		 * SDU length.
		 */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb now owned by chan->sdu; don't free below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* Continuation without a started SDU */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Mid-SDU fragments must not reach the full length */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		/* End fragment without a started SDU */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Total must match the announced SDU length exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Discard the offending frame and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5711 
/* Re-segment queued outbound data after the connection MTU has been
 * refreshed (called from the channel-move paths).  Currently a no-op
 * that always succeeds.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5717 
5718 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5719 {
5720 	u8 event;
5721 
5722 	if (chan->mode != L2CAP_MODE_ERTM)
5723 		return;
5724 
5725 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5726 	l2cap_tx(chan, NULL, NULL, event);
5727 }
5728 
/* Drain in-order frames buffered during SREJ recovery.
 *
 * Buffered frames are delivered to l2cap_reassemble_sdu() in sequence
 * until a gap is hit or local busy is asserted.  When the SREJ queue
 * empties, recovery is over: return to the RECV state and send an
 * acknowledgment.  Returns the first reassembly error, if any.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5762 
/* Handle a received SREJ S-frame: retransmit the single I-frame the
 * peer asks for.  Disconnects when reqseq is invalid (nothing sent at
 * that sequence) or the frame already hit the retry limit.  The
 * CONN_SREJ_ACT bit plus srej_save_reqseq record that a P=1 SREJ was
 * already answered, so the matching F=1 SREJ does not trigger a
 * second retransmission.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would name a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this F=1 SREJ
			 * matches a poll we already answered.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5820 
/* Handle a received REJ S-frame: retransmit all unacked I-frames
 * starting at reqseq.  Disconnects on an invalid reqseq or when the
 * rejected frame already reached the retry limit.  CONN_REJ_ACT
 * suppresses a duplicate bulk retransmission when the matching F=1
 * frame arrives after a poll was already handled.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would name a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5857 
/* Classify a received I-frame's txseq relative to the receive window
 * and, in SREJ_SENT state, the outstanding SREJ list.  The returned
 * L2CAP_TXSEQ_* code drives the RX state machines: expected,
 * duplicate, unexpected (indicates missing frames), or invalid.
 * Invalid frames are ignorable only when the tx window is at most
 * half of the sequence space -- see the "double poll" discussion
 * below.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		/* Head of the SREJ list is the next retransmission we
		 * asked for.
		 */
		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		/* Requested via SREJ, but not the one at the head */
		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5943 
/* RX state machine handler for the default RECV state (ERTM).
 *
 * In-sequence I-frames are delivered up; a sequence gap queues the
 * frame and starts SREJ-based recovery; S-frames (RR/RNR/REJ/SREJ)
 * update the transmit side and may trigger retransmission.  The skb
 * is freed here unless it was queued or handed up (skb_in_use).
 * Returns 0 or a reassembly error.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 *   l2cap_reassemble_sdu
			 *     chan->ops->recv == l2cap_sock_recv_cb
			 *       __sock_queue_rcv_skb
			 * Another thread calls:
			 *   bt_sock_recvmsg
			 *     skb_recv_datagram
			 *     skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			/* Only local_control is safe to touch from here on */
			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; still lets reqseq ack frames */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		/* Remote receiver busy: stop retransmitting until it
		 * recovers.
		 */
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6095 
/* RX state machine handler while SREJ-based recovery is in progress.
 *
 * Incoming I-frames are queued in srej_q rather than delivered
 * directly; frames answering an outstanding SREJ let queued data
 * drain via l2cap_rx_queued_iframes().  S-frames are handled much
 * like in the RECV state but keep the recovery machinery (SREJ tail
 * requests) running.  The skb is freed unless queued (skb_in_use).
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* Head-of-list SREJ answered; retire it */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			/* The gap may now be filled; deliver what we can */
			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll and re-request the newest gap */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6238 
/* Complete a channel move: resume normal reception, refresh the
 * connection MTU from the underlying HCI link, and re-segment any
 * queued outbound data for the new MTU.
 */
static int l2cap_finish_move(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	return l2cap_resegment(chan);
}
6248 
/* RX handler for the WAIT_P state: waiting for the remote to poll
 * (P=1) during a channel move.  Rewinds the transmit state to the
 * receiver's reqseq, completes the move, answers with the F bit set,
 * and re-dispatches any S-frame event through the RECV handler.
 * Non-poll frames (including any I-frame) are protocol errors.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6286 
/* RX handler for the WAIT_F state: waiting for the remote's final
 * (F=1) response during a channel move.  Rewinds the transmit state
 * to the receiver's reqseq, refreshes the connection MTU, re-segments
 * queued data, and then processes the frame through the RECV handler.
 * Frames without F=1 are protocol errors.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;
	chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6320 
6321 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6322 {
6323 	/* Make sure reqseq is for a packet that has been sent but not acked */
6324 	u16 unacked;
6325 
6326 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6327 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6328 }
6329 
6330 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6331 		    struct sk_buff *skb, u8 event)
6332 {
6333 	int err = 0;
6334 
6335 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6336 	       control, skb, event, chan->rx_state);
6337 
6338 	if (__valid_reqseq(chan, control->reqseq)) {
6339 		switch (chan->rx_state) {
6340 		case L2CAP_RX_STATE_RECV:
6341 			err = l2cap_rx_state_recv(chan, control, skb, event);
6342 			break;
6343 		case L2CAP_RX_STATE_SREJ_SENT:
6344 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6345 						       event);
6346 			break;
6347 		case L2CAP_RX_STATE_WAIT_P:
6348 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6349 			break;
6350 		case L2CAP_RX_STATE_WAIT_F:
6351 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6352 			break;
6353 		default:
6354 			/* shut it down */
6355 			break;
6356 		}
6357 	} else {
6358 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6359 		       control->reqseq, chan->next_tx_seq,
6360 		       chan->expected_ack_seq);
6361 		l2cap_send_disconn_req(chan, ECONNRESET);
6362 	}
6363 
6364 	return err;
6365 }
6366 
/* Receive an I-frame in streaming mode.
 *
 * Streaming mode has no retransmission: only the exactly-expected
 * txseq is delivered; anything else discards the partially assembled
 * SDU and the frame itself.  In both cases the expected sequence is
 * resynchronized to follow the received txseq.  Always returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 *   l2cap_reassemble_sdu
	 *     chan->ops->recv == l2cap_sock_recv_cb
	 *       __sock_queue_rcv_skb
	 * Another thread calls:
	 *   bt_sock_recvmsg
	 *     skb_recv_datagram
	 *     skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out-of-sequence: drop both the partial SDU and frame */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
6418 
/* Entry point for ERTM/streaming data on a connected channel.
 *
 * Unpacks the control field, checks FCS, validates the payload length
 * against MPS and the F/P bit legality for the current TX state, then
 * routes I-frames into l2cap_rx() (ERTM) or l2cap_stream_rx()
 * (streaming) and S-frames into l2cap_rx() via the super-to-event
 * table.  Corrupted frames are silently dropped; protocol violations
 * trigger a disconnect.  Always returns 0; the skb is consumed.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Exclude the SDU-length prefix and the FCS trailer from the
	 * payload length compared against MPS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps control->super (RR/REJ/RNR/SREJ) to RX events */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6511 
6512 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6513 {
6514 	struct l2cap_conn *conn = chan->conn;
6515 	struct l2cap_le_credits pkt;
6516 	u16 return_credits;
6517 
6518 	return_credits = (chan->imtu / chan->mps) + 1;
6519 
6520 	if (chan->rx_credits >= return_credits)
6521 		return;
6522 
6523 	return_credits -= chan->rx_credits;
6524 
6525 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6526 
6527 	chan->rx_credits += return_credits;
6528 
6529 	pkt.cid     = cpu_to_le16(chan->scid);
6530 	pkt.credits = cpu_to_le16(return_credits);
6531 
6532 	chan->ident = l2cap_get_ident(conn);
6533 
6534 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6535 }
6536 
6537 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
6538 {
6539 	int err;
6540 
6541 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
6542 
6543 	/* Wait recv to confirm reception before updating the credits */
6544 	err = chan->ops->recv(chan, skb);
6545 
6546 	/* Update credits whenever an SDU is received */
6547 	l2cap_chan_le_send_credits(chan);
6548 
6549 	return err;
6550 }
6551 
/* Receive one LE / enhanced credit based flow control PDU.
 *
 * Each PDU consumes one of our rx credits; credits are replenished
 * when they run out or when the remote appears to use a smaller MPS
 * than negotiated.  The first PDU of an SDU carries a 2-byte SDU
 * length; subsequent PDUs are appended until sdu_len is reached, then
 * the SDU is delivered via l2cap_ecred_recv().
 *
 * skb ownership: the no-credit and oversized-PDU early returns leave
 * the skb with the caller (negative errno -> caller drops it); every
 * other path consumes or frees the skb internally and returns 0, even
 * after an internal error, to prevent a double free in the caller.
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU: starts with the SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unsegmented SDU: deliver immediately */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6655 
/* Dispatch an inbound frame to the channel identified by @cid according
 * to the channel's operating mode.  Consumes @skb on every path: it is
 * either handed to the channel's recv path or freed here.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	/* On success the channel is returned locked and with a reference
	 * held; both are released via the unlock/put at the end.
	 */
	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Enhanced-credit path handles SDU reassembly itself */
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* recv() returning 0 means it took ownership of the skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() consumes the skb unconditionally */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
6721 
/* Handle a frame received on the connectionless channel
 * (L2CAP_CID_CONN_LESS).  Only ACL links are supported.  Consumes @skb.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	/* The lookup returns the channel with a reference held; it is
	 * dropped on every exit path below.
	 */
	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* recv() returning 0 means it took ownership of the skb */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6758 
/* Entry point for a complete (reassembled) L2CAP frame.  Validates the
 * basic header and demultiplexes by CID.  Ownership of @skb passes to
 * the selected handler, the pending_rx queue, or kfree_skb() here.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	/* Frames arriving before the link reaches BT_CONNECTED are
	 * queued and replayed later by process_pending_rx().
	 */
	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	/* skb_pull() only advances the data offset, so lh still points
	 * at the valid header bytes when read below.
	 */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the payload exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		/* Fixed or dynamically allocated data channels */
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6813 
6814 static void process_pending_rx(struct work_struct *work)
6815 {
6816 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6817 					       pending_rx_work);
6818 	struct sk_buff *skb;
6819 
6820 	BT_DBG("");
6821 
6822 	while ((skb = skb_dequeue(&conn->pending_rx)))
6823 		l2cap_recv_frame(conn, skb);
6824 }
6825 
/* Create (or return the already existing) L2CAP connection object for
 * @hcon.  Returns NULL if the HCI channel or the connection structure
 * cannot be allocated.  The new object holds a reference on @hcon.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up by an earlier caller */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* Prefer the controller's LE MTU on LE links; fall back to the
	 * ACL MTU when it is unset (and for all other link types).
	 */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		fallthrough;
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise the BR/EDR SMP fixed channel only when it can
	 * actually be used (LE enabled and SC supported or forced).
	 */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
6888 
6889 static bool is_valid_psm(u16 psm, u8 dst_type)
6890 {
6891 	if (!psm)
6892 		return false;
6893 
6894 	if (bdaddr_type_is_le(dst_type))
6895 		return (psm <= 0x00ff);
6896 
6897 	/* PSM must be odd and lsb of upper byte must be 0 */
6898 	return ((psm & 0x0101) == 0x0001);
6899 }
6900 
/* Context passed to l2cap_chan_list() via l2cap_chan_by_pid() to count
 * deferred channels owned by the same peer PID/PSM as @chan.
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel initiating the lookup */
	struct pid *pid;		/* peer PID to match against */
	int count;			/* number of matching channels */
};
6906 
6907 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
6908 {
6909 	struct l2cap_chan_data *d = data;
6910 	struct pid *pid;
6911 
6912 	if (chan == d->chan)
6913 		return;
6914 
6915 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
6916 		return;
6917 
6918 	pid = chan->ops->get_peer_pid(chan);
6919 
6920 	/* Only count deferred channels with the same PID/PSM */
6921 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
6922 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
6923 		return;
6924 
6925 	d->count++;
6926 }
6927 
/* Initiate an outgoing L2CAP channel connection.
 *
 * Validates the PSM/CID combination and channel mode, creates (or
 * reuses) the underlying HCI connection to @dst, attaches @chan to the
 * resulting l2cap_conn and starts the connect state machine.
 *
 * Returns 0 on success (including the "already connecting" case) or a
 * negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	/* hci_get_route() returns the device with a reference held,
	 * dropped via hci_dev_put() on exit.
	 */
	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes disabled by module parameters */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* Connect directly while advertising; otherwise use the
		 * scan-based connect helper to establish the link.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level,
						   HCI_LE_CONN_TIMEOUT,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		/* Start at 1 to account for @chan itself, which
		 * l2cap_chan_by_pid() skips.
		 */
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Refuse to reuse a destination CID already taken on this link */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
7113 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7114 
7115 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
7116 {
7117 	struct l2cap_conn *conn = chan->conn;
7118 	struct {
7119 		struct l2cap_ecred_reconf_req req;
7120 		__le16 scid;
7121 	} pdu;
7122 
7123 	pdu.req.mtu = cpu_to_le16(chan->imtu);
7124 	pdu.req.mps = cpu_to_le16(chan->mps);
7125 	pdu.scid    = cpu_to_le16(chan->scid);
7126 
7127 	chan->ident = l2cap_get_ident(conn);
7128 
7129 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
7130 		       sizeof(pdu), &pdu);
7131 }
7132 
7133 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
7134 {
7135 	if (chan->imtu > mtu)
7136 		return -EINVAL;
7137 
7138 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
7139 
7140 	chan->imtu = mtu;
7141 
7142 	l2cap_ecred_reconfigure(chan);
7143 
7144 	return 0;
7145 }
7146 
7147 /* ---- L2CAP interface with lower layer (HCI) ---- */
7148 
7149 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7150 {
7151 	int exact = 0, lm1 = 0, lm2 = 0;
7152 	struct l2cap_chan *c;
7153 
7154 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7155 
7156 	/* Find listening sockets and check their link_mode */
7157 	read_lock(&chan_list_lock);
7158 	list_for_each_entry(c, &chan_list, global_l) {
7159 		if (c->state != BT_LISTEN)
7160 			continue;
7161 
7162 		if (!bacmp(&c->src, &hdev->bdaddr)) {
7163 			lm1 |= HCI_LM_ACCEPT;
7164 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7165 				lm1 |= HCI_LM_MASTER;
7166 			exact++;
7167 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
7168 			lm2 |= HCI_LM_ACCEPT;
7169 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7170 				lm2 |= HCI_LM_MASTER;
7171 		}
7172 	}
7173 	read_unlock(&chan_list_lock);
7174 
7175 	return exact ? lm1 : lm2;
7176 }
7177 
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * A matching channel is returned with its reference count raised
 * (caller must l2cap_chan_put()).  Note that @c itself is NOT put
 * here; the caller owns that reference.
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Match either the exact local address or a wildcard */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* If the channel's refcount already dropped to zero it
		 * is being freed: the walk stops and NULL is returned.
		 */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7213 
/* HCI callback: an ACL or LE link setup has completed with @status.
 * On failure the L2CAP connection is torn down; on success the
 * l2cap_conn is created and every matching fixed-channel listener gets
 * the chance to attach a new channel to the link.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping the reference the previous
		 * lookup took on pchan.
		 */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7274 
7275 int l2cap_disconn_ind(struct hci_conn *hcon)
7276 {
7277 	struct l2cap_conn *conn = hcon->l2cap_data;
7278 
7279 	BT_DBG("hcon %p", hcon);
7280 
7281 	if (!conn)
7282 		return HCI_ERROR_REMOTE_USER_TERM;
7283 	return conn->disc_reason;
7284 }
7285 
7286 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7287 {
7288 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7289 		return;
7290 
7291 	BT_DBG("hcon %p reason %d", hcon, reason);
7292 
7293 	l2cap_conn_del(hcon, bt_to_errno(reason));
7294 }
7295 
7296 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7297 {
7298 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7299 		return;
7300 
7301 	if (encrypt == 0x00) {
7302 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
7303 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7304 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
7305 			   chan->sec_level == BT_SECURITY_FIPS)
7306 			l2cap_chan_close(chan, ECONNREFUSED);
7307 	} else {
7308 		if (chan->sec_level == BT_SECURITY_MEDIUM)
7309 			__clear_chan_timer(chan);
7310 	}
7311 }
7312 
/* HCI callback: the authentication/encryption state of @hcon changed.
 * Walk every channel on the connection under chan_lock and advance its
 * state machine: resume established channels, start pending outgoing
 * connections, or answer a deferred incoming connect request.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* Security succeeded: adopt the link's security level */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Leave channels with an outstanding connect request alone */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Outgoing connect: proceed only when security succeeded
		 * and the encryption key is long enough, otherwise arm
		 * the disconnect timer.
		 */
		if (chan->state == BT_CONNECT) {
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Accepted: kick off the configuration phase if
			 * no config request has been sent yet.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
7399 
7400 /* Append fragment into frame respecting the maximum len of rx_skb */
7401 static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
7402 			   u16 len)
7403 {
7404 	if (!conn->rx_skb) {
7405 		/* Allocate skb for the complete frame (with header) */
7406 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7407 		if (!conn->rx_skb)
7408 			return -ENOMEM;
7409 		/* Init rx_len */
7410 		conn->rx_len = len;
7411 	}
7412 
7413 	/* Copy as much as the rx_skb can hold */
7414 	len = min_t(u16, len, skb->len);
7415 	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
7416 	skb_pull(skb, len);
7417 	conn->rx_len -= len;
7418 
7419 	return len;
7420 }
7421 
/* Complete the 2-byte L2CAP length field of a partially received frame
 * and, once it is known, make sure conn->rx_skb can hold the full
 * frame.  The start fragment may have allocated rx_skb with conn->mtu
 * as a guess (see l2cap_recv_acldata), so the skb is reallocated with
 * the exact expected size when that guess was too small.  Returns a
 * negative errno on allocation failure.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
7456 
7457 static void l2cap_recv_reset(struct l2cap_conn *conn)
7458 {
7459 	kfree_skb(conn->rx_skb);
7460 	conn->rx_skb = NULL;
7461 	conn->rx_len = 0;
7462 }
7463 
/* HCI entry point for inbound ACL data.  Reassembles L2CAP frames that
 * span multiple ACL packets (tracked via conn->rx_skb/rx_len) and hands
 * complete frames to l2cap_recv_frame().  Consumes @skb on every path.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start frame while reassembly is in progress means
		 * the previous frame was truncated - discard it.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
}
7570 
/* HCI callbacks registered in l2cap_init() so the L2CAP core is
 * notified of link connect, disconnect and security events.
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
7577 
7578 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7579 {
7580 	struct l2cap_chan *c;
7581 
7582 	read_lock(&chan_list_lock);
7583 
7584 	list_for_each_entry(c, &chan_list, global_l) {
7585 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7586 			   &c->src, c->src_type, &c->dst, c->dst_type,
7587 			   c->state, __le16_to_cpu(c->psm),
7588 			   c->scid, c->dcid, c->imtu, c->omtu,
7589 			   c->sec_level, c->mode);
7590 	}
7591 
7592 	read_unlock(&chan_list_lock);
7593 
7594 	return 0;
7595 }
7596 
7597 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
7598 
7599 static struct dentry *l2cap_debugfs;
7600 
7601 int __init l2cap_init(void)
7602 {
7603 	int err;
7604 
7605 	err = l2cap_init_sockets();
7606 	if (err < 0)
7607 		return err;
7608 
7609 	hci_register_cb(&l2cap_cb);
7610 
7611 	if (IS_ERR_OR_NULL(bt_debugfs))
7612 		return 0;
7613 
7614 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7615 					    NULL, &l2cap_debugfs_fops);
7616 
7617 	return 0;
7618 }
7619 
/* Module exit: tear down in reverse order of l2cap_init() */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7626 
7627 module_param(disable_ertm, bool, 0644);
7628 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
7629 
7630 module_param(enable_ecred, bool, 0644);
7631 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
7632